code
stringlengths
3
1.01M
repo_name
stringlengths
5
116
path
stringlengths
3
311
language
stringclasses
30 values
license
stringclasses
15 values
size
int64
3
1.01M
/*
 * Created on 05-Sep-2005
 * Created by Paul Gardner
 * Copyright (C) 2005, 2006 Aelitis, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * AELITIS, SAS au capital de 46,603.30 euros
 * 8 Allee Lenotre, La Grille Royale, 78600 Le Mesnil le Roi, France.
 *
 */

package org.gudy.azureus2.plugins.ui;

/**
 * Listener notified when a UI instance is attached to or detached from the
 * plugin framework. Register via {@link UIManager#addUIListener(UIManagerListener)}.
 *
 * @see UIManager#addUIListener(UIManagerListener)
 * @see org.gudy.azureus2.ui.swt.plugins.UISWTInstance
 */
public interface UIManagerListener {

	/**
	 * Triggered when a UI is attached.
	 *
	 * @param instance Check with <code>instanceof</code> to see what UI is being
	 *                 attached
	 */
	public void UIAttached( UIInstance instance );

	/**
	 * Triggered when a UI is detached.
	 *
	 * @param instance Check with <code>instanceof</code> to see what UI is being
	 *                 detached
	 */
	public void UIDetached( UIInstance instance );
}
AcademicTorrents/AcademicTorrents-Downloader
vuze/org/gudy/azureus2/plugins/ui/UIManagerListener.java
Java
gpl-2.0
1,583
/* * composite.c - infrastructure for Composite USB Gadgets * * Copyright (C) 2006-2008 David Brownell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ /* #define VERBOSE_DEBUG */ #include <linux/kallsyms.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/device.h> #include <linux/utsname.h> #include <linux/usb/composite.h> #include <linux/usb/gadget_cust.h> #include <asm/unaligned.h> #ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE #include "multi_config.h" #endif /* * The code in this file is utility code, used to build a gadget driver * from one or more "function" drivers, one or more "configuration" * objects, and a "usb_composite_driver" by gluing them together along * with the relevant device-wide data. */ static struct usb_gadget_strings **get_containers_gs( struct usb_gadget_string_container *uc) { return (struct usb_gadget_strings **)uc->stash; } #define USB_PRE_CONFIG_CURRENT 100 #define USB_OTG_PRE_CONFIG_CURRENT 2 /** * next_ep_desc() - advance to the next EP descriptor * @t: currect pointer within descriptor array * * Return: next EP descriptor or NULL * * Iterate over @t until either EP descriptor found or * NULL (that indicates end of list) encountered */ static struct usb_descriptor_header** next_ep_desc(struct usb_descriptor_header **t) { for (; *t; t++) { if ((*t)->bDescriptorType == USB_DT_ENDPOINT) return t; } return NULL; } /* * for_each_ep_desc()- iterate over endpoint descriptors in the * descriptors list * @start: pointer within descriptor array. 
* @ep_desc: endpoint descriptor to use as the loop cursor */ #define for_each_ep_desc(start, ep_desc) \ for (ep_desc = next_ep_desc(start); \ ep_desc; ep_desc = next_ep_desc(ep_desc+1)) /** * config_ep_by_speed() - configures the given endpoint * according to gadget speed. * @g: pointer to the gadget * @f: usb function * @_ep: the endpoint to configure * * Return: error code, 0 on success * * This function chooses the right descriptors for a given * endpoint according to gadget speed and saves it in the * endpoint desc field. If the endpoint already has a descriptor * assigned to it - overwrites it with currently corresponding * descriptor. The endpoint maxpacket field is updated according * to the chosen descriptor. * Note: the supplied function should hold all the descriptors * for supported speeds */ int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f, struct usb_ep *_ep) { struct usb_composite_dev *cdev = get_gadget_data(g); struct usb_endpoint_descriptor *chosen_desc = NULL; struct usb_descriptor_header **speed_desc = NULL; struct usb_ss_ep_comp_descriptor *comp_desc = NULL; int want_comp_desc = 0; struct usb_descriptor_header **d_spd; /* cursor for speed desc */ if (!g || !f || !_ep) return -EIO; /* select desired speed */ switch (g->speed) { case USB_SPEED_SUPER: if (gadget_is_superspeed(g)) { speed_desc = f->ss_descriptors; want_comp_desc = 1; break; } /* else: Fall trough */ case USB_SPEED_HIGH: if (gadget_is_dualspeed(g)) { speed_desc = f->hs_descriptors; break; } /* else: fall through */ default: speed_desc = f->fs_descriptors; } /* find descriptors */ for_each_ep_desc(speed_desc, d_spd) { chosen_desc = (struct usb_endpoint_descriptor *)*d_spd; if (chosen_desc->bEndpointAddress == _ep->address) goto ep_found; } return -EIO; ep_found: /* commit results */ _ep->maxpacket = usb_endpoint_maxp(chosen_desc); _ep->desc = chosen_desc; _ep->comp_desc = NULL; _ep->maxburst = 0; _ep->mult = 0; if (!want_comp_desc) return 0; /* * Companion descriptor 
should follow EP descriptor * USB 3.0 spec, #9.6.7 */ comp_desc = (struct usb_ss_ep_comp_descriptor *)*(++d_spd); if (!comp_desc || (comp_desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP)) return -EIO; _ep->comp_desc = comp_desc; if (g->speed == USB_SPEED_SUPER) { switch (usb_endpoint_type(_ep->desc)) { case USB_ENDPOINT_XFER_ISOC: /* mult: bits 1:0 of bmAttributes */ _ep->mult = comp_desc->bmAttributes & 0x3; case USB_ENDPOINT_XFER_BULK: case USB_ENDPOINT_XFER_INT: _ep->maxburst = comp_desc->bMaxBurst + 1; break; default: if (comp_desc->bMaxBurst != 0) ERROR(cdev, "ep0 bMaxBurst must be 0\n"); _ep->maxburst = 1; break; } } return 0; } EXPORT_SYMBOL_GPL(config_ep_by_speed); /** * usb_add_function() - add a function to a configuration * @config: the configuration * @function: the function being added * Context: single threaded during gadget setup * * After initialization, each configuration must have one or more * functions added to it. Adding a function involves calling its @bind() * method to allocate resources such as interface and string identifiers * and endpoints. * * This function returns the value of the function's bind(), which is * zero for success else a negative errno value. */ int usb_add_function(struct usb_configuration *config, struct usb_function *function) { int value = -EINVAL; DBG(config->cdev, "adding '%s'/%p to config '%s'/%p\n", function->name, function, config->label, config); if (!function->set_alt || !function->disable) goto done; function->config = config; list_add_tail(&function->list, &config->functions); /* REVISIT *require* function->bind? */ if (function->bind) { value = function->bind(config, function); if (value < 0) { list_del(&function->list); function->config = NULL; } } else value = 0; /* We allow configurations that don't work at both speeds. * If we run into a lowspeed Linux system, treat it the same * as full speed ... it's the function drivers that will need * to avoid bulk and ISO transfers. 
*/ if (!config->fullspeed && function->fs_descriptors) config->fullspeed = true; if (!config->highspeed && function->hs_descriptors) config->highspeed = true; if (!config->superspeed && function->ss_descriptors) config->superspeed = true; done: if (value) DBG(config->cdev, "adding '%s'/%p --> %d\n", function->name, function, value); return value; } EXPORT_SYMBOL_GPL(usb_add_function); void usb_remove_function(struct usb_configuration *c, struct usb_function *f) { if (f->disable) f->disable(f); bitmap_zero(f->endpoints, 32); list_del(&f->list); if (f->unbind) f->unbind(c, f); } EXPORT_SYMBOL_GPL(usb_remove_function); /** * usb_function_deactivate - prevent function and gadget enumeration * @function: the function that isn't yet ready to respond * * Blocks response of the gadget driver to host enumeration by * preventing the data line pullup from being activated. This is * normally called during @bind() processing to change from the * initial "ready to respond" state, or when a required resource * becomes available. * * For example, drivers that serve as a passthrough to a userspace * daemon can block enumeration unless that daemon (such as an OBEX, * MTP, or print server) is ready to handle host requests. * * Not all systems support software control of their USB peripheral * data pullups. * * Returns zero on success, else negative errno. */ int usb_function_deactivate(struct usb_function *function) { struct usb_composite_dev *cdev = function->config->cdev; unsigned long flags; int status = 0; spin_lock_irqsave(&cdev->lock, flags); if (cdev->deactivations == 0) status = usb_gadget_disconnect(cdev->gadget); if (status == 0) cdev->deactivations++; spin_unlock_irqrestore(&cdev->lock, flags); return status; } EXPORT_SYMBOL_GPL(usb_function_deactivate); /** * usb_function_activate - allow function and gadget enumeration * @function: function on which usb_function_activate() was called * * Reverses effect of usb_function_deactivate(). 
If no more functions * are delaying their activation, the gadget driver will respond to * host enumeration procedures. * * Returns zero on success, else negative errno. */ int usb_function_activate(struct usb_function *function) { struct usb_composite_dev *cdev = function->config->cdev; unsigned long flags; int status = 0; spin_lock_irqsave(&cdev->lock, flags); if (WARN_ON(cdev->deactivations == 0)) status = -EINVAL; else { cdev->deactivations--; if (cdev->deactivations == 0) status = usb_gadget_connect(cdev->gadget); } spin_unlock_irqrestore(&cdev->lock, flags); return status; } EXPORT_SYMBOL_GPL(usb_function_activate); /** * usb_interface_id() - allocate an unused interface ID * @config: configuration associated with the interface * @function: function handling the interface * Context: single threaded during gadget setup * * usb_interface_id() is called from usb_function.bind() callbacks to * allocate new interface IDs. The function driver will then store that * ID in interface, association, CDC union, and other descriptors. It * will also handle any control requests targeted at that interface, * particularly changing its altsetting via set_alt(). There may * also be class-specific or vendor-specific requests to handle. * * All interface identifier should be allocated using this routine, to * ensure that for example different functions don't wrongly assign * different meanings to the same identifier. Note that since interface * identifiers are configuration-specific, functions used in more than * one configuration (or more than once in a given configuration) need * multiple versions of the relevant descriptors. * * Returns the interface ID which was allocated; or -ENODEV if no * more interface IDs can be allocated. 
*/ int usb_interface_id(struct usb_configuration *config, struct usb_function *function) { unsigned id = config->next_interface_id; if (id < MAX_CONFIG_INTERFACES) { config->interface[id] = function; config->next_interface_id = id + 1; return id; } return -ENODEV; } EXPORT_SYMBOL_GPL(usb_interface_id); static u8 encode_bMaxPower(enum usb_device_speed speed, struct usb_configuration *c) { unsigned val; if (c->MaxPower) val = c->MaxPower; else val = CONFIG_USB_GADGET_VBUS_DRAW; if (!val) return 0; switch (speed) { case USB_SPEED_SUPER: return DIV_ROUND_UP(val, 8); default: return DIV_ROUND_UP(val, 2); }; } static int config_buf(struct usb_configuration *config, enum usb_device_speed speed, void *buf, u8 type) { struct usb_config_descriptor *c = buf; void *next = buf + USB_DT_CONFIG_SIZE; int len; struct usb_function *f; int status; len = USB_COMP_EP0_BUFSIZ - USB_DT_CONFIG_SIZE; /* write the config descriptor */ c = buf; c->bLength = USB_DT_CONFIG_SIZE; c->bDescriptorType = type; /* wTotalLength is written later */ c->bNumInterfaces = config->next_interface_id; #ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE c->bConfigurationValue = get_config_number() + 1; #else c->bConfigurationValue = config->bConfigurationValue; #endif c->iConfiguration = config->iConfiguration; c->bmAttributes = USB_CONFIG_ATT_ONE | config->bmAttributes; c->bMaxPower = encode_bMaxPower(speed, config); /* There may be e.g. 
OTG descriptors */ if (config->descriptors) { status = usb_descriptor_fillbuf(next, len, config->descriptors); if (status < 0) return status; len -= status; next += status; } /* add each function's descriptors */ list_for_each_entry(f, &config->functions, list) { struct usb_descriptor_header **descriptors; #ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE if (!is_available_function(f->name)) { USB_DBG("skip f->%s\n", f->name); continue; } else { USB_DBG("f->%s\n", f->name); } #endif switch (speed) { case USB_SPEED_SUPER: descriptors = f->ss_descriptors; break; case USB_SPEED_HIGH: descriptors = f->hs_descriptors; break; default: descriptors = f->fs_descriptors; } if (!descriptors) continue; status = usb_descriptor_fillbuf(next, len, (const struct usb_descriptor_header **) descriptors); #ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE if (change_conf(f, next, len, config, speed) < 0) { printk("failed to change configuration\n"); return -EINVAL; } #endif if (status < 0) return status; len -= status; next += status; } #ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE set_interface_count(config, c); #endif len = next - buf; c->wTotalLength = cpu_to_le16(len); return len; } static int config_desc(struct usb_composite_dev *cdev, unsigned w_value) { struct usb_gadget *gadget = cdev->gadget; struct usb_configuration *c; u8 type = w_value >> 8; enum usb_device_speed speed = USB_SPEED_UNKNOWN; if (gadget->speed == USB_SPEED_SUPER) speed = gadget->speed; else if (gadget_is_dualspeed(gadget)) { int hs = 0; if (gadget->speed == USB_SPEED_HIGH) hs = 1; if (type == USB_DT_OTHER_SPEED_CONFIG) hs = !hs; if (hs) speed = USB_SPEED_HIGH; } /* This is a lookup by config *INDEX* */ w_value &= 0xff; #ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE w_value = set_config_number(w_value); #endif list_for_each_entry(c, &cdev->configs, list) { /* ignore configs that won't work at this speed */ switch (speed) { case USB_SPEED_SUPER: if (!c->superspeed) continue; break; case USB_SPEED_HIGH: if (!c->highspeed) 
continue; break; default: if (!c->fullspeed) continue; } if (w_value == 0) return config_buf(c, speed, cdev->req->buf, type); w_value--; } return -EINVAL; } static int count_configs(struct usb_composite_dev *cdev, unsigned type) { struct usb_gadget *gadget = cdev->gadget; struct usb_configuration *c; unsigned count = 0; int hs = 0; int ss = 0; if (gadget_is_dualspeed(gadget)) { if (gadget->speed == USB_SPEED_HIGH) hs = 1; if (gadget->speed == USB_SPEED_SUPER) ss = 1; if (type == USB_DT_DEVICE_QUALIFIER) hs = !hs; } list_for_each_entry(c, &cdev->configs, list) { /* ignore configs that won't work at this speed */ if (ss) { if (!c->superspeed) continue; } else if (hs) { if (!c->highspeed) continue; } else { if (!c->fullspeed) continue; } count++; #ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE count = count_multi_config(c, count); #endif } return count; } /** * bos_desc() - prepares the BOS descriptor. * @cdev: pointer to usb_composite device to generate the bos * descriptor for * * This function generates the BOS (Binary Device Object) * descriptor and its device capabilities descriptors. The BOS * descriptor should be supported by a SuperSpeed device. */ static int bos_desc(struct usb_composite_dev *cdev) { struct usb_ext_cap_descriptor *usb_ext; struct usb_ss_cap_descriptor *ss_cap; struct usb_dcd_config_params dcd_config_params; struct usb_bos_descriptor *bos = cdev->req->buf; bos->bLength = USB_DT_BOS_SIZE; bos->bDescriptorType = USB_DT_BOS; bos->wTotalLength = cpu_to_le16(USB_DT_BOS_SIZE); bos->bNumDeviceCaps = 0; /* * A SuperSpeed device shall include the USB2.0 extension descriptor * and shall support LPM when operating in USB2.0 HS mode. 
*/ usb_ext = cdev->req->buf + le16_to_cpu(bos->wTotalLength); bos->bNumDeviceCaps++; le16_add_cpu(&bos->wTotalLength, USB_DT_USB_EXT_CAP_SIZE); usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE; usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY; usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT; usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT); /* * The Superspeed USB Capability descriptor shall be implemented by all * SuperSpeed devices. */ ss_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength); bos->bNumDeviceCaps++; le16_add_cpu(&bos->wTotalLength, USB_DT_USB_SS_CAP_SIZE); ss_cap->bLength = USB_DT_USB_SS_CAP_SIZE; ss_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY; ss_cap->bDevCapabilityType = USB_SS_CAP_TYPE; ss_cap->bmAttributes = 0; /* LTM is not supported yet */ ss_cap->wSpeedSupported = cpu_to_le16(USB_LOW_SPEED_OPERATION | USB_FULL_SPEED_OPERATION | USB_HIGH_SPEED_OPERATION | USB_5GBPS_OPERATION); ss_cap->bFunctionalitySupport = USB_LOW_SPEED_OPERATION; /* Get Controller configuration */ if (cdev->gadget->ops->get_config_params) cdev->gadget->ops->get_config_params(&dcd_config_params); else { dcd_config_params.bU1devExitLat = USB_DEFAULT_U1_DEV_EXIT_LAT; dcd_config_params.bU2DevExitLat = cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT); } ss_cap->bU1devExitLat = dcd_config_params.bU1devExitLat; ss_cap->bU2DevExitLat = dcd_config_params.bU2DevExitLat; return le16_to_cpu(bos->wTotalLength); } static void device_qual(struct usb_composite_dev *cdev) { struct usb_qualifier_descriptor *qual = cdev->req->buf; qual->bLength = sizeof(*qual); qual->bDescriptorType = USB_DT_DEVICE_QUALIFIER; /* POLICY: same bcdUSB and device type info at both speeds */ qual->bcdUSB = cdev->desc.bcdUSB; qual->bDeviceClass = cdev->desc.bDeviceClass; qual->bDeviceSubClass = cdev->desc.bDeviceSubClass; qual->bDeviceProtocol = cdev->desc.bDeviceProtocol; /* ASSUME same EP0 fifo size at both speeds */ qual->bMaxPacketSize0 = cdev->gadget->ep0->maxpacket; qual->bNumConfigurations = 
count_configs(cdev, USB_DT_DEVICE_QUALIFIER); qual->bRESERVED = 0; } /*-------------------------------------------------------------------------*/ static void reset_config(struct usb_composite_dev *cdev) { struct usb_function *f; DBG(cdev, "reset config\n"); list_for_each_entry(f, &cdev->config->functions, list) { if (f->disable) f->disable(f); bitmap_zero(f->endpoints, 32); } cdev->config = NULL; } static int set_config(struct usb_composite_dev *cdev, const struct usb_ctrlrequest *ctrl, unsigned number) { struct usb_gadget *gadget = cdev->gadget; struct usb_configuration *c = NULL; int result = -EINVAL; unsigned power = gadget_is_otg(gadget) ? 8 : 100; int tmp; if (number) { list_for_each_entry(c, &cdev->configs, list) { #ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE if (c->bConfigurationValue == number || check_config(number)) { #else if (c->bConfigurationValue == number) { #endif /* * We disable the FDs of the previous * configuration only if the new configuration * is a valid one */ if (cdev->config) reset_config(cdev); result = 0; break; } } if (result < 0) goto done; } else { /* Zero configuration value - need to reset the config */ if (cdev->config) reset_config(cdev); result = 0; } INFO(cdev, "%s config #%d: %s\n", usb_speed_string(gadget->speed), number, c ? c->label : "unconfigured"); if (!c) goto done; cdev->config = c; /* Initialize all interfaces by setting them to altsetting zero. */ for (tmp = 0; tmp < MAX_CONFIG_INTERFACES; tmp++) { struct usb_function *f = c->interface[tmp]; struct usb_descriptor_header **descriptors; if (!f) break; /* * Record which endpoints are used by the function. This is used * to dispatch control requests targeted at that endpoint to the * function's setup callback instead of the current * configuration's setup callback. 
*/ switch (gadget->speed) { case USB_SPEED_SUPER: descriptors = f->ss_descriptors; break; case USB_SPEED_HIGH: descriptors = f->hs_descriptors; break; default: descriptors = f->fs_descriptors; } for (; *descriptors; ++descriptors) { struct usb_endpoint_descriptor *ep; int addr; if ((*descriptors)->bDescriptorType != USB_DT_ENDPOINT) continue; ep = (struct usb_endpoint_descriptor *)*descriptors; addr = ((ep->bEndpointAddress & 0x80) >> 3) | (ep->bEndpointAddress & 0x0f); set_bit(addr, f->endpoints); } result = f->set_alt(f, tmp, 0); if (result < 0) { DBG(cdev, "interface %d (%s/%p) alt 0 --> %d\n", tmp, f->name, f, result); reset_config(cdev); goto done; } NPRINTK("%s interface is configured\n", f->name); if (result == USB_GADGET_DELAYED_STATUS) { DBG(cdev, "%s: interface %d (%s) requested delayed status\n", __func__, tmp, f->name); cdev->delayed_status++; DBG(cdev, "delayed_status count %d\n", cdev->delayed_status); } } /* when we return, be sure our power usage is valid */ power = c->MaxPower ? c->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW; done: usb_gadget_vbus_draw(gadget, power); if (result >= 0 && cdev->delayed_status) result = USB_GADGET_DELAYED_STATUS; return result; } int usb_add_config_only(struct usb_composite_dev *cdev, struct usb_configuration *config) { struct usb_configuration *c; if (!config->bConfigurationValue) return -EINVAL; /* Prevent duplicate configuration identifiers */ list_for_each_entry(c, &cdev->configs, list) { if (c->bConfigurationValue == config->bConfigurationValue) return -EBUSY; } config->cdev = cdev; list_add_tail(&config->list, &cdev->configs); INIT_LIST_HEAD(&config->functions); config->next_interface_id = 0; memset(config->interface, 0, sizeof(config->interface)); return 0; } EXPORT_SYMBOL_GPL(usb_add_config_only); /** * usb_add_config() - add a configuration to a device. 
* @cdev: wraps the USB gadget * @config: the configuration, with bConfigurationValue assigned * @bind: the configuration's bind function * Context: single threaded during gadget setup * * One of the main tasks of a composite @bind() routine is to * add each of the configurations it supports, using this routine. * * This function returns the value of the configuration's @bind(), which * is zero for success else a negative errno value. Binding configurations * assigns global resources including string IDs, and per-configuration * resources such as interface IDs and endpoints. */ int usb_add_config(struct usb_composite_dev *cdev, struct usb_configuration *config, int (*bind)(struct usb_configuration *)) { int status = -EINVAL; if (!bind) goto done; DBG(cdev, "adding config #%u '%s'/%p\n", config->bConfigurationValue, config->label, config); status = usb_add_config_only(cdev, config); if (status) goto done; status = bind(config); if (status < 0) { while (!list_empty(&config->functions)) { struct usb_function *f; f = list_first_entry(&config->functions, struct usb_function, list); list_del(&f->list); if (f->unbind) { DBG(cdev, "unbind function '%s'/%p\n", f->name, f); f->unbind(config, f); /* may free memory for "f" */ } } list_del(&config->list); config->cdev = NULL; } else { unsigned i; DBG(cdev, "cfg %d/%p speeds:%s%s%s\n", config->bConfigurationValue, config, config->superspeed ? " super" : "", config->highspeed ? " high" : "", config->fullspeed ? (gadget_is_dualspeed(cdev->gadget) ? " full" : " full/low") : ""); for (i = 0; i < MAX_CONFIG_INTERFACES; i++) { struct usb_function *f = config->interface[i]; if (!f) continue; DBG(cdev, " interface %d = %s/%p\n", i, f->name, f); } } /* set_alt(), or next bind(), sets up * ep->driver_data as needed. 
*/ usb_ep_autoconfig_reset(cdev->gadget); done: if (status) DBG(cdev, "added config '%s'/%u --> %d\n", config->label, config->bConfigurationValue, status); return status; } EXPORT_SYMBOL_GPL(usb_add_config); static void unbind_config(struct usb_composite_dev *cdev, struct usb_configuration *config) { while (!list_empty(&config->functions)) { struct usb_function *f; f = list_first_entry(&config->functions, struct usb_function, list); list_del(&f->list); if (f->unbind) { DBG(cdev, "unbind function '%s'/%p\n", f->name, f); f->unbind(config, f); /* may free memory for "f" */ } } if (config->unbind) { DBG(cdev, "unbind config '%s'/%p\n", config->label, config); config->unbind(config); /* may free memory for "c" */ } } /** * usb_remove_config() - remove a configuration from a device. * @cdev: wraps the USB gadget * @config: the configuration * * Drivers must call usb_gadget_disconnect before calling this function * to disconnect the device from the host and make sure the host will not * try to enumerate the device while we are changing the config list. */ void usb_remove_config(struct usb_composite_dev *cdev, struct usb_configuration *config) { unsigned long flags; spin_lock_irqsave(&cdev->lock, flags); if (cdev->config == config) reset_config(cdev); list_del(&config->list); spin_unlock_irqrestore(&cdev->lock, flags); unbind_config(cdev, config); } /*-------------------------------------------------------------------------*/ /* We support strings in multiple languages ... string descriptor zero * says which languages are supported. The typical case will be that * only one language (probably English) is used, with I18N handled on * the host side. 
*/ static void collect_langs(struct usb_gadget_strings **sp, __le16 *buf) { const struct usb_gadget_strings *s; __le16 language; __le16 *tmp; while (*sp) { s = *sp; language = cpu_to_le16(s->language); for (tmp = buf; *tmp && tmp < &buf[126]; tmp++) { if (*tmp == language) goto repeat; } *tmp++ = language; repeat: sp++; } } static int lookup_string( struct usb_gadget_strings **sp, void *buf, u16 language, int id ) { struct usb_gadget_strings *s; int value; while (*sp) { s = *sp++; if (s->language != language) continue; value = usb_gadget_get_string(s, id, buf); if (value > 0) return value; } return -EINVAL; } static int get_string(struct usb_composite_dev *cdev, void *buf, u16 language, int id) { struct usb_composite_driver *composite = cdev->driver; struct usb_gadget_string_container *uc; struct usb_configuration *c; struct usb_function *f; int len; /* Yes, not only is USB's I18N support probably more than most * folk will ever care about ... also, it's all supported here. * (Except for UTF8 support for Unicode's "Astral Planes".) 
*/ /* 0 == report all available language codes */ if (id == 0) { struct usb_string_descriptor *s = buf; struct usb_gadget_strings **sp; memset(s, 0, 256); s->bDescriptorType = USB_DT_STRING; sp = composite->strings; if (sp) collect_langs(sp, s->wData); list_for_each_entry(c, &cdev->configs, list) { sp = c->strings; if (sp) collect_langs(sp, s->wData); list_for_each_entry(f, &c->functions, list) { #ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE if (!is_available_function(f->name)) { USB_DBG("skip f->%s\n", f->name); continue; } else { USB_DBG("f->%s\n", f->name); } #endif sp = f->strings; if (sp) collect_langs(sp, s->wData); } } list_for_each_entry(uc, &cdev->gstrings, list) { struct usb_gadget_strings **sp; sp = get_containers_gs(uc); collect_langs(sp, s->wData); } for (len = 0; len <= 126 && s->wData[len]; len++) continue; if (!len) return -EINVAL; s->bLength = 2 * (len + 1); return s->bLength; } list_for_each_entry(uc, &cdev->gstrings, list) { struct usb_gadget_strings **sp; sp = get_containers_gs(uc); len = lookup_string(sp, buf, language, id); if (len > 0) return len; } /* String IDs are device-scoped, so we look up each string * table we're told about. These lookups are infrequent; * simpler-is-better here. */ if (composite->strings) { len = lookup_string(composite->strings, buf, language, id); if (len > 0) return len; } list_for_each_entry(c, &cdev->configs, list) { if (c->strings) { len = lookup_string(c->strings, buf, language, id); if (len > 0) return len; } list_for_each_entry(f, &c->functions, list) { if (!f->strings) continue; len = lookup_string(f->strings, buf, language, id); if (len > 0) return len; } } return -EINVAL; } /** * usb_string_id() - allocate an unused string ID * @cdev: the device whose string descriptor IDs are being allocated * Context: single threaded during gadget setup * * @usb_string_id() is called from bind() callbacks to allocate * string IDs. 
Drivers for functions, configurations, or gadgets will * then store that ID in the appropriate descriptors and string table. * * All string identifier should be allocated using this, * @usb_string_ids_tab() or @usb_string_ids_n() routine, to ensure * that for example different functions don't wrongly assign different * meanings to the same identifier. */ int usb_string_id(struct usb_composite_dev *cdev) { if (cdev->next_string_id < 254) { /* string id 0 is reserved by USB spec for list of * supported languages */ /* 255 reserved as well? -- mina86 */ cdev->next_string_id++; return cdev->next_string_id; } return -ENODEV; } EXPORT_SYMBOL_GPL(usb_string_id); /** * usb_string_ids() - allocate unused string IDs in batch * @cdev: the device whose string descriptor IDs are being allocated * @str: an array of usb_string objects to assign numbers to * Context: single threaded during gadget setup * * @usb_string_ids() is called from bind() callbacks to allocate * string IDs. Drivers for functions, configurations, or gadgets will * then copy IDs from the string table to the appropriate descriptors * and string table for other languages. * * All string identifier should be allocated using this, * @usb_string_id() or @usb_string_ids_n() routine, to ensure that for * example different functions don't wrongly assign different meanings * to the same identifier. 
*/

/* Assign sequential string descriptor IDs to every entry of a
 * NULL-terminated usb_string table.  IDs 1..254 are available; returns
 * -ENODEV when the table would exhaust them, 0 on success.
 */
int usb_string_ids_tab(struct usb_composite_dev *cdev, struct usb_string *str)
{
    int next = cdev->next_string_id;

    for (; str->s; ++str) {
        if (unlikely(next >= 254))
            return -ENODEV;
        str->id = ++next;
    }

    cdev->next_string_id = next;

    return 0;
}
EXPORT_SYMBOL_GPL(usb_string_ids_tab);

/* Deep-copy an array of usb_gadget_strings (one per language) into a single
 * kmalloc'd usb_gadget_string_container.  Only the table structures are
 * copied; the actual C strings (usb_string.s) are referenced, not duplicated.
 * A missing string pointer is replaced by "" so lookups never see NULL.
 * Returns the container or ERR_PTR(-ENOMEM).
 */
static struct usb_gadget_string_container *copy_gadget_strings(
        struct usb_gadget_strings **sp, unsigned n_gstrings,
        unsigned n_strings)
{
    struct usb_gadget_string_container *uc;
    struct usb_gadget_strings **gs_array;
    struct usb_gadget_strings *gs;
    struct usb_string *s;
    unsigned mem;
    unsigned n_gs;
    unsigned n_s;
    void *stash;

    /* one allocation holds: container + (n_gstrings+1) pointers +
     * n_gstrings gadget_strings + per-language (n_strings+1) usb_strings
     * (the +1 is the NULL terminator of each string table).
     */
    mem = sizeof(*uc);
    mem += sizeof(void *) * (n_gstrings + 1);
    mem += sizeof(struct usb_gadget_strings) * n_gstrings;
    mem += sizeof(struct usb_string) * (n_strings + 1) * (n_gstrings);
    uc = kmalloc(mem, GFP_KERNEL);
    if (!uc)
        return ERR_PTR(-ENOMEM);
    gs_array = get_containers_gs(uc);
    stash = uc->stash;
    stash += sizeof(void *) * (n_gstrings + 1);

    for (n_gs = 0; n_gs < n_gstrings; n_gs++) {
        struct usb_string *org_s;

        gs_array[n_gs] = stash;
        gs = gs_array[n_gs];
        stash += sizeof(struct usb_gadget_strings);
        gs->language = sp[n_gs]->language;
        gs->strings = stash;
        org_s = sp[n_gs]->strings;

        for (n_s = 0; n_s < n_strings; n_s++) {
            s = stash;
            stash += sizeof(struct usb_string);
            if (org_s->s)
                s->s = org_s->s;
            else
                s->s = "";   /* never leave a hole in the table */
            org_s++;
        }
        /* NULL-terminate this language's table */
        s = stash;
        s->s = NULL;
        stash += sizeof(struct usb_string);
    }
    gs_array[n_gs] = NULL;
    return uc;
}

/**
 * usb_gstrings_attach() - attach gadget strings to a cdev and assign ids
 * @cdev: the device whose string descriptor IDs are being allocated
 * and attached.
 * @sp: an array of usb_gadget_strings to attach.
 * @n_strings: number of entries in each usb_strings array (sp[]->strings)
 *
 * This function will create a deep copy of usb_gadget_strings and usb_string
 * and attach it to the cdev. The actual string (usb_string.s) will not be
 * copied but only a reference will be made. The struct usb_gadget_strings
 * array may contain multiple languages and should be NULL terminated.
 * The ->language pointer of each struct usb_gadget_strings has to contain the
 * same amount of entries.
 * For instance: sp[0] is en-US, sp[1] is es-ES. It is expected that the first
 * usb_string entry of es-ES contains the translation of the first usb_string
 * entry of en-US. Therefore both entries get the same id assigned.
 */
struct usb_string *usb_gstrings_attach(struct usb_composite_dev *cdev,
        struct usb_gadget_strings **sp, unsigned n_strings)
{
    struct usb_gadget_string_container *uc;
    struct usb_gadget_strings **n_gs;
    unsigned n_gstrings = 0;
    unsigned i;
    int ret;

    /* count languages (sp[] is NULL terminated) */
    for (i = 0; sp[i]; i++)
        n_gstrings++;

    if (!n_gstrings)
        return ERR_PTR(-EINVAL);

    uc = copy_gadget_strings(sp, n_gstrings, n_strings);
    if (IS_ERR(uc))
        return ERR_PTR(PTR_ERR(uc));

    /* allocate IDs against the first language only ... */
    n_gs = get_containers_gs(uc);
    ret = usb_string_ids_tab(cdev, n_gs[0]->strings);
    if (ret)
        goto err;

    /* ... then mirror those IDs onto every other language's table */
    for (i = 1; i < n_gstrings; i++) {
        struct usb_string *m_s;
        struct usb_string *s;
        unsigned n;

        m_s = n_gs[0]->strings;
        s = n_gs[i]->strings;
        for (n = 0; n < n_strings; n++) {
            s->id = m_s->id;
            s++;
            m_s++;
        }
    }
    list_add_tail(&uc->list, &cdev->gstrings);
    return n_gs[0]->strings;
err:
    kfree(uc);
    return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(usb_gstrings_attach);

/**
 * usb_string_ids_n() - allocate unused string IDs in batch
 * @c: the device whose string descriptor IDs are being allocated
 * @n: number of string IDs to allocate
 * Context: single threaded during gadget setup
 *
 * Returns the first requested ID. This ID and next @n-1 IDs are now
 * valid IDs. At least provided that @n is non-zero because if it
 * is, returns last requested ID which is now very useful information.
 *
 * @usb_string_ids_n() is called from bind() callbacks to allocate
 * string IDs. Drivers for functions, configurations, or gadgets will
 * then store that ID in the appropriate descriptors and string table.
 *
 * All string identifiers should be allocated using this,
 * @usb_string_id() or @usb_string_ids_n() routine, to ensure that for
 * example different functions don't wrongly assign different meanings
 * to the same identifier.
 */
int usb_string_ids_n(struct usb_composite_dev *c, unsigned n)
{
    unsigned next = c->next_string_id;
    if (unlikely(n > 254 || (unsigned)next + n > 254))
        return -ENODEV;
    c->next_string_id += n;
    return next + 1;
}
EXPORT_SYMBOL_GPL(usb_string_ids_n);

/*-------------------------------------------------------------------------*/

/* ep0 request completion: only log when the transfer did not finish
 * cleanly (error status or short transfer).
 */
static void composite_setup_complete(struct usb_ep *ep, struct usb_request *req)
{
    if (req->status || req->actual != req->length)
        DBG((struct usb_composite_dev *) ep->driver_data,
                "setup complete --> %d, %d/%d\n",
                req->status, req->actual, req->length);
}

/*
 * The setup() callback implements all the ep0 functionality that's
 * not handled lower down, in hardware or the hardware driver(like
 * device and endpoint feature flags, and their status).  It's all
 * housekeeping for the gadget function we're implementing.  Most of
 * the work is in config and function specific setup.
 */
int composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
{
    struct usb_composite_dev *cdev = get_gadget_data(gadget);
    struct usb_request *req = cdev->req;
    int value = -EOPNOTSUPP;       /* default: stall unhandled requests */
    int status = 0;
    u16 w_index = le16_to_cpu(ctrl->wIndex);
    u8 intf = w_index & 0xFF;
    u16 w_value = le16_to_cpu(ctrl->wValue);
    u16 w_length = le16_to_cpu(ctrl->wLength);
    struct usb_function *f = NULL;
    u8 endp;
    struct usb_otg_descriptor* potgdesc;

    /* partial re-init of the response message; the function or the
     * gadget might need to intercept e.g. a control-OUT completion
     * when we delegate to it.
     */
    req->zero = 0;
    req->complete = composite_setup_complete;
    req->length = 0;
    gadget->ep0->driver_data = cdev;

    switch (ctrl->bRequest) {

    /* we handle all standard USB descriptors */
    case USB_REQ_GET_DESCRIPTOR:
        if (ctrl->bRequestType != USB_DIR_IN)
            goto unknown;
        switch (w_value >> 8) {

        case USB_DT_DEVICE:
            cdev->desc.bNumConfigurations =
                count_configs(cdev, USB_DT_DEVICE);
            cdev->desc.bMaxPacketSize0 =
                cdev->gadget->ep0->maxpacket;
            if (gadget_is_superspeed(gadget)) {
                if (gadget->speed >= USB_SPEED_SUPER) {
                    cdev->desc.bcdUSB = cpu_to_le16(0x0300);
                    cdev->desc.bMaxPacketSize0 = 9;
                } else {
                    cdev->desc.bcdUSB = cpu_to_le16(0x0210);
                }
            }

            value = min(w_length, (u16) sizeof cdev->desc);
            memcpy(req->buf, &cdev->desc, value);
            break;
        case USB_DT_OTG:
            /* build an OTG 2.0 descriptor in place in the ep0 buffer.
             * NOTE(review): bLength is a one-byte descriptor field;
             * the (u16) cast below is misleading though the value
             * still fits -- confirm against struct usb_otg_descriptor.
             */
            value = min(w_length, (u16) sizeof(struct usb_otg_descriptor));
            potgdesc = (struct usb_otg_descriptor* )req->buf;
            potgdesc->bLength = (u16) sizeof(struct usb_otg_descriptor);
            potgdesc->bDescriptorType = USB_DT_OTG;
            potgdesc->bmAttributes = 0;
            potgdesc->bcdOTG = __constant_cpu_to_le16(0x0200);
            break;
        case USB_DT_DEVICE_QUALIFIER:
            if (!gadget_is_dualspeed(gadget) ||
                    gadget->speed >= USB_SPEED_SUPER)
                break;
            device_qual(cdev);
            value = min_t(int, w_length,
                    sizeof(struct usb_qualifier_descriptor));
            break;
        case USB_DT_OTHER_SPEED_CONFIG:
            if (!gadget_is_dualspeed(gadget) ||
                    gadget->speed >= USB_SPEED_SUPER)
                break;
            /* FALLTHROUGH */
        case USB_DT_CONFIG:
            value = config_desc(cdev, w_value);
            if (value >= 0)
                value = min(w_length, (u16) value);
            break;
        case USB_DT_STRING:
#ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE
            /* Samsung multi-configuration hook (see multi_config.h) */
            set_string_mode(w_length);
#endif
            value = get_string(cdev, req->buf,
                    w_index, w_value & 0xff);
            if (value >= 0)
                value = min(w_length, (u16) value);
            break;
        case USB_DT_BOS:
            if (gadget_is_superspeed(gadget)) {
                value = bos_desc(cdev);
                value = min(w_length, (u16) value);
            }
            break;
        }
        break;

    /* any number of configs can work */
    case USB_REQ_SET_CONFIGURATION:
        if (ctrl->bRequestType != 0)
            goto unknown;
        if (gadget_is_otg(gadget)) {
            if (gadget->a_hnp_support)
                DBG(cdev, "HNP available\n");
            else if (gadget->a_alt_hnp_support)
                DBG(cdev, "HNP on another port\n");
            else
                VDBG(cdev, "HNP inactive\n");
        }
        spin_lock(&cdev->lock);
        value = set_config(cdev, ctrl, w_value);
        spin_unlock(&cdev->lock);
        break;
    case USB_REQ_GET_CONFIGURATION:
        if (ctrl->bRequestType != USB_DIR_IN)
            goto unknown;
        if (cdev->config){
#ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE
            /* Samsung multi-config: report the index-derived value */
            *(u8 *)req->buf = get_config_number() + 1;
#else
            *(u8 *)req->buf = cdev->config->bConfigurationValue;
#endif
        }else
            *(u8 *)req->buf = 0;
        value = min(w_length, (u16) 1);
        break;

    /* function drivers must handle get/set altsetting; if there's
     * no get() method, we know only altsetting zero works.
     */
    case USB_REQ_SET_INTERFACE:
        if (ctrl->bRequestType != USB_RECIP_INTERFACE)
            goto unknown;
        if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
            break;
        f = cdev->config->interface[intf];
        if (!f)
            break;
        if (w_value && !f->set_alt)
            break;
        value = f->set_alt(f, w_index, w_value);
        if (value == USB_GADGET_DELAYED_STATUS) {
            /* function wants to finish the status stage later via
             * usb_composite_setup_continue()
             */
            DBG(cdev,
                "%s: interface %d (%s) requested delayed status\n",
                    __func__, intf, f->name);
            cdev->delayed_status++;
            DBG(cdev, "delayed_status count %d\n",
                    cdev->delayed_status);
        }
        break;
    case USB_REQ_GET_INTERFACE:
        if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE))
            goto unknown;
        if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
            break;
        f = cdev->config->interface[intf];
        if (!f)
            break;
        /* lots of interfaces only need altsetting zero... */
        value = f->get_alt ? f->get_alt(f, w_index) : 0;
        if (value < 0)
            break;
        *((u8 *)req->buf) = value;
        value = min(w_length, (u16) 1);
        break;

    /*
     * USB 3.0 additions:
     * Function driver should handle get_status request. If such cb
     * wasn't supplied we respond with default value = 0
     * Note: function driver should supply such cb only for the first
     * interface of the function
     */
    case USB_REQ_GET_STATUS:
        if (!gadget_is_superspeed(gadget))
            goto unknown;
        if (ctrl->bRequestType != (USB_DIR_IN | USB_RECIP_INTERFACE))
            goto unknown;
        value = 2;  /* This is the length of the get_status reply */
        put_unaligned_le16(0, req->buf);
        if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
            break;
        f = cdev->config->interface[intf];
        if (!f)
            break;
        status = f->get_status ? f->get_status(f) : 0;
        if (status < 0)
            break;
        put_unaligned_le16(status & 0x0000ffff, req->buf);
        break;
    /*
     * Function drivers should handle SetFeature/ClearFeature
     * (FUNCTION_SUSPEND) request. function_suspend cb should be supplied
     * only for the first interface of the function
     */
    case USB_REQ_CLEAR_FEATURE:
    case USB_REQ_SET_FEATURE:
        if (!gadget_is_superspeed(gadget))
            goto unknown;
        if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_INTERFACE))
            goto unknown;
        switch (w_value) {
        case USB_INTRF_FUNC_SUSPEND:
            if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
                break;
            f = cdev->config->interface[intf];
            if (!f)
                break;
            value = 0;
            if (f->func_suspend)
                /* suspend options travel in the high byte of wIndex */
                value = f->func_suspend(f, w_index >> 8);
            if (value < 0) {
                ERROR(cdev,
                        "func_suspend() returned error %d\n",
                        value);
                value = 0;
            }
            break;
        }
        break;
    default:
unknown:
        VDBG(cdev,
            "non-core control req%02x.%02x v%04x i%04x l%d\n",
            ctrl->bRequestType, ctrl->bRequest,
            w_value, w_index, w_length);

        /* functions always handle their interfaces and endpoints...
         * punt other recipients (other, WUSB, ...) to the current
         * configuration code.
         *
         * REVISIT it could make sense to let the composite device
         * take such requests too, if that's ever needed: to work
         * in config 0, etc.
         */
        switch (ctrl->bRequestType & USB_RECIP_MASK) {
        case USB_RECIP_INTERFACE:
            if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
                break;
            f = cdev->config->interface[intf];
            break;

        case USB_RECIP_ENDPOINT:
            /* fold direction bit + endpoint number into one index */
            endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f);
            list_for_each_entry(f, &cdev->config->functions, list) {
                if (test_bit(endp, f->endpoints))
                    break;
            }
            if (&f->list == &cdev->config->functions)
                f = NULL;
            break;
        }

        if (f && f->setup)
            value = f->setup(f, ctrl);
        else {
            struct usb_configuration *c;

            c = cdev->config;
            if (c && c->setup)
                value = c->setup(c, ctrl);
        }

        goto done;
    }

    /* respond with data transfer before status phase? */
    if (value >= 0 && value != USB_GADGET_DELAYED_STATUS) {
        req->length = value;
        req->zero = value < w_length;
        value = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
        if (value < 0) {
            DBG(cdev, "ep_queue --> %d\n", value);
            req->status = 0;
            composite_setup_complete(gadget->ep0, req);
        }
    } else if (value == USB_GADGET_DELAYED_STATUS && w_length != 0) {
        WARN(cdev, "%s: Delayed status not supported for w_length != 0",
            __func__);
    }

done:
    /* device either stalls (value < 0) or reports success */
    return value;
}

/* VBUS session ended: tear down the active config and notify the
 * composite driver, then fall back to pre-configuration current draw.
 */
void composite_disconnect(struct usb_gadget *gadget)
{
    struct usb_composite_dev *cdev = get_gadget_data(gadget);
    unsigned long flags;

#ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE
    /* reset Samsung multi-config string handling */
    set_string_mode(0);
#endif
    /* REVISIT: should we have config and device level
     * disconnect callbacks?
     */
    spin_lock_irqsave(&cdev->lock, flags);
    if (cdev->config)
        reset_config(cdev);
    if (cdev->driver->disconnect)
        cdev->driver->disconnect(cdev);
    spin_unlock_irqrestore(&cdev->lock, flags);
    usb_gadget_vbus_draw(gadget, gadget_is_otg(gadget) ?
        USB_OTG_PRE_CONFIG_CURRENT : USB_PRE_CONFIG_CURRENT);
}

/*-------------------------------------------------------------------------*/

/* sysfs "suspended" attribute: 0/1 flag mirroring cdev->suspended */
static ssize_t composite_show_suspended(struct device *dev,
                    struct device_attribute *attr,
                    char *buf)
{
    struct usb_gadget *gadget = dev_to_usb_gadget(dev);
    struct usb_composite_dev *cdev = get_gadget_data(gadget);

    return sprintf(buf, "%d\n", cdev->suspended);
}

static DEVICE_ATTR(suspended, 0444, composite_show_suspended, NULL);

/* Common unbind path.  @unbind_driver selects whether the composite
 * driver's own ->unbind() is invoked (true on normal unbind, false when
 * cleaning up after a failed bind).
 */
static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
{
    struct usb_composite_dev *cdev = get_gadget_data(gadget);

    /* composite_disconnect() must already have been called
     * by the underlying peripheral controller driver!
     * so there's no i/o concurrency that could affect the
     * state protected by cdev->lock.
     */
    WARN_ON(cdev->config);

    while (!list_empty(&cdev->configs)) {
        struct usb_configuration *c;

        c = list_first_entry(&cdev->configs,
                struct usb_configuration, list);
        list_del(&c->list);
        unbind_config(cdev, c);
    }
    if (cdev->driver->unbind && unbind_driver)
        cdev->driver->unbind(cdev);

    composite_dev_cleanup(cdev);

    kfree(cdev->def_manufacturer);
    kfree(cdev);
    set_gadget_data(gadget, NULL);
}

static void composite_unbind(struct usb_gadget *gadget)
{
    __composite_unbind(gadget, true);
}

/* Copy @old into @new while preserving any fields the user already
 * overrode via usb_composite_overwrite_options() (non-zero means "set").
 */
static void update_unchanged_dev_desc(struct usb_device_descriptor *new,
        const struct usb_device_descriptor *old)
{
    __le16 idVendor;
    __le16 idProduct;
    __le16 bcdDevice;
    u8 iSerialNumber;
    u8 iManufacturer;
    u8 iProduct;

    /*
     * these variables may have been set in
     * usb_composite_overwrite_options()
     */
    idVendor = new->idVendor;
    idProduct = new->idProduct;
    bcdDevice = new->bcdDevice;
    iSerialNumber = new->iSerialNumber;
    iManufacturer = new->iManufacturer;
    iProduct = new->iProduct;

    *new = *old;
    if (idVendor)
        new->idVendor = idVendor;
    if (idProduct)
        new->idProduct = idProduct;
    if (bcdDevice)
        new->bcdDevice = bcdDevice;
    else
        new->bcdDevice = cpu_to_le16(get_default_bcdDevice());
    if (iSerialNumber)
        new->iSerialNumber = iSerialNumber;
    if (iManufacturer)
        new->iManufacturer = iManufacturer;
    if (iProduct)
        new->iProduct = iProduct;
}

/* Allocate the shared ep0 request/buffer, create the sysfs attribute and
 * prime the gadget for enumeration.  Returns 0 or a negative errno; on
 * failure everything allocated here is released again.
 */
int composite_dev_prepare(struct usb_composite_driver *composite,
        struct usb_composite_dev *cdev)
{
    struct usb_gadget *gadget = cdev->gadget;
    int ret = -ENOMEM;

    /* preallocate control response and buffer */
    cdev->req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL);
    if (!cdev->req)
        return -ENOMEM;

    cdev->req->buf = kmalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL);
    if (!cdev->req->buf)
        goto fail;

    ret = device_create_file(&gadget->dev, &dev_attr_suspended);
    if (ret)
        goto fail_dev;

    cdev->req->complete = composite_setup_complete;
    gadget->ep0->driver_data = cdev;

    cdev->driver = composite;

    /*
     * As per USB compliance update, a device that is actively drawing
     * more than 100mA from USB must report itself as bus-powered in
     * the GetStatus(DEVICE) call.
     */
    if (CONFIG_USB_GADGET_VBUS_DRAW <= USB_SELF_POWER_VBUS_MAX_DRAW)
        usb_gadget_set_selfpowered(gadget);

    /* interface and string IDs start at zero via kzalloc.
     * we force endpoints to start unassigned; few controller
     * drivers will zero ep->driver_data.
     */
    usb_ep_autoconfig_reset(gadget);

    return 0;
fail_dev:
    kfree(cdev->req->buf);
fail:
    usb_ep_free_request(gadget->ep0, cdev->req);
    cdev->req = NULL;
    return ret;
}

/* Release everything composite_dev_prepare()/usb_gstrings_attach()
 * allocated on @cdev (string containers, ep0 request, sysfs attribute).
 */
void composite_dev_cleanup(struct usb_composite_dev *cdev)
{
    struct usb_gadget_string_container *uc, *tmp;

    list_for_each_entry_safe(uc, tmp, &cdev->gstrings, list) {
        list_del(&uc->list);
        kfree(uc);
    }
    if (cdev->req) {
        kfree(cdev->req->buf);
        usb_ep_free_request(cdev->gadget->ep0, cdev->req);
    }
    cdev->next_string_id = 0;
    device_remove_file(&cdev->gadget->dev, &dev_attr_suspended);
}

/* Gadget-bus bind callback: allocate the composite device, prepare ep0
 * plumbing and delegate configuration/function registration to the
 * composite driver's ->bind().
 */
static int composite_bind(struct usb_gadget *gadget,
        struct usb_gadget_driver *gdriver)
{
    struct usb_composite_dev *cdev;
    struct usb_composite_driver *composite = to_cdriver(gdriver);
    int status = -ENOMEM;

    cdev = kzalloc(sizeof *cdev, GFP_KERNEL);
    if (!cdev)
        return status;

    spin_lock_init(&cdev->lock);
    cdev->gadget = gadget;
    set_gadget_data(gadget, cdev);
    INIT_LIST_HEAD(&cdev->configs);
    INIT_LIST_HEAD(&cdev->gstrings);

    status = composite_dev_prepare(composite, cdev);
    if (status)
        goto fail;

    /* composite gadget needs to assign strings for whole device (like
     * serial number), register function drivers, potentially update
     * power state and consumption, etc
     */
    status = composite->bind(cdev);
    if (status < 0)
        goto fail;

    update_unchanged_dev_desc(&cdev->desc, composite->dev);

    /* has userspace failed to provide a serial number? */
    if (composite->needs_serial && !cdev->desc.iSerialNumber)
        WARNING(cdev, "userspace failed to provide iSerialNumber\n");

    INFO(cdev, "%s ready\n", composite->name);
    return 0;

fail:
    __composite_unbind(gadget, false);
    return status;
}

/*-------------------------------------------------------------------------*/

/* Bus suspend: propagate to every function of the active config, then
 * the composite driver, and drop VBUS draw to suspend current
 * (2 -- presumably mA, per usb_gadget_vbus_draw convention; verify).
 */
static void composite_suspend(struct usb_gadget *gadget)
{
    struct usb_composite_dev *cdev = get_gadget_data(gadget);
    struct usb_function *f;

    /* REVISIT: should we have config level
     * suspend/resume callbacks?
     */
    DBG(cdev, "suspend\n");
    if (cdev->config) {
        list_for_each_entry(f, &cdev->config->functions, list) {
            if (f->suspend)
                f->suspend(f);
        }
    }
    if (cdev->driver->suspend)
        cdev->driver->suspend(cdev);

    cdev->suspended = 1;

    usb_gadget_vbus_draw(gadget, 2);
}

/* Bus resume: mirror of composite_suspend(), restoring the configured
 * current draw (MaxPower, or the compile-time default when unset).
 */
static void composite_resume(struct usb_gadget *gadget)
{
    struct usb_composite_dev *cdev = get_gadget_data(gadget);
    struct usb_function *f;
    u8 maxpower;

    /* REVISIT: should we have config level
     * suspend/resume callbacks?
     */
    DBG(cdev, "resume\n");
    if (cdev->driver->resume)
        cdev->driver->resume(cdev);
    if (cdev->config) {
        list_for_each_entry(f, &cdev->config->functions, list) {
            if (f->resume)
                f->resume(f);
        }

        maxpower = cdev->config->MaxPower;

        usb_gadget_vbus_draw(gadget, maxpower ?
            maxpower : CONFIG_USB_GADGET_VBUS_DRAW);
    }

    cdev->suspended = 0;
}

/*-------------------------------------------------------------------------*/

/* template copied into each usb_composite_driver at probe time */
static const struct usb_gadget_driver composite_driver_template = {
    .bind       = composite_bind,
    .unbind     = composite_unbind,

    .setup      = composite_setup,
    .disconnect = composite_disconnect,

    .suspend    = composite_suspend,
    .resume     = composite_resume,

    .driver = {
        .owner  = THIS_MODULE,
    },
};

/**
 * usb_composite_probe() - register a composite driver
 * @driver: the driver to register
 *
 * Context: single threaded during gadget setup
 *
 * This function is used to register drivers using the composite driver
 * framework.  The return value is zero, or a negative errno value.
 * Those values normally come from the driver's @bind method, which does
 * all the work of setting up the driver to match the hardware.
 *
 * On successful return, the gadget is ready to respond to requests from
 * the host, unless one of its components invokes usb_gadget_disconnect()
 * while it was binding.  That would usually be done in order to wait for
 * some userspace participation.
 */
int usb_composite_probe(struct usb_composite_driver *driver)
{
    struct usb_gadget_driver *gadget_driver;

    if (!driver || !driver->dev || !driver->bind)
        return -EINVAL;

    if (!driver->name)
        driver->name = "composite";

    driver->gadget_driver = composite_driver_template;
    gadget_driver = &driver->gadget_driver;

    gadget_driver->function = (char *) driver->name;
    gadget_driver->driver.name = driver->name;
    gadget_driver->max_speed = driver->max_speed;

    return usb_gadget_probe_driver(gadget_driver);
}
EXPORT_SYMBOL_GPL(usb_composite_probe);

/**
 * usb_composite_unregister() - unregister a composite driver
 * @driver: the driver to unregister
 *
 * This function is used to unregister drivers using the composite
 * driver framework.
 */
void usb_composite_unregister(struct usb_composite_driver *driver)
{
    usb_gadget_unregister_driver(&driver->gadget_driver);
}
EXPORT_SYMBOL_GPL(usb_composite_unregister);

/**
 * usb_composite_setup_continue() - Continue with the control transfer
 * @cdev: the composite device who's control transfer was kept waiting
 *
 * This function must be called by the USB function driver to continue
 * with the control transfer's data/status stage in case it had requested to
 * delay the data/status stages. A USB function's setup handler (e.g. set_alt())
 * can request the composite framework to delay the setup request's data/status
 * stages by returning USB_GADGET_DELAYED_STATUS.
 */
void usb_composite_setup_continue(struct usb_composite_dev *cdev)
{
    int value;
    struct usb_request *req = cdev->req;
    unsigned long flags;

    DBG(cdev, "%s\n", __func__);
    spin_lock_irqsave(&cdev->lock, flags);

    if (cdev->delayed_status == 0) {
        WARN(cdev, "%s: Unexpected call\n", __func__);

    } else if (--cdev->delayed_status == 0) {
        /* last outstanding delayed status: queue the zero-length
         * status-stage response now
         */
        DBG(cdev, "%s: Completing delayed status\n", __func__);
        req->length = 0;
        value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
        if (value < 0) {
            DBG(cdev, "ep_queue --> %d\n", value);
            req->status = 0;
            composite_setup_complete(cdev->gadget->ep0, req);
        }
    }

    spin_unlock_irqrestore(&cdev->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_composite_setup_continue);

/* Build the default manufacturer string "<sysname> <release> with <udc>".
 * Uses snprintf(NULL, 0, ...) first to size the allocation exactly.
 * Returns a kmalloc'd string (caller frees) or NULL on OOM.
 */
static char *composite_default_mfr(struct usb_gadget *gadget)
{
    char *mfr;
    int len;

    len = snprintf(NULL, 0, "%s %s with %s", init_utsname()->sysname,
            init_utsname()->release, gadget->name);
    len++;   /* room for the terminating NUL */
    mfr = kmalloc(len, GFP_KERNEL);
    if (!mfr)
        return NULL;
    snprintf(mfr, len, "%s %s with %s", init_utsname()->sysname,
            init_utsname()->release, gadget->name);
    return mfr;
}

/* Apply module-parameter overrides (VID/PID/bcdDevice/strings) to the
 * device descriptor and the driver's first string table.  When no
 * manufacturer is supplied and the table entry is empty, synthesize one.
 */
void usb_composite_overwrite_options(struct usb_composite_dev *cdev,
        struct usb_composite_overwrite *covr)
{
    struct usb_device_descriptor *desc = &cdev->desc;
    struct usb_gadget_strings *gstr = cdev->driver->strings[0];
    struct usb_string *dev_str = gstr->strings;

    if (covr->idVendor)
        desc->idVendor = cpu_to_le16(covr->idVendor);

    if (covr->idProduct)
        desc->idProduct = cpu_to_le16(covr->idProduct);

    if (covr->bcdDevice)
        desc->bcdDevice = cpu_to_le16(covr->bcdDevice);

    if (covr->serial_number) {
        desc->iSerialNumber = dev_str[USB_GADGET_SERIAL_IDX].id;
        dev_str[USB_GADGET_SERIAL_IDX].s = covr->serial_number;
    }
    if (covr->manufacturer) {
        desc->iManufacturer = dev_str[USB_GADGET_MANUFACTURER_IDX].id;
        dev_str[USB_GADGET_MANUFACTURER_IDX].s = covr->manufacturer;

    } else if (!strlen(dev_str[USB_GADGET_MANUFACTURER_IDX].s)) {
        desc->iManufacturer = dev_str[USB_GADGET_MANUFACTURER_IDX].id;
        cdev->def_manufacturer = composite_default_mfr(cdev->gadget);
        dev_str[USB_GADGET_MANUFACTURER_IDX].s = cdev->def_manufacturer;
    }

    if (covr->product) {
        desc->iProduct = dev_str[USB_GADGET_PRODUCT_IDX].id;
        dev_str[USB_GADGET_PRODUCT_IDX].s = covr->product;
    }
}
EXPORT_SYMBOL_GPL(usb_composite_overwrite_options);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");
TheNikiz/android_kernel_samsung_hawaii
drivers/usb/gadget/composite.c
C
gpl-2.0
54,161
/*
 * arch/powerpc/platforms/embedded6xx/flipper-pic.h
 *
 * Nintendo GameCube/Wii interrupt controller support.
 * Copyright (C) 2004-2009 The GameCube Linux Team
 * Copyright (C) 2007,2008,2009 Albert Herranz
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 */

#ifndef __FLIPPER_PIC_H
#define __FLIPPER_PIC_H

/* number of interrupt lines exposed by the Flipper PIC */
#define FLIPPER_NR_IRQS 32

/*
 * Each interrupt has a corresponding bit in both
 * the Interrupt Cause (ICR) and Interrupt Mask (IMR) registers.
 *
 * Enabling/disabling an interrupt line involves asserting/clearing
 * the corresponding bit in IMR. ACK'ing a request simply involves
 * asserting the corresponding bit in ICR.
 */
#define FLIPPER_ICR 0x00                /* Interrupt Cause Register offset */
#define FLIPPER_ICR_RSS (1<<16)         /* reset switch state */

#define FLIPPER_IMR 0x04                /* Interrupt Mask Register offset */

#define FLIPPER_RESET 0x24              /* reset control register offset */

/* retrieve the currently pending irq (called from the platform irq hook) */
unsigned int flipper_pic_get_irq(void);
/* probe and initialise the PIC during early boot */
void __init flipper_pic_probe(void);

/* quiesce the controller (e.g. before kexec/shutdown) */
void flipper_quiesce(void);
/* trigger a platform reset via FLIPPER_RESET */
void flipper_platform_reset(void);
/* non-zero when the console reset button is held (FLIPPER_ICR_RSS) */
int flipper_is_reset_button_pressed(void);

#endif
Linux-Wii-Mod/linux-wii-2.6.32
arch/powerpc/platforms/embedded6xx/flipper-pic.h
C
gpl-2.0
1,188
/*******************************************************************************
 * This file is part of OpenNMS(R).
 *
 * Copyright (C) 2007-2015 The OpenNMS Group, Inc.
 * OpenNMS(R) is Copyright (C) 1999-2015 The OpenNMS Group, Inc.
 *
 * OpenNMS(R) is a registered trademark of The OpenNMS Group, Inc.
 *
 * OpenNMS(R) is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published
 * by the Free Software Foundation, either version 3 of the License,
 * or (at your option) any later version.
 *
 * OpenNMS(R) is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with OpenNMS(R). If not, see:
 *      http://www.gnu.org/licenses/
 *
 * For more information contact:
 *     OpenNMS(R) Licensing <license@opennms.org>
 *     http://www.opennms.org/
 *     http://www.opennms.com/
 *******************************************************************************/

package org.opennms.netmgt.provision.detector;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.net.UnknownHostException;
import java.util.HashMap;

import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.opennms.core.test.MockLogAppender;
import org.opennms.core.utils.InetAddressUtils;
import org.opennms.netmgt.provision.detector.simple.NrpeDetector;
import org.opennms.netmgt.provision.detector.simple.NrpeDetectorFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;

/**
 * Integration tests for {@link NrpeDetector}.
 *
 * NOTE(review): the whole class is {@code @Ignore}d and targets a
 * hard-coded LAN host (192.168.1.103, NSClient++ on Windows), so these
 * tests only run when that environment is manually recreated.
 *
 * @author Donald Desloge
 *
 */
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(locations={"classpath:/META-INF/opennms/detectors.xml"})
@Ignore
public class NrpeDetectorTest {

    // factory wired from the Spring detectors.xml context
    @Autowired
    private NrpeDetectorFactory m_detectorFactory;

    // detector under test, rebuilt before every test
    private NrpeDetector m_detector;

    @Before
    public void setUp() {
        MockLogAppender.setupLogging();
        // fresh detector on the default NRPE port (5666)
        m_detector = m_detectorFactory.createDetector(new HashMap<>());
        m_detector.setPort(5666);
        m_detector.init();
    }

    //Tested against a local windows box with NSClient++
    @Test(timeout=20000)
    public void testDetectorSuccess() throws UnknownHostException {
        assertTrue(m_detector.isServiceDetected(InetAddressUtils.addr("192.168.1.103")));
    }

    // 12489 is a different NSClient++ listener; NRPE detection should fail there
    @Test(timeout=20000)
    public void testDetectorFailWrongPort() throws UnknownHostException {
        m_detector.setPort(12489);
        assertFalse(m_detector.isServiceDetected(InetAddressUtils.addr("192.168.1.103")));
    }

    // server side requires SSL, so disabling it must break detection
    @Test(timeout=20000)
    public void testDetectorFailNotUsingSSL() throws UnknownHostException {
        m_detector.setUseSsl(false);
        assertFalse(m_detector.isServiceDetected(InetAddressUtils.addr("192.168.1.103")));
    }

}
jeffgdotorg/opennms
opennms-provision/opennms-detector-lineoriented/src/test/java/org/opennms/netmgt/provision/detector/NrpeDetectorTest.java
Java
gpl-2.0
3,187
// Theme navigation behaviour (jQuery).
// Mirrors the minified original exactly: two click handlers bound at load.

// Hamburger button: show/hide the mobile navigation panel.
$(".menu-toggle").click(function () {
    $("#mobile-navigation").toggleClass("active");
});

// Parent menu items: mark the clicked item current and toggle every
// sub-menu's visibility (note: ".sub-menu" matches ALL sub-menus, as in
// the original).
$(".menu-item-has-children").click(function () {
    $(this).toggleClass("current-menu-item");
    $(".sub-menu").toggleClass("active");
});
NoviumDesign/Grey-Advokatbyr-
wp-content/themes/grey/js/min/navigation-min.js
JavaScript
gpl-2.0
209
/*
 * APIC support - internal interfaces
 *
 * Copyright (c) 2004-2005 Fabrice Bellard
 * Copyright (c) 2011 Jan Kiszka, Siemens AG
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>
 */

#ifndef QEMU_APIC_INTERNAL_H
#define QEMU_APIC_INTERNAL_H

#include "cpu.h"
#include "exec/memory.h"
#include "qemu/timer.h"
#include "target/i386/cpu-qom.h"
#include "qom/object.h"

/* APIC Local Vector Table */
#define APIC_LVT_TIMER                  0
#define APIC_LVT_THERMAL                1
#define APIC_LVT_PERFORM                2
#define APIC_LVT_LINT0                  3
#define APIC_LVT_LINT1                  4
#define APIC_LVT_ERROR                  5
#define APIC_LVT_NB                     6

/* APIC delivery modes */
#define APIC_DM_FIXED                   0
#define APIC_DM_LOWPRI                  1
#define APIC_DM_SMI                     2
#define APIC_DM_NMI                     4
#define APIC_DM_INIT                    5
#define APIC_DM_SIPI                    6
#define APIC_DM_EXTINT                  7

/* APIC destination mode */
#define APIC_DESTMODE_FLAT              0xf
#define APIC_DESTMODE_CLUSTER           1

#define APIC_TRIGGER_EDGE               0
#define APIC_TRIGGER_LEVEL              1

#define APIC_VECTOR_MASK                0xff
#define APIC_DCR_MASK                   0xf

/* bit positions within an LVT entry */
#define APIC_LVT_TIMER_SHIFT            17
#define APIC_LVT_MASKED_SHIFT           16
#define APIC_LVT_LEVEL_TRIGGER_SHIFT    15
#define APIC_LVT_REMOTE_IRR_SHIFT       14
#define APIC_LVT_INT_POLARITY_SHIFT     13
#define APIC_LVT_DELIV_STS_SHIFT        12
#define APIC_LVT_DELIV_MOD_SHIFT        8

#define APIC_LVT_TIMER_TSCDEADLINE      (2 << APIC_LVT_TIMER_SHIFT)
#define APIC_LVT_TIMER_PERIODIC         (1 << APIC_LVT_TIMER_SHIFT)
#define APIC_LVT_MASKED                 (1 << APIC_LVT_MASKED_SHIFT)
#define APIC_LVT_LEVEL_TRIGGER          (1 << APIC_LVT_LEVEL_TRIGGER_SHIFT)
#define APIC_LVT_REMOTE_IRR             (1 << APIC_LVT_REMOTE_IRR_SHIFT)
#define APIC_LVT_INT_POLARITY           (1 << APIC_LVT_INT_POLARITY_SHIFT)
#define APIC_LVT_DELIV_STS              (1 << APIC_LVT_DELIV_STS_SHIFT)
#define APIC_LVT_DELIV_MOD              (7 << APIC_LVT_DELIV_MOD_SHIFT)

/* Error Status Register bits */
#define APIC_ESR_ILL_ADDRESS_SHIFT      7
#define APIC_ESR_RECV_ILL_VECT_SHIFT    6
#define APIC_ESR_SEND_ILL_VECT_SHIFT    5
#define APIC_ESR_RECV_ACCEPT_SHIFT      3
#define APIC_ESR_SEND_ACCEPT_SHIFT      2
#define APIC_ESR_RECV_CHECK_SUM_SHIFT   1

#define APIC_ESR_ILLEGAL_ADDRESS        (1 << APIC_ESR_ILL_ADDRESS_SHIFT)
#define APIC_ESR_RECV_ILLEGAL_VECT      (1 << APIC_ESR_RECV_ILL_VECT_SHIFT)
#define APIC_ESR_SEND_ILLEGAL_VECT      (1 << APIC_ESR_SEND_ILL_VECT_SHIFT)
#define APIC_ESR_RECV_ACCEPT            (1 << APIC_ESR_RECV_ACCEPT_SHIFT)
#define APIC_ESR_SEND_ACCEPT            (1 << APIC_ESR_SEND_ACCEPT_SHIFT)
#define APIC_ESR_RECV_CHECK_SUM         (1 << APIC_ESR_RECV_CHECK_SUM_SHIFT)
#define APIC_ESR_SEND_CHECK_SUM         1

/* Interrupt Command Register layout */
#define APIC_ICR_DEST_SHIFT             24
#define APIC_ICR_DEST_SHORT_SHIFT       18
#define APIC_ICR_TRIGGER_MOD_SHIFT      15
#define APIC_ICR_LEVEL_SHIFT            14
#define APIC_ICR_DELIV_STS_SHIFT        12
#define APIC_ICR_DEST_MOD_SHIFT         11
#define APIC_ICR_DELIV_MOD_SHIFT        8

#define APIC_ICR_DEST_SHORT             (3 << APIC_ICR_DEST_SHORT_SHIFT)
#define APIC_ICR_TRIGGER_MOD            (1 << APIC_ICR_TRIGGER_MOD_SHIFT)
#define APIC_ICR_LEVEL                  (1 << APIC_ICR_LEVEL_SHIFT)
#define APIC_ICR_DELIV_STS              (1 << APIC_ICR_DELIV_STS_SHIFT)
#define APIC_ICR_DEST_MOD               (1 << APIC_ICR_DEST_MOD_SHIFT)
#define APIC_ICR_DELIV_MOD              (7 << APIC_ICR_DELIV_MOD_SHIFT)

/* priority register split: class (high nibble) / sub-class (low nibble) */
#define APIC_PR_CLASS_SHIFT             4
#define APIC_PR_SUB_CLASS               0xf

#define APIC_LOGDEST_XAPIC_SHIFT        4
#define APIC_LOGDEST_XAPIC_ID           0xf

#define APIC_LOGDEST_X2APIC_SHIFT       16
#define APIC_LOGDEST_X2APIC_ID          0xffff

/* Spurious Interrupt Vector register bits */
#define APIC_SPURIO_FOCUS_SHIFT         9
#define APIC_SPURIO_ENABLED_SHIFT       8

#define APIC_SPURIO_FOCUS               (1 << APIC_SPURIO_FOCUS_SHIFT)
#define APIC_SPURIO_ENABLED             (1 << APIC_SPURIO_ENABLED_SHIFT)

#define APIC_SV_DIRECTED_IO             (1 << 12)
#define APIC_SV_ENABLE                  (1 << 8)

#define VAPIC_ENABLE_BIT                0
#define VAPIC_ENABLE_MASK               (1 << VAPIC_ENABLE_BIT)

typedef struct APICCommonState APICCommonState;

#define TYPE_APIC_COMMON "apic-common"
typedef struct APICCommonClass APICCommonClass;
DECLARE_OBJ_CHECKERS(APICCommonState, APICCommonClass,
                     APIC_COMMON, TYPE_APIC_COMMON)

/* class for the APIC-common QOM type; concrete emulations (TCG, KVM, ...)
 * fill in these hooks */
struct APICCommonClass {
    DeviceClass parent_class;

    DeviceRealize realize;
    DeviceUnrealize unrealize;
    void (*set_base)(APICCommonState *s, uint64_t val);
    void (*set_tpr)(APICCommonState *s, uint8_t val);
    uint8_t (*get_tpr)(APICCommonState *s);
    void (*enable_tpr_reporting)(APICCommonState *s, bool enable);
    void (*vapic_base_update)(APICCommonState *s);
    void (*external_nmi)(APICCommonState *s);
    void (*pre_save)(APICCommonState *s);
    void (*post_load)(APICCommonState *s);
    void (*reset)(APICCommonState *s);
    /* send_msi emulates an APIC bus and its proper place would be in a new
     * device, but it's convenient to have it here for now.
     */
    void (*send_msi)(MSIMessage *msi);
};

/* per-CPU local APIC state shared by all emulation backends */
struct APICCommonState {
    /*< private >*/
    DeviceState parent_obj;
    /*< public >*/

    MemoryRegion io_memory;
    X86CPU *cpu;
    uint32_t apicbase;
    uint8_t id; /* legacy APIC ID */
    uint32_t initial_apic_id;
    uint8_t version;
    uint8_t arb_id;
    uint8_t tpr;
    uint32_t spurious_vec;
    uint8_t log_dest;
    uint8_t dest_mode;
    uint32_t isr[8];  /* in service register */
    uint32_t tmr[8];  /* trigger mode register */
    uint32_t irr[8];  /* interrupt request register */
    uint32_t lvt[APIC_LVT_NB];
    uint32_t esr; /* error register */
    uint32_t icr[2];

    uint32_t divide_conf;
    int count_shift;
    uint32_t initial_count;
    int64_t initial_count_load_time;
    int64_t next_time;
    QEMUTimer *timer;
    int64_t timer_expiry;
    int sipi_vector;
    int wait_for_sipi;

    uint32_t vapic_control;
    DeviceState *vapic;
    hwaddr vapic_paddr; /* note: persistence via kvmvapic */
    bool legacy_instance_id;
};

/* guest-visible vAPIC state blob (layout is ABI; hence QEMU_PACKED) */
typedef struct VAPICState {
    uint8_t tpr;
    uint8_t isr;
    uint8_t zero;
    uint8_t irr;
    uint8_t enabled;
} QEMU_PACKED VAPICState;

extern bool apic_report_tpr_access;

void apic_report_irq_delivered(int delivered);
bool apic_next_timer(APICCommonState *s, int64_t current_time);
void apic_enable_tpr_access_reporting(DeviceState *d, bool enable);
void apic_enable_vapic(DeviceState *d, hwaddr paddr);

void vapic_report_tpr_access(DeviceState *dev, CPUState *cpu, target_ulong ip,
                             TPRAccess access);

int apic_get_ppr(APICCommonState *s);
uint32_t apic_get_current_count(APICCommonState *s);

/* set bit `index` in a 256-bit register stored as eight u32 words
 * (used for ISR/TMR/IRR) */
static inline void apic_set_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    tab[i] |= mask;
}

/* test bit `index` in the same 8-word register layout; returns 0 or 1 */
static inline int apic_get_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    return !!(tab[i] & mask);
}

APICCommonClass *apic_get_class(void);

#endif /* QEMU_APIC_INTERNAL_H */
dslutz/qemu
include/hw/i386/apic_internal.h
C
gpl-2.0
7,974
// Copyright 2015, VIXL authors // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of ARM Limited nor the names of its contributors may be // used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --------------------------------------------------------------------- // This file is auto generated using tools/generate_simulator_traces.py. // // PLEASE DO NOT EDIT. 
// --------------------------------------------------------------------- #ifndef VIXL_SIM_FMUL_4H_TRACE_AARCH64_H_ #define VIXL_SIM_FMUL_4H_TRACE_AARCH64_H_ const uint16_t kExpected_NEON_fmul_4H[] = { 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7f23, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7f23, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 
0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0x7f23, 0x7e01, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x7f23, 0x7e01, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0009, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0009, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0009, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0012, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0012, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0012, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x001b, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x00b4, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x2c7f, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7f23, 
0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8009, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8009, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8009, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8012, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8012, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8012, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x801b, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x80b4, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0xac7f, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0xfe01, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0000, 0x01ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0009, 0x0200, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0009, 0x0200, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0009, 0x03ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0012, 0x03ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0012, 0x0400, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0012, 0x05fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x001b, 0x10ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x00b4, 0x43fd, 
0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x2c7f, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8000, 0x81ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8009, 0x8200, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8009, 0x8200, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8009, 0x83ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8012, 0x83ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8012, 0x8400, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8012, 0x85fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x801b, 0x90ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x80b4, 0xc3fd, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0xac7f, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0xff23, 0xfe01, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0xfe01, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0000, 0x01ff, 0x0000, 0x0000, 0x0000, 
0x0000, 0x0000, 0x7e01, 0x0009, 0x0200, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0009, 0x0200, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0009, 0x03ff, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0012, 0x03ff, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0012, 0x0400, 0x0002, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0012, 0x05fe, 0x000a, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x001b, 0x10ff, 0x1bff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x00b4, 0x43fd, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x2c7f, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0000, 0x0000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8000, 0x81ff, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8009, 0x8200, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8009, 0x8200, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8009, 0x83ff, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8012, 0x83ff, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8012, 0x8400, 0x8002, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8012, 0x85fe, 0x800a, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x801b, 0x90ff, 0x9bff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x80b4, 0xc3fd, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0xac7f, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 
0xfe00, 0xff23, 0xfe01, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x8000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x01ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0009, 0x0200, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0009, 0x0200, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0009, 0x03ff, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0012, 0x03ff, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0012, 0x0400, 0x0002, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0012, 0x05fe, 0x000a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x001b, 0x10ff, 0x1bff, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x00b4, 0x43fd, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x2c7f, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x81ff, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8009, 0x8200, 0x8001, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8009, 0x8200, 0x8001, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8009, 0x83ff, 0x8001, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8012, 0x83ff, 0x8001, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8012, 0x8400, 
0x8002, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8012, 0x85fe, 0x800a, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x801b, 0x90ff, 0x9bff, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x80b4, 0xc3fd, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xac7f, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x8000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x0000, 0x01ff, 0x0000, 0x0000, 0x0400, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x0001, 0x0000, 0x0400, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x0001, 0x0000, 0x0401, 0x0000, 0x0000, 0x0000, 0x0000, 0x03ff, 0x0001, 0x0000, 0x0600, 0x0000, 0x0000, 0x0000, 0x0000, 0x03ff, 0x0001, 0x0000, 0x1100, 0x0000, 0x0000, 0x0000, 0x0000, 0x0400, 0x0002, 0x0000, 0x43ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x05fe, 0x000a, 0x0000, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x10ff, 0x1bff, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x43fd, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x8200, 
0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x8000, 0x8200, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x8000, 0x8200, 0x0000, 0x0000, 0x0000, 0x0000, 0x81ff, 0x8000, 0x8000, 0x8400, 0x0000, 0x0000, 0x0000, 0x0000, 0x8200, 0x8001, 0x8000, 0x8400, 0x0000, 0x0000, 0x0000, 0x0000, 0x8200, 0x8001, 0x8000, 0x8401, 0x0000, 0x0000, 0x0000, 0x0000, 0x83ff, 0x8001, 0x8000, 0x8600, 0x0000, 0x0000, 0x0000, 0x0000, 0x83ff, 0x8001, 0x8000, 0x9100, 0x0000, 0x0000, 0x0000, 0x0000, 0x8400, 0x8002, 0x8000, 0xc3ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x85fe, 0x800a, 0x8000, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0x90ff, 0x9bff, 0x7e00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xc3fd, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0x8009, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x8000, 0x81ff, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x8000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0000, 0x33fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x33ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x3400, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x37fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0400, 0x37ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000, 0x0400, 0x3800, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000, 0x0401, 0x39ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000, 0x0600, 0x44ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000, 0x1100, 0x77fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x0002, 0x0000, 0x43ff, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x000a, 0x0000, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x1bff, 0x7e00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 
0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0009, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0000, 0x01ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0xb3fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x8200, 0xb3ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x8200, 0xb400, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x8200, 0xb7fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x8400, 0xb7ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8001, 0x8000, 0x8400, 0xb800, 0x0000, 0x0000, 0x0000, 0x0000, 0x8001, 0x8000, 0x8401, 0xb9ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8001, 0x8000, 0x8600, 0xc4ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8001, 0x8000, 0x9100, 0xf7fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x8002, 0x8000, 0xc3ff, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0x800a, 0x8000, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x9bff, 0x7e00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0x8009, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x8009, 0x8200, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x8000, 0x81ff, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0200, 0x33ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x33fe, 0x3400, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x33ff, 0x3401, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x3400, 0x37ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x37fe, 0x3800, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0400, 0x37ff, 0x3801, 0x0000, 0x0000, 0x0000, 0x0000, 
0x0000, 0x0400, 0x3800, 0x3a00, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0401, 0x39ff, 0x4500, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0600, 0x44ff, 0x77ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x1100, 0x77fe, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x43ff, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0009, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0009, 0x0200, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0000, 0x01ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0xb3ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0xb3fe, 0xb400, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0xb3ff, 0xb401, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0xb400, 0xb7ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0xb7fe, 0xb800, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8400, 0xb7ff, 0xb801, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8400, 0xb800, 0xba00, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8401, 0xb9ff, 0xc500, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8600, 0xc4ff, 0xf7ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x9100, 0xf7fe, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0xc3ff, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0x8009, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x8009, 0x8200, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x8009, 0x8200, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x81ff, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 
0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0200, 0x3400, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x33ff, 0x3401, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x33fe, 0x3400, 0x3402, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x33ff, 0x3401, 0x3800, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x3400, 0x37ff, 0x3801, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x37fe, 0x3800, 0x3802, 0x0000, 0x0000, 0x0000, 0x0000, 0x0400, 0x37ff, 0x3801, 0x3a02, 0x0000, 0x0000, 0x0000, 0x0000, 0x0400, 0x3800, 0x3a00, 0x4501, 0x0000, 0x0000, 0x0000, 0x0000, 0x0401, 0x39ff, 0x4500, 0x7800, 0x0000, 0x0000, 0x0000, 0x0000, 0x0600, 0x44ff, 0x77ff, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x1100, 0x77fe, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x43ff, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0009, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0009, 0x0200, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0009, 0x0200, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x01ff, 0x0000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0xb400, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0xb3ff, 0xb401, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0xb3fe, 0xb400, 0xb402, 0x0000, 0x0000, 0x0000, 0x0000, 0x8200, 0xb3ff, 0xb401, 0xb800, 0x0000, 0x0000, 0x0000, 0x0000, 0x8200, 0xb400, 0xb7ff, 0xb801, 0x0000, 0x0000, 0x0000, 0x0000, 0x8200, 0xb7fe, 0xb800, 0xb802, 0x0000, 0x0000, 0x0000, 0x0000, 0x8400, 0xb7ff, 0xb801, 0xba02, 0x0000, 0x0000, 0x0000, 0x0000, 0x8400, 0xb800, 0xba00, 0xc501, 0x0000, 0x0000, 0x0000, 0x0000, 0x8401, 0xb9ff, 0xc500, 0xf800, 0x0000, 0x0000, 0x0000, 0x0000, 0x8600, 0xc4ff, 0xf7ff, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0x9100, 0xf7fe, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xc3ff, 0xfc00, 0xff23, 0xfe00, 
0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0x8012, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x8009, 0x83ff, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x8009, 0x8200, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x8009, 0x8200, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x81ff, 0x8000, 0x0000, 0x0400, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0200, 0x37fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x3400, 0x37ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x33ff, 0x3401, 0x3800, 0x0000, 0x0000, 0x0000, 0x0000, 0x33fe, 0x3400, 0x3402, 0x3bfe, 0x0000, 0x0000, 0x0000, 0x0000, 0x33ff, 0x3401, 0x3800, 0x3bff, 0x0000, 0x0000, 0x0000, 0x0000, 0x3400, 0x37ff, 0x3801, 0x3c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x37fe, 0x3800, 0x3802, 0x3dff, 0x0000, 0x0000, 0x0000, 0x0000, 0x37ff, 0x3801, 0x3a02, 0x48ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x3800, 0x3a00, 0x4501, 0x7bfe, 0x0000, 0x0000, 0x0000, 0x0000, 0x39ff, 0x4500, 0x7800, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x44ff, 0x77ff, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x77fe, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0012, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0009, 0x03ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0009, 0x0200, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0009, 0x0200, 0x0001, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x01ff, 0x0000, 0x8000, 0x8400, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0xb7fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0xb400, 0xb7ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8200, 0xb3ff, 0xb401, 0xb800, 0x0000, 0x0000, 0x0000, 0x0000, 0xb3fe, 0xb400, 0xb402, 0xbbfe, 0x0000, 0x0000, 0x0000, 0x0000, 0xb3ff, 0xb401, 0xb800, 0xbbff, 0x0000, 0x0000, 
0x0000, 0x0000, 0xb400, 0xb7ff, 0xb801, 0xbc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xb7fe, 0xb800, 0xb802, 0xbdff, 0x0000, 0x0000, 0x0000, 0x0000, 0xb7ff, 0xb801, 0xba02, 0xc8ff, 0x0000, 0x0000, 0x0000, 0x0000, 0xb800, 0xba00, 0xc501, 0xfbfe, 0x0000, 0x0000, 0x0000, 0x0000, 0xb9ff, 0xc500, 0xf800, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xc4ff, 0xf7ff, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xf7fe, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0x8012, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x8012, 0x83ff, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x8009, 0x83ff, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x8009, 0x8200, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8200, 0x8001, 0x0000, 0x0400, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0400, 0x37ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x37fe, 0x3800, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x3400, 0x37ff, 0x3801, 0x0000, 0x0000, 0x0000, 0x0000, 0x33ff, 0x3401, 0x3800, 0x3bff, 0x0000, 0x0000, 0x0000, 0x0000, 0x3400, 0x3402, 0x3bfe, 0x3c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3401, 0x3800, 0x3bff, 0x3c01, 0x0000, 0x0000, 0x0000, 0x0000, 0x37ff, 0x3801, 0x3c00, 0x3e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3800, 0x3802, 0x3dff, 0x4900, 0x0000, 0x0000, 0x0000, 0x0000, 0x3801, 0x3a02, 0x48ff, 0x7bff, 0x0000, 0x0000, 0x0000, 0x0000, 0x3a00, 0x4501, 0x7bfe, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x4500, 0x7800, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x77ff, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0012, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0012, 0x03ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0009, 0x03ff, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 
0x0009, 0x0200, 0x0001, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x0001, 0x8000, 0x8400, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8400, 0xb7ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0xb7fe, 0xb800, 0x0000, 0x0000, 0x0000, 0x0000, 0x8200, 0xb400, 0xb7ff, 0xb801, 0x0000, 0x0000, 0x0000, 0x0000, 0xb3ff, 0xb401, 0xb800, 0xbbff, 0x0000, 0x0000, 0x0000, 0x0000, 0xb400, 0xb402, 0xbbfe, 0xbc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xb401, 0xb800, 0xbbff, 0xbc01, 0x0000, 0x0000, 0x0000, 0x0000, 0xb7ff, 0xb801, 0xbc00, 0xbe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xb800, 0xb802, 0xbdff, 0xc900, 0x0000, 0x0000, 0x0000, 0x0000, 0xb801, 0xba02, 0xc8ff, 0xfbff, 0x0000, 0x0000, 0x0000, 0x0000, 0xba00, 0xc501, 0xfbfe, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xc500, 0xf800, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xf7ff, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0x8012, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x8012, 0x8400, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x8012, 0x83ff, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x8009, 0x83ff, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8200, 0x8001, 0x0000, 0x0401, 0x0000, 0x0000, 0x0000, 0x0000, 0x8001, 0x0000, 0x0400, 0x3800, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0400, 0x37ff, 0x3801, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x37fe, 0x3800, 0x3802, 0x0000, 0x0000, 0x0000, 0x0000, 0x3400, 0x37ff, 0x3801, 0x3c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3401, 0x3800, 0x3bff, 0x3c01, 0x0000, 0x0000, 0x0000, 0x0000, 0x3402, 0x3bfe, 0x3c00, 0x3c02, 0x0000, 0x0000, 0x0000, 0x0000, 0x3800, 0x3bff, 0x3c01, 0x3e02, 0x0000, 0x0000, 0x0000, 0x0000, 0x3801, 0x3c00, 0x3e00, 0x4901, 0x0000, 0x0000, 0x0000, 0x0000, 0x3802, 0x3dff, 0x4900, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3a02, 0x48ff, 0x7bff, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x4501, 0x7bfe, 
0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7800, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0012, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0012, 0x0400, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0012, 0x03ff, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0009, 0x03ff, 0x0001, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x0001, 0x8000, 0x8401, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x8000, 0x8400, 0xb800, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8400, 0xb7ff, 0xb801, 0x0000, 0x0000, 0x0000, 0x0000, 0x8200, 0xb7fe, 0xb800, 0xb802, 0x0000, 0x0000, 0x0000, 0x0000, 0xb400, 0xb7ff, 0xb801, 0xbc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xb401, 0xb800, 0xbbff, 0xbc01, 0x0000, 0x0000, 0x0000, 0x0000, 0xb402, 0xbbfe, 0xbc00, 0xbc02, 0x0000, 0x0000, 0x0000, 0x0000, 0xb800, 0xbbff, 0xbc01, 0xbe02, 0x0000, 0x0000, 0x0000, 0x0000, 0xb801, 0xbc00, 0xbe00, 0xc901, 0x0000, 0x0000, 0x0000, 0x0000, 0xb802, 0xbdff, 0xc900, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xba02, 0xc8ff, 0xfbff, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xc501, 0xfbfe, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xf800, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0x801b, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x8012, 0x85fe, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x8012, 0x8400, 0x8002, 0x0000, 0x0000, 0x0000, 0x0000, 0x8012, 0x83ff, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x83ff, 0x8001, 0x0000, 0x0600, 0x0000, 0x0000, 0x0000, 0x0000, 0x8001, 0x0000, 0x0401, 0x39ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0400, 0x3800, 0x3a00, 0x0000, 0x0000, 0x0000, 0x0000, 0x0400, 0x37ff, 0x3801, 0x3a02, 0x0000, 0x0000, 0x0000, 0x0000, 0x37fe, 0x3800, 0x3802, 0x3dff, 
0x0000, 0x0000, 0x0000, 0x0000, 0x37ff, 0x3801, 0x3c00, 0x3e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3800, 0x3bff, 0x3c01, 0x3e02, 0x0000, 0x0000, 0x0000, 0x0000, 0x3bfe, 0x3c00, 0x3c02, 0x4080, 0x0000, 0x0000, 0x0000, 0x0000, 0x3bff, 0x3c01, 0x3e02, 0x4b80, 0x0000, 0x0000, 0x0000, 0x0000, 0x3c00, 0x3e00, 0x4901, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3dff, 0x4900, 0x7c00, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x48ff, 0x7bff, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7bfe, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x001b, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0012, 0x05fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0012, 0x0400, 0x0002, 0x0000, 0x0000, 0x0000, 0x0000, 0x0012, 0x03ff, 0x0001, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x03ff, 0x0001, 0x8000, 0x8600, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x8000, 0x8401, 0xb9ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8400, 0xb800, 0xba00, 0x0000, 0x0000, 0x0000, 0x0000, 0x8400, 0xb7ff, 0xb801, 0xba02, 0x0000, 0x0000, 0x0000, 0x0000, 0xb7fe, 0xb800, 0xb802, 0xbdff, 0x0000, 0x0000, 0x0000, 0x0000, 0xb7ff, 0xb801, 0xbc00, 0xbe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xb800, 0xbbff, 0xbc01, 0xbe02, 0x0000, 0x0000, 0x0000, 0x0000, 0xbbfe, 0xbc00, 0xbc02, 0xc080, 0x0000, 0x0000, 0x0000, 0x0000, 0xbbff, 0xbc01, 0xbe02, 0xcb80, 0x0000, 0x0000, 0x0000, 0x0000, 0xbc00, 0xbe00, 0xc901, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xbdff, 0xc900, 0xfc00, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xc8ff, 0xfbff, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfbfe, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0x80b4, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x801b, 0x90ff, 0x0000, 0x0000, 
0x0000, 0x0000, 0xfe01, 0x8012, 0x85fe, 0x800a, 0x0000, 0x0000, 0x0000, 0x0000, 0x8012, 0x8400, 0x8002, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x83ff, 0x8001, 0x0000, 0x1100, 0x0000, 0x0000, 0x0000, 0x0000, 0x8001, 0x0000, 0x0600, 0x44ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0401, 0x39ff, 0x4500, 0x0000, 0x0000, 0x0000, 0x0000, 0x0400, 0x3800, 0x3a00, 0x4501, 0x0000, 0x0000, 0x0000, 0x0000, 0x37ff, 0x3801, 0x3a02, 0x48ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x3800, 0x3802, 0x3dff, 0x4900, 0x0000, 0x0000, 0x0000, 0x0000, 0x3801, 0x3c00, 0x3e00, 0x4901, 0x0000, 0x0000, 0x0000, 0x0000, 0x3bff, 0x3c01, 0x3e02, 0x4b80, 0x0000, 0x0000, 0x0000, 0x0000, 0x3c00, 0x3c02, 0x4080, 0x5640, 0x0000, 0x0000, 0x0000, 0x0000, 0x3c01, 0x3e02, 0x4b80, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3e00, 0x4901, 0x7c00, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x4900, 0x7c00, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7bff, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x00b4, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x001b, 0x10ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0012, 0x05fe, 0x000a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0012, 0x0400, 0x0002, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x03ff, 0x0001, 0x8000, 0x9100, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x8000, 0x8600, 0xc4ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8401, 0xb9ff, 0xc500, 0x0000, 0x0000, 0x0000, 0x0000, 0x8400, 0xb800, 0xba00, 0xc501, 0x0000, 0x0000, 0x0000, 0x0000, 0xb7ff, 0xb801, 0xba02, 0xc8ff, 0x0000, 0x0000, 0x0000, 0x0000, 0xb800, 0xb802, 0xbdff, 0xc900, 0x0000, 0x0000, 0x0000, 0x0000, 0xb801, 0xbc00, 0xbe00, 0xc901, 0x0000, 0x0000, 0x0000, 0x0000, 0xbbff, 0xbc01, 0xbe02, 0xcb80, 0x0000, 0x0000, 0x0000, 0x0000, 0xbc00, 0xbc02, 0xc080, 0xd640, 0x0000, 0x0000, 0x0000, 0x0000, 0xbc01, 0xbe02, 0xcb80, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 
0xbe00, 0xc901, 0xfc00, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xc900, 0xfc00, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfbff, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0xac7f, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x80b4, 0xc3fd, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x801b, 0x90ff, 0x9bff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8012, 0x85fe, 0x800a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8400, 0x8002, 0x0000, 0x43ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8001, 0x0000, 0x1100, 0x77fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0600, 0x44ff, 0x77ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0401, 0x39ff, 0x4500, 0x7800, 0x0000, 0x0000, 0x0000, 0x0000, 0x3800, 0x3a00, 0x4501, 0x7bfe, 0x0000, 0x0000, 0x0000, 0x0000, 0x3801, 0x3a02, 0x48ff, 0x7bff, 0x0000, 0x0000, 0x0000, 0x0000, 0x3802, 0x3dff, 0x4900, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3c00, 0x3e00, 0x4901, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3c01, 0x3e02, 0x4b80, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3c02, 0x4080, 0x5640, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3e02, 0x4b80, 0x7c00, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x4901, 0x7c00, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x2c7f, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x00b4, 0x43fd, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x001b, 0x10ff, 0x1bff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0012, 0x05fe, 0x000a, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0400, 0x0002, 0x8000, 0xc3ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x8000, 0x9100, 0xf7fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8600, 0xc4ff, 0xf7ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8401, 0xb9ff, 
0xc500, 0xf800, 0x0000, 0x0000, 0x0000, 0x0000, 0xb800, 0xba00, 0xc501, 0xfbfe, 0x0000, 0x0000, 0x0000, 0x0000, 0xb801, 0xba02, 0xc8ff, 0xfbff, 0x0000, 0x0000, 0x0000, 0x0000, 0xb802, 0xbdff, 0xc900, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xbc00, 0xbe00, 0xc901, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xbc01, 0xbe02, 0xcb80, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xbc02, 0xc080, 0xd640, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xbe02, 0xcb80, 0xfc00, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xc901, 0xfc00, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0xac7f, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x80b4, 0xc3fd, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0x801b, 0x90ff, 0x9bff, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x85fe, 0x800a, 0x0000, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x8002, 0x0000, 0x43ff, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x1100, 0x77fe, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x0600, 0x44ff, 0x77ff, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x39ff, 0x4500, 0x7800, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3a00, 0x4501, 0x7bfe, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3a02, 0x48ff, 0x7bff, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3dff, 0x4900, 0x7c00, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3e00, 0x4901, 0x7c00, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3e02, 0x4b80, 0x7c00, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x4080, 0x5640, 0x7c00, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x4b80, 0x7c00, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x7c00, 
0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x2c7f, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x00b4, 0x43fd, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x001b, 0x10ff, 0x1bff, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x05fe, 0x000a, 0x8000, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0x0002, 0x8000, 0xc3ff, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x9100, 0xf7fe, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0x8600, 0xc4ff, 0xf7ff, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xb9ff, 0xc500, 0xf800, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xba00, 0xc501, 0xfbfe, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xba02, 0xc8ff, 0xfbff, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xbdff, 0xc900, 0xfc00, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xbe00, 0xc901, 0xfc00, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xbe02, 0xcb80, 0xfc00, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xc080, 0xd640, 0xfc00, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xcb80, 0xfc00, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0xfc00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0xac7f, 0xfc00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x80b4, 0xc3fd, 0xfc00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x90ff, 0x9bff, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x800a, 0x0000, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x43ff, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x1100, 0x77fe, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x44ff, 0x77ff, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x4500, 0x7800, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x4501, 0x7bfe, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x48ff, 0x7bff, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x4900, 0x7c00, 0x7c00, 0x7f23, 0x0000, 0x0000, 
0x0000, 0x0000, 0x4901, 0x7c00, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x4b80, 0x7c00, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x5640, 0x7c00, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7c00, 0x7f23, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x2c7f, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x00b4, 0x43fd, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x10ff, 0x1bff, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x000a, 0x8000, 0xfc00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0xc3ff, 0xfc00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x9100, 0xf7fe, 0xfc00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xc4ff, 0xf7ff, 0xfc00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xc500, 0xf800, 0xfc00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xc501, 0xfbfe, 0xfc00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xc8ff, 0xfbff, 0xfc00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xc900, 0xfc00, 0xfc00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xc901, 0xfc00, 0xfc00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xcb80, 0xfc00, 0xfc00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xd640, 0xfc00, 0xfc00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xfc00, 0xff23, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0xfc00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0xac7f, 0xfc00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0xc3fd, 0xfc00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x9bff, 0x7e00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 
0x0000, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x43ff, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x77fe, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x77ff, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7800, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7bfe, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7bff, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7f23, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x2c7f, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x43fd, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x1bff, 0x7e00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0xfc00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0xc3ff, 0xfc00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0xf7fe, 0xfc00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0xf7ff, 0xfc00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0xf800, 0xfc00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfbfe, 0xfc00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfbff, 0xfc00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xfc00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xfc00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xfc00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xfc00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0x7f23, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0x7f23, 
0xff23, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7f23, 0x7f23, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0x7f23, 0x7e00, 0x7f23, 
0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0xff23, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7f23, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 
0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0x7f23, 0x7e01, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x7f23, 0x7e01, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0009, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0009, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0009, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0012, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0012, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0012, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x001b, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x00b4, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x2c7f, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 
0x7e00, 0x7f23, 0x7e01, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8009, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8009, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8009, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8012, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8012, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8012, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x801b, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x80b4, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0xac7f, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0xfe01, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0000, 0x01ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0009, 0x0200, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0009, 0x0200, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0009, 0x03ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0012, 0x03ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 
0x0012, 0x0400, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0012, 0x05fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x001b, 0x10ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x00b4, 0x43fd, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x2c7f, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8000, 0x81ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8009, 0x8200, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8009, 0x8200, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8009, 0x83ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8012, 0x83ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8012, 0x8400, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8012, 0x85fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x801b, 0x90ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x80b4, 0xc3fd, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0xac7f, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0xff23, 0xfe01, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0xfe01, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8000, 0x8000, 0x0000, 
0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0000, 0x01ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0009, 0x0200, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0009, 0x0200, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0009, 0x03ff, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0012, 0x03ff, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0012, 0x0400, 0x0002, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0012, 0x05fe, 0x000a, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x001b, 0x10ff, 0x1bff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x00b4, 0x43fd, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x2c7f, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0000, 0x0000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8000, 0x81ff, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8009, 0x8200, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8009, 0x8200, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8009, 0x83ff, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8012, 0x83ff, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8012, 0x8400, 0x8002, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8012, 0x85fe, 0x800a, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x801b, 0x90ff, 0x9bff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x80b4, 0xc3fd, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0xac7f, 0xfc00, 0xff23, 0x0000, 0x0000, 
0x0000, 0x0000, 0x7e01, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x0000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x01ff, 0x0000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0009, 0x0200, 0x0001, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0009, 0x0200, 0x0001, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0009, 0x03ff, 0x0001, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0012, 0x03ff, 0x0001, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0012, 0x0400, 0x0002, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0012, 0x05fe, 0x000a, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x001b, 0x10ff, 0x1bff, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x00b4, 0x43fd, 0x7c00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x2c7f, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x0000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0000, 0x0000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x81ff, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8009, 0x8200, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 
0x8009, 0x8200, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8009, 0x83ff, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8012, 0x83ff, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8012, 0x8400, 0x8002, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8012, 0x85fe, 0x800a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x801b, 0x90ff, 0x9bff, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x80b4, 0xc3fd, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xac7f, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x0000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x8000, 0x8200, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0x0000, 0x0000, 0x0000, 0x0000, 0x01ff, 0x0000, 0x8000, 0x8400, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x0001, 0x8000, 0x8400, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x0001, 0x8000, 0x8401, 0x0000, 0x0000, 0x0000, 0x0000, 0x03ff, 0x0001, 0x8000, 0x8600, 0x0000, 0x0000, 0x0000, 0x0000, 0x03ff, 0x0001, 0x8000, 0x9100, 0x0000, 0x0000, 0x0000, 0x0000, 0x0400, 0x0002, 0x8000, 0xc3ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x05fe, 0x000a, 0x8000, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0x10ff, 0x1bff, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x43fd, 0x7c00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x0000, 
0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x0000, 0x81ff, 0x8000, 0x0000, 0x0400, 0x0000, 0x0000, 0x0000, 0x0000, 0x8200, 0x8001, 0x0000, 0x0400, 0x0000, 0x0000, 0x0000, 0x0000, 0x8200, 0x8001, 0x0000, 0x0401, 0x0000, 0x0000, 0x0000, 0x0000, 0x83ff, 0x8001, 0x0000, 0x0600, 0x0000, 0x0000, 0x0000, 0x0000, 0x83ff, 0x8001, 0x0000, 0x1100, 0x0000, 0x0000, 0x0000, 0x0000, 0x8400, 0x8002, 0x0000, 0x43ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x85fe, 0x800a, 0x0000, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x90ff, 0x9bff, 0x7e00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xc3fd, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0x0009, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x0000, 0x01ff, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x8000, 0x8200, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x8000, 0xb3fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0xb3ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0xb400, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0xb7fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8400, 0xb7ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x8000, 0x8400, 0xb800, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x8000, 0x8401, 0xb9ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x8000, 0x8600, 0xc4ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x8000, 0x9100, 0xf7fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x0002, 0x8000, 0xc3ff, 0xfc00, 
0x0000, 0x0000, 0x0000, 0x0000, 0x000a, 0x8000, 0xfc00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x1bff, 0x7e00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8009, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8000, 0x81ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x33fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0200, 0x33ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0200, 0x3400, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0200, 0x37fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0400, 0x37ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8001, 0x0000, 0x0400, 0x3800, 0x0000, 0x0000, 0x0000, 0x0000, 0x8001, 0x0000, 0x0401, 0x39ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8001, 0x0000, 0x0600, 0x44ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8001, 0x0000, 0x1100, 0x77fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x8002, 0x0000, 0x43ff, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x800a, 0x0000, 0x7c00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x9bff, 0x7e00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0x0009, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x0009, 0x0200, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x0000, 0x01ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0xb3ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0xb3fe, 0xb400, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0xb3ff, 0xb401, 0x0000, 0x0000, 
0x0000, 0x0000, 0x8000, 0x8200, 0xb400, 0xb7ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0xb7fe, 0xb800, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8400, 0xb7ff, 0xb801, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8400, 0xb800, 0xba00, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8401, 0xb9ff, 0xc500, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8600, 0xc4ff, 0xf7ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x9100, 0xf7fe, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0xc3ff, 0xfc00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0xfc00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8009, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8009, 0x8200, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8000, 0x81ff, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0200, 0x33ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x33fe, 0x3400, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x33ff, 0x3401, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x3400, 0x37ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x37fe, 0x3800, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0400, 0x37ff, 0x3801, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0400, 0x3800, 0x3a00, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0401, 0x39ff, 0x4500, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0600, 0x44ff, 0x77ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x1100, 0x77fe, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x43ff, 0x7c00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0x0009, 0x0000, 0x0000, 0x0000, 0x0000, 
0xff23, 0xfe01, 0x0009, 0x0200, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x0009, 0x0200, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x01ff, 0x0000, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0xb400, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0xb3ff, 0xb401, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0xb3fe, 0xb400, 0xb402, 0x0000, 0x0000, 0x0000, 0x0000, 0x8200, 0xb3ff, 0xb401, 0xb800, 0x0000, 0x0000, 0x0000, 0x0000, 0x8200, 0xb400, 0xb7ff, 0xb801, 0x0000, 0x0000, 0x0000, 0x0000, 0x8200, 0xb7fe, 0xb800, 0xb802, 0x0000, 0x0000, 0x0000, 0x0000, 0x8400, 0xb7ff, 0xb801, 0xba02, 0x0000, 0x0000, 0x0000, 0x0000, 0x8400, 0xb800, 0xba00, 0xc501, 0x0000, 0x0000, 0x0000, 0x0000, 0x8401, 0xb9ff, 0xc500, 0xf800, 0x0000, 0x0000, 0x0000, 0x0000, 0x8600, 0xc4ff, 0xf7ff, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0x9100, 0xf7fe, 0xfc00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xc3ff, 0xfc00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8009, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8009, 0x8200, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8009, 0x8200, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x81ff, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8000, 0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0200, 0x3400, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x33ff, 0x3401, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x33fe, 0x3400, 0x3402, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x33ff, 0x3401, 0x3800, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x3400, 0x37ff, 0x3801, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x37fe, 0x3800, 0x3802, 0x0000, 0x0000, 0x0000, 0x0000, 0x0400, 0x37ff, 0x3801, 0x3a02, 0x0000, 0x0000, 0x0000, 0x0000, 0x0400, 0x3800, 0x3a00, 0x4501, 0x0000, 0x0000, 0x0000, 0x0000, 0x0401, 0x39ff, 
0x4500, 0x7800, 0x0000, 0x0000, 0x0000, 0x0000, 0x0600, 0x44ff, 0x77ff, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x1100, 0x77fe, 0x7c00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x43ff, 0x7c00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0x0012, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x0009, 0x03ff, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x0009, 0x0200, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0009, 0x0200, 0x0001, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x01ff, 0x0000, 0x8000, 0x8400, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0xb7fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0xb400, 0xb7ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8200, 0xb3ff, 0xb401, 0xb800, 0x0000, 0x0000, 0x0000, 0x0000, 0xb3fe, 0xb400, 0xb402, 0xbbfe, 0x0000, 0x0000, 0x0000, 0x0000, 0xb3ff, 0xb401, 0xb800, 0xbbff, 0x0000, 0x0000, 0x0000, 0x0000, 0xb400, 0xb7ff, 0xb801, 0xbc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xb7fe, 0xb800, 0xb802, 0xbdff, 0x0000, 0x0000, 0x0000, 0x0000, 0xb7ff, 0xb801, 0xba02, 0xc8ff, 0x0000, 0x0000, 0x0000, 0x0000, 0xb800, 0xba00, 0xc501, 0xfbfe, 0x0000, 0x0000, 0x0000, 0x0000, 0xb9ff, 0xc500, 0xf800, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xc4ff, 0xf7ff, 0xfc00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xf7fe, 0xfc00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8012, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8009, 0x83ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8009, 0x8200, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x8009, 0x8200, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x81ff, 0x8000, 0x0000, 0x0400, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0200, 0x37fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x3400, 0x37ff, 
0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x33ff, 0x3401, 0x3800, 0x0000, 0x0000, 0x0000, 0x0000, 0x33fe, 0x3400, 0x3402, 0x3bfe, 0x0000, 0x0000, 0x0000, 0x0000, 0x33ff, 0x3401, 0x3800, 0x3bff, 0x0000, 0x0000, 0x0000, 0x0000, 0x3400, 0x37ff, 0x3801, 0x3c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x37fe, 0x3800, 0x3802, 0x3dff, 0x0000, 0x0000, 0x0000, 0x0000, 0x37ff, 0x3801, 0x3a02, 0x48ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x3800, 0x3a00, 0x4501, 0x7bfe, 0x0000, 0x0000, 0x0000, 0x0000, 0x39ff, 0x4500, 0x7800, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x44ff, 0x77ff, 0x7c00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x77fe, 0x7c00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0x0012, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x0012, 0x03ff, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x0009, 0x03ff, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0009, 0x0200, 0x0001, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x0001, 0x8000, 0x8400, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8400, 0xb7ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8200, 0xb7fe, 0xb800, 0x0000, 0x0000, 0x0000, 0x0000, 0x8200, 0xb400, 0xb7ff, 0xb801, 0x0000, 0x0000, 0x0000, 0x0000, 0xb3ff, 0xb401, 0xb800, 0xbbff, 0x0000, 0x0000, 0x0000, 0x0000, 0xb400, 0xb402, 0xbbfe, 0xbc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xb401, 0xb800, 0xbbff, 0xbc01, 0x0000, 0x0000, 0x0000, 0x0000, 0xb7ff, 0xb801, 0xbc00, 0xbe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xb800, 0xb802, 0xbdff, 0xc900, 0x0000, 0x0000, 0x0000, 0x0000, 0xb801, 0xba02, 0xc8ff, 0xfbff, 0x0000, 0x0000, 0x0000, 0x0000, 0xba00, 0xc501, 0xfbfe, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xc500, 0xf800, 0xfc00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xf7ff, 0xfc00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 
0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8012, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8012, 0x83ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8009, 0x83ff, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x8009, 0x8200, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8200, 0x8001, 0x0000, 0x0400, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x0000, 0x0400, 0x37ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x37fe, 0x3800, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x3400, 0x37ff, 0x3801, 0x0000, 0x0000, 0x0000, 0x0000, 0x33ff, 0x3401, 0x3800, 0x3bff, 0x0000, 0x0000, 0x0000, 0x0000, 0x3400, 0x3402, 0x3bfe, 0x3c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3401, 0x3800, 0x3bff, 0x3c01, 0x0000, 0x0000, 0x0000, 0x0000, 0x37ff, 0x3801, 0x3c00, 0x3e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3800, 0x3802, 0x3dff, 0x4900, 0x0000, 0x0000, 0x0000, 0x0000, 0x3801, 0x3a02, 0x48ff, 0x7bff, 0x0000, 0x0000, 0x0000, 0x0000, 0x3a00, 0x4501, 0x7bfe, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x4500, 0x7800, 0x7c00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x77ff, 0x7c00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0x0012, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x0012, 0x0400, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x0012, 0x03ff, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0009, 0x03ff, 0x0001, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x0001, 0x8000, 0x8401, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x8000, 0x8400, 0xb800, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8400, 0xb7ff, 0xb801, 0x0000, 0x0000, 0x0000, 0x0000, 0x8200, 0xb7fe, 0xb800, 0xb802, 0x0000, 0x0000, 0x0000, 0x0000, 0xb400, 0xb7ff, 0xb801, 0xbc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xb401, 0xb800, 0xbbff, 0xbc01, 0x0000, 0x0000, 0x0000, 0x0000, 0xb402, 0xbbfe, 0xbc00, 0xbc02, 0x0000, 0x0000, 0x0000, 0x0000, 0xb800, 0xbbff, 0xbc01, 0xbe02, 0x0000, 0x0000, 0x0000, 0x0000, 
0xb801, 0xbc00, 0xbe00, 0xc901, 0x0000, 0x0000, 0x0000, 0x0000, 0xb802, 0xbdff, 0xc900, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xba02, 0xc8ff, 0xfbff, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xc501, 0xfbfe, 0xfc00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xf800, 0xfc00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x8012, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8012, 0x8400, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8012, 0x83ff, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x8009, 0x83ff, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8200, 0x8001, 0x0000, 0x0401, 0x0000, 0x0000, 0x0000, 0x0000, 0x8001, 0x0000, 0x0400, 0x3800, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0400, 0x37ff, 0x3801, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x37fe, 0x3800, 0x3802, 0x0000, 0x0000, 0x0000, 0x0000, 0x3400, 0x37ff, 0x3801, 0x3c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3401, 0x3800, 0x3bff, 0x3c01, 0x0000, 0x0000, 0x0000, 0x0000, 0x3402, 0x3bfe, 0x3c00, 0x3c02, 0x0000, 0x0000, 0x0000, 0x0000, 0x3800, 0x3bff, 0x3c01, 0x3e02, 0x0000, 0x0000, 0x0000, 0x0000, 0x3801, 0x3c00, 0x3e00, 0x4901, 0x0000, 0x0000, 0x0000, 0x0000, 0x3802, 0x3dff, 0x4900, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3a02, 0x48ff, 0x7bff, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x4501, 0x7bfe, 0x7c00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7800, 0x7c00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0x001b, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x0012, 0x05fe, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x0012, 0x0400, 0x0002, 0x0000, 0x0000, 0x0000, 0x0000, 0x0012, 0x03ff, 0x0001, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x03ff, 0x0001, 0x8000, 0x8600, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x8000, 
0x8401, 0xb9ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8400, 0xb800, 0xba00, 0x0000, 0x0000, 0x0000, 0x0000, 0x8400, 0xb7ff, 0xb801, 0xba02, 0x0000, 0x0000, 0x0000, 0x0000, 0xb7fe, 0xb800, 0xb802, 0xbdff, 0x0000, 0x0000, 0x0000, 0x0000, 0xb7ff, 0xb801, 0xbc00, 0xbe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xb800, 0xbbff, 0xbc01, 0xbe02, 0x0000, 0x0000, 0x0000, 0x0000, 0xbbfe, 0xbc00, 0xbc02, 0xc080, 0x0000, 0x0000, 0x0000, 0x0000, 0xbbff, 0xbc01, 0xbe02, 0xcb80, 0x0000, 0x0000, 0x0000, 0x0000, 0xbc00, 0xbe00, 0xc901, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xbdff, 0xc900, 0xfc00, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xc8ff, 0xfbff, 0xfc00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfbfe, 0xfc00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x801b, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x8012, 0x85fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8012, 0x8400, 0x8002, 0x0000, 0x0000, 0x0000, 0x0000, 0x8012, 0x83ff, 0x8001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x83ff, 0x8001, 0x0000, 0x0600, 0x0000, 0x0000, 0x0000, 0x0000, 0x8001, 0x0000, 0x0401, 0x39ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0400, 0x3800, 0x3a00, 0x0000, 0x0000, 0x0000, 0x0000, 0x0400, 0x37ff, 0x3801, 0x3a02, 0x0000, 0x0000, 0x0000, 0x0000, 0x37fe, 0x3800, 0x3802, 0x3dff, 0x0000, 0x0000, 0x0000, 0x0000, 0x37ff, 0x3801, 0x3c00, 0x3e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3800, 0x3bff, 0x3c01, 0x3e02, 0x0000, 0x0000, 0x0000, 0x0000, 0x3bfe, 0x3c00, 0x3c02, 0x4080, 0x0000, 0x0000, 0x0000, 0x0000, 0x3bff, 0x3c01, 0x3e02, 0x4b80, 0x0000, 0x0000, 0x0000, 0x0000, 0x3c00, 0x3e00, 0x4901, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3dff, 0x4900, 0x7c00, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x48ff, 0x7bff, 0x7c00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7bfe, 0x7c00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 0xfe00, 0xff23, 
0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0x00b4, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x001b, 0x10ff, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x0012, 0x05fe, 0x000a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0012, 0x0400, 0x0002, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x03ff, 0x0001, 0x8000, 0x9100, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x8000, 0x8600, 0xc4ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8401, 0xb9ff, 0xc500, 0x0000, 0x0000, 0x0000, 0x0000, 0x8400, 0xb800, 0xba00, 0xc501, 0x0000, 0x0000, 0x0000, 0x0000, 0xb7ff, 0xb801, 0xba02, 0xc8ff, 0x0000, 0x0000, 0x0000, 0x0000, 0xb800, 0xb802, 0xbdff, 0xc900, 0x0000, 0x0000, 0x0000, 0x0000, 0xb801, 0xbc00, 0xbe00, 0xc901, 0x0000, 0x0000, 0x0000, 0x0000, 0xbbff, 0xbc01, 0xbe02, 0xcb80, 0x0000, 0x0000, 0x0000, 0x0000, 0xbc00, 0xbc02, 0xc080, 0xd640, 0x0000, 0x0000, 0x0000, 0x0000, 0xbc01, 0xbe02, 0xcb80, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xbe00, 0xc901, 0xfc00, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xc900, 0xfc00, 0xfc00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfbff, 0xfc00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0x80b4, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x801b, 0x90ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x8012, 0x85fe, 0x800a, 0x0000, 0x0000, 0x0000, 0x0000, 0x8012, 0x8400, 0x8002, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x83ff, 0x8001, 0x0000, 0x1100, 0x0000, 0x0000, 0x0000, 0x0000, 0x8001, 0x0000, 0x0600, 0x44ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0401, 0x39ff, 0x4500, 0x0000, 0x0000, 0x0000, 0x0000, 0x0400, 0x3800, 0x3a00, 0x4501, 0x0000, 0x0000, 0x0000, 0x0000, 0x37ff, 0x3801, 0x3a02, 0x48ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x3800, 0x3802, 0x3dff, 0x4900, 0x0000, 0x0000, 0x0000, 0x0000, 0x3801, 0x3c00, 0x3e00, 0x4901, 0x0000, 0x0000, 
0x0000, 0x0000, 0x3bff, 0x3c01, 0x3e02, 0x4b80, 0x0000, 0x0000, 0x0000, 0x0000, 0x3c00, 0x3c02, 0x4080, 0x5640, 0x0000, 0x0000, 0x0000, 0x0000, 0x3c01, 0x3e02, 0x4b80, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3e00, 0x4901, 0x7c00, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x4900, 0x7c00, 0x7c00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7bff, 0x7c00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0x2c7f, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x00b4, 0x43fd, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x001b, 0x10ff, 0x1bff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0012, 0x05fe, 0x000a, 0x8000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0400, 0x0002, 0x8000, 0xc3ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x8000, 0x9100, 0xf7fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x8600, 0xc4ff, 0xf7ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8401, 0xb9ff, 0xc500, 0xf800, 0x0000, 0x0000, 0x0000, 0x0000, 0xb800, 0xba00, 0xc501, 0xfbfe, 0x0000, 0x0000, 0x0000, 0x0000, 0xb801, 0xba02, 0xc8ff, 0xfbff, 0x0000, 0x0000, 0x0000, 0x0000, 0xb802, 0xbdff, 0xc900, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xbc00, 0xbe00, 0xc901, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xbc01, 0xbe02, 0xcb80, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xbc02, 0xc080, 0xd640, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xbe02, 0xcb80, 0xfc00, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xc901, 0xfc00, 0xfc00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xfc00, 0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0xac7f, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0x80b4, 0xc3fd, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x801b, 0x90ff, 0x9bff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8012, 0x85fe, 0x800a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 
0x8400, 0x8002, 0x0000, 0x43ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x8001, 0x0000, 0x1100, 0x77fe, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0600, 0x44ff, 0x77ff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0401, 0x39ff, 0x4500, 0x7800, 0x0000, 0x0000, 0x0000, 0x0000, 0x3800, 0x3a00, 0x4501, 0x7bfe, 0x0000, 0x0000, 0x0000, 0x0000, 0x3801, 0x3a02, 0x48ff, 0x7bff, 0x0000, 0x0000, 0x0000, 0x0000, 0x3802, 0x3dff, 0x4900, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3c00, 0x3e00, 0x4901, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3c01, 0x3e02, 0x4b80, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3c02, 0x4080, 0x5640, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3e02, 0x4b80, 0x7c00, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x4901, 0x7c00, 0x7c00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7c00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x2c7f, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x00b4, 0x43fd, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x001b, 0x10ff, 0x1bff, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x05fe, 0x000a, 0x8000, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0x0002, 0x8000, 0xc3ff, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x9100, 0xf7fe, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0x8600, 0xc4ff, 0xf7ff, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xb9ff, 0xc500, 0xf800, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xba00, 0xc501, 0xfbfe, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xba02, 0xc8ff, 0xfbff, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xbdff, 0xc900, 0xfc00, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xbe00, 0xc901, 0xfc00, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xbe02, 0xcb80, 0xfc00, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xc080, 0xd640, 0xfc00, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0xcb80, 0xfc00, 0xfc00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xfc00, 
0x7f23, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0xac7f, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0x80b4, 0xc3fd, 0xfc00, 0x0000, 0x0000, 0x0000, 0x0000, 0x801b, 0x90ff, 0x9bff, 0x7e00, 0x0000, 0x0000, 0x0000, 0x0000, 0x85fe, 0x800a, 0x0000, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x8002, 0x0000, 0x43ff, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x1100, 0x77fe, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x0600, 0x44ff, 0x77ff, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x39ff, 0x4500, 0x7800, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3a00, 0x4501, 0x7bfe, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3a02, 0x48ff, 0x7bff, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3dff, 0x4900, 0x7c00, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3e00, 0x4901, 0x7c00, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x3e02, 0x4b80, 0x7c00, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x4080, 0x5640, 0x7c00, 0x7c00, 0x0000, 0x0000, 0x0000, 0x0000, 0x4b80, 0x7c00, 0x7c00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7c00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0x7c00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x2c7f, 0x7c00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x00b4, 0x43fd, 0x7c00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x10ff, 0x1bff, 0x7e00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x000a, 0x8000, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0xc3ff, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x9100, 0xf7fe, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xc4ff, 0xf7ff, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xc500, 0xf800, 0xfc00, 0xff23, 
0x0000, 0x0000, 0x0000, 0x0000, 0xc501, 0xfbfe, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xc8ff, 0xfbff, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xc900, 0xfc00, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xc901, 0xfc00, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xcb80, 0xfc00, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xd640, 0xfc00, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xfc00, 0x7f23, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0x7f23, 0x7e00, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0xac7f, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x80b4, 0xc3fd, 0xfc00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x90ff, 0x9bff, 0x7e00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x800a, 0x0000, 0x7c00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x43ff, 0x7c00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x1100, 0x77fe, 0x7c00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x44ff, 0x77ff, 0x7c00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x4500, 0x7800, 0x7c00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x4501, 0x7bfe, 0x7c00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x48ff, 0x7bff, 0x7c00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x4900, 0x7c00, 0x7c00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x4901, 0x7c00, 0x7c00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x4b80, 0x7c00, 0x7c00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x5640, 0x7c00, 0x7c00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7c00, 0xff23, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe00, 0xff23, 0xfe01, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0x7c00, 0xff23, 0xfe00, 0x0000, 0x0000, 
0x0000, 0x0000, 0x2c7f, 0x7c00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x43fd, 0x7c00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x1bff, 0x7e00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xc3ff, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xf7fe, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xf7ff, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xf800, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfbfe, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfbff, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0x7f23, 0xff23, 0x7f23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e00, 0x7f23, 0x7e01, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xac7f, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0xc3fd, 0xfc00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x9bff, 0x7e00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x43ff, 0x7c00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x77fe, 0x7c00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x77ff, 0x7c00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7800, 0x7c00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7bfe, 0x7c00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7bff, 0x7c00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7c00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7c00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 
0x7c00, 0x7c00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0x7c00, 0xff23, 0xfe00, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 0xff23, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xff23, 0xff23, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe00, 0xff23, 0xfe01, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xff23, 0xfe01, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfe01, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0xff23, 0x7f23, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0x7f23, 0x7e01, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7f23, 0x7e01, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e01, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0xfc00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7e00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 
0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, 0x7c00, 0xff23, 0xfe00, 0xff23, 0x0000, 0x0000, 0x0000, 0x0000, }; const unsigned kExpectedCount_NEON_fmul_4H = 1444; #endif // VIXL_SIM_FMUL_4H_TRACE_AARCH64_H_
MerryMage/dynarmic
externals/vixl/vixl/test/aarch64/traces/sim-fmul-4h-trace-aarch64.h
C
gpl-2.0
97,339
<?php $postcounter = 1; if (have_posts()) : ?> <?php while (have_posts()) : $postcounter = $postcounter + 1; the_post(); $do_not_duplicate = $post->ID; $the_post_ids = get_the_ID(); ?> <div class="post post-<?php echo $postCount ;?>"><?php include (TEMPLATEPATH . '/thumb.php'); ?> <div class="posttitle"><h2><a href="<?php the_permalink() ?>" rel="bookmark" title="<?php the_title(); ?>"><?php the_title(); ?></a></h2></div> <div class="tags"><?php the_time('l, F jS Y') ?> &#124; <?php the_category(', ') ?></div> <?php echo excerpt(20); ?> <div style="clear: both"></div></div> <?php endwhile; ?> <?php include (TEMPLATEPATH . '/ads.php'); ?> <?php include (TEMPLATEPATH . '/navigator.php'); ?> <?php else : ?> <div class="post"><h2>Not Found</h2>Sorry, but you are looking for something that isn't here.</div> <?php endif; ?>
seandrome/WPIFRAME2
wp-content/themes/doa-ibu/loop.php
PHP
gpl-2.0
832
/* support for ap->rresolv missing */ /* Modifications: 1998-07-01 - Arnaldo Carvalho de Melo - GNU gettext instead of catgets, snprintf instead of sprintf */ #include "config.h" #if HAVE_AFIPX #include <asm/types.h> #include <sys/types.h> #include <sys/socket.h> #if (__GLIBC__ > 2) || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 1) #include <netipx/ipx.h> #else #include "ipx.h" #endif #include <stdlib.h> #include <stdio.h> #include <errno.h> #include <ctype.h> #include <string.h> #include <unistd.h> #include <netinet/in.h> #include "version.h" #include "net-support.h" #include "pathnames.h" #include "intl.h" #include "util.h" /* UGLY */ int IPX_rprint(int options) { /* int ext = options & FLAG_EXT; */ int numeric = options & FLAG_NUM_HOST; char buff[1024]; char net[128], router_net[128]; char router_node[128]; int num; FILE *fp; const struct aftype *ap; struct sockaddr_storage sas; fp = fopen(_PATH_PROCNET_IPX_ROUTE1, "r"); if (!fp) { fp = fopen(_PATH_PROCNET_IPX_ROUTE2, "r"); } if (!fp) { perror(NULL); printf(_("IPX routing not in file %s or %s found.\n"), _PATH_PROCNET_IPX_ROUTE1, _PATH_PROCNET_IPX_ROUTE2); return 1; } if ((ap = get_afntype(AF_IPX)) == NULL) { EINTERN("lib/ipx_rt.c", "AF_IPX missing"); return (-1); } printf(_("Kernel IPX routing table\n")); /* xxx */ printf(_("Destination Router Net Router Node\n")); if (fgets(buff, 1023, fp)) /* eat line */; while (fgets(buff, 1023, fp)) { num = sscanf(buff, "%s %s %s", net, router_net, router_node); if (num < 3) continue; /* Fetch and resolve the Destination */ (void) ap->input(1, net, &sas); safe_strncpy(net, ap->sprint(&sas, numeric), sizeof(net)); /* Fetch and resolve the Router Net */ (void) ap->input(1, router_net, &sas); safe_strncpy(router_net, ap->sprint(&sas, numeric), sizeof(router_net)); /* Fetch and resolve the Router Node */ (void) ap->input(2, router_node, &sas); safe_strncpy(router_node, ap->sprint(&sas, numeric), sizeof(router_node)); printf("%-25s %-25s %-25s\n", net, router_net, router_node); } 
(void) fclose(fp); return (0); } #endif /* HAVE_AFIPX */
pexip/os-net-tools
lib/ipx_gr.c
C
gpl-2.0
2,248
#ifndef __DEFINES_H__ #define __DEFINES_H__ #endif
Cpasjuste/psms
sdl/defines.h
C
gpl-2.0
68
<?php /** * @Project NUKEVIET 4.x * @Author VINADES.,JSC (contact@vinades.vn) * @Copyright (C) 2014 VINADES.,JSC. All rights reserved * @License GNU/GPL version 2 or any later version * @Createdate Sat, 08 Feb 2014 06:33:39 GMT */ if (!defined('NV_MAINFILE')) { die('Stop!!!'); } if (!nv_function_exists('nv_facebook_comment_box_blocks')) { /** * nv_block_config_facebook_comment_box_blocks() * * @param mixed $module * @param mixed $data_block * @param mixed $lang_block * @return */ function nv_block_config_facebook_comment_box_blocks($module, $data_block, $lang_block) { $html = ''; $html .= '<div class="form-group">'; $html .= ' <label class="control-label col-sm-6">' . $lang_block['facebookappid'] . ':</label>'; $html .= ' <div class="col-sm-18"><input class="form-control" type="text" name="config_facebookappid" value="' . $data_block['facebookappid'] . '"/></div>'; $html .= '</div>'; $html .= '<div class="form-group">'; $html .= ' <label class="control-label col-sm-6">' . $lang_block['width'] . ':</label>'; $html .= ' <div class="col-sm-18"><input class="form-control" type="text" name="config_width" value="' . $data_block['width'] . '"/></div>'; $html .= '</div>'; $html .= '<div class="form-group">'; $html .= ' <label class="control-label col-sm-6">' . $lang_block['numpost'] . ':</label>'; $html .= ' <div class="col-sm-18"><input class="form-control" type="text" name="config_numpost" value="' . $data_block['numpost'] . '"/></div>'; $html .= '</div>'; $html .= '<div class="form-group">'; $html .= ' <label class="control-label col-sm-6">' . $lang_block['scheme'] . ':</label>'; $html .= ' <div class="col-sm-9"> <select class="form-control" name="config_scheme"> '; $se1 = ($data_block['scheme'] == 'light') ? 'selected="selected"' : ''; $se2 = ($data_block['scheme'] == 'dark') ? 'selected="selected"' : ''; $html .= ' <option value="light"' . $se1 . '> Light </option>'; $html .= ' <option value="dark"' . $se2 . 
' >Dark </option>'; $html .= ' <\select>'; $html .= '</div>'; $html .= '</div>'; return $html; } /** * nv_block_config_facebook_comment_box_blocks_submit() * * @param mixed $module * @param mixed $lang_block * @return */ function nv_block_config_facebook_comment_box_blocks_submit($module, $lang_block) { global $nv_Request; $return = array(); $return['error'] = array(); $return['config'] = array(); $return['config']['facebookappid'] = $nv_Request->get_title('config_facebookappid', 'post', 0); $return['config']['width'] = $nv_Request->get_string('config_width', 'post', 0); $return['config']['numpost'] = $nv_Request->get_int('config_numpost', 'post', 0); $return['config']['scheme'] = $nv_Request->get_title('config_scheme', 'post', 0); return $return; } /** * nv_facebook_comment_box_blocks() * * @param mixed $block_config * @return */ function nv_facebook_comment_box_blocks($block_config) { global $client_info, $module_name; $content = ''; if (!defined('FACEBOOK_JSSDK')) { $lang = (NV_LANG_DATA == 'vi') ? 'vi_VN' : 'en_US'; $facebookappid = (isset($module_config[$module_name]['facebookappid'])) ? $module_config[$module_name]['facebookappid'] : $block_config['facebookappid']; $content .= "<div id=\"fb-root\"></div> <script type=\"text/javascript\"> (function(d, s, id) { var js, fjs = d.getElementsByTagName(s)[0]; if (d.getElementById(id)) return; js = d.createElement(s); js.id = id; js.src = \"//connect.facebook.net/" . $lang . "/all.js#xfbml=1&appId=" . $facebookappid . "\"; fjs.parentNode.insertBefore(js, fjs); }(document, 'script', 'facebook-jssdk')); </script>"; define('FACEBOOK_JSSDK', true); } $content .= '<div class="fb-comments" data-href="' . $client_info['selfurl'] . '" data-num-posts="' . $block_config['numpost'] . '" data-width="' . $block_config['width'] . '" data-colorscheme="' . $block_config['scheme'] . '"></div>'; return $content; } } if (defined('NV_SYSTEM')) { $content = nv_facebook_comment_box_blocks($block_config); }
ngocphan123/nukeviet-1
modules/comment/blocks/global.block_facebook_comment_box.php
PHP
gpl-2.0
4,466
/* * Hydrogen * Copyright(c) 2002-2008 by Alex >Comix< Cominu [comix@users.sourceforge.net] * * http://www.hydrogen-music.org * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY, without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <hydrogen/smf/SMFEvent.h> #include <hydrogen/timehelper.h> namespace H2Core { const char* SMFBuffer::__class_name = "SMFBuffer"; SMFBuffer::SMFBuffer() : Object( __class_name ) { } void SMFBuffer::writeByte( short int nByte ) { // infoLog( "[writeByte] " + to_string( nByte ) ); m_buffer.push_back( nByte ); } void SMFBuffer::writeWord( int nVal ) { // infoLog( "writeWord" ); writeByte( nVal >> 8 ); writeByte( nVal ); } void SMFBuffer::writeDWord( long nVal ) { writeByte( nVal >> 24 ); writeByte( nVal >> 16 ); writeByte( nVal >> 8 ); writeByte( nVal ); } void SMFBuffer::writeString( const QString& sMsg ) { // infoLog( "writeString" ); writeVarLen( sMsg.length() ); for ( int i = 0; i < sMsg.length(); i++ ) { writeByte( sMsg.toLocal8Bit().at(i) ); } } void SMFBuffer::writeVarLen( long value ) { // infoLog( "[writeVarLen]" ); long buffer; buffer = value & 0x7f; while ( ( value >>= 7 ) > 0 ) { INFOLOG( "." 
); buffer <<= 8; buffer |= 0x80; buffer += ( value & 0x7f ); } while ( true ) { // putc( buffer, outfile ); writeByte( ( char )buffer ); if ( buffer & 0x80 ) { buffer >>= 8; } else { break; } } } // :::::::::::::::::: const char* SMFTrackNameMetaEvent::__class_name = "SMFTrackNameMetaEvent"; SMFTrackNameMetaEvent::SMFTrackNameMetaEvent( const QString& sTrackName, unsigned nTicks ) : SMFEvent( __class_name, nTicks ) , m_sTrackName( sTrackName ) { // it's always at the start of the song m_nDeltaTime = 0; } std::vector<char> SMFTrackNameMetaEvent::getBuffer() { SMFBuffer buf; buf.writeVarLen( m_nDeltaTime ); buf.writeByte( 0xFF ); buf.writeByte( TRACK_NAME ); buf.writeString( m_sTrackName ); return buf.getBuffer(); } // :::::::::::::::::: const char* SMFSetTempoMetaEvent::__class_name = "SMFSetTempoMetaEvent"; SMFSetTempoMetaEvent::SMFSetTempoMetaEvent( float fBPM, unsigned nTicks ) : SMFEvent( __class_name, nTicks ) , m_fBPM( fBPM ) { // it's always at the start of the song m_nDeltaTime = 0; } std::vector<char> SMFSetTempoMetaEvent::getBuffer() { SMFBuffer buf; long msPerBeat; msPerBeat = long( 60000000 / m_fBPM ); // 60 seconds * mills \ BPM buf.writeVarLen( m_nDeltaTime ); buf.writeByte( 0xFF ); buf.writeByte( SET_TEMPO ); buf.writeByte( 0x03 ); // Length buf.writeByte( msPerBeat >> 16 ); buf.writeByte( msPerBeat >> 8 ); buf.writeByte( msPerBeat ); return buf.getBuffer(); } // :::::::::::::::::: const char* SMFCopyRightNoticeMetaEvent::__class_name = "SMFCopyRightNoticeMetaEvent"; SMFCopyRightNoticeMetaEvent::SMFCopyRightNoticeMetaEvent( const QString& sAuthor, unsigned nTicks ) : SMFEvent( __class_name, nTicks ) , m_sAuthor( sAuthor ) { // it's always at the start of the song m_nDeltaTime = 0; } std::vector<char> SMFCopyRightNoticeMetaEvent::getBuffer() { SMFBuffer buf; QString sCopyRightString; time_t now = time(0); tm *ltm = localtime(&now); // Extract the local system time. 
// Construct the copyright string in the form "(C) [Author] [CurrentYear]" sCopyRightString.append("(C) "); // Start with the copyright symbol and a seperator space. sCopyRightString.append( m_sAuthor ); // add the author sCopyRightString.append(" "); // add a seperator space sCopyRightString.append( QString::number( 1900 + ltm->tm_year, 10 ) ); // and finish with the year. buf.writeVarLen( m_nDeltaTime ); buf.writeByte( 0xFF ); buf.writeByte( COPYRIGHT_NOTICE ); buf.writeString( sCopyRightString ); return buf.getBuffer(); } // :::::::::::::::::: const char* SMFTimeSignatureMetaEvent::__class_name = "SMFTimeSignatureMetaEvent"; SMFTimeSignatureMetaEvent::SMFTimeSignatureMetaEvent( unsigned nBeats, unsigned nNote , unsigned nMTPMC , unsigned nTSNP24 , unsigned nTicks ) : SMFEvent( __class_name, nTicks ) , m_nBeats( nBeats ) , m_nNote( nNote ) , m_nMTPMC( nMTPMC ) , m_nTSNP24( nTSNP24 ) , m_nTicks( nTicks ) { // it's always at the start of the song m_nDeltaTime = 0; } std::vector<char> SMFTimeSignatureMetaEvent::getBuffer() { SMFBuffer buf; unsigned nBeatsCopy = m_nNote , Note2Log = 0; // Copy Nbeats as the process to generate Note2Log alters the value. while (nBeatsCopy >>= 1) ++Note2Log; // Generate a log to base 2 of the note value, so 8 (as in 6/8) becomes 3 buf.writeVarLen( m_nDeltaTime ); buf.writeByte( 0xFF ); buf.writeByte( TIME_SIGNATURE ); buf.writeByte( 0x04 ); // Event length in bytes. buf.writeByte( m_nBeats ); // Top line of time signature, eg 6 for 6/8 time buf.writeByte( Note2Log ); // Bottom line of time signature expressed as Log2 of the Note value. buf.writeByte( m_nMTPMC ); // MIDI Ticks per Metronome click, normally 24 ( i.e. each quarter note ). buf.writeByte( m_nTSNP24 ); // Thirty Second Notes ( as in 1/32 ) per 24 MIDI clocks, normally 8. 
return buf.getBuffer(); } // ::::::::::::: SMFEvent::SMFEvent( const char* sEventName, unsigned nTicks ) : Object( sEventName ) , m_nTicks( nTicks ) , m_nDeltaTime( -1 ) { //infoLog( "INIT" ); } SMFEvent::~SMFEvent() { //infoLog( "DESTROY" ); } // :::::::::::::: const char* SMFNoteOnEvent::__class_name = "SMFNoteOnEvent"; SMFNoteOnEvent::SMFNoteOnEvent( unsigned nTicks, int nChannel, int nPitch, int nVelocity ) : SMFEvent( __class_name, nTicks ) , m_nChannel( nChannel ) , m_nPitch( nPitch ) , m_nVelocity( nVelocity ) { if ( nChannel >= 16 ) { ERRORLOG( QString( "nChannel >= 16! nChannel=%1" ).arg( nChannel ) ); } } std::vector<char> SMFNoteOnEvent::getBuffer() { SMFBuffer buf; buf.writeVarLen( m_nDeltaTime ); buf.writeByte( NOTE_ON + m_nChannel ); buf.writeByte( m_nPitch ); buf.writeByte( m_nVelocity ); return buf.getBuffer(); } // ::::::::::: const char* SMFNoteOffEvent::__class_name = "SMFNoteOffEvent"; SMFNoteOffEvent::SMFNoteOffEvent( unsigned nTicks, int nChannel, int nPitch, int nVelocity ) : SMFEvent( __class_name, nTicks ) , m_nChannel( nChannel ) , m_nPitch( nPitch ) , m_nVelocity( nVelocity ) { if ( nChannel >= 16 ) { ERRORLOG( QString( "nChannel >= 16! nChannel=%1" ).arg( nChannel ) ); } } std::vector<char> SMFNoteOffEvent::getBuffer() { SMFBuffer buf; buf.writeVarLen( m_nDeltaTime ); buf.writeByte( NOTE_OFF + m_nChannel ); buf.writeByte( m_nPitch ); buf.writeByte( m_nVelocity ); return buf.getBuffer(); } };
blablack/hydrogen
src/core/src/smf/smf_event.cpp
C++
gpl-2.0
7,193
/* Nord Modular patch file format 3.03 parser Copyright (C) 2002 Marcus Andersson This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "nmpatch/ctrlmap.h" CtrlMap::CtrlMap(ModuleSection::Type section, Module* module, ModuleType::Parameter parameter) { this->section = section; this->module = module; this->parameter = parameter; } ModuleSection::Type CtrlMap::getModuleSectionType() { return section; } Module* CtrlMap::getModule() { return module; } ModuleType::Parameter CtrlMap::getParameter() { return parameter; } void CtrlMap::setCC(int CC) { this->CC = CC; } int CtrlMap::getCC() { return CC; }
mrlea/nmedit
libs/libnmpatch/src/ctrlmap.cc
C++
gpl-2.0
1,316
/** ****************************************************************************** * @file stm32f0xx_hal_gpio.h * @author MCD Application Team * @version V1.1.0 * @date 03-Oct-2014 * @brief Header file of GPIO HAL module. ****************************************************************************** * @attention * * <h2><center>&copy; COPYRIGHT(c) 2014 STMicroelectronics</center></h2> * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of STMicroelectronics nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* ****************************************************************************** */ /* Define to prevent recursive inclusion -------------------------------------*/ #ifndef __STM32F0xx_HAL_GPIO_H #define __STM32F0xx_HAL_GPIO_H #ifdef __cplusplus extern "C" { #endif /* Includes ------------------------------------------------------------------*/ #include "stm32f0xx_hal_def.h" /** @addtogroup STM32F0xx_HAL_Driver * @{ */ /** @addtogroup GPIO * @{ */ /* Exported types ------------------------------------------------------------*/ /** @defgroup GPIO_Exported_Types GPIO Exported Types * @{ */ /** * @brief GPIO Init structure definition */ typedef struct { uint32_t Pin; /*!< Specifies the GPIO pins to be configured. This parameter can be any value of @ref GPIO_pins */ uint32_t Mode; /*!< Specifies the operating mode for the selected pins. This parameter can be a value of @ref GPIO_mode */ uint32_t Pull; /*!< Specifies the Pull-up or Pull-Down activation for the selected pins. This parameter can be a value of @ref GPIO_pull */ uint32_t Speed; /*!< Specifies the speed for the selected pins. 
This parameter can be a value of @ref GPIO_speed */ uint32_t Alternate; /*!< Peripheral to be connected to the selected pins This parameter can be a value of @ref GPIOEx_Alternate_function_selection */ }GPIO_InitTypeDef; /** * @brief GPIO Bit SET and Bit RESET enumeration */ typedef enum { GPIO_PIN_RESET = 0, GPIO_PIN_SET }GPIO_PinState; /** * @} */ /* Exported constants --------------------------------------------------------*/ /** @defgroup GPIO_Exported_Constants GPIO Exported Constants * @{ */ /** @defgroup GPIO_pin_actions GPIO pin actions * @{ */ #define IS_GPIO_PIN_ACTION(ACTION) (((ACTION) == GPIO_PIN_RESET) || ((ACTION) == GPIO_PIN_SET)) /** * @} */ /** @defgroup GPIO_pins GPIO pins * @{ */ #define GPIO_PIN_0 ((uint16_t)0x0001) /* Pin 0 selected */ #define GPIO_PIN_1 ((uint16_t)0x0002) /* Pin 1 selected */ #define GPIO_PIN_2 ((uint16_t)0x0004) /* Pin 2 selected */ #define GPIO_PIN_3 ((uint16_t)0x0008) /* Pin 3 selected */ #define GPIO_PIN_4 ((uint16_t)0x0010) /* Pin 4 selected */ #define GPIO_PIN_5 ((uint16_t)0x0020) /* Pin 5 selected */ #define GPIO_PIN_6 ((uint16_t)0x0040) /* Pin 6 selected */ #define GPIO_PIN_7 ((uint16_t)0x0080) /* Pin 7 selected */ #define GPIO_PIN_8 ((uint16_t)0x0100) /* Pin 8 selected */ #define GPIO_PIN_9 ((uint16_t)0x0200) /* Pin 9 selected */ #define GPIO_PIN_10 ((uint16_t)0x0400) /* Pin 10 selected */ #define GPIO_PIN_11 ((uint16_t)0x0800) /* Pin 11 selected */ #define GPIO_PIN_12 ((uint16_t)0x1000) /* Pin 12 selected */ #define GPIO_PIN_13 ((uint16_t)0x2000) /* Pin 13 selected */ #define GPIO_PIN_14 ((uint16_t)0x4000) /* Pin 14 selected */ #define GPIO_PIN_15 ((uint16_t)0x8000) /* Pin 15 selected */ #define GPIO_PIN_All ((uint16_t)0xFFFF) /* All pins selected */ #define GPIO_PIN_MASK ((uint32_t)0x0000FFFF) /* PIN mask for assert test */ #define IS_GPIO_PIN(PIN) (((PIN) & GPIO_PIN_MASK) != (uint32_t)0x00) /** * @} */ /** @defgroup GPIO_mode GPIO mode * @brief GPIO Configuration Mode * Elements values convention: 0xX0yz00YZ * - X 
: GPIO mode or EXTI Mode * - y : External IT or Event trigger detection * - z : IO configuration on External IT or Event * - Y : Output type (Push Pull or Open Drain) * - Z : IO Direction mode (Input, Output, Alternate or Analog) * @{ */ #define GPIO_MODE_INPUT ((uint32_t)0x00000000) /*!< Input Floating Mode */ #define GPIO_MODE_OUTPUT_PP ((uint32_t)0x00000001) /*!< Output Push Pull Mode */ #define GPIO_MODE_OUTPUT_OD ((uint32_t)0x00000011) /*!< Output Open Drain Mode */ #define GPIO_MODE_AF_PP ((uint32_t)0x00000002) /*!< Alternate Function Push Pull Mode */ #define GPIO_MODE_AF_OD ((uint32_t)0x00000012) /*!< Alternate Function Open Drain Mode */ #define GPIO_MODE_ANALOG ((uint32_t)0x00000003) /*!< Analog Mode */ #define GPIO_MODE_IT_RISING ((uint32_t)0x10110000) /*!< External Interrupt Mode with Rising edge trigger detection */ #define GPIO_MODE_IT_FALLING ((uint32_t)0x10210000) /*!< External Interrupt Mode with Falling edge trigger detection */ #define GPIO_MODE_IT_RISING_FALLING ((uint32_t)0x10310000) /*!< External Interrupt Mode with Rising/Falling edge trigger detection */ #define GPIO_MODE_EVT_RISING ((uint32_t)0x10120000) /*!< External Event Mode with Rising edge trigger detection */ #define GPIO_MODE_EVT_FALLING ((uint32_t)0x10220000) /*!< External Event Mode with Falling edge trigger detection */ #define GPIO_MODE_EVT_RISING_FALLING ((uint32_t)0x10320000) /*!< External Event Mode with Rising/Falling edge trigger detection */ #define IS_GPIO_MODE(MODE) (((MODE) == GPIO_MODE_INPUT) ||\ ((MODE) == GPIO_MODE_OUTPUT_PP) ||\ ((MODE) == GPIO_MODE_OUTPUT_OD) ||\ ((MODE) == GPIO_MODE_AF_PP) ||\ ((MODE) == GPIO_MODE_AF_OD) ||\ ((MODE) == GPIO_MODE_IT_RISING) ||\ ((MODE) == GPIO_MODE_IT_FALLING) ||\ ((MODE) == GPIO_MODE_IT_RISING_FALLING) ||\ ((MODE) == GPIO_MODE_EVT_RISING) ||\ ((MODE) == GPIO_MODE_EVT_FALLING) ||\ ((MODE) == GPIO_MODE_EVT_RISING_FALLING) ||\ ((MODE) == GPIO_MODE_ANALOG)) /** * @} */ /** @defgroup GPIO_speed GPIO speed * @brief GPIO Output Maximum 
frequency * @{ */ #define GPIO_SPEED_LOW ((uint32_t)0x00000000) /*!< Low speed */ #define GPIO_SPEED_MEDIUM ((uint32_t)0x00000001) /*!< Medium speed */ #define GPIO_SPEED_HIGH ((uint32_t)0x00000003) /*!< High speed */ #define IS_GPIO_SPEED(SPEED) (((SPEED) == GPIO_SPEED_LOW) || ((SPEED) == GPIO_SPEED_MEDIUM) || \ ((SPEED) == GPIO_SPEED_HIGH)) /** * @} */ /** @defgroup GPIO_pull GPIO pull * @brief GPIO Pull-Up or Pull-Down Activation * @{ */ #define GPIO_NOPULL ((uint32_t)0x00000000) /*!< No Pull-up or Pull-down activation */ #define GPIO_PULLUP ((uint32_t)0x00000001) /*!< Pull-up activation */ #define GPIO_PULLDOWN ((uint32_t)0x00000002) /*!< Pull-down activation */ #define IS_GPIO_PULL(PULL) (((PULL) == GPIO_NOPULL) || ((PULL) == GPIO_PULLUP) || \ ((PULL) == GPIO_PULLDOWN)) /** * @} */ /** * @} */ /* Exported macro ------------------------------------------------------------*/ /** @defgroup GPIO_Exported_Macros GPIO Exported Macros * @{ */ /** * @brief Checks whether the specified EXTI line flag is set or not. * @param __EXTI_LINE__: specifies the EXTI line flag to check. * This parameter can be GPIO_PIN_x where x can be(0..15) * @retval The new state of __EXTI_LINE__ (SET or RESET). */ #define __HAL_GPIO_EXTI_GET_FLAG(__EXTI_LINE__) (EXTI->PR & (__EXTI_LINE__)) /** * @brief Clears the EXTI's line pending flags. * @param __EXTI_LINE__: specifies the EXTI lines flags to clear. * This parameter can be any combination of GPIO_PIN_x where x can be (0..15) * @retval None */ #define __HAL_GPIO_EXTI_CLEAR_FLAG(__EXTI_LINE__) (EXTI->PR = (__EXTI_LINE__)) /** * @brief Checks whether the specified EXTI line is asserted or not. * @param __EXTI_LINE__: specifies the EXTI line to check. * This parameter can be GPIO_PIN_x where x can be(0..15) * @retval The new state of __EXTI_LINE__ (SET or RESET). */ #define __HAL_GPIO_EXTI_GET_IT(__EXTI_LINE__) (EXTI->PR & (__EXTI_LINE__)) /** * @brief Clears the EXTI's line pending bits. 
* @param __EXTI_LINE__: specifies the EXTI lines to clear. * This parameter can be any combination of GPIO_PIN_x where x can be (0..15) * @retval None */ #define __HAL_GPIO_EXTI_CLEAR_IT(__EXTI_LINE__) (EXTI->PR = (__EXTI_LINE__)) /** * @brief Generates a Software interrupt on selected EXTI line. * @param __EXTI_LINE__: specifies the EXTI line to check. * This parameter can be GPIO_PIN_x where x can be(0..15) * @retval None */ #define __HAL_GPIO_EXTI_GENERATE_SWIT(__EXTI_LINE__) (EXTI->SWIER |= (__EXTI_LINE__)) /** * @} */ /* Include GPIO HAL Extension module */ #include "stm32f0xx_hal_gpio_ex.h" /* Exported functions --------------------------------------------------------*/ /** @addtogroup GPIO_Exported_Functions GPIO Exported Functions * @{ */ /** @addtogroup GPIO_Exported_Functions_Group1 Initialization/de-initialization functions * @brief Initialization and Configuration functions * @{ */ /* Initialization and de-initialization functions *****************************/ void HAL_GPIO_Init(GPIO_TypeDef *GPIOx, GPIO_InitTypeDef *GPIO_Init); void HAL_GPIO_DeInit(GPIO_TypeDef *GPIOx, uint32_t GPIO_Pin); /** * @} */ /** @addtogroup GPIO_Exported_Functions_Group2 IO operation functions * @{ */ /* IO operation functions *****************************************************/ GPIO_PinState HAL_GPIO_ReadPin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin); void HAL_GPIO_WritePin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin, GPIO_PinState PinState); void HAL_GPIO_TogglePin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin); HAL_StatusTypeDef HAL_GPIO_LockPin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin); void HAL_GPIO_EXTI_IRQHandler(uint16_t GPIO_Pin); void HAL_GPIO_EXTI_Callback(uint16_t GPIO_Pin); /** * @} */ /** * @} */ /** * @} */ /** * @} */ #ifdef __cplusplus } #endif #endif /* __STM32F0xx_HAL_GPIO_H */ /************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
xiaogan-Studio/baozi_gesture
software/firmware/CMSIS/stm32f0xx_hal_gpio.h
C
gpl-2.0
12,868
/* * Copyright (c) 1997-1999, 2003 Massachusetts Institute of Technology * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /* This file was automatically generated --- DO NOT EDIT */ /* Generated on Mon Mar 24 02:08:33 EST 2003 */ #include "fftw-int.h" #include "fftw.h" /* Generated by: /homee/stevenj/cvs/fftw/gensrc/genfft -magic-alignment-check -magic-twiddle-load-all -magic-variables 4 -magic-loopi -twiddleinv 10 */ /* * This function contains 102 FP additions, 60 FP multiplications, * (or, 72 additions, 30 multiplications, 30 fused multiply/add), * 42 stack variables, and 40 memory accesses */ static const fftw_real K951056516 = FFTW_KONST(+0.951056516295153572116439333379382143405698634); static const fftw_real K587785252 = FFTW_KONST(+0.587785252292473129168705954639072768597652438); static const fftw_real K250000000 = FFTW_KONST(+0.250000000000000000000000000000000000000000000); static const fftw_real K559016994 = FFTW_KONST(+0.559016994374947424102293417182819058860154590); /* * Generator Id's : * $Id: exprdag.ml,v 1.43 2003/03/16 23:43:46 stevenj Exp $ * $Id: fft.ml,v 1.44 2003/03/16 23:43:46 stevenj Exp $ * $Id: to_c.ml,v 1.26 2003/03/16 23:43:46 stevenj Exp $ */ void fftwi_twiddle_10(fftw_complex *A, const fftw_complex *W, int iostride, int m, int dist) { int i; fftw_complex *inout; inout = A; for (i = m; i > 0; 
i = i - 1, inout = inout + dist, W = W + 9) { fftw_real tmp7; fftw_real tmp55; fftw_real tmp100; fftw_real tmp115; fftw_real tmp41; fftw_real tmp52; fftw_real tmp53; fftw_real tmp59; fftw_real tmp60; fftw_real tmp61; fftw_real tmp75; fftw_real tmp78; fftw_real tmp113; fftw_real tmp89; fftw_real tmp90; fftw_real tmp96; fftw_real tmp18; fftw_real tmp29; fftw_real tmp30; fftw_real tmp56; fftw_real tmp57; fftw_real tmp58; fftw_real tmp68; fftw_real tmp71; fftw_real tmp112; fftw_real tmp86; fftw_real tmp87; fftw_real tmp95; ASSERT_ALIGNED_DOUBLE; { fftw_real tmp1; fftw_real tmp99; fftw_real tmp6; fftw_real tmp98; ASSERT_ALIGNED_DOUBLE; tmp1 = c_re(inout[0]); tmp99 = c_im(inout[0]); { fftw_real tmp3; fftw_real tmp5; fftw_real tmp2; fftw_real tmp4; ASSERT_ALIGNED_DOUBLE; tmp3 = c_re(inout[5 * iostride]); tmp5 = c_im(inout[5 * iostride]); tmp2 = c_re(W[4]); tmp4 = c_im(W[4]); tmp6 = (tmp2 * tmp3) + (tmp4 * tmp5); tmp98 = (tmp2 * tmp5) - (tmp4 * tmp3); } tmp7 = tmp1 - tmp6; tmp55 = tmp1 + tmp6; tmp100 = tmp98 + tmp99; tmp115 = tmp99 - tmp98; } { fftw_real tmp35; fftw_real tmp73; fftw_real tmp51; fftw_real tmp77; fftw_real tmp40; fftw_real tmp74; fftw_real tmp46; fftw_real tmp76; ASSERT_ALIGNED_DOUBLE; { fftw_real tmp32; fftw_real tmp34; fftw_real tmp31; fftw_real tmp33; ASSERT_ALIGNED_DOUBLE; tmp32 = c_re(inout[4 * iostride]); tmp34 = c_im(inout[4 * iostride]); tmp31 = c_re(W[3]); tmp33 = c_im(W[3]); tmp35 = (tmp31 * tmp32) + (tmp33 * tmp34); tmp73 = (tmp31 * tmp34) - (tmp33 * tmp32); } { fftw_real tmp48; fftw_real tmp50; fftw_real tmp47; fftw_real tmp49; ASSERT_ALIGNED_DOUBLE; tmp48 = c_re(inout[iostride]); tmp50 = c_im(inout[iostride]); tmp47 = c_re(W[0]); tmp49 = c_im(W[0]); tmp51 = (tmp47 * tmp48) + (tmp49 * tmp50); tmp77 = (tmp47 * tmp50) - (tmp49 * tmp48); } { fftw_real tmp37; fftw_real tmp39; fftw_real tmp36; fftw_real tmp38; ASSERT_ALIGNED_DOUBLE; tmp37 = c_re(inout[9 * iostride]); tmp39 = c_im(inout[9 * iostride]); tmp36 = c_re(W[8]); tmp38 = c_im(W[8]); tmp40 = 
(tmp36 * tmp37) + (tmp38 * tmp39); tmp74 = (tmp36 * tmp39) - (tmp38 * tmp37); } { fftw_real tmp43; fftw_real tmp45; fftw_real tmp42; fftw_real tmp44; ASSERT_ALIGNED_DOUBLE; tmp43 = c_re(inout[6 * iostride]); tmp45 = c_im(inout[6 * iostride]); tmp42 = c_re(W[5]); tmp44 = c_im(W[5]); tmp46 = (tmp42 * tmp43) + (tmp44 * tmp45); tmp76 = (tmp42 * tmp45) - (tmp44 * tmp43); } tmp41 = tmp35 - tmp40; tmp52 = tmp46 - tmp51; tmp53 = tmp41 + tmp52; tmp59 = tmp35 + tmp40; tmp60 = tmp46 + tmp51; tmp61 = tmp59 + tmp60; tmp75 = tmp73 - tmp74; tmp78 = tmp76 - tmp77; tmp113 = tmp75 + tmp78; tmp89 = tmp73 + tmp74; tmp90 = tmp76 + tmp77; tmp96 = tmp89 + tmp90; } { fftw_real tmp12; fftw_real tmp66; fftw_real tmp28; fftw_real tmp70; fftw_real tmp17; fftw_real tmp67; fftw_real tmp23; fftw_real tmp69; ASSERT_ALIGNED_DOUBLE; { fftw_real tmp9; fftw_real tmp11; fftw_real tmp8; fftw_real tmp10; ASSERT_ALIGNED_DOUBLE; tmp9 = c_re(inout[2 * iostride]); tmp11 = c_im(inout[2 * iostride]); tmp8 = c_re(W[1]); tmp10 = c_im(W[1]); tmp12 = (tmp8 * tmp9) + (tmp10 * tmp11); tmp66 = (tmp8 * tmp11) - (tmp10 * tmp9); } { fftw_real tmp25; fftw_real tmp27; fftw_real tmp24; fftw_real tmp26; ASSERT_ALIGNED_DOUBLE; tmp25 = c_re(inout[3 * iostride]); tmp27 = c_im(inout[3 * iostride]); tmp24 = c_re(W[2]); tmp26 = c_im(W[2]); tmp28 = (tmp24 * tmp25) + (tmp26 * tmp27); tmp70 = (tmp24 * tmp27) - (tmp26 * tmp25); } { fftw_real tmp14; fftw_real tmp16; fftw_real tmp13; fftw_real tmp15; ASSERT_ALIGNED_DOUBLE; tmp14 = c_re(inout[7 * iostride]); tmp16 = c_im(inout[7 * iostride]); tmp13 = c_re(W[6]); tmp15 = c_im(W[6]); tmp17 = (tmp13 * tmp14) + (tmp15 * tmp16); tmp67 = (tmp13 * tmp16) - (tmp15 * tmp14); } { fftw_real tmp20; fftw_real tmp22; fftw_real tmp19; fftw_real tmp21; ASSERT_ALIGNED_DOUBLE; tmp20 = c_re(inout[8 * iostride]); tmp22 = c_im(inout[8 * iostride]); tmp19 = c_re(W[7]); tmp21 = c_im(W[7]); tmp23 = (tmp19 * tmp20) + (tmp21 * tmp22); tmp69 = (tmp19 * tmp22) - (tmp21 * tmp20); } tmp18 = tmp12 - tmp17; tmp29 = 
tmp23 - tmp28; tmp30 = tmp18 + tmp29; tmp56 = tmp12 + tmp17; tmp57 = tmp23 + tmp28; tmp58 = tmp56 + tmp57; tmp68 = tmp66 - tmp67; tmp71 = tmp69 - tmp70; tmp112 = tmp68 + tmp71; tmp86 = tmp66 + tmp67; tmp87 = tmp69 + tmp70; tmp95 = tmp86 + tmp87; } { fftw_real tmp64; fftw_real tmp54; fftw_real tmp63; fftw_real tmp80; fftw_real tmp82; fftw_real tmp72; fftw_real tmp79; fftw_real tmp81; fftw_real tmp65; ASSERT_ALIGNED_DOUBLE; tmp64 = K559016994 * (tmp30 - tmp53); tmp54 = tmp30 + tmp53; tmp63 = tmp7 - (K250000000 * tmp54); tmp72 = tmp68 - tmp71; tmp79 = tmp75 - tmp78; tmp80 = (K587785252 * tmp72) - (K951056516 * tmp79); tmp82 = (K951056516 * tmp72) + (K587785252 * tmp79); c_re(inout[5 * iostride]) = tmp7 + tmp54; tmp81 = tmp64 + tmp63; c_re(inout[iostride]) = tmp81 - tmp82; c_re(inout[9 * iostride]) = tmp81 + tmp82; tmp65 = tmp63 - tmp64; c_re(inout[7 * iostride]) = tmp65 - tmp80; c_re(inout[3 * iostride]) = tmp65 + tmp80; } { fftw_real tmp114; fftw_real tmp116; fftw_real tmp117; fftw_real tmp111; fftw_real tmp120; fftw_real tmp109; fftw_real tmp110; fftw_real tmp119; fftw_real tmp118; ASSERT_ALIGNED_DOUBLE; tmp114 = K559016994 * (tmp112 - tmp113); tmp116 = tmp112 + tmp113; tmp117 = tmp115 - (K250000000 * tmp116); tmp109 = tmp18 - tmp29; tmp110 = tmp41 - tmp52; tmp111 = (K951056516 * tmp109) + (K587785252 * tmp110); tmp120 = (K587785252 * tmp109) - (K951056516 * tmp110); c_im(inout[5 * iostride]) = tmp116 + tmp115; tmp119 = tmp117 - tmp114; c_im(inout[3 * iostride]) = tmp119 - tmp120; c_im(inout[7 * iostride]) = tmp120 + tmp119; tmp118 = tmp114 + tmp117; c_im(inout[iostride]) = tmp111 + tmp118; c_im(inout[9 * iostride]) = tmp118 - tmp111; } { fftw_real tmp84; fftw_real tmp62; fftw_real tmp83; fftw_real tmp92; fftw_real tmp94; fftw_real tmp88; fftw_real tmp91; fftw_real tmp93; fftw_real tmp85; ASSERT_ALIGNED_DOUBLE; tmp84 = K559016994 * (tmp58 - tmp61); tmp62 = tmp58 + tmp61; tmp83 = tmp55 - (K250000000 * tmp62); tmp88 = tmp86 - tmp87; tmp91 = tmp89 - tmp90; tmp92 = 
(K587785252 * tmp88) - (K951056516 * tmp91); tmp94 = (K951056516 * tmp88) + (K587785252 * tmp91); c_re(inout[0]) = tmp55 + tmp62; tmp93 = tmp84 + tmp83; c_re(inout[6 * iostride]) = tmp93 - tmp94; c_re(inout[4 * iostride]) = tmp93 + tmp94; tmp85 = tmp83 - tmp84; c_re(inout[2 * iostride]) = tmp85 - tmp92; c_re(inout[8 * iostride]) = tmp85 + tmp92; } { fftw_real tmp105; fftw_real tmp97; fftw_real tmp104; fftw_real tmp103; fftw_real tmp108; fftw_real tmp101; fftw_real tmp102; fftw_real tmp107; fftw_real tmp106; ASSERT_ALIGNED_DOUBLE; tmp105 = K559016994 * (tmp95 - tmp96); tmp97 = tmp95 + tmp96; tmp104 = tmp100 - (K250000000 * tmp97); tmp101 = tmp56 - tmp57; tmp102 = tmp59 - tmp60; tmp103 = (K587785252 * tmp101) - (K951056516 * tmp102); tmp108 = (K951056516 * tmp101) + (K587785252 * tmp102); c_im(inout[0]) = tmp97 + tmp100; tmp107 = tmp105 + tmp104; c_im(inout[4 * iostride]) = tmp107 - tmp108; c_im(inout[6 * iostride]) = tmp108 + tmp107; tmp106 = tmp104 - tmp105; c_im(inout[2 * iostride]) = tmp103 + tmp106; c_im(inout[8 * iostride]) = tmp106 - tmp103; } } } static const int twiddle_order[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 }; fftw_codelet_desc fftwi_twiddle_10_desc = { "fftwi_twiddle_10", (void (*)()) fftwi_twiddle_10, 10, FFTW_BACKWARD, FFTW_TWIDDLE, 231, 9, twiddle_order, };
akinori-ito/peaqb-fast
fftw/ftwi_10.c
C
gpl-2.0
11,447
/* * kernel/sched/core.c * * Kernel scheduler and related syscalls * * Copyright (C) 1991-2002 Linus Torvalds * * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and * make semaphores SMP safe * 1998-11-19 Implemented schedule_timeout() and related stuff * by Andrea Arcangeli * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar: * hybrid priority-list and round-robin design with * an array-switch method of distributing timeslices * and per-CPU runqueues. Cleanups and useful suggestions * by Davide Libenzi, preemptible kernel bits by Robert Love. * 2003-09-03 Interactivity tuning by Con Kolivas. * 2004-04-02 Scheduler domains code by Nick Piggin * 2007-04-15 Work begun on replacing all interactivity tuning with a * fair scheduling design by Con Kolivas. * 2007-05-05 Load balancing (smp-nice) and other improvements * by Peter Williams * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins, * Thomas Gleixner, Mike Kravetz */ #include <linux/mm.h> #include <linux/module.h> #include <linux/nmi.h> #include <linux/init.h> #include <linux/uaccess.h> #include <linux/highmem.h> #include <asm/mmu_context.h> #include <linux/interrupt.h> #include <linux/capability.h> #include <linux/completion.h> #include <linux/kernel_stat.h> #include <linux/debug_locks.h> #include <linux/perf_event.h> #include <linux/security.h> #include <linux/notifier.h> #include <linux/profile.h> #include <linux/freezer.h> #include <linux/vmalloc.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/pid_namespace.h> #include <linux/smp.h> #include <linux/threads.h> #include <linux/timer.h> #include <linux/rcupdate.h> #include <linux/cpu.h> #include <linux/cpuset.h> #include <linux/percpu.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/sysctl.h> #include <linux/syscalls.h> #include <linux/times.h> 
#include <linux/tsacct_kern.h> #include <linux/kprobes.h> #include <linux/delayacct.h> #include <linux/unistd.h> #include <linux/pagemap.h> #include <linux/hrtimer.h> #include <linux/tick.h> #include <linux/debugfs.h> #include <linux/ctype.h> #include <linux/ftrace.h> #include <linux/slab.h> #include <linux/init_task.h> #include <linux/binfmts.h> #include <linux/context_tracking.h> #include <linux/cpufreq.h> #include <asm/switch_to.h> #include <asm/tlb.h> #include <asm/irq_regs.h> #include <asm/mutex.h> #ifdef CONFIG_PARAVIRT #include <asm/paravirt.h> #endif #include "sched.h" #include "../workqueue_internal.h" #include "../smpboot.h" #define CREATE_TRACE_POINTS #include <trace/events/sched.h> #include <soc/qcom/watchdog.h> #define DLOG_SIZE 15000 #define MAX_CTXSW_LATENCY 1000000000 static DEFINE_PER_CPU(char[DLOG_SIZE], dbuf); static DEFINE_PER_CPU(char *, dptr); #define dlog(x...) \ do { \ unsigned long dflags; \ char *ptr, *buf; \ int dcpu; \ local_irq_save(dflags); \ dcpu = smp_processor_id(); \ buf = per_cpu(dbuf, dcpu); \ ptr = per_cpu(dptr, dcpu); \ ptr += snprintf(ptr, 10, "CPU %d ", dcpu); \ ptr += snprintf(ptr, 490, x); \ if (ptr - buf > DLOG_SIZE - 500) \ ptr = buf; \ per_cpu(dptr, dcpu) = ptr; \ local_irq_restore(dflags); \ } while (0) static atomic_t __su_instances; int su_instances(void) { return atomic_read(&__su_instances); } bool su_running(void) { return su_instances() > 0; } bool su_visible(void) { kuid_t uid = current_uid(); if (su_running()) return true; if (uid_eq(uid, GLOBAL_ROOT_UID) || uid_eq(uid, GLOBAL_SYSTEM_UID)) return true; return false; } void su_exec(void) { atomic_inc(&__su_instances); } void su_exit(void) { atomic_dec(&__su_instances); } const char *task_event_names[] = {"PUT_PREV_TASK", "PICK_NEXT_TASK", "TASK_WAKE", "TASK_MIGRATE", "TASK_UPDATE", "IRQ_UPDATE"}; ATOMIC_NOTIFIER_HEAD(migration_notifier_head); ATOMIC_NOTIFIER_HEAD(load_alert_notifier_head); #ifdef smp_mb__before_atomic void __smp_mb__before_atomic(void) { 
smp_mb__before_atomic(); } EXPORT_SYMBOL(__smp_mb__before_atomic); #endif #ifdef smp_mb__after_atomic void __smp_mb__after_atomic(void) { smp_mb__after_atomic(); } EXPORT_SYMBOL(__smp_mb__after_atomic); #endif void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period) { unsigned long delta; ktime_t soft, hard, now; for (;;) { if (hrtimer_active(period_timer)) break; now = hrtimer_cb_get_time(period_timer); hrtimer_forward(period_timer, now, period); soft = hrtimer_get_softexpires(period_timer); hard = hrtimer_get_expires(period_timer); delta = ktime_to_ns(ktime_sub(hard, soft)); __hrtimer_start_range_ns(period_timer, soft, delta, HRTIMER_MODE_ABS_PINNED, 0); } } DEFINE_MUTEX(sched_domains_mutex); DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); static void update_rq_clock_task(struct rq *rq, s64 delta); void update_rq_clock(struct rq *rq) { s64 delta; if (rq->skip_clock_update > 0) return; delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; rq->clock += delta; update_rq_clock_task(rq, delta); } /* * Debugging: various feature bits */ #define SCHED_FEAT(name, enabled) \ (1UL << __SCHED_FEAT_##name) * enabled | const_debug unsigned int sysctl_sched_features = #include "features.h" 0; #undef SCHED_FEAT #ifdef CONFIG_SCHED_DEBUG #define SCHED_FEAT(name, enabled) \ #name , static const char * const sched_feat_names[] = { #include "features.h" }; #undef SCHED_FEAT static int sched_feat_show(struct seq_file *m, void *v) { int i; for (i = 0; i < __SCHED_FEAT_NR; i++) { if (!(sysctl_sched_features & (1UL << i))) seq_puts(m, "NO_"); seq_printf(m, "%s ", sched_feat_names[i]); } seq_puts(m, "\n"); return 0; } #ifdef HAVE_JUMP_LABEL #define jump_label_key__true STATIC_KEY_INIT_TRUE #define jump_label_key__false STATIC_KEY_INIT_FALSE #define SCHED_FEAT(name, enabled) \ jump_label_key__##enabled , struct static_key sched_feat_keys[__SCHED_FEAT_NR] = { #include "features.h" }; #undef SCHED_FEAT static void sched_feat_disable(int i) { 
static_key_disable(&sched_feat_keys[i]); } static void sched_feat_enable(int i) { static_key_enable(&sched_feat_keys[i]); } #else static void sched_feat_disable(int i) { }; static void sched_feat_enable(int i) { }; #endif /* HAVE_JUMP_LABEL */ static int sched_feat_set(char *cmp) { int i; int neg = 0; if (strncmp(cmp, "NO_", 3) == 0) { neg = 1; cmp += 3; } for (i = 0; i < __SCHED_FEAT_NR; i++) { if (strcmp(cmp, sched_feat_names[i]) == 0) { if (neg) { sysctl_sched_features &= ~(1UL << i); sched_feat_disable(i); } else { sysctl_sched_features |= (1UL << i); sched_feat_enable(i); } break; } } return i; } static ssize_t sched_feat_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { char buf[64]; char *cmp; int i; if (cnt > 63) cnt = 63; if (copy_from_user(&buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; cmp = strstrip(buf); i = sched_feat_set(cmp); if (i == __SCHED_FEAT_NR) return -EINVAL; *ppos += cnt; return cnt; } static int sched_feat_open(struct inode *inode, struct file *filp) { return single_open(filp, sched_feat_show, NULL); } static const struct file_operations sched_feat_fops = { .open = sched_feat_open, .write = sched_feat_write, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static __init int sched_init_debug(void) { debugfs_create_file("sched_features", 0644, NULL, NULL, &sched_feat_fops); return 0; } late_initcall(sched_init_debug); #endif /* CONFIG_SCHED_DEBUG */ /* * Number of tasks to iterate in a single balance run. * Limited because this is done with IRQs disabled. */ const_debug unsigned int sysctl_sched_nr_migrate = 32; /* * period over which we average the RT time consumption, measured * in ms. * * default: 1s */ const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC; /* * period over which we measure -rt task cpu usage in us. * default: 1s */ unsigned int sysctl_sched_rt_period = 1000000; __read_mostly int scheduler_running; /* * part of the period that we allow rt tasks to run in us. 
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;



/*
 * __task_rq_lock - lock the rq @p resides on.
 *
 * Caller must already hold @p->pi_lock. Because @p may migrate between
 * reading task_rq() and acquiring that rq's lock, loop until the rq we
 * locked is still the one @p is on.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;
		/* @p migrated while we waited for the lock; retry */
		raw_spin_unlock(&rq->lock);
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 *
 * Same retry scheme as __task_rq_lock(), but also takes p->pi_lock
 * (saving and disabling IRQs into *@flags) so the caller needs no
 * prior locking.
 */
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, *flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;
		/* lost the race against a migration; drop both locks and retry */
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
	}
}

/* Release only rq->lock; pairs with __task_rq_lock(). */
static void __task_rq_unlock(struct rq *rq)
	__releases(rq->lock)
{
	raw_spin_unlock(&rq->lock);
}

/* Release rq->lock then p->pi_lock (restoring IRQs); pairs with task_rq_lock(). */
static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}

/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	rq = this_rq();
	raw_spin_lock(&rq->lock);

	return rq;
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 *
 * Its all a bit involved since we cannot program an hrt while holding the
 * rq->lock. So what we do is store a state in in rq->hrtick_* and ask for a
 * reschedule event.
 *
 * When we get rescheduled we reprogram the hrtick_timer outside of the
 * rq->lock.
 */

/* Cancel a pending hrtick timer on @rq, if any. */
static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);

	/* the hrtick timer is pinned; it must fire on the rq's own CPU */
	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	raw_spin_lock(&rq->lock);
	update_rq_clock(rq);
	/* queued=1: deliver the class tick as a queued (hrtick) event */
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	raw_spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP
/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t soft, hard;
	unsigned long delta;

	/* re-derive the range that hrtick_start() stashed in the timer */
	soft = hrtimer_get_softexpires(timer);
	hard = hrtimer_get_expires(timer);
	delta = ktime_to_ns(ktime_sub(hard, soft));

	raw_spin_lock(&rq->lock);
	__hrtimer_start_range_ns(timer, soft, delta, HRTIMER_MODE_ABS, 0);
	rq->hrtick_csd_pending = 0;
	raw_spin_unlock(&rq->lock);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq()) {
		/* local rq: program the pinned timer directly */
		__hrtimer_start_range_ns(timer, ns_to_ktime(delay), 0,
				HRTIMER_MODE_REL_PINNED, 0);
	} else if (!rq->hrtick_csd_pending) {
		/* remote rq: defer the programming to an IPI on that CPU */
		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
		rq->hrtick_csd_pending = 1;
	}
}

/* CPU-hotplug callback: cancel any pending hrtick on a CPU going away. */
static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (int)(long)hcpu;

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		hrtick_clear(cpu_rq(cpu));
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static __init void init_hrtick(void)
{
	hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * UP variant: no IPI machinery needed, always program the pinned
	 * hrtimer directly on this CPU.
	 */
	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
			HRTIMER_MODE_REL_PINNED, 0);
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */

/* Per-rq hrtick state initialization. */
static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
	rq->hrtick_csd_pending = 0;

	rq->hrtick_csd.flags = 0;
	rq->hrtick_csd.func = __hrtick_start;
	rq->hrtick_csd.info = rq;
#endif

	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rq->hrtick_timer.function = hrtick;
}
#else	/* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void init_rq_hrtick(struct rq *rq)
{
}

static inline void init_hrtick(void)
{
}
#endif	/* CONFIG_SCHED_HRTICK */

/*
 * wake_q_add - queue @task on @head for a deferred mass wakeup by
 * wake_up_q(). Takes a task reference which wake_up_q() drops.
 */
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * its already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * This cmpxchg() implies a full barrier, which pairs with the write
	 * barrier implied by the wakeup in wake_up_list().
	 */
	if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
		return;

	/* reference dropped in wake_up_q() once the task has been woken */
	get_task_struct(task);

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
}

/* Wake every task queued on @head and drop the wake_q_add() references. */
void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		BUG_ON(!task);
		/* task can safely be re-inserted now */
		node = node->next;
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() implies a wmb() to pair with the queueing
		 * in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
#ifdef CONFIG_SMP
void resched_task(struct task_struct *p)
{
	int cpu;

	/* caller must hold @p's rq lock */
	assert_raw_spin_locked(&task_rq(p)->lock);

	if (test_tsk_need_resched(p))
		return;

	set_tsk_need_resched(p);

	cpu = task_cpu(p);
	if (cpu == smp_processor_id())
		return;

	/* NEED_RESCHED must be visible before we test polling */
	smp_mb();
	/* a polling idle loop will notice the flag itself; no IPI needed */
	if (!tsk_is_polling(p))
		smp_send_reschedule(cpu);
}

/* Force a reschedule of whatever currently runs on @cpu. */
void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	/* best effort: if the rq lock is contended, skip the kick */
	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
		return;
	resched_task(cpu_curr(cpu));
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu.  This is good for power-savings.
 *
 * We don't do similar optimization for completely idle system, as
 * selecting an idle cpu will add more delays to the timers than intended
 * (as that cpu's timer base may not be uptodate wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	/* default to self if no busy cpu is found in our domains */
	int cpu = smp_processor_id();
	int i;
	struct sched_domain *sd;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		for_each_cpu(i, sched_domain_span(sd)) {
			if (!idle_cpu(i)) {
				cpu = i;
				goto unlock;
			}
		}
	}
unlock:
	rcu_read_unlock();
	return cpu;
}
/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	/*
	 * This is safe, as this function is called with the timer
	 * wheel base lock of (cpu) held. When the CPU is on the way
	 * to idle and has not yet set rq->curr to idle then it will
	 * be serialized on the timer wheel base lock and take the new
	 * timer into account automatically.
	 */
	if (rq->curr != rq->idle)
		return;

	/*
	 * We can set TIF_RESCHED on the idle task of the other CPU
	 * lockless. The worst case is that the other CPU runs the
	 * idle task through an additional NOOP schedule()
	 */
	set_tsk_need_resched(rq->idle);

	/* NEED_RESCHED must be visible before we test polling */
	smp_mb();
	if (!tsk_is_polling(rq->idle))
		smp_send_reschedule(cpu);
}

/* Kick a nohz_full CPU; returns true iff @cpu was handled as nohz_full. */
static bool wake_up_full_nohz_cpu(int cpu)
{
	if (tick_nohz_full_cpu(cpu)) {
		/* an IPI to self is only useful when our tick is stopped */
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			smp_send_reschedule(cpu);
		return true;
	}

	return false;
}

/* Wake @cpu, preferring the nohz_full path, else the idle-cpu path. */
void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

/* Was this CPU sent an idle-load-balance kick that it should act on? */
static inline bool got_nohz_idle_kick(void)
{
	int cpu = smp_processor_id();

	if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
		return false;

	if (idle_cpu(cpu) && !need_resched())
		return true;

	/*
	 * We can't run Idle Load Balance on this CPU for this time so we
	 * cancel it and clear NOHZ_BALANCE_KICK
	 */
	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
	return false;
}

#else /* CONFIG_NO_HZ_COMMON */

static inline bool got_nohz_idle_kick(void)
{
	return false;
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
/* May the periodic tick be stopped on this CPU right now? */
bool sched_can_stop_tick(void)
{
	struct rq *rq;

	rq = this_rq();

	/* Make sure rq->nr_running update is visible after the IPI */
	smp_rmb();

	/* More than one running task need preemption */
	if (rq->nr_running > 1)
		return false;

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */

/* Age rq->rt_avg: halve it once per elapsed sched_avg_period(). */
void sched_avg_update(struct rq *rq)
{
	s64 period = sched_avg_period();

	while ((s64)(rq->clock - rq->age_stamp) > period) {
		/*
		 * Inline assembly required to prevent the
compiler * optimising this loop into a divmod call. * See __iter_div_u64_rem() for another example of this. */ asm("" : "+rm" (rq->age_stamp)); rq->age_stamp += period; rq->rt_avg /= 2; } } /* * Note C-state for (idle) cpus. * * @cstate = cstate index, 0 -> active state * @wakeup_energy = energy spent in waking up cpu * @wakeup_latency = latency to wakeup from cstate * */ void sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency) { struct rq *rq = cpu_rq(cpu); rq->cstate = cstate; /* C1, C2 etc */ rq->wakeup_energy = wakeup_energy; rq->wakeup_latency = wakeup_latency; } #else /* !CONFIG_SMP */ void resched_task(struct task_struct *p) { assert_raw_spin_locked(&task_rq(p)->lock); set_tsk_need_resched(p); } #endif /* CONFIG_SMP */ #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \ (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH))) /* * Iterate task_group tree rooted at *from, calling @down when first entering a * node and @up when leaving it for the final time. * * Caller must hold rcu_lock or sufficient equivalent. 
*/ int walk_tg_tree_from(struct task_group *from, tg_visitor down, tg_visitor up, void *data) { struct task_group *parent, *child; int ret; parent = from; down: ret = (*down)(parent, data); if (ret) goto out; list_for_each_entry_rcu(child, &parent->children, siblings) { parent = child; goto down; up: continue; } ret = (*up)(parent, data); if (ret || parent == from) goto out; child = parent; parent = parent->parent; if (parent) goto up; out: return ret; } int tg_nop(struct task_group *tg, void *data) { return 0; } #endif static void set_load_weight(struct task_struct *p) { int prio = p->static_prio - MAX_RT_PRIO; struct load_weight *load = &p->se.load; /* * SCHED_IDLE tasks get minimal weight: */ if (p->policy == SCHED_IDLE) { load->weight = scale_load(WEIGHT_IDLEPRIO); load->inv_weight = WMULT_IDLEPRIO; return; } load->weight = scale_load(prio_to_weight[prio]); load->inv_weight = prio_to_wmult[prio]; } static void enqueue_task(struct rq *rq, struct task_struct *p, int flags) { update_rq_clock(rq); sched_info_queued(p); p->sched_class->enqueue_task(rq, p, flags); trace_sched_enq_deq_task(p, 1, cpumask_bits(&p->cpus_allowed)[0]); inc_cumulative_runnable_avg(rq, p); } static void dequeue_task(struct rq *rq, struct task_struct *p, int flags) { update_rq_clock(rq); sched_info_dequeued(p); p->sched_class->dequeue_task(rq, p, flags); trace_sched_enq_deq_task(p, 0, cpumask_bits(&p->cpus_allowed)[0]); dec_cumulative_runnable_avg(rq, p); } void activate_task(struct rq *rq, struct task_struct *p, int flags) { if (task_contributes_to_load(p)) rq->nr_uninterruptible--; enqueue_task(rq, p, flags); } void deactivate_task(struct rq *rq, struct task_struct *p, int flags) { if (task_contributes_to_load(p)) rq->nr_uninterruptible++; dequeue_task(rq, p, flags); } static void update_rq_clock_task(struct rq *rq, s64 delta) { /* * In theory, the compile should just see 0 here, and optimize out the call * to sched_rt_avg_update. But I don't trust it... 
*/ #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) s64 steal = 0, irq_delta = 0; #endif #ifdef CONFIG_IRQ_TIME_ACCOUNTING irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; /* * Since irq_time is only updated on {soft,}irq_exit, we might run into * this case when a previous update_rq_clock() happened inside a * {soft,}irq region. * * When this happens, we stop ->clock_task and only update the * prev_irq_time stamp to account for the part that fit, so that a next * update will consume the rest. This ensures ->clock_task is * monotonic. * * It does however cause some slight miss-attribution of {soft,}irq * time, a more accurate solution would be to update the irq_time using * the current rq->clock timestamp, except that would require using * atomic ops. */ if (irq_delta > delta) irq_delta = delta; rq->prev_irq_time += irq_delta; delta -= irq_delta; #endif #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING if (static_key_false((&paravirt_steal_rq_enabled))) { u64 st; steal = paravirt_steal_clock(cpu_of(rq)); steal -= rq->prev_steal_time_rq; if (unlikely(steal > delta)) steal = delta; st = steal_ticks(steal); steal = st * TICK_NSEC; rq->prev_steal_time_rq += steal; delta -= steal; } #endif rq->clock_task += delta; #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) if ((irq_delta + steal) && sched_feat(NONTASK_POWER)) sched_rt_avg_update(rq, irq_delta + steal); #endif } void sched_set_stop_task(int cpu, struct task_struct *stop) { struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; struct task_struct *old_stop = cpu_rq(cpu)->stop; if (stop) { /* * Make it appear like a SCHED_FIFO task, its something * userspace knows about and won't get confused about. * * Also, it will make PI more or less work without too * much confusion -- but then, stop work should not * rely on PI working anyway. 
		 */
		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);

		stop->sched_class = &stop_sched_class;
	}

	cpu_rq(cpu)->stop = stop;

	if (old_stop) {
		/*
		 * Reset it back to a normal scheduling class so that
		 * it can die in pieces.
		 */
		old_stop->sched_class = &rt_sched_class;
	}
}

/*
 * __normal_prio - return the priority that is based on the static prio
 */
static inline int __normal_prio(struct task_struct *p)
{
	return p->static_prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	int prio;

	if (task_has_rt_policy(p))
		/* map rt_priority (higher = stronger) onto prio (lower = stronger) */
		prio = MAX_RT_PRIO-1 - p->rt_priority;
	else
		prio = __normal_prio(p);
	return prio;
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
*/ inline int task_curr(const struct task_struct *p) { return cpu_curr(task_cpu(p)) == p; } static inline void check_class_changed(struct rq *rq, struct task_struct *p, const struct sched_class *prev_class, int oldprio) { if (prev_class != p->sched_class) { if (prev_class->switched_from) prev_class->switched_from(rq, p); p->sched_class->switched_to(rq, p); } else if (oldprio != p->prio) p->sched_class->prio_changed(rq, p, oldprio); } void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) { const struct sched_class *class; if (p->sched_class == rq->curr->sched_class) { rq->curr->sched_class->check_preempt_curr(rq, p, flags); } else { for_each_class(class) { if (class == rq->curr->sched_class) break; if (class == p->sched_class) { resched_task(rq->curr); break; } } } /* * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ if (rq->curr->on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } static ATOMIC_NOTIFIER_HEAD(task_migration_notifier); void register_task_migration_notifier(struct notifier_block *n) { atomic_notifier_chain_register(&task_migration_notifier, n); } #ifdef CONFIG_SCHED_HMP static int __init set_sched_enable_hmp(char *str) { int enable_hmp = 0; get_option(&str, &enable_hmp); sched_enable_hmp = !!enable_hmp; return 0; } early_param("sched_enable_hmp", set_sched_enable_hmp); static int __init set_sched_enable_power_aware(char *str) { int enable_power_aware = 0; get_option(&str, &enable_power_aware); sched_enable_power_aware = !!enable_power_aware; return 0; } early_param("sched_enable_power_aware", set_sched_enable_power_aware); static inline int got_boost_kick(void) { int cpu = smp_processor_id(); struct rq *rq = cpu_rq(cpu); return test_bit(BOOST_KICK, &rq->hmp_flags); } static inline void clear_boost_kick(int cpu) { struct rq *rq = cpu_rq(cpu); clear_bit(BOOST_KICK, &rq->hmp_flags); } void boost_kick(int cpu) { struct rq *rq = cpu_rq(cpu); 
if (!test_and_set_bit(BOOST_KICK, &rq->hmp_flags)) smp_send_reschedule(cpu); } /* Clear any HMP scheduler related requests pending from or on cpu */ static inline void clear_hmp_request(int cpu) { struct rq *rq = cpu_rq(cpu); unsigned long flags; clear_boost_kick(cpu); clear_reserved(cpu); if (rq->push_task) { raw_spin_lock_irqsave(&rq->lock, flags); if (rq->push_task) { clear_reserved(rq->push_cpu); put_task_struct(rq->push_task); rq->push_task = NULL; } rq->active_balance = 0; raw_spin_unlock_irqrestore(&rq->lock, flags); } } #else static inline int got_boost_kick(void) { return 0; } static inline void clear_boost_kick(int cpu) { } static inline void clear_hmp_request(int cpu) { } #endif /* CONFIG_SCHED_HMP */ #if defined(CONFIG_SCHED_HMP) /* * sched_window_stats_policy, sched_account_wait_time, sched_ravg_hist_size, * sched_migration_fixup, sched_freq_account_wait_time have a 'sysctl' copy * associated with them. This is required for atomic update of those variables * when being modifed via sysctl interface. * * IMPORTANT: Initialize both copies to same value!! 
*/ static __read_mostly unsigned int sched_ravg_hist_size = 5; __read_mostly unsigned int sysctl_sched_ravg_hist_size = 5; static __read_mostly unsigned int sched_window_stats_policy = WINDOW_STATS_MAX_RECENT_AVG; __read_mostly unsigned int sysctl_sched_window_stats_policy = WINDOW_STATS_MAX_RECENT_AVG; static __read_mostly unsigned int sched_account_wait_time = 1; __read_mostly unsigned int sysctl_sched_account_wait_time = 1; #ifdef CONFIG_SCHED_FREQ_INPUT static __read_mostly unsigned int sched_migration_fixup = 1; __read_mostly unsigned int sysctl_sched_migration_fixup = 1; static __read_mostly unsigned int sched_freq_account_wait_time; __read_mostly unsigned int sysctl_sched_freq_account_wait_time; /* * For increase, send notification if * freq_required - cur_freq > sysctl_sched_freq_inc_notify */ __read_mostly int sysctl_sched_freq_inc_notify = 10 * 1024 * 1024; /* + 10GHz */ /* * For decrease, send notification if * cur_freq - freq_required > sysctl_sched_freq_dec_notify */ __read_mostly int sysctl_sched_freq_dec_notify = 10 * 1024 * 1024; /* - 10GHz */ static __read_mostly unsigned int sched_io_is_busy; #endif /* CONFIG_SCHED_FREQ_INPUT */ /* 1 -> use PELT based load stats, 0 -> use window-based load stats */ unsigned int __read_mostly sched_use_pelt; unsigned int max_possible_efficiency = 1024; unsigned int min_possible_efficiency = 1024; /* * Maximum possible frequency across all cpus. Task demand and cpu * capacity (cpu_power) metrics are scaled in reference to it. */ unsigned int max_possible_freq = 1; /* * Minimum possible max_freq across all cpus. This will be same as * max_possible_freq on homogeneous systems and could be different from * max_possible_freq on heterogenous systems. min_max_freq is used to derive * capacity (cpu_power) of cpus. 
*/ unsigned int min_max_freq = 1; unsigned int max_capacity = 1024; /* max(rq->capacity) */ unsigned int min_capacity = 1024; /* min(rq->capacity) */ unsigned int max_load_scale_factor = 1024; /* max possible load scale factor */ unsigned int max_possible_capacity = 1024; /* max(rq->max_possible_capacity) */ unsigned int min_max_possible_capacity = 1024; /* min(max_possible_capacity) */ unsigned int min_max_capacity_delta_pct; /* Window size (in ns) */ __read_mostly unsigned int sched_ravg_window = 10000000; /* Min window size (in ns) = 10ms */ #define MIN_SCHED_RAVG_WINDOW 10000000 /* Max window size (in ns) = 1s */ #define MAX_SCHED_RAVG_WINDOW 1000000000 /* Temporarily disable window-stats activity on all cpus */ unsigned int __read_mostly sched_disable_window_stats; static unsigned int sync_cpu; #define EXITING_TASK_MARKER 0xdeaddead static inline int exiting_task(struct task_struct *p) { return (p->ravg.sum_history[0] == EXITING_TASK_MARKER); } static int __init set_sched_ravg_window(char *str) { get_option(&str, &sched_ravg_window); sched_use_pelt = (sched_ravg_window < MIN_SCHED_RAVG_WINDOW || sched_ravg_window > MAX_SCHED_RAVG_WINDOW); return 0; } early_param("sched_ravg_window", set_sched_ravg_window); static inline void update_window_start(struct rq *rq, u64 wallclock) { s64 delta; int nr_windows; delta = wallclock - rq->window_start; BUG_ON(delta < 0); if (delta < sched_ravg_window) return; nr_windows = div64_u64(delta, sched_ravg_window); rq->window_start += (u64)nr_windows * (u64)sched_ravg_window; } static inline u64 scale_exec_time(u64 delta, struct rq *rq) { unsigned int cur_freq = rq->cur_freq; int sf; if (unlikely(cur_freq > max_possible_freq || (cur_freq == rq->max_freq && rq->max_freq < rq->max_possible_freq))) cur_freq = rq->max_possible_freq; delta = div64_u64(delta * cur_freq, max_possible_freq); sf = (rq->efficiency * 1024) / max_possible_efficiency; delta *= sf; delta >>= 10; return delta; } #ifdef CONFIG_SCHED_FREQ_INPUT static inline int 
cpu_is_waiting_on_io(struct rq *rq) { if (!sched_io_is_busy) return 0; return atomic_read(&rq->nr_iowait); } /* Does freq_required sufficiently exceed or fall behind cur_freq? */ static inline int nearly_same_freq(unsigned int cur_freq, unsigned int freq_required) { int delta = freq_required - cur_freq; if (freq_required > cur_freq) return delta < sysctl_sched_freq_inc_notify; delta = -delta; return delta < sysctl_sched_freq_dec_notify; } /* Convert busy time to frequency equivalent */ static inline unsigned int load_to_freq(struct rq *rq, u64 load) { unsigned int freq; load = scale_load_to_cpu(load, cpu_of(rq)); load *= 128; load = div64_u64(load, max_task_load()); freq = load * rq->max_possible_freq; freq /= 128; return freq; } /* Should scheduler alert governor for changing frequency? */ static int send_notification(struct rq *rq) { unsigned int cur_freq, freq_required; unsigned long flags; int rc = 0; if (!sched_enable_hmp) return 0; cur_freq = load_to_freq(rq, rq->old_busy_time); freq_required = load_to_freq(rq, rq->prev_runnable_sum); if (nearly_same_freq(cur_freq, freq_required)) return 0; raw_spin_lock_irqsave(&rq->lock, flags); if (!rq->notifier_sent) { rq->notifier_sent = 1; rc = 1; } raw_spin_unlock_irqrestore(&rq->lock, flags); return rc; } /* Alert governor if there is a need to change frequency */ void check_for_freq_change(struct rq *rq) { int cpu = cpu_of(rq); if (!send_notification(rq)) return; trace_sched_freq_alert(cpu, rq->old_busy_time, rq->prev_runnable_sum); atomic_notifier_call_chain( &load_alert_notifier_head, 0, (void *)(long)cpu); } static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p, u64 irqtime, int event) { if (is_idle_task(p)) { /* TASK_WAKE && TASK_MIGRATE is not possible on idle task! 
*/ if (event == PICK_NEXT_TASK) return 0; /* PUT_PREV_TASK, TASK_UPDATE && IRQ_UPDATE are left */ return irqtime || cpu_is_waiting_on_io(rq); } if (event == TASK_WAKE) return 0; if (event == PUT_PREV_TASK || event == IRQ_UPDATE || event == TASK_UPDATE) return 1; /* Only TASK_MIGRATE && PICK_NEXT_TASK left */ return sched_freq_account_wait_time; } static inline int heavy_task_wakeup(struct task_struct *p, struct rq *rq, int event) { u32 task_demand = p->ravg.demand; if (!sched_heavy_task || event != TASK_WAKE || task_demand < sched_heavy_task || exiting_task(p)) return 0; if (p->ravg.mark_start > rq->window_start) return 0; /* has a full window elapsed since task slept? */ return (rq->window_start - p->ravg.mark_start > sched_ravg_window); } /* * Account cpu activity in its busy time counters (rq->curr/prev_runnable_sum) */ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq, int event, u64 wallclock, u64 irqtime) { int new_window, nr_full_windows = 0; int p_is_curr_task = (p == rq->curr); u64 mark_start = p->ravg.mark_start; u64 window_start = rq->window_start; u32 window_size = sched_ravg_window; u64 delta; new_window = mark_start < window_start; if (new_window) nr_full_windows = div64_u64((window_start - mark_start), window_size); /* Handle per-task window rollover. We don't care about the idle * task or exiting tasks. */ if (new_window && !is_idle_task(p) && !exiting_task(p)) { u32 curr_window = 0; if (!nr_full_windows) curr_window = p->ravg.curr_window; p->ravg.prev_window = curr_window; p->ravg.curr_window = 0; } if (!account_busy_for_cpu_time(rq, p, irqtime, event)) { /* account_busy_for_cpu_time() = 0, so no update to the * task's current window needs to be made. 
This could be * for example * * - a wakeup event on a task within the current * window (!new_window below, no action required), * - switching to a new task from idle (PICK_NEXT_TASK) * in a new window where irqtime is 0 and we aren't * waiting on IO */ if (!new_window) return; /* A new window has started. The RQ demand must be rolled * over if p is the current task. */ if (p_is_curr_task) { u64 prev_sum = 0; /* p is either idle task or an exiting task */ if (!nr_full_windows) prev_sum = rq->curr_runnable_sum; rq->prev_runnable_sum = prev_sum; rq->curr_runnable_sum = 0; } else if (heavy_task_wakeup(p, rq, event)) { /* A new window has started. If p is a waking * heavy task its prev_window contribution is faked * to be its window-based demand. Note that this can * introduce phantom load into the system depending * on the window policy and task behavior. This feature * can be controlled via the sched_heavy_task * tunable. */ p->ravg.prev_window = p->ravg.demand; rq->prev_runnable_sum += p->ravg.demand; } return; } if (!new_window) { /* account_busy_for_cpu_time() = 1 so busy time needs * to be accounted to the current window. No rollover * since we didn't start a new window. An example of this is * when a task starts execution and then sleeps within the * same window. */ if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq)) delta = wallclock - mark_start; else delta = irqtime; delta = scale_exec_time(delta, rq); rq->curr_runnable_sum += delta; if (!is_idle_task(p) && !exiting_task(p)) p->ravg.curr_window += delta; return; } if (!p_is_curr_task) { /* account_busy_for_cpu_time() = 1 so busy time needs * to be accounted to the current window. A new window * has also started, but p is not the current task, so the * window is not rolled over - just split up and account * as necessary into curr and prev. The window is only * rolled over when a new window is processed for the current * task. 
* * Irqtime can't be accounted by a task that isn't the * currently running task. */ if (!nr_full_windows) { /* A full window hasn't elapsed, account partial * contribution to previous completed window. */ delta = scale_exec_time(window_start - mark_start, rq); if (!exiting_task(p)) p->ravg.prev_window += delta; } else { /* Since at least one full window has elapsed, * the contribution to the previous window is the * full window (window_size). */ delta = scale_exec_time(window_size, rq); if (!exiting_task(p)) p->ravg.prev_window = delta; } rq->prev_runnable_sum += delta; /* Account piece of busy time in the current window. */ delta = scale_exec_time(wallclock - window_start, rq); rq->curr_runnable_sum += delta; if (!exiting_task(p)) p->ravg.curr_window = delta; return; } if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq)) { /* account_busy_for_cpu_time() = 1 so busy time needs * to be accounted to the current window. A new window * has started and p is the current task so rollover is * needed. If any of these three above conditions are true * then this busy time can't be accounted as irqtime. * * Busy time for the idle task or exiting tasks need not * be accounted. * * An example of this would be a task that starts execution * and then sleeps once a new window has begun. */ if (!nr_full_windows) { /* A full window hasn't elapsed, account partial * contribution to previous completed window. */ delta = scale_exec_time(window_start - mark_start, rq); if (!is_idle_task(p) && !exiting_task(p)) p->ravg.prev_window += delta; delta += rq->curr_runnable_sum; } else { /* Since at least one full window has elapsed, * the contribution to the previous window is the * full window (window_size). */ delta = scale_exec_time(window_size, rq); if (!is_idle_task(p) && !exiting_task(p)) p->ravg.prev_window = delta; } /* Rollover is done here by overwriting the values in * prev_runnable_sum and curr_runnable_sum. 
*/ rq->prev_runnable_sum = delta; /* Account piece of busy time in the current window. */ delta = scale_exec_time(wallclock - window_start, rq); rq->curr_runnable_sum = delta; if (!is_idle_task(p) && !exiting_task(p)) p->ravg.curr_window = delta; return; } if (irqtime) { /* account_busy_for_cpu_time() = 1 so busy time needs * to be accounted to the current window. A new window * has started and p is the current task so rollover is * needed. The current task must be the idle task because * irqtime is not accounted for any other task. * * Irqtime will be accounted each time we process IRQ activity * after a period of idleness, so we know the IRQ busy time * started at wallclock - irqtime. */ BUG_ON(!is_idle_task(p)); mark_start = wallclock - irqtime; /* Roll window over. If IRQ busy time was just in the current * window then that is all that need be accounted. */ rq->prev_runnable_sum = rq->curr_runnable_sum; if (mark_start > window_start) { rq->curr_runnable_sum = scale_exec_time(irqtime, rq); return; } /* The IRQ busy time spanned multiple windows. Process the * busy time preceding the current window start first. */ delta = window_start - mark_start; if (delta > window_size) { delta = window_size; /* If there's 1 or more full windows of IRQ busy time * then the entire prev_runnable_sum will be a window * of IRQ time - there should be no contribution from * anything else. */ rq->prev_runnable_sum = 0; } delta = scale_exec_time(delta, rq); rq->prev_runnable_sum += delta; /* Process the remaining IRQ busy time in the current window. 
 */
	delta = wallclock - window_start;
	rq->curr_runnable_sum = scale_exec_time(delta, rq);

	return;
	}

	/* Should not reach here: all event combinations handled above */
	BUG();
}

#else	/* CONFIG_SCHED_FREQ_INPUT */

static inline void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
	     int event, u64 wallclock, u64 irqtime)
{
}

#endif	/* CONFIG_SCHED_FREQ_INPUT */

/*
 * Decide whether the elapsed time for this event counts toward p's
 * window-based demand (p->ravg.sum).
 */
static int account_busy_for_task_demand(struct task_struct *p, int event)
{
	/* No need to bother updating task demand for exiting tasks
	 * or the idle task. */
	if (exiting_task(p) || is_idle_task(p))
		return 0;

	/* When a task is waking up it is completing a segment of non-busy
	 * time. Likewise, if wait time is not treated as busy time, then
	 * when a task begins to run or is migrated, it is not running and
	 * is completing a segment of non-busy time. */
	if (event == TASK_WAKE || (!sched_account_wait_time &&
			 (event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
		return 0;

	return 1;
}

/*
 * Called when new window is starting for a task, to record cpu usage over
 * recently concluded window(s). Normally 'samples' should be 1. It can be > 1
 * when, say, a real-time task runs without preemption for several windows at a
 * stretch.
 */
static void update_history(struct rq *rq, struct task_struct *p,
			 u32 runtime, int samples, int event)
{
	u32 *hist = &p->ravg.sum_history[0];
	int ridx, widx;
	u32 max = 0, avg, demand;
	u64 sum = 0;

	/* Ignore windows where task had no activity */
	if (!runtime || is_idle_task(p) || exiting_task(p) || !samples)
		goto done;

	/* Push new 'runtime' value onto stack */
	/* Shift the existing history down by 'samples' slots, tracking the
	 * running sum and max over the retained entries as we go. */
	widx = sched_ravg_hist_size - 1;
	ridx = widx - samples;
	for (; ridx >= 0; --widx, --ridx) {
		hist[widx] = hist[ridx];
		sum += hist[widx];
		if (hist[widx] > max)
			max = hist[widx];
	}

	/* Fill the freed slots with the new 'runtime' sample */
	for (widx = 0; widx < samples && widx < sched_ravg_hist_size; widx++) {
		hist[widx] = runtime;
		sum += hist[widx];
		if (hist[widx] > max)
			max = hist[widx];
	}

	p->ravg.sum = 0;

	/* Temporarily remove the old demand from rq accounting so it can be
	 * re-added below after the demand value is recomputed. */
	if (p->on_rq) {
		rq->cumulative_runnable_avg -= p->ravg.demand;
		BUG_ON((s64)rq->cumulative_runnable_avg < 0);
		if (p->sched_class == &fair_sched_class)
			dec_nr_big_small_task(rq, p);
	}

	avg = div64_u64(sum, sched_ravg_hist_size);

	/* Select demand per the window-stats policy: most recent sample,
	 * max over history, average, or max(average, most recent). */
	if (sched_window_stats_policy == WINDOW_STATS_RECENT)
		demand = runtime;
	else if (sched_window_stats_policy == WINDOW_STATS_MAX)
		demand = max;
	else if (sched_window_stats_policy == WINDOW_STATS_AVG)
		demand = avg;
	else
		demand = max(avg, runtime);

	p->ravg.demand = demand;

	if (p->on_rq) {
		rq->cumulative_runnable_avg += p->ravg.demand;
		if (p->sched_class == &fair_sched_class)
			inc_nr_big_small_task(rq, p);
	}

done:
	trace_sched_update_history(rq, p, runtime, samples, event);
}

/*
 * Add 'delta' ns of (frequency/efficiency scaled) execution time to p's
 * running window sum, clamped to one full window.
 */
static void add_to_task_demand(struct rq *rq, struct task_struct *p, u64 delta)
{
	delta = scale_exec_time(delta, rq);
	p->ravg.sum += delta;
	if (unlikely(p->ravg.sum > sched_ravg_window))
		p->ravg.sum = sched_ravg_window;
}

/*
 * Account cpu demand of task and/or update task's cpu demand history
 *
 * ms = p->ravg.mark_start;
 * wc = wallclock
 * ws = rq->window_start
 *
 * Three possibilities:
 *
 * a) Task event is contained within one window.
* window_start < mark_start < wallclock * * ws ms wc * | | | * V V V * |---------------| * * In this case, p->ravg.sum is updated *iff* event is appropriate * (ex: event == PUT_PREV_TASK) * * b) Task event spans two windows. * mark_start < window_start < wallclock * * ms ws wc * | | | * V V V * -----|------------------- * * In this case, p->ravg.sum is updated with (ws - ms) *iff* event * is appropriate, then a new window sample is recorded followed * by p->ravg.sum being set to (wc - ws) *iff* event is appropriate. * * c) Task event spans more than two windows. * * ms ws_tmp ws wc * | | | | * V V V V * ---|-------|-------|-------|-------|------ * | | * |<------ nr_full_windows ------>| * * In this case, p->ravg.sum is updated with (ws_tmp - ms) first *iff* * event is appropriate, window sample of p->ravg.sum is recorded, * 'nr_full_window' samples of window_size is also recorded *iff* * event is appropriate and finally p->ravg.sum is set to (wc - ws) * *iff* event is appropriate. * * IMPORTANT : Leave p->ravg.mark_start unchanged, as update_cpu_busy_time() * depends on it! */ static void update_task_demand(struct task_struct *p, struct rq *rq, int event, u64 wallclock) { u64 mark_start = p->ravg.mark_start; u64 delta, window_start = rq->window_start; int new_window, nr_full_windows; u32 window_size = sched_ravg_window; new_window = mark_start < window_start; if (!account_busy_for_task_demand(p, event)) { if (new_window) /* If the time accounted isn't being accounted as * busy time, and a new window started, only the * previous window need be closed out with the * pre-existing demand. Multiple windows may have * elapsed, but since empty windows are dropped, * it is not necessary to account those. */ update_history(rq, p, p->ravg.sum, 1, event); return; } if (!new_window) { /* The simple case - busy time contained within the existing * window. */ add_to_task_demand(rq, p, wallclock - mark_start); return; } /* Busy time spans at least two windows. 
Temporarily rewind * window_start to first window boundary after mark_start. */ delta = window_start - mark_start; nr_full_windows = div64_u64(delta, window_size); window_start -= (u64)nr_full_windows * (u64)window_size; /* Process (window_start - mark_start) first */ add_to_task_demand(rq, p, window_start - mark_start); /* Push new sample(s) into task's demand history */ update_history(rq, p, p->ravg.sum, 1, event); if (nr_full_windows) update_history(rq, p, scale_exec_time(window_size, rq), nr_full_windows, event); /* Roll window_start back to current to process any remainder * in current window. */ window_start += (u64)nr_full_windows * (u64)window_size; /* Process (wallclock - window_start) next */ mark_start = window_start; add_to_task_demand(rq, p, wallclock - mark_start); } /* Reflect task activity on its demand and cpu's busy time statistics */ static void update_task_ravg(struct task_struct *p, struct rq *rq, int event, u64 wallclock, u64 irqtime) { if (sched_use_pelt || !rq->window_start || sched_disable_window_stats) return; lockdep_assert_held(&rq->lock); update_window_start(rq, wallclock); if (!p->ravg.mark_start) goto done; update_task_demand(p, rq, event, wallclock); update_cpu_busy_time(p, rq, event, wallclock, irqtime); done: trace_sched_update_task_ravg(p, rq, event, wallclock, irqtime); p->ravg.mark_start = wallclock; } void sched_account_irqtime(int cpu, struct task_struct *curr, u64 delta, u64 wallclock) { struct rq *rq = cpu_rq(cpu); unsigned long flags; if (!is_idle_task(curr)) return; raw_spin_lock_irqsave(&rq->lock, flags); update_task_ravg(curr, rq, IRQ_UPDATE, wallclock, delta); raw_spin_unlock_irqrestore(&rq->lock, flags); } unsigned long __weak arch_get_cpu_efficiency(int cpu) { return SCHED_LOAD_SCALE; } static void init_cpu_efficiency(void) { int i, efficiency; unsigned int max = 0, min = UINT_MAX; if (!sched_enable_hmp) return; for_each_possible_cpu(i) { efficiency = arch_get_cpu_efficiency(i); cpu_rq(i)->efficiency = efficiency; if 
(efficiency > max)
			max = efficiency;
		if (efficiency < min)
			min = efficiency;
	}

	BUG_ON(!max || !min);

	max_possible_efficiency = max;
	min_possible_efficiency = min;
}

/*
 * Zero p's window statistics, preserving (or setting) the EXITING_TASK
 * marker in sum_history[0] for tasks that are exiting.
 */
static void reset_task_stats(struct task_struct *p)
{
	u32 sum = 0;

	if (exiting_task(p))
		sum = EXITING_TASK_MARKER;

	memset(&p->ravg, 0, sizeof(struct ravg));
	/* Retain EXITING_TASK marker */
	p->ravg.sum_history[0] = sum;
}

/* Initialize window accounting for a task that is starting to run */
static inline void mark_task_starting(struct task_struct *p)
{
	struct rq *rq = task_rq(p);
	u64 wallclock = sched_clock();

	/* Window tracking not active yet (or disabled): just reset stats */
	if (!rq->window_start || sched_disable_window_stats) {
		reset_task_stats(p);
		return;
	}

	p->ravg.mark_start = wallclock;
}

/*
 * Lazily initialize rq->window_start, aligning all cpus to sync_cpu's
 * window boundary.
 */
static inline void set_window_start(struct rq *rq)
{
	int cpu = cpu_of(rq);
	struct rq *sync_rq = cpu_rq(sync_cpu);

	if (rq->window_start || !sched_enable_hmp ||
	    !sched_clock_initialized() || !sched_clock_cpu(cpu))
		return;

	if (cpu == sync_cpu) {
		rq->window_start = sched_clock();
	} else {
		/*
		 * Drop rq->lock so both locks can be taken in order via
		 * double_rq_lock(); NOTE(review): assumes caller holds
		 * rq->lock on entry — confirm at call sites.
		 */
		raw_spin_unlock(&rq->lock);

		double_rq_lock(rq, sync_rq);
		rq->window_start = cpu_rq(sync_cpu)->window_start;
#ifdef CONFIG_SCHED_FREQ_INPUT
		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
#endif
		raw_spin_unlock(&sync_rq->lock);
	}

	rq->curr->ravg.mark_start = rq->window_start;
}

/* If the window-sync cpu goes offline, hand the role to this cpu */
static inline void migrate_sync_cpu(int cpu)
{
	if (cpu == sync_cpu)
		sync_cpu = smp_processor_id();
}

/* Reset window statistics of every thread in the system */
static void reset_all_task_stats(void)
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		reset_task_stats(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
}

/*
 * sched_exit() - Set EXITING_TASK_MARKER in task's ravg.demand field
 *
 * Stop accounting (exiting) task's future cpu usage
 *
 * We need this so that reset_all_windows_stats() can function correctly.
 * reset_all_window_stats() depends on do_each_thread/for_each_thread task
 * iterators to reset *all* task's statistics. Exiting tasks however become
 * invisible to those iterators.
sched_exit() is called on a exiting task prior * to being removed from task_list, which will let reset_all_window_stats() * function correctly. */ void sched_exit(struct task_struct *p) { unsigned long flags; int cpu = get_cpu(); struct rq *rq = cpu_rq(cpu); u64 wallclock; raw_spin_lock_irqsave(&rq->lock, flags); /* rq->curr == p */ wallclock = sched_clock(); update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0); dequeue_task(rq, p, 0); reset_task_stats(p); p->ravg.mark_start = wallclock; p->ravg.sum_history[0] = EXITING_TASK_MARKER; enqueue_task(rq, p, 0); raw_spin_unlock_irqrestore(&rq->lock, flags); put_cpu(); } static void disable_window_stats(void) { unsigned long flags; int i; local_irq_save(flags); for_each_possible_cpu(i) raw_spin_lock(&cpu_rq(i)->lock); sched_disable_window_stats = 1; for_each_possible_cpu(i) raw_spin_unlock(&cpu_rq(i)->lock); local_irq_restore(flags); } /* Called with all cpu's rq->lock held */ static void enable_window_stats(void) { sched_disable_window_stats = 0; } enum reset_reason_code { WINDOW_CHANGE, POLICY_CHANGE, ACCOUNT_WAIT_TIME_CHANGE, HIST_SIZE_CHANGE, MIGRATION_FIXUP_CHANGE, FREQ_ACCOUNT_WAIT_TIME_CHANGE }; const char *sched_window_reset_reasons[] = { "WINDOW_CHANGE", "POLICY_CHANGE", "ACCOUNT_WAIT_TIME_CHANGE", "HIST_SIZE_CHANGE", "MIGRATION_FIXUP_CHANGE", "FREQ_ACCOUNT_WAIT_TIME_CHANGE"}; /* Called with IRQs enabled */ void reset_all_window_stats(u64 window_start, unsigned int window_size) { int cpu; unsigned long flags; u64 start_ts = sched_clock(); int reason = WINDOW_CHANGE; unsigned int old = 0, new = 0; disable_window_stats(); reset_all_task_stats(); local_irq_save(flags); for_each_possible_cpu(cpu) { struct rq *rq = cpu_rq(cpu); raw_spin_lock(&rq->lock); } if (window_size) { sched_ravg_window = window_size * TICK_NSEC; set_hmp_defaults(); } enable_window_stats(); for_each_possible_cpu(cpu) { struct rq *rq = cpu_rq(cpu); if (window_start) rq->window_start = window_start; #ifdef CONFIG_SCHED_FREQ_INPUT 
rq->curr_runnable_sum = rq->prev_runnable_sum = 0; #endif rq->cumulative_runnable_avg = 0; fixup_nr_big_small_task(cpu); } if (sched_window_stats_policy != sysctl_sched_window_stats_policy) { reason = POLICY_CHANGE; old = sched_window_stats_policy; new = sysctl_sched_window_stats_policy; sched_window_stats_policy = sysctl_sched_window_stats_policy; } else if (sched_account_wait_time != sysctl_sched_account_wait_time) { reason = ACCOUNT_WAIT_TIME_CHANGE; old = sched_account_wait_time; new = sysctl_sched_account_wait_time; sched_account_wait_time = sysctl_sched_account_wait_time; } else if (sched_ravg_hist_size != sysctl_sched_ravg_hist_size) { reason = HIST_SIZE_CHANGE; old = sched_ravg_hist_size; new = sysctl_sched_ravg_hist_size; sched_ravg_hist_size = sysctl_sched_ravg_hist_size; } #ifdef CONFIG_SCHED_FREQ_INPUT else if (sched_migration_fixup != sysctl_sched_migration_fixup) { reason = MIGRATION_FIXUP_CHANGE; old = sched_migration_fixup; new = sysctl_sched_migration_fixup; sched_migration_fixup = sysctl_sched_migration_fixup; } else if (sched_freq_account_wait_time != sysctl_sched_freq_account_wait_time) { reason = FREQ_ACCOUNT_WAIT_TIME_CHANGE; old = sched_freq_account_wait_time; new = sysctl_sched_freq_account_wait_time; sched_freq_account_wait_time = sysctl_sched_freq_account_wait_time; } #endif for_each_possible_cpu(cpu) { struct rq *rq = cpu_rq(cpu); raw_spin_unlock(&rq->lock); } local_irq_restore(flags); trace_sched_reset_all_window_stats(window_start, window_size, sched_clock() - start_ts, reason, old, new); } #ifdef CONFIG_SCHED_FREQ_INPUT static inline u64 scale_load_to_freq(u64 load, unsigned int src_freq, unsigned int dst_freq) { return div64_u64(load * (u64)src_freq, (u64)dst_freq); } unsigned long sched_get_busy(int cpu) { unsigned long flags; struct rq *rq = cpu_rq(cpu); u64 load; /* * This function could be called in timer context, and the * current task may have been executing for a long time. 
Ensure * that the window stats are current by doing an update. */ raw_spin_lock_irqsave(&rq->lock, flags); update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), 0); load = rq->old_busy_time = rq->prev_runnable_sum; /* * Scale load in reference to rq->max_possible_freq. * * Note that scale_load_to_cpu() scales load in reference to * rq->max_freq */ load = scale_load_to_cpu(load, cpu); if (!rq->notifier_sent) { u64 load_at_cur_freq; load_at_cur_freq = scale_load_to_freq(load, rq->max_freq, rq->cur_freq); if (load_at_cur_freq > sched_ravg_window) load_at_cur_freq = sched_ravg_window; load = scale_load_to_freq(load_at_cur_freq, rq->cur_freq, rq->max_possible_freq); } else { load = scale_load_to_freq(load, rq->max_freq, rq->max_possible_freq); rq->notifier_sent = 0; } load = div64_u64(load, NSEC_PER_USEC); raw_spin_unlock_irqrestore(&rq->lock, flags); trace_sched_get_busy(cpu, load); return load; } void sched_set_io_is_busy(int val) { sched_io_is_busy = val; } int sched_set_window(u64 window_start, unsigned int window_size) { u64 now, cur_jiffies, jiffy_sched_clock; s64 ws; unsigned long flags; if (sched_use_pelt || (window_size * TICK_NSEC < MIN_SCHED_RAVG_WINDOW)) return -EINVAL; mutex_lock(&policy_mutex); /* Get a consistent view of sched_clock, jiffies, and the time * since the last jiffy (based on last_jiffies_update). 
*/ local_irq_save(flags); cur_jiffies = jiffy_to_sched_clock(&now, &jiffy_sched_clock); local_irq_restore(flags); /* translate window_start from jiffies to nanoseconds */ ws = (window_start - cur_jiffies); /* jiffy difference */ ws *= TICK_NSEC; ws += jiffy_sched_clock; /* roll back calculated window start so that it is in * the past (window stats must have a current window) */ while (ws > now) ws -= (window_size * TICK_NSEC); BUG_ON(sched_clock() < ws); reset_all_window_stats(ws, window_size); mutex_unlock(&policy_mutex); return 0; } static void fixup_busy_time(struct task_struct *p, int new_cpu) { struct rq *src_rq = task_rq(p); struct rq *dest_rq = cpu_rq(new_cpu); u64 wallclock; if (!sched_enable_hmp || !sched_migration_fixup || exiting_task(p) || (!p->on_rq && p->state != TASK_WAKING)) return; if (p->state == TASK_WAKING) double_rq_lock(src_rq, dest_rq); if (sched_disable_window_stats) goto done; wallclock = sched_clock(); update_task_ravg(task_rq(p)->curr, task_rq(p), TASK_UPDATE, wallclock, 0); update_task_ravg(dest_rq->curr, dest_rq, TASK_UPDATE, wallclock, 0); /* * In case of migration of task on runqueue, on_rq =1, * however its load is removed from its runqueue. * update_task_ravg() below can update its demand, which * will require its load on runqueue to be adjusted to * reflect new demand. Restore load temporarily for such * task on its runqueue */ if (p->on_rq) { inc_cumulative_runnable_avg(src_rq, p); if (p->sched_class == &fair_sched_class) inc_nr_big_small_task(src_rq, p); } update_task_ravg(p, task_rq(p), TASK_MIGRATE, wallclock, 0); /* * Remove task's load from rq as its now migrating to * another cpu. 
*/ if (p->on_rq) { dec_cumulative_runnable_avg(src_rq, p); if (p->sched_class == &fair_sched_class) dec_nr_big_small_task(src_rq, p); } if (p->ravg.curr_window) { src_rq->curr_runnable_sum -= p->ravg.curr_window; dest_rq->curr_runnable_sum += p->ravg.curr_window; } if (p->ravg.prev_window) { src_rq->prev_runnable_sum -= p->ravg.prev_window; dest_rq->prev_runnable_sum += p->ravg.prev_window; } BUG_ON((s64)src_rq->prev_runnable_sum < 0); BUG_ON((s64)src_rq->curr_runnable_sum < 0); trace_sched_migration_update_sum(src_rq, p); trace_sched_migration_update_sum(dest_rq, p); done: if (p->state == TASK_WAKING) double_rq_unlock(src_rq, dest_rq); } #else static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { } static inline int heavy_task_wakeup(struct task_struct *p, struct rq *rq, int event) { return 0; } #endif /* CONFIG_SCHED_FREQ_INPUT */ /* Keep track of max/min capacity possible across CPUs "currently" */ static void update_min_max_capacity(void) { int i; int max = 0, min = INT_MAX; int max_pc = INT_MIN, min_pc = INT_MAX; int max_lsf = 0; for_each_possible_cpu(i) { if (cpu_rq(i)->capacity > max) max = cpu_rq(i)->capacity; if (cpu_rq(i)->capacity < min) min = cpu_rq(i)->capacity; if (cpu_rq(i)->load_scale_factor > max_lsf) max_lsf = cpu_rq(i)->load_scale_factor; max_pc = max(cpu_rq(i)->max_possible_capacity, max_pc); if (cpu_rq(i)->max_possible_capacity > 0) min_pc = min(cpu_rq(i)->max_possible_capacity, min_pc); } max_capacity = max; min_capacity = min; max_load_scale_factor = max_lsf; max_possible_capacity = max_pc; min_max_possible_capacity = min_pc; BUG_ON(max_possible_capacity < min_max_possible_capacity); min_max_capacity_delta_pct = div64_u64((u64)(max_possible_capacity - min_max_possible_capacity) * 100, min_max_possible_capacity); } /* * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that * least efficient cpu gets capacity of 1024 */ unsigned long capacity_scale_cpu_efficiency(int cpu) { return (1024 * 
cpu_rq(cpu)->efficiency) / min_possible_efficiency;
}

/*
 * Return 'capacity' of a cpu in reference to cpu with lowest max_freq
 * (min_max_freq), such that one with lowest max_freq gets capacity of 1024.
 */
unsigned long capacity_scale_cpu_freq(int cpu)
{
	return (1024 * cpu_rq(cpu)->max_freq) / min_max_freq;
}

/*
 * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so
 * that "most" efficient cpu gets a load_scale_factor of 1
 */
static inline unsigned long load_scale_cpu_efficiency(int cpu)
{
	return (1024 * max_possible_efficiency) / cpu_rq(cpu)->efficiency;
}

/*
 * Return load_scale_factor of a cpu in reference to cpu with best max_freq
 * (max_possible_freq), so that one with best max_freq gets a load_scale_factor
 * of 1.
 */
static inline unsigned long load_scale_cpu_freq(int cpu)
{
	return (1024 * max_possible_freq) / cpu_rq(cpu)->max_freq;
}

/*
 * Combined capacity: product of the efficiency- and frequency-relative
 * capacities, in 1024-fixed-point (>> 10 after each multiply).
 */
static int compute_capacity(int cpu)
{
	int capacity = 1024;

	capacity *= capacity_scale_cpu_efficiency(cpu);
	capacity >>= 10;

	capacity *= capacity_scale_cpu_freq(cpu);
	capacity >>= 10;

	return capacity;
}

static int compute_load_scale_factor(int cpu)
{
	int load_scale = 1024;

	/*
	 * load_scale_factor accounts for the fact that task load
	 * is in reference to "best" performing cpu. Task's load will need to be
	 * scaled (up) by a factor to determine suitability to be placed on a
	 * (little) cpu.
	 */
	load_scale *= load_scale_cpu_efficiency(cpu);
	load_scale >>= 10;

	load_scale *= load_scale_cpu_freq(cpu);
	load_scale >>= 10;

	return load_scale;
}

/*
 * cpufreq policy-change callback: refresh per-cpu min/max/max-possible
 * frequencies and recompute capacity/load_scale_factor where needed.
 */
static int cpufreq_notifier_policy(struct notifier_block *nb,
		unsigned long val, void *data)
{
	struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
	int i;
	const struct cpumask *cpus = policy->related_cpus;
	unsigned int orig_min_max_freq = min_max_freq;
	unsigned int orig_max_possible_freq = max_possible_freq;
	/* Initialized to policy->max in case policy->related_cpus is empty!
*/ unsigned int orig_max_freq = policy->max; if (val != CPUFREQ_NOTIFY) return 0; for_each_cpu(i, policy->related_cpus) { cpumask_copy(&cpu_rq(i)->freq_domain_cpumask, policy->related_cpus); orig_max_freq = cpu_rq(i)->max_freq; cpu_rq(i)->min_freq = policy->min; cpu_rq(i)->max_freq = policy->max; cpu_rq(i)->max_possible_freq = policy->cpuinfo.max_freq; } max_possible_freq = max(max_possible_freq, policy->cpuinfo.max_freq); if (min_max_freq == 1) min_max_freq = UINT_MAX; min_max_freq = min(min_max_freq, policy->cpuinfo.max_freq); BUG_ON(!min_max_freq); BUG_ON(!policy->max); if (orig_max_possible_freq == max_possible_freq && orig_min_max_freq == min_max_freq && orig_max_freq == policy->max) return 0; /* * A changed min_max_freq or max_possible_freq (possible during bootup) * needs to trigger re-computation of load_scale_factor and capacity for * all possible cpus (even those offline). It also needs to trigger * re-computation of nr_big/small_task count on all online cpus. * * A changed rq->max_freq otoh needs to trigger re-computation of * load_scale_factor and capacity for just the cluster of cpus involved. * Since small task definition depends on max_load_scale_factor, a * changed load_scale_factor of one cluster could influence small_task * classification of tasks in another cluster. Hence a changed * rq->max_freq will need to trigger re-computation of nr_big/small_task * count on all online cpus. * * While it should be sufficient for nr_big/small_tasks to be * re-computed for only online cpus, we have inadequate context * information here (in policy notifier) with regard to hotplug-safety * context in which notification is issued. As a result, we can't use * get_online_cpus() here, as it can lead to deadlock. Until cpufreq is * fixed up to issue notification always in hotplug-safe context, * re-compute nr_big/small_task for all possible cpus. 
*/ if (orig_min_max_freq != min_max_freq || orig_max_possible_freq != max_possible_freq) cpus = cpu_possible_mask; /* * Changed load_scale_factor can trigger reclassification of tasks as * big or small. Make this change "atomic" so that tasks are accounted * properly due to changed load_scale_factor */ pre_big_small_task_count_change(cpu_possible_mask); for_each_cpu(i, cpus) { struct rq *rq = cpu_rq(i); rq->capacity = compute_capacity(i); rq->max_possible_capacity = rq->capacity * rq->max_possible_freq / rq->max_freq; rq->load_scale_factor = compute_load_scale_factor(i); } update_min_max_capacity(); post_big_small_task_count_change(cpu_possible_mask); return 0; } static int cpufreq_notifier_trans(struct notifier_block *nb, unsigned long val, void *data) { struct cpufreq_freqs *freq = (struct cpufreq_freqs *)data; unsigned int cpu = freq->cpu, new_freq = freq->new; unsigned long flags; int i; if (val != CPUFREQ_POSTCHANGE) return 0; BUG_ON(!new_freq); if (cpu_rq(cpu)->cur_freq == new_freq) return 0; for_each_cpu(i, &cpu_rq(cpu)->freq_domain_cpumask) { struct rq *rq = cpu_rq(i); raw_spin_lock_irqsave(&rq->lock, flags); update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), 0); rq->cur_freq = new_freq; raw_spin_unlock_irqrestore(&rq->lock, flags); } return 0; } static struct notifier_block notifier_policy_block = { .notifier_call = cpufreq_notifier_policy }; static struct notifier_block notifier_trans_block = { .notifier_call = cpufreq_notifier_trans }; static int register_sched_callback(void) { int ret; if (!sched_enable_hmp) return 0; ret = cpufreq_register_notifier(&notifier_policy_block, CPUFREQ_POLICY_NOTIFIER); if (!ret) ret = cpufreq_register_notifier(&notifier_trans_block, CPUFREQ_TRANSITION_NOTIFIER); return 0; } /* * cpufreq callbacks can be registered at core_initcall or later time. * Any registration done prior to that is "forgotten" by cpufreq. See * initialization of variable init_cpufreq_transition_notifier_list_called * for further information. 
 */
core_initcall(register_sched_callback);

/*
 * Snapshot the task's HMP window-stats mark_start timestamp so it can be
 * restored later (pairs with restore_orig_mark_start() below).
 */
static u64 orig_mark_start(struct task_struct *p)
{
	return p->ravg.mark_start;
}

/* Restore a mark_start previously captured by orig_mark_start(). */
static void restore_orig_mark_start(struct task_struct *p, u64 mark_start)
{
	p->ravg.mark_start = mark_start;
}

#else	/* CONFIG_SCHED_HMP */

/*
 * !CONFIG_SCHED_HMP: all window-based load-tracking hooks compile away
 * to no-ops so callers need no #ifdefs of their own.
 */
static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }

static inline int heavy_task_wakeup(struct task_struct *p, struct rq *rq,
				    int event)
{
	return 0;
}

static inline void
update_task_ravg(struct task_struct *p, struct rq *rq,
		 int event, u64 wallclock, u64 irqtime)
{
}

static inline void init_cpu_efficiency(void) {}

static inline void mark_task_starting(struct task_struct *p) {}

static inline void set_window_start(struct rq *rq) {}

static inline void migrate_sync_cpu(int cpu) {}

static inline u64 orig_mark_start(struct task_struct *p) { return 0; }

static inline void restore_orig_mark_start(struct task_struct *p,
					   u64 mark_start)
{
}

#endif	/* CONFIG_SCHED_HMP */

#ifdef CONFIG_SMP
/*
 * Change which CPU @p is accounted against.  Caller must hold either
 * p->pi_lock or the source rq->lock (enforced by the lockdep check
 * below).  Fires the task-migration notifier chain and the HMP
 * busy-time fixup when the CPU actually changes.
 */
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * We should never call set_task_cpu() on a blocked task,
	 * ttwu() will sort out the placement.
	 */
	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
			!(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));

#ifdef CONFIG_LOCKDEP
	/*
	 * The caller should hold either p->pi_lock or rq->lock, when changing
	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
	 *
	 * sched_move_task() holds both and thus holding either pins the cgroup,
	 * see task_group().
	 *
	 * Furthermore, all task_rq users should acquire both locks, see
	 * task_rq_lock().
	 */
	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
				      lockdep_is_held(&task_rq(p)->lock)));
#endif
#endif

	trace_sched_migrate_task(p, new_cpu, pct_task_load(p));

	if (task_cpu(p) != new_cpu) {
		struct task_migration_notifier tmn;

		if (p->sched_class->migrate_task_rq)
			p->sched_class->migrate_task_rq(p, new_cpu);
		p->se.nr_migrations++;
		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);

		tmn.task = p;
		tmn.from_cpu = task_cpu(p);
		tmn.to_cpu = new_cpu;

		atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);

		fixup_busy_time(p, new_cpu);
	}

	__set_task_cpu(p, new_cpu);
}

/* Arguments handed to migration_cpu_stop(): which task, and where to. */
struct migration_arg {
	struct task_struct *task;
	int dest_cpu;
};

static int migration_cpu_stop(void *data);

/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * If @match_state is nonzero, it's the @p->state value just checked and
 * not expected to change. If it changes, i.e. @p might have woken up,
 * then return zero. When we succeed in waiting for @p to be off its CPU,
 * we return a positive number (its total switch count). If a second call
 * a short while later returns the same number, the caller can be sure that
 * @p has remained unscheduled the whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	unsigned long flags;
	int running, on_rq;
	unsigned long ncsw;
	struct rq *rq;

	for (;;) {
		/*
		 * We do the initial early heuristics without holding
		 * any task-queue locks at all. We'll only try to get
		 * the runqueue lock when things look like they will
		 * work out!
		 */
		rq = task_rq(p);

		/*
		 * If the task is actively running on another CPU
		 * still, just relax and busy-wait without holding
		 * any locks.
		 *
		 * NOTE! Since we don't hold any locks, it's not
		 * even sure that "rq" stays as the right runqueue!
		 * But we don't care, since "task_running()" will
		 * return false if the runqueue has changed and p
		 * is actually now running somewhere else!
		 */
		while (task_running(rq, p)) {
			if (match_state && unlikely(p->state != match_state))
				return 0;
			cpu_relax();
		}

		/*
		 * Ok, time to look more closely! We need the rq
		 * lock now, to be *sure*. If we're wrong, we'll
		 * just go back and repeat.
		 */
		rq = task_rq_lock(p, &flags);
		trace_sched_wait_task(p);
		running = task_running(rq, p);
		on_rq = p->on_rq;
		ncsw = 0;
		if (!match_state || p->state == match_state)
			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
		task_rq_unlock(rq, p, &flags);

		/*
		 * If it changed from the expected state, bail out now.
		 */
		if (unlikely(!ncsw))
			break;

		/*
		 * Was it really running after all now that we
		 * checked with the proper locks actually held?
		 *
		 * Oops. Go back and try again..
		 */
		if (unlikely(running)) {
			cpu_relax();
			continue;
		}

		/*
		 * It's not enough that it's not actively running,
		 * it must be off the runqueue _entirely_, and not
		 * preempted!
		 *
		 * So if it was still runnable (but just not actively
		 * running right now), it's preempted, and we should
		 * yield - it could be a while.
		 */
		if (unlikely(on_rq)) {
			ktime_t to = ktime_set(0, NSEC_PER_MSEC);

			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
			continue;
		}

		/*
		 * Ahh, all good. It wasn't running, and it wasn't
		 * runnable, which means that it will never become
		 * running in the future either. We're all done!
		 */
		break;
	}

	return ncsw;
}

/***
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
	int cpu;

	preempt_disable();
	cpu = task_cpu(p);
	if ((cpu != smp_processor_id()) && task_curr(p))
		smp_send_reschedule(cpu);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);
#endif /* CONFIG_SMP */

#ifdef CONFIG_SMP
/*
 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
 *
 * Pick some online, active CPU for @p when its chosen CPU is not usable.
 * Escalates in stages: same-node CPUs, then the cpuset fallback mask,
 * then cpu_possible_mask; BUG()s if even that fails.
 */
static int select_fallback_rq(int cpu, struct task_struct *p)
{
	int nid = cpu_to_node(cpu);
	const struct cpumask *nodemask = NULL;
	enum { cpuset, possible, fail } state = cpuset;
	int dest_cpu;

	/*
	 * If the node that the cpu is on has been offlined, cpu_to_node()
	 * will return -1. There is no cpu on the node, and we should
	 * select the cpu on the other node.
	 */
	if (nid != -1) {
		nodemask = cpumask_of_node(nid);

		/* Look for allowed, online CPU in same node. */
		for_each_cpu(dest_cpu, nodemask) {
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
				return dest_cpu;
		}
	}

	for (;;) {
		/* Any allowed, online CPU? */
		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			goto out;
		}

		switch (state) {
		case cpuset:
			/* No more Mr. Nice Guy. */
			cpuset_cpus_allowed_fallback(p);
			state = possible;
			break;

		case possible:
			do_set_cpus_allowed(p, cpu_possible_mask);
			state = fail;
			break;

		case fail:
			BUG();
			break;
		}
	}

out:
	if (state != cpuset) {
		/*
		 * Don't tell them about moving exiting tasks or
		 * kernel threads (both mm NULL), since they never
		 * leave kernel.
		 */
		if (p->mm && printk_ratelimit()) {
			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
					task_pid_nr(p), p->comm, cpu);
		}
	}

	return dest_cpu;
}

/*
 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
 */
static inline
int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
{
	int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);

	/*
	 * In order not to call set_task_cpu() on a blocking task we need
	 * to rely on ttwu() to place the task on a valid ->cpus_allowed
	 * cpu.
	 *
	 * Since this is common to all placement strategies, this lives here.
	 *
	 * [ this allows ->select_task() to simply return task_cpu(p) and
	 *   not worry about this generic constraint ]
	 */
	if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
		     !cpu_online(cpu)))
		cpu = select_fallback_rq(task_cpu(p), p);

	return cpu;
}

/* Exponential moving average: avg += (sample - avg) / 8. */
static void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;
	*avg += diff >> 3;
}
#endif

/*
 * Account wakeup schedstats for @p woken onto @cpu.  Compiles to an
 * empty function without CONFIG_SCHEDSTATS.
 */
static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
#ifdef CONFIG_SCHEDSTATS
	struct rq *rq = this_rq();

#ifdef CONFIG_SMP
	int this_cpu = smp_processor_id();

	if (cpu == this_cpu) {
		schedstat_inc(rq, ttwu_local);
		schedstat_inc(p, se.statistics.nr_wakeups_local);
	} else {
		struct sched_domain *sd;

		schedstat_inc(p, se.statistics.nr_wakeups_remote);
		rcu_read_lock();
		for_each_domain(this_cpu, sd) {
			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
				schedstat_inc(sd, ttwu_wake_remote);
				break;
			}
		}
		rcu_read_unlock();
	}

	if (wake_flags & WF_MIGRATED)
		schedstat_inc(p, se.statistics.nr_wakeups_migrate);

#endif /* CONFIG_SMP */

	schedstat_inc(rq, ttwu_count);
	schedstat_inc(p, se.statistics.nr_wakeups);

	if (wake_flags & WF_SYNC)
		schedstat_inc(p, se.statistics.nr_wakeups_sync);

#endif /* CONFIG_SCHEDSTATS */
}

/* Enqueue @p on @rq and mark it runnable on the runqueue. */
static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
{
	activate_task(rq, p, en_flags);
	p->on_rq = 1;

	/* if a worker is waking up, notify workqueue */
	if (p->flags & PF_WQ_WORKER)
		wq_worker_waking_up(p, cpu_of(rq));
}

/*
 * Mark the task runnable and perform wakeup-preemption.
 */
static void
ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
	check_preempt_curr(rq, p, wake_flags);
	trace_sched_wakeup(p, true);

	p->state = TASK_RUNNING;
#ifdef CONFIG_SMP
	if (p->sched_class->task_woken)
		p->sched_class->task_woken(rq, p);

	/*
	 * Track the rq's average idle period, clamped at twice the
	 * migration cost; consumed by idle balancing heuristics.
	 */
	if (rq->idle_stamp) {
		u64 delta = rq->clock - rq->idle_stamp;
		u64 max = 2*sysctl_sched_migration_cost;

		if (delta > max)
			rq->avg_idle = max;
		else
			update_avg(&rq->avg_idle, delta);
		rq->idle_stamp = 0;
	}
#endif
}

/* Full activation path: fix up nr_uninterruptible, enqueue, then wake. */
static void
ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
{
#ifdef CONFIG_SMP
	if (p->sched_contributes_to_load)
		rq->nr_uninterruptible--;
#endif

	ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
	ttwu_do_wakeup(rq, p, wake_flags);
}

/*
 * Called in case the task @p isn't fully descheduled from its runqueue,
 * in this case we must do a remote wakeup. Its a 'light' wakeup though,
 * since all we need to do is flip p->state to TASK_RUNNING, since
 * the task is still ->on_rq.
 */
static int ttwu_remote(struct task_struct *p, int wake_flags)
{
	struct rq *rq;
	int ret = 0;

	rq = __task_rq_lock(p);
	if (p->on_rq) {
		ttwu_do_wakeup(rq, p, wake_flags);
		ret = 1;
	}
	__task_rq_unlock(rq);

	return ret;
}

#ifdef CONFIG_SMP
/* Drain this CPU's remote-wakeup llist and activate each queued task. */
static void sched_ttwu_pending(void)
{
	struct rq *rq = this_rq();
	struct llist_node *llist = llist_del_all(&rq->wake_list);
	struct task_struct *p;

	raw_spin_lock(&rq->lock);

	while (llist) {
		p = llist_entry(llist, struct task_struct, wake_entry);
		llist = llist_next(llist);
		ttwu_do_activate(rq, p, 0);
	}

	raw_spin_unlock(&rq->lock);
}

/*
 * Reschedule-IPI entry point: handles queued remote wakeups, vendor
 * "boost kick" migration checks, and nohz idle-balance kicks.
 */
void scheduler_ipi(void)
{
	int cpu = smp_processor_id();

	if (llist_empty(&this_rq()->wake_list)
			&& !tick_nohz_full_cpu(cpu)
			&& !got_nohz_idle_kick()
			&& !got_boost_kick())
		return;

	if (got_boost_kick()) {
		struct rq *rq = cpu_rq(cpu);

		if (rq->curr->sched_class == &fair_sched_class)
			check_for_migration(rq, rq->curr);
		clear_boost_kick(cpu);
	}

	/*
	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
	 * traditionally all their work was done from the interrupt return
	 * path. Now that we actually do some work, we need to make sure
	 * we do call them.
	 *
	 * Some archs already do call them, luckily irq_enter/exit nest
	 * properly.
	 *
	 * Arguably we should visit all archs and update all handlers,
	 * however a fair share of IPIs are still resched only so this would
	 * somewhat pessimize the simple resched case.
	 */
	irq_enter();
	tick_nohz_full_check();
	sched_ttwu_pending();

	/*
	 * Check if someone kicked us for doing the nohz idle load balance.
	 */
	if (unlikely(got_nohz_idle_kick())) {
		this_rq()->idle_balance = 1;
		raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
	irq_exit();
}

/* Queue @p on @cpu's wake_list; send an IPI only if the list was empty. */
static void ttwu_queue_remote(struct task_struct *p, int cpu)
{
	if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list))
		smp_send_reschedule(cpu);
}

/* True when the two CPUs share a last-level-cache scheduling domain. */
bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}
#endif /* CONFIG_SMP */

/*
 * Enqueue the woken task: remotely via IPI-queueing when the target CPU
 * does not share cache (and TTWU_QUEUE is enabled), else directly under
 * the target rq lock.
 */
static void ttwu_queue(struct task_struct *p, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#if defined(CONFIG_SMP)
	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
		sched_clock_cpu(cpu); /* sync clocks x-cpu */
		ttwu_queue_remote(p, cpu);
		return;
	}
#endif

	raw_spin_lock(&rq->lock);
	ttwu_do_activate(rq, p, 0);
	raw_spin_unlock(&rq->lock);
}

__read_mostly unsigned int sysctl_sched_wakeup_load_threshold = 110;

/**
 * try_to_wake_up - wake up a thread
 * @p: the thread to be awakened
 * @state: the mask of task states that can be woken
 * @wake_flags: wake modifier flags (WF_*)
 *
 * Put it on the run-queue if it's not already there. The "current"
 * thread is always on the run-queue (except when the actual
 * re-schedule is in progress), and as such you're allowed to do
 * the simpler "current->state = TASK_RUNNING" to mark yourself
 * runnable without the overhead of this.
 *
 * Returns %true if @p was woken up, %false if it was already running
 * or @state didn't match @p's state.
 */
static int
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
{
	unsigned long flags;
	int cpu, success = 0;
	unsigned long src_cpu;
	int notify = 0;
	struct migration_notify_data mnd;
	int heavy_task = 0;
#ifdef CONFIG_SMP
	struct rq *rq;
	u64 wallclock;
#endif
	bool freq_notif_allowed = !(wake_flags & WF_NO_NOTIFIER);

	wake_flags &= ~WF_NO_NOTIFIER;

	/*
	 * If we are going to wake up a thread waiting for CONDITION we
	 * need to ensure that CONDITION=1 done by the caller can not be
	 * reordered with p->state check below. This pairs with mb() in
	 * set_current_state() the waiting thread does.
	 */
	smp_mb__before_spinlock();
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	src_cpu = cpu = task_cpu(p);

	if (!(p->state & state))
		goto out;

	success = 1; /* we're going to change ->state */

	/*
	 * Ensure we load p->on_rq _after_ p->state, otherwise it would
	 * be possible to, falsely, observe p->on_rq == 0 and get stuck
	 * in smp_cond_load_acquire() below.
	 *
	 * sched_ttwu_pending()                 try_to_wake_up()
	 *   [S] p->on_rq = 1;                  [L] P->state
	 *       UNLOCK rq->lock  -----.
	 *                              \
	 *				+---   RMB
	 * schedule()                   /
	 *       LOCK rq->lock    -----'
	 *       UNLOCK rq->lock
	 *
	 * [task p]
	 *   [S] p->state = UNINTERRUPTIBLE     [L] p->on_rq
	 *
	 * Pairs with the UNLOCK+LOCK on rq->lock from the
	 * last wakeup of our task and the schedule that got our task
	 * current.
	 */
	smp_rmb();
	if (p->on_rq && ttwu_remote(p, wake_flags))
		goto stat;

#ifdef CONFIG_SMP
	/*
	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
	 * possible to, falsely, observe p->on_cpu == 0.
	 *
	 * One must be running (->on_cpu == 1) in order to remove oneself
	 * from the runqueue.
	 *
	 *  [S] ->on_cpu = 1;	[L] ->on_rq
	 *      UNLOCK rq->lock
	 *			RMB
	 *      LOCK   rq->lock
	 *  [S] ->on_rq = 0;	[L] ->on_cpu
	 *
	 * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
	 * from the consecutive calls to schedule(); the first switching to our
	 * task, the second putting it to sleep.
	 */
	smp_rmb();

	/*
	 * If the owning (remote) cpu is still in the middle of schedule() with
	 * this task as prev, wait until its done referencing the task.
	 */
	while (p->on_cpu)
		cpu_relax();
	/*
	 * Pairs with the smp_wmb() in finish_lock_switch().
	 */
	smp_rmb();

	/* HMP window-stats accounting for the wakeup, under the rq lock. */
	rq = cpu_rq(task_cpu(p));
	raw_spin_lock(&rq->lock);
	wallclock = sched_clock();
	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
	heavy_task = heavy_task_wakeup(p, rq, TASK_WAKE);
	update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
	raw_spin_unlock(&rq->lock);

	p->sched_contributes_to_load = !!task_contributes_to_load(p);
	p->state = TASK_WAKING;

	if (p->sched_class->task_waking)
		p->sched_class->task_waking(p);

	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);

	/* Refresh src_cpu as it could have changed since we last read it */
	src_cpu = task_cpu(p);
	if (src_cpu != cpu) {
		wake_flags |= WF_MIGRATED;
		set_task_cpu(p, cpu);
	}
#endif /* CONFIG_SMP */

	ttwu_queue(p, cpu);
stat:
	ttwu_stat(p, cpu, wake_flags);

	if (task_notify_on_migrate(p)) {
		mnd.src_cpu = src_cpu;
		mnd.dest_cpu = cpu;
		mnd.load = pct_task_load(p);

		/*
		 * Call the migration notifier with mnd for foreground task
		 * migrations as well as for wakeups if their load is above
		 * sysctl_sched_wakeup_load_threshold. This would prompt the
		 * cpu-boost to boost the CPU frequency on wake up of a heavy
		 * weight foreground task
		 */
		if ((src_cpu != cpu) || (mnd.load >
					sysctl_sched_wakeup_load_threshold))
			notify = 1;
	}

out:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

	/* Notifier calls happen outside pi_lock to avoid lock dependencies. */
	if (notify)
		atomic_notifier_call_chain(&migration_notifier_head,
					   0, (void *)&mnd);

	if (freq_notif_allowed) {
		if (!same_freq_domain(src_cpu, cpu)) {
			check_for_freq_change(cpu_rq(cpu));
			check_for_freq_change(cpu_rq(src_cpu));
		} else if (heavy_task) {
			check_for_freq_change(cpu_rq(cpu));
		}
	}

	return success;
}

/**
 * try_to_wake_up_local - try to wake up a local task with rq lock held
 * @p: the thread to be awakened
 *
 * Put @p on the run-queue if it's not already there.
 * The caller must
 * ensure that this_rq() is locked, @p is bound to this_rq() and not
 * the current task.
 */
static void try_to_wake_up_local(struct task_struct *p)
{
	struct rq *rq = task_rq(p);

	if (rq != this_rq() || p == current) {
		printk_deferred("%s: Failed to wakeup task %d (%s), rq = %p, this_rq = %p, p = %p, current = %p\n",
			__func__, task_pid_nr(p), p->comm, rq,
			this_rq(), p, current);
		return;
	}

	lockdep_assert_held(&rq->lock);

	/*
	 * Lock ordering is pi_lock before rq->lock; since we already hold
	 * rq->lock, drop and retake it if pi_lock cannot be trylocked.
	 */
	if (!raw_spin_trylock(&p->pi_lock)) {
		raw_spin_unlock(&rq->lock);
		raw_spin_lock(&p->pi_lock);
		raw_spin_lock(&rq->lock);
	}

	if (!(p->state & TASK_NORMAL))
		goto out;

	if (!p->on_rq) {
		u64 wallclock = sched_clock();

		update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
		update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
	}

	ttwu_do_wakeup(rq, p, 0);
	ttwu_stat(p, smp_processor_id(), 0);
out:
	raw_spin_unlock(&p->pi_lock);
	/* Todo : Send cpufreq notifier */
}

/**
 * wake_up_process - Wake up a specific process
 * @p: The process to be woken up.
 *
 * Attempt to wake up the nominated process and move it to the set of runnable
 * processes.  Returns 1 if the process was woken up, 0 if it was already
 * running.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
int wake_up_process(struct task_struct *p)
{
	return try_to_wake_up(p, TASK_NORMAL, 0);
}
EXPORT_SYMBOL(wake_up_process);

/**
 * wake_up_process_no_notif - Wake up a specific process without notifying
 * governor
 * @p: The process to be woken up.
 *
 * Attempt to wake up the nominated process and move it to the set of runnable
 * processes.
 *
 * Return: 1 if the process was woken up, 0 if it was already running.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
int wake_up_process_no_notif(struct task_struct *p)
{
	WARN_ON(task_is_stopped_or_traced(p));
	return try_to_wake_up(p, TASK_NORMAL, WF_NO_NOTIFIER);
}
EXPORT_SYMBOL(wake_up_process_no_notif);

/* Like wake_up_process() but only wakes @p if its state matches @state. */
int wake_up_state(struct task_struct *p, unsigned int state)
{
	return try_to_wake_up(p, state, 0);
}

/*
 * Perform scheduler related setup for a newly forked process p.
 * p is forked by current.
 *
 * __sched_fork() is basic setup used by init_idle() too:
 */
static void __sched_fork(struct task_struct *p)
{
	p->on_rq = 0;

	p->se.on_rq = 0;
	p->se.exec_start = 0;
	p->se.sum_exec_runtime = 0;
	p->se.prev_sum_exec_runtime = 0;
	p->se.nr_migrations = 0;
	p->se.vruntime = 0;
	INIT_LIST_HEAD(&p->se.group_node);

/*
 * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
 * removed when useful for applications beyond shares distribution (e.g.
 * load-balance).
 */
#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
	init_new_task_load(p);
#endif
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif

	INIT_LIST_HEAD(&p->rt.run_list);

#ifdef CONFIG_PREEMPT_NOTIFIERS
	INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif

#ifdef CONFIG_NUMA_BALANCING
	if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
		p->mm->numa_next_scan = jiffies;
		p->mm->numa_next_reset = jiffies;
		p->mm->numa_scan_seq = 0;
	}

	p->node_stamp = 0ULL;
	p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
	p->numa_migrate_seq = p->mm ? p->mm->numa_scan_seq - 1 : 0;
	p->numa_scan_period = sysctl_numa_balancing_scan_delay;
	p->numa_work.next = &p->numa_work;
#endif /* CONFIG_NUMA_BALANCING */
}

#ifdef CONFIG_NUMA_BALANCING
#ifdef CONFIG_SCHED_DEBUG
/* Toggle automatic NUMA balancing via the NUMA sched_feat bit. */
void set_numabalancing_state(bool enabled)
{
	if (enabled)
		sched_feat_set("NUMA");
	else
		sched_feat_set("NO_NUMA");
}
#else
__read_mostly bool numabalancing_enabled;

/* Non-debug builds track the toggle in a plain boolean instead. */
void set_numabalancing_state(bool enabled)
{
	numabalancing_enabled = enabled;
}
#endif /* CONFIG_SCHED_DEBUG */
#endif /* CONFIG_NUMA_BALANCING */

/*
 * fork()/clone()-time setup:
 */
void sched_fork(struct task_struct *p)
{
	unsigned long flags;
	int cpu = get_cpu();

	__sched_fork(p);
	/*
	 * We mark the process as running here. This guarantees that
	 * nobody will actually run it, and a signal or other external
	 * event cannot wake it up and insert it on the runqueue either.
	 */
	p->state = TASK_RUNNING;

	/*
	 * Make sure we do not leak PI boosting priority to the child.
	 */
	p->prio = current->normal_prio;

	/*
	 * Revert to default priority/policy on fork if requested.
	 */
	if (unlikely(p->sched_reset_on_fork)) {
		if (task_has_rt_policy(p)) {
			p->policy = SCHED_NORMAL;
			p->static_prio = NICE_TO_PRIO(0);
			p->rt_priority = 0;
		} else if (PRIO_TO_NICE(p->static_prio) < 0)
			p->static_prio = NICE_TO_PRIO(0);

		p->prio = p->normal_prio = __normal_prio(p);
		set_load_weight(p);

		/*
		 * We don't need the reset flag anymore after the fork. It has
		 * fulfilled its duty:
		 */
		p->sched_reset_on_fork = 0;
	}

	if (!rt_prio(p->prio))
		p->sched_class = &fair_sched_class;

	if (p->sched_class->task_fork)
		p->sched_class->task_fork(p);

	/*
	 * The child is not yet in the pid-hash so no cgroup attach races,
	 * and the cgroup is pinned to this child due to cgroup_fork()
	 * is ran before sched_fork().
	 *
	 * Silence PROVE_RCU.
	 */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	set_task_cpu(p, cpu);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	if (likely(sched_info_on()))
		memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
#if defined(CONFIG_SMP)
	p->on_cpu = 0;
#endif
#ifdef CONFIG_PREEMPT_COUNT
	/* Want to start with kernel preemption disabled. */
	task_thread_info(p)->preempt_count = 1;
#endif
#ifdef CONFIG_SMP
	plist_node_init(&p->pushable_tasks, MAX_PRIO);
#endif

	put_cpu();
}

/*
 * wake_up_new_task - wake up a newly created task for the first time.
 *
 * This function will do some initial scheduler statistics housekeeping
 * that must be done for every newly created context, then puts the task
 * on the runqueue and wakes it.
 */
void wake_up_new_task(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
#ifdef CONFIG_SMP
	/*
	 * Fork balancing, do it here and not earlier because:
	 *  - cpus_allowed can change in the fork path
	 *  - any previously selected cpu might disappear through hotplug
	 */
	set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
#endif

	rq = __task_rq_lock(p);
	mark_task_starting(p);
	activate_task(rq, p, 0);
	p->on_rq = 1;
	trace_sched_wakeup_new(p, true);
	check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
	if (p->sched_class->task_woken)
		p->sched_class->task_woken(rq, p);
#endif
	task_rq_unlock(rq, p, &flags);
}

#ifdef CONFIG_PREEMPT_NOTIFIERS

/**
 * preempt_notifier_register - tell me when current is being preempted & rescheduled
 * @notifier: notifier struct to register
 */
void preempt_notifier_register(struct preempt_notifier *notifier)
{
	hlist_add_head(&notifier->link, &current->preempt_notifiers);
}
EXPORT_SYMBOL_GPL(preempt_notifier_register);

/**
 * preempt_notifier_unregister - no longer interested in preemption notifications
 * @notifier: notifier struct to unregister
 *
 * This is safe to call from within a preemption notifier.
 */
void preempt_notifier_unregister(struct preempt_notifier *notifier)
{
	hlist_del(&notifier->link);
}
EXPORT_SYMBOL_GPL(preempt_notifier_unregister);

/* Invoke the sched_in callback of every notifier registered on @curr. */
static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
	struct preempt_notifier *notifier;

	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
		notifier->ops->sched_in(notifier, raw_smp_processor_id());
}

/* Invoke the sched_out callback of every notifier registered on @curr. */
static void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
	struct preempt_notifier *notifier;

	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
		notifier->ops->sched_out(notifier, next);
}

#else /* !CONFIG_PREEMPT_NOTIFIERS */

static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
}

static void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
}

#endif /* CONFIG_PREEMPT_NOTIFIERS */

/**
 * prepare_task_switch - prepare to switch tasks
 * @rq: the runqueue preparing to switch
 * @prev: the current task that is being switched out
 * @next: the task we are going to switch to.
 *
 * This is called with the rq lock held and interrupts off. It must
 * be paired with a subsequent finish_task_switch after the context
 * switch.
 *
 * prepare_task_switch sets up locking and calls architecture specific
 * hooks.
 */
static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
		    struct task_struct *next)
{
	trace_sched_switch(prev, next);
#ifdef CONFIG_ARCH_WANTS_CTXSW_LOGGING
	dlog("%s: end trace at %llu\n", __func__, sched_clock());
#endif
	sched_info_switch(prev, next);
	perf_event_task_sched_out(prev, next);
	fire_sched_out_preempt_notifiers(prev, next);
	prepare_lock_switch(rq, next);
	prepare_arch_switch(next);
}

/**
 * finish_task_switch - clean up after a task-switch
 * @rq: runqueue associated with task-switch
 * @prev: the thread we just switched away from.
 *
 * finish_task_switch must be called after the context switch, paired
 * with a prepare_task_switch call before the context switch.
 * finish_task_switch will reconcile locking set up by prepare_task_switch,
 * and do any other architecture-specific cleanup actions.
 *
 * Note that we may have delayed dropping an mm in context_switch(). If
 * so, we finish that here outside of the runqueue lock. (Doing it
 * with the lock held can cause deadlocks; see schedule() for
 * details.)
 */
static void finish_task_switch(struct rq *rq, struct task_struct *prev)
	__releases(rq->lock)
{
	struct mm_struct *mm = rq->prev_mm;
	long prev_state;

	rq->prev_mm = NULL;

	/*
	 * A task struct has one reference for the use as "current".
	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
	 * schedule one last time. The schedule call will never return, and
	 * the scheduled task must drop that reference.
	 * The test for TASK_DEAD must occur while the runqueue locks are
	 * still held, otherwise prev could be scheduled on another cpu, die
	 * there before we look at prev->state, and then the reference would
	 * be dropped twice.
	 *		Manfred Spraul <manfred@colorfullife.com>
	 */
	prev_state = prev->state;
	vtime_task_switch(prev);
	finish_arch_switch(prev);
	perf_event_task_sched_in(prev, current);
	finish_lock_switch(rq, prev);
	finish_arch_post_lock_switch();

	fire_sched_in_preempt_notifiers(current);
	if (mm)
		mmdrop(mm);
	if (unlikely(prev_state == TASK_DEAD)) {
		/*
		 * Remove function-return probe instances associated with this
		 * task and put them back on the free list.
		 */
		kprobe_flush_task(prev);

		put_task_struct(prev);
	}

	tick_nohz_task_switch(current);
}

#ifdef CONFIG_SMP

/* assumes rq->lock is held */
static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
{
	if (prev->sched_class->pre_schedule)
		prev->sched_class->pre_schedule(rq, prev);
}

/* rq->lock is NOT held, but preemption is disabled */
static inline void post_schedule(struct rq *rq)
{
	if (rq->post_schedule) {
		unsigned long flags;

		raw_spin_lock_irqsave(&rq->lock, flags);
		if (rq->curr->sched_class->post_schedule)
			rq->curr->sched_class->post_schedule(rq);
		raw_spin_unlock_irqrestore(&rq->lock, flags);

		rq->post_schedule = 0;
	}
}

#else

static inline void pre_schedule(struct rq *rq, struct task_struct *p)
{
}

static inline void post_schedule(struct rq *rq)
{
}

#endif

/**
 * schedule_tail - first thing a freshly forked thread must call.
 * @prev: the thread we just switched away from.
 */
asmlinkage void schedule_tail(struct task_struct *prev)
	__releases(rq->lock)
{
	struct rq *rq = this_rq();

	finish_task_switch(rq, prev);

	/*
	 * FIXME: do we need to worry about rq being invalidated by the
	 * task_switch?
	 */
	post_schedule(rq);

#ifdef __ARCH_WANT_UNLOCKED_CTXSW
	/* In this case, finish_task_switch does not reenable preemption */
	preempt_enable();
#endif
	if (current->set_child_tid)
		put_user(task_pid_vnr(current), current->set_child_tid);
}

/*
 * context_switch - switch to the new MM and the new
 * thread's register state.
 */
static inline void
context_switch(struct rq *rq, struct task_struct *prev,
	       struct task_struct *next)
{
	struct mm_struct *mm, *oldmm;
#ifdef CONFIG_ARCH_WANTS_CTXSW_LOGGING
	u64 start, end;
#endif

	prepare_task_switch(rq, prev, next);

	mm = next->mm;
	oldmm = prev->active_mm;
#ifdef CONFIG_ARCH_WANTS_CTXSW_LOGGING
	start = sched_clock();
	dlog("%s: n_mm: %p, finish pts at %llu\n", __func__, mm, start);
#endif
	/*
	 * For paravirt, this is coupled with an exit in switch_to to
	 * combine the page table reload and the switch backend into
	 * one hypercall.
	 */
	arch_start_context_switch(prev);

	/* Kernel threads (mm == NULL) borrow the previous task's mm lazily. */
	if (!mm) {
		next->active_mm = oldmm;
		atomic_inc(&oldmm->mm_count);
		enter_lazy_tlb(oldmm, next);
	} else
		switch_mm(oldmm, mm, next);

	if (!prev->mm) {
		prev->active_mm = NULL;
		rq->prev_mm = oldmm;
	}
#ifdef CONFIG_ARCH_WANTS_CTXSW_LOGGING
	/*
	 * NOTE(review): vendor hook - a context switch taking longer than
	 * MAX_CTXSW_LATENCY deliberately triggers a watchdog bite.
	 */
	end = sched_clock();
	dlog("%s: start task switch at %llu\n", __func__, end);
	if (end - start > MAX_CTXSW_LATENCY)
		msm_trigger_wdog_bite();
#endif
	/*
	 * Since the runqueue lock will be released by the next
	 * task (which is an invalid locking op but in the case
	 * of the scheduler it's an obvious special-case), so we
	 * do an early lockdep release here:
	 */
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
#endif

	context_tracking_task_switch(prev, next);
	/* Here we just switch the register state and the stack. */
	switch_to(prev, next, prev);

	barrier();
	/*
	 * this_rq must be evaluated again because prev may have moved
	 * CPUs since it called schedule(), thus the 'rq' on its stack
	 * frame will be invalid.
	 */
	finish_task_switch(this_rq(), prev);
}

/*
 * nr_running and nr_context_switches:
 *
 * externally visible scheduler statistics: current number of runnable
 * threads, total number of context switches performed since bootup.
*/ unsigned long nr_running(void) { unsigned long i, sum = 0; for_each_online_cpu(i) sum += cpu_rq(i)->nr_running; return sum; } unsigned long long nr_context_switches(void) { int i; unsigned long long sum = 0; for_each_possible_cpu(i) sum += cpu_rq(i)->nr_switches; return sum; } unsigned long nr_iowait(void) { unsigned long i, sum = 0; for_each_possible_cpu(i) sum += atomic_read(&cpu_rq(i)->nr_iowait); return sum; } unsigned long nr_iowait_cpu(int cpu) { struct rq *this = cpu_rq(cpu); return atomic_read(&this->nr_iowait); } unsigned long this_cpu_load(void) { struct rq *this = this_rq(); return this->cpu_load[0]; } /* * Global load-average calculations * * We take a distributed and async approach to calculating the global load-avg * in order to minimize overhead. * * The global load average is an exponentially decaying average of nr_running + * nr_uninterruptible. * * Once every LOAD_FREQ: * * nr_active = 0; * for_each_possible_cpu(cpu) * nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible; * * avenrun[n] = avenrun[0] * exp_n + nr_active * (1 - exp_n) * * Due to a number of reasons the above turns in the mess below: * * - for_each_possible_cpu() is prohibitively expensive on machines with * serious number of cpus, therefore we need to take a distributed approach * to calculating nr_active. * * \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0 * = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) } * * So assuming nr_active := 0 when we start out -- true per definition, we * can simply take per-cpu deltas and fold those into a global accumulate * to obtain the same result. See calc_load_fold_active(). * * Furthermore, in order to avoid synchronizing all per-cpu delta folding * across the machine, we assume 10 ticks is sufficient time for every * cpu to have completed this task. * * This places an upper-bound on the IRQ-off latency of the machine. Then * again, being late doesn't loose the delta, just wrecks the sample. 
* * - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because * this would add another cross-cpu cacheline miss and atomic operation * to the wakeup path. Instead we increment on whatever cpu the task ran * when it went into uninterruptible state and decrement on whatever cpu * did the wakeup. This means that only the sum of nr_uninterruptible over * all cpus yields the correct result. * * This covers the NO_HZ=n code, for extra head-aches, see the comment below. */ /* Variables and functions for calc_load */ static atomic_long_t calc_load_tasks; static unsigned long calc_load_update; unsigned long avenrun[3]; EXPORT_SYMBOL(avenrun); /* should be removed */ /** * get_avenrun - get the load average array * @loads: pointer to dest load array * @offset: offset to add * @shift: shift count to shift the result left * * These values are estimates at best, so no need for locking. */ void get_avenrun(unsigned long *loads, unsigned long offset, int shift) { loads[0] = (avenrun[0] + offset) << shift; loads[1] = (avenrun[1] + offset) << shift; loads[2] = (avenrun[2] + offset) << shift; } static long calc_load_fold_active(struct rq *this_rq) { long nr_active, delta = 0; nr_active = this_rq->nr_running; nr_active += (long) this_rq->nr_uninterruptible; if (nr_active != this_rq->calc_load_active) { delta = nr_active - this_rq->calc_load_active; this_rq->calc_load_active = nr_active; } return delta; } /* * a1 = a0 * e + a * (1 - e) */ static unsigned long calc_load(unsigned long load, unsigned long exp, unsigned long active) { load *= exp; load += active * (FIXED_1 - exp); load += 1UL << (FSHIFT - 1); return load >> FSHIFT; } #ifdef CONFIG_NO_HZ_COMMON /* * Handle NO_HZ for the global load-average. * * Since the above described distributed algorithm to compute the global * load-average relies on per-cpu sampling from the tick, it is affected by * NO_HZ. 
 *
 * The basic idea is to fold the nr_active delta into a global idle-delta upon
 * entering NO_HZ state such that we can include this as an 'extra' cpu delta
 * when we read the global state.
 *
 * Obviously reality has to ruin such a delightfully simple scheme:
 *
 *  - When we go NO_HZ idle during the window, we can negate our sample
 *    contribution, causing under-accounting.
 *
 *    We avoid this by keeping two idle-delta counters and flipping them
 *    when the window starts, thus separating old and new NO_HZ load.
 *
 *    The only trick is the slight shift in index flip for read vs write.
 *
 *        0s            5s            10s           15s
 *          +10           +10           +10           +10
 *        |-|-----------|-|-----------|-|-----------|-|
 *    r:0 0 1           1 0           0 1           1 0
 *    w:0 1 1           0 0           1 1           0 0
 *
 *    This ensures we'll fold the old idle contribution in this window while
 *    accumulating the new one.
 *
 *  - When we wake up from NO_HZ idle during the window, we push up our
 *    contribution, since we effectively move our sample point to a known
 *    busy state.
 *
 *    This is solved by pushing the window forward, and thus skipping the
 *    sample, for this cpu (effectively using the idle-delta for this cpu which
 *    was in effect at the time the window opened). This also solves the issue
 *    of having to deal with a cpu having been in NOHZ idle for multiple
 *    LOAD_FREQ intervals.
 *
 * When making the ILB scale, we should try to pull this in as well.
 */
static atomic_long_t calc_load_idle[2];	/* old/new NO_HZ idle deltas */
static int calc_load_idx;		/* selects which of the two is current */

static inline int calc_load_write_idx(void)
{
	int idx = calc_load_idx;

	/*
	 * See calc_global_nohz(), if we observe the new index, we also
	 * need to observe the new update time.
	 */
	smp_rmb();

	/*
	 * If the folding window started, make sure we start writing in the
	 * next idle-delta.
	 */
	if (!time_before(jiffies, calc_load_update))
		idx++;

	return idx & 1;
}

static inline int calc_load_read_idx(void)
{
	/* Readers always use the index as-is; see the r/w diagram above. */
	return calc_load_idx & 1;
}

/*
 * Called on the NO_HZ idle entry path: fold this rq's pending active
 * delta into the current write-side idle-delta slot.
 */
void calc_load_enter_idle(void)
{
	struct rq *this_rq = this_rq();
	long delta;

	/*
	 * We're going into NOHZ mode, if there's any pending delta, fold it
	 * into the pending idle delta.
	 */
	delta = calc_load_fold_active(this_rq);
	if (delta) {
		int idx = calc_load_write_idx();
		atomic_long_add(delta, &calc_load_idle[idx]);
	}
}

/*
 * Called when leaving NO_HZ idle: if we crossed into (or past) the sample
 * window while idle, skip this rq's sample for the current window.
 */
void calc_load_exit_idle(void)
{
	struct rq *this_rq = this_rq();

	/*
	 * If we're still before the sample window, we're done.
	 */
	if (time_before(jiffies, this_rq->calc_load_update))
		return;

	/*
	 * We woke inside or after the sample window, this means we're already
	 * accounted through the nohz accounting, so skip the entire deal and
	 * sync up for the next window.
	 */
	this_rq->calc_load_update = calc_load_update;
	if (time_before(jiffies, this_rq->calc_load_update + 10))
		this_rq->calc_load_update += LOAD_FREQ;
}

/*
 * Drain the read-side idle-delta slot, returning the accumulated delta
 * contributed by cpus that went NO_HZ idle during the previous window.
 */
static long calc_load_fold_idle(void)
{
	int idx = calc_load_read_idx();
	long delta = 0;

	if (atomic_long_read(&calc_load_idle[idx]))
		delta = atomic_long_xchg(&calc_load_idle[idx], 0);

	return delta;
}

/**
 * fixed_power_int - compute: x^n, in O(log n) time
 *
 * @x:         base of the power
 * @frac_bits: fractional bits of @x
 * @n:         power to raise @x to.
 *
 * By exploiting the relation between the definition of the natural power
 * function: x^n := x*x*...*x (x multiplied by itself for n times), and
 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
 * (where: n_i \elem {0, 1}, the binary vector representing n),
 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
 * of course trivially computable in O(log_2 n), the length of our binary
 * vector.
 */
static unsigned long
fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
{
	unsigned long result = 1UL << frac_bits;	/* fixed-point 1.0 */

	if (n)
		for (;;) {
			if (n & 1) {
				result *= x;
				/* round to nearest before renormalizing */
				result += 1UL << (frac_bits - 1);
				result >>= frac_bits;
			}
			n >>= 1;
			if (!n)
				break;
			x *= x;
			x += 1UL << (frac_bits - 1);
			x >>= frac_bits;
		}

	return result;
}

/*
 * a1 = a0 * e + a * (1 - e)
 *
 * a2 = a1 * e + a * (1 - e)
 *    = (a0 * e + a * (1 - e)) * e + a * (1 - e)
 *    = a0 * e^2 + a * (1 - e) * (1 + e)
 *
 * a3 = a2 * e + a * (1 - e)
 *    = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
 *    = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
 *
 *  ...
 *
 * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
 *    = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
 *    = a0 * e^n + a * (1 - e^n)
 *
 * [1] application of the geometric series:
 *
 *              n         1 - x^(n+1)
 *     S_n := \Sum x^i = -------------
 *             i=0            1 - x
 */
static unsigned long
calc_load_n(unsigned long load, unsigned long exp,
	    unsigned long active, unsigned int n)
{
	/* n decay steps collapsed into one via exp^n (see derivation above) */
	return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
}

/*
 * NO_HZ can leave us missing all per-cpu ticks calling
 * calc_load_account_active(), but since an idle CPU folds its delta into
 * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold
 * in the pending idle delta if our idle period crossed a load cycle boundary.
 *
 * Once we've updated the global active value, we need to apply the exponential
 * weights adjusted to the number of cycles missed.
 */
static void calc_global_nohz(void)
{
	long delta, active, n;

	if (!time_before(jiffies, calc_load_update + 10)) {
		/*
		 * Catch-up, fold however many we are behind still
		 */
		delta = jiffies - calc_load_update - 10;
		n = 1 + (delta / LOAD_FREQ);

		active = atomic_long_read(&calc_load_tasks);
		active = active > 0 ? active * FIXED_1 : 0;

		avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
		avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
		avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);

		calc_load_update += n * LOAD_FREQ;
	}

	/*
	 * Flip the idle index...
	 *
	 * Make sure we first write the new time then flip the index, so that
	 * calc_load_write_idx() will see the new time when it reads the new
	 * index, this avoids a double flip messing things up.
	 */
	smp_wmb();
	calc_load_idx++;
}
#else /* !CONFIG_NO_HZ_COMMON */

/* Without NO_HZ there is never an idle delta to fold, nor catch-up to do. */
static inline long calc_load_fold_idle(void)
{
	return 0;
}

static inline void calc_global_nohz(void)
{
}

#endif /* CONFIG_NO_HZ_COMMON */

/*
 * calc_load - update the avenrun load estimates 10 ticks after the
 * CPUs have updated calc_load_tasks.
 */
void calc_global_load(unsigned long ticks)
{
	long active, delta;

	if (time_before(jiffies, calc_load_update + 10))
		return;

	/*
	 * Fold the 'old' idle-delta to include all NO_HZ cpus.
	 */
	delta = calc_load_fold_idle();
	if (delta)
		atomic_long_add(delta, &calc_load_tasks);

	active = atomic_long_read(&calc_load_tasks);
	active = active > 0 ? active * FIXED_1 : 0;

	avenrun[0] = calc_load(avenrun[0], EXP_1, active);
	avenrun[1] = calc_load(avenrun[1], EXP_5, active);
	avenrun[2] = calc_load(avenrun[2], EXP_15, active);

	calc_load_update += LOAD_FREQ;

	/*
	 * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
	 */
	calc_global_nohz();
}

/*
 * Called from update_cpu_load() to periodically update this CPU's
 * active count.
 */
static void calc_load_account_active(struct rq *this_rq)
{
	long delta;

	if (time_before(jiffies, this_rq->calc_load_update))
		return;

	delta = calc_load_fold_active(this_rq);
	if (delta)
		atomic_long_add(delta, &calc_load_tasks);

	this_rq->calc_load_update += LOAD_FREQ;
}

/*
 * End of global load-average stuff
 */

/*
 * The exact cpuload at various idx values, calculated at every tick would be
 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
 *
 * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
 * on nth tick when cpu may be busy, then we have:
 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
 * load = (2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
 *
 * decay_load_missed() below does efficient calculation of
 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
 * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
 *
 * The calculation is approximated on a 128 point scale.
 * degrade_zero_ticks is the number of ticks after which load at any
 * particular idx is approximated to be zero.
 * degrade_factor is a precomputed table, a row for each load idx.
 * Each column corresponds to degradation factor for a power of two ticks,
 * based on 128 point scale.
 * Example:
 * row 2, col 3 (=12) says that the degradation at load idx 2 after
 * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
 *
 * With this power of 2 load factors, we can degrade the load n times
 * by looking at 1 bits in n and doing as many mult/shift instead of
 * n mult/shifts needed by the exact degradation.
 */
#define DEGRADE_SHIFT		7
static const unsigned char
		degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
/* Rows with fewer than 8 initializers are zero-filled per C semantics. */
static const unsigned char
		degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
					{0, 0, 0, 0, 0, 0, 0, 0},
					{64, 32, 8, 0, 0, 0, 0, 0},
					{96, 72, 40, 12, 1, 0, 0},
					{112, 98, 75, 43, 15, 1, 0},
					{120, 112, 98, 76, 45, 16, 2} };

/*
 * Update cpu_load for any missed ticks, due to tickless idle. The backlog
 * would be when CPU is idle and so we just decay the old load without
 * adding any new load.
 */
static unsigned long
decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
{
	int j = 0;

	if (!missed_updates)
		return load;

	if (missed_updates >= degrade_zero_ticks[idx])
		return 0;

	if (idx == 1)
		/* idx 1 decays by exactly 1/2 per tick: a plain shift */
		return load >> missed_updates;

	while (missed_updates) {
		if (missed_updates % 2)
			load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;

		missed_updates >>= 1;
		j++;
	}
	return load;
}

/*
 * Update rq->cpu_load[] statistics. This function is usually called every
 * scheduler tick (TICK_NSEC). With tickless idle this will not be called
 * every tick. We fix it up based on jiffies.
 */
static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
			      unsigned long pending_updates)
{
	int i, scale;

	this_rq->nr_load_updates++;

	/* Update our load: */
	this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
	for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
		unsigned long old_load, new_load;

		/* scale is effectively 1 << i now, and >> i divides by scale */

		old_load = this_rq->cpu_load[i];
		old_load = decay_load_missed(old_load, pending_updates - 1, i);
		new_load = this_load;
		/*
		 * Round up the averaging division if load is increasing. This
		 * prevents us from getting stuck on 9 if the load is 10, for
		 * example.
		 */
		if (new_load > old_load)
			new_load += scale - 1;

		this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
	}

	sched_avg_update(this_rq);
}

#ifdef CONFIG_NO_HZ_COMMON
/*
 * There is no sane way to deal with nohz on smp when using jiffies because the
 * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
 * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
 *
 * Therefore we cannot use the delta approach from the regular tick since that
 * would seriously skew the load calculation.
 * However we'll make do for those
 * updates happening while idle (nohz_idle_balance) or coming out of idle
 * (tick_nohz_idle_exit).
 *
 * This means we might still be one tick off for nohz periods.
 */

/*
 * Called from nohz_idle_balance() to update the load ratings before doing the
 * idle balance.
 */
void update_idle_cpu_load(struct rq *this_rq)
{
	unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
	unsigned long load = this_rq->load.weight;
	unsigned long pending_updates;

	/*
	 * bail if there's load or we're actually up-to-date.
	 */
	if (load || curr_jiffies == this_rq->last_load_update_tick)
		return;

	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
	this_rq->last_load_update_tick = curr_jiffies;

	__update_cpu_load(this_rq, load, pending_updates);
}

/*
 * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
 */
void update_cpu_load_nohz(void)
{
	struct rq *this_rq = this_rq();
	unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
	unsigned long pending_updates;

	if (curr_jiffies == this_rq->last_load_update_tick)
		return;

	/*
	 * Unlike update_idle_cpu_load() we run here on behalf of a cpu
	 * that may be racing with its own tick, hence the rq lock.
	 */
	raw_spin_lock(&this_rq->lock);
	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
	if (pending_updates) {
		this_rq->last_load_update_tick = curr_jiffies;
		/*
		 * We were idle, this means load 0, the current load might be
		 * !0 due to remote wakeups and the sort.
		 */
		__update_cpu_load(this_rq, 0, pending_updates);
	}
	raw_spin_unlock(&this_rq->lock);
}
#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Called from scheduler_tick()
 */
static void update_cpu_load_active(struct rq *this_rq)
{
	/*
	 * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
	 */
	this_rq->last_load_update_tick = jiffies;
	__update_cpu_load(this_rq, this_rq->load.weight, 1);

	calc_load_account_active(this_rq);
}

#if defined(CONFIG_SMP)
/*
 * sched_exec - execve() is a valuable balancing opportunity, because at
 * this point the task has the smallest effective memory and cache footprint.
 */
void sched_exec(void)
{
	struct task_struct *p = current;
	unsigned long flags;
	int dest_cpu;

	/*
	 * NOTE(review): sched_enable_hmp is a vendor HMP knob; when the HMP
	 * scheduler is active, exec-time rebalancing is skipped entirely —
	 * presumably HMP does its own placement. Confirm against the HMP code.
	 */
	if (sched_enable_hmp)
		return;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
	if (dest_cpu == smp_processor_id())
		goto unlock;

	if (likely(cpu_active(dest_cpu))) {
		struct migration_arg arg = { p, dest_cpu };

		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
		return;
	}
unlock:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

#endif

DEFINE_PER_CPU(struct kernel_stat, kstat);
DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);

EXPORT_PER_CPU_SYMBOL(kstat);
EXPORT_PER_CPU_SYMBOL(kernel_cpustat);

/*
 * Return any ns on the sched_clock that have not yet been accounted in
 * @p in case that task is currently running.
 *
 * Called with task_rq_lock() held on @rq.
 */
static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
{
	u64 ns = 0;

	if (task_current(rq, p)) {
		update_rq_clock(rq);
		ns = rq->clock_task - p->se.exec_start;
		/* clamp clock skew to zero rather than returning garbage */
		if ((s64)ns < 0)
			ns = 0;
	}

	return ns;
}

/*
 * Unaccounted runtime of @p since it was last scheduled in; 0 unless @p
 * is currently running.
 */
unsigned long long task_delta_exec(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;
	u64 ns = 0;

	rq = task_rq_lock(p, &flags);
	ns = do_task_delta_exec(p, rq);
	task_rq_unlock(rq, p, &flags);

	return ns;
}

/*
 * Return accounted runtime for the task.
 * In case the task is currently running, return the runtime plus current's
 * pending runtime that have not been accounted yet.
 */
unsigned long long task_sched_runtime(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;
	u64 ns = 0;

	rq = task_rq_lock(p, &flags);
	ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
	task_rq_unlock(rq, p, &flags);

	return ns;
}

/*
 * This function gets called by the timer code, with HZ frequency.
 * We call it with interrupts disabled.
 */
void scheduler_tick(void)
{
	int cpu = smp_processor_id();
	struct rq *rq = cpu_rq(cpu);
	struct task_struct *curr = rq->curr;

	sched_clock_tick();

	raw_spin_lock(&rq->lock);
	/*
	 * NOTE(review): set_window_start()/update_task_ravg() and
	 * check_for_migration() below are vendor HMP/window-stats additions,
	 * not upstream — verify against the HMP implementation.
	 */
	set_window_start(rq);
	update_rq_clock(rq);
	update_cpu_load_active(rq);
	curr->sched_class->task_tick(rq, curr, 0);
	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), 0);
	raw_spin_unlock(&rq->lock);

	perf_event_task_tick();

#ifdef CONFIG_SMP
	rq->idle_balance = idle_cpu(cpu);
	trigger_load_balance(rq, cpu);
#endif
	rq_last_tick_reset(rq);

	if (curr->sched_class == &fair_sched_class)
		check_for_migration(rq, curr);
}

#ifdef CONFIG_NO_HZ_FULL
/**
 * scheduler_tick_max_deferment
 *
 * Keep at least one tick per second when a single
 * active task is running because the scheduler doesn't
 * yet completely support full dynticks environment.
 *
 * This makes sure that uptime, CFS vruntime, load
 * balancing, etc... continue to move forward, even
 * with a very low granularity.
 */
u64 scheduler_tick_max_deferment(void)
{
	struct rq *rq = this_rq();
	unsigned long next, now = ACCESS_ONCE(jiffies);

	next = rq->last_sched_tick + HZ;

	if (time_before_eq(next, now))
		return 0;

	return jiffies_to_usecs(next - now) * NSEC_PER_USEC;
}
#endif

/*
 * Resolve the real caller behind the lock primitives so preempt tracing
 * points at the interesting frame, not the locking wrapper.
 */
notrace unsigned long get_parent_ip(unsigned long addr)
{
	if (in_lock_functions(addr)) {
		addr = CALLER_ADDR2;
		if (in_lock_functions(addr))
			addr = CALLER_ADDR3;
	}
	return addr;
}

#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
				defined(CONFIG_PREEMPT_TRACER))

void __kprobes add_preempt_count(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Underflow?
	 */
	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
		return;
#endif
	preempt_count() += val;
#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Spinlock count overflowing soon?
	 */
	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
				PREEMPT_MASK - 10);
#endif
	/* count just became non-zero: preemption is now disabled */
	if (preempt_count() == val)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
EXPORT_SYMBOL(add_preempt_count);

void __kprobes sub_preempt_count(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Underflow?
	 */
	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
		return;
	/*
	 * Is the spinlock portion underflowing?
	 */
	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
			!(preempt_count() & PREEMPT_MASK)))
		return;
#endif

	/* trace before the decrement so the off-region endpoint is exact */
	if (preempt_count() == val)
		trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
	preempt_count() -= val;
}
EXPORT_SYMBOL(sub_preempt_count);

#endif

/*
 * Print scheduling while atomic bug:
 */
static noinline void __schedule_bug(struct task_struct *prev)
{
	if (oops_in_progress)
		return;

	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
		prev->comm, prev->pid, preempt_count());

	debug_show_held_locks(prev);
	print_modules();
	if (irqs_disabled())
		print_irqtrace_events(prev);
	dump_stack();
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}

/*
 * Various schedule()-time debugging checks and statistics:
 */
static inline void schedule_debug(struct task_struct *prev)
{
	/*
	 * Test if we are atomic. Since do_exit() needs to call into
	 * schedule() atomically, we ignore that path for now.
	 * Otherwise, whine if we are scheduling when we should not be.
	 */
	if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
		__schedule_bug(prev);
	rcu_sleep_check();

	profile_hit(SCHED_PROFILING, __builtin_return_address(0));

	schedstat_inc(this_rq(), sched_count);
}

static void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	if (prev->on_rq || rq->skip_clock_update < 0)
		update_rq_clock(rq);
	prev->sched_class->put_prev_task(rq, prev);
}

/*
 * Pick up the highest-prio task:
 */
static inline struct task_struct *
pick_next_task(struct rq *rq)
{
	const struct sched_class *class;
	struct task_struct *p;

	/*
	 * Optimization: we know that if all tasks are in
	 * the fair class we can call that function directly:
	 */
	if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
		p = fair_sched_class.pick_next_task(rq);
		if (likely(p))
			return p;
	}

	for_each_class(class) {
		p = class->pick_next_task(rq);
		if (p)
			return p;
	}

	BUG(); /* the idle class will always have a runnable task */
}

/*
 * __schedule() is the main scheduler function.
 *
 * The main means of driving the scheduler and thus entering this function are:
 *
 *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
 *
 *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
 *      paths. For example, see arch/x86/entry_64.S.
 *
 *      To drive preemption between tasks, the scheduler sets the flag in timer
 *      interrupt handler scheduler_tick().
 *
 *   3. Wakeups don't really cause entry into schedule(). They add a
 *      task to the run-queue and that's it.
 *
 *      Now, if the new task added to the run-queue preempts the current
 *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
 *      called on the nearest possible occasion:
 *
 *       - If the kernel is preemptible (CONFIG_PREEMPT=y):
 *
 *         - in syscall or exception context, at the next outmost
 *           preempt_enable(). (this might be as soon as the wake_up()'s
 *           spin_unlock()!)
 *
 *         - in IRQ context, return from interrupt-handler to
 *           preemptible context
 *
 *       - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
 *         then at the next:
 *
 *          - cond_resched() call
 *          - explicit schedule() call
 *          - return from syscall or exception to user-space
 *          - return from interrupt-handler to user-space
 */
static void __sched __schedule(void)
{
	struct task_struct *prev, *next;
	unsigned long *switch_count;
	struct rq *rq;
	int cpu;
	u64 wallclock;

need_resched:
	preempt_disable();
	cpu = smp_processor_id();
	rq = cpu_rq(cpu);
	rcu_note_context_switch(cpu);
	prev = rq->curr;

	schedule_debug(prev);

	if (sched_feat(HRTICK))
		hrtick_clear(rq);

	/*
	 * Make sure that signal_pending_state()->signal_pending() below
	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
	 * done by the caller to avoid the race with signal_wake_up().
	 */
	smp_mb__before_spinlock();
	raw_spin_lock_irq(&rq->lock);
#ifdef CONFIG_ARCH_WANTS_CTXSW_LOGGING
	/* vendor context-switch logging hook */
	dlog("%s: locked %p at %llu\n", __func__, &rq->lock, sched_clock());
#endif

	switch_count = &prev->nivcsw;
	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
		if (unlikely(signal_pending_state(prev->state, prev))) {
			prev->state = TASK_RUNNING;
		} else {
			deactivate_task(rq, prev, DEQUEUE_SLEEP);
			prev->on_rq = 0;

			/*
			 * If a worker went to sleep, notify and ask workqueue
			 * whether it wants to wake up a task to maintain
			 * concurrency.
			 */
			if (prev->flags & PF_WQ_WORKER) {
				struct task_struct *to_wakeup;

				to_wakeup = wq_worker_sleeping(prev, cpu);
				if (to_wakeup)
					try_to_wake_up_local(to_wakeup);
			}
		}
		switch_count = &prev->nvcsw;
	}

	pre_schedule(rq, prev);

	if (unlikely(!rq->nr_running))
		idle_balance(cpu, rq);

	put_prev_task(rq, prev);
	next = pick_next_task(rq);
	/*
	 * NOTE(review): update_task_ravg() is a vendor HMP window-stats
	 * hook; both outgoing and incoming tasks are sampled at the same
	 * wallclock so the window accounting stays consistent.
	 */
	wallclock = sched_clock();
	update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
	update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
	clear_tsk_need_resched(prev);
	rq->skip_clock_update = 0;

	BUG_ON(task_cpu(next) != cpu_of(rq));

	if (likely(prev != next)) {
		rq->nr_switches++;
		rq->curr = next;
		++*switch_count;
#ifdef CONFIG_ARCH_WANTS_CTXSW_LOGGING
		dlog("%s: enter context_switch at %llu\n", __func__,
			sched_clock());
#endif

		context_switch(rq, prev, next); /* unlocks the rq */
		/*
		 * The context switch have flipped the stack from under us
		 * and restored the local variables which were saved when
		 * this task called schedule() in the past. prev == current
		 * is still correct, but it can be moved to another cpu/rq.
		 */
		cpu = smp_processor_id();
		rq = cpu_rq(cpu);
	} else
		raw_spin_unlock_irq(&rq->lock);

	post_schedule(rq);

	sched_preempt_enable_no_resched();
	if (need_resched())
		goto need_resched;
}

static inline void sched_submit_work(struct task_struct *tsk)
{
	if (!tsk->state || tsk_is_pi_blocked(tsk))
		return;
	/*
	 * If we are going to sleep and we have plugged IO queued,
	 * make sure to submit it to avoid deadlocks.
	 */
	if (blk_needs_flush_plug(tsk))
		blk_schedule_flush_plug(tsk);
}

asmlinkage void __sched schedule(void)
{
	struct task_struct *tsk = current;

	sched_submit_work(tsk);
	__schedule();
}
EXPORT_SYMBOL(schedule);

#ifdef CONFIG_CONTEXT_TRACKING
asmlinkage void __sched schedule_user(void)
{
	/*
	 * If we come here after a random call to set_need_resched(),
	 * or we have been woken up remotely but the IPI has not yet arrived,
	 * we haven't yet exited the RCU idle mode. Do it here manually until
	 * we find a better solution.
	 */
	user_exit();
	schedule();
	user_enter();
}
#endif

/**
 * schedule_preempt_disabled - called with preemption disabled
 *
 * Returns with preemption disabled. Note: preempt_count must be 1
 */
void __sched schedule_preempt_disabled(void)
{
	sched_preempt_enable_no_resched();
	schedule();
	preempt_disable();
}

#ifdef CONFIG_PREEMPT
/*
 * this is the entry point to schedule() from in-kernel preemption
 * off of preempt_enable. Kernel preemptions off return from interrupt
 * occur there and call schedule directly.
 */
asmlinkage void __sched notrace preempt_schedule(void)
{
	struct thread_info *ti = current_thread_info();

	/*
	 * If there is a non-zero preempt_count or interrupts are disabled,
	 * we do not want to preempt the current task. Just return..
	 */
	if (likely(ti->preempt_count || irqs_disabled()))
		return;

	do {
		add_preempt_count_notrace(PREEMPT_ACTIVE);
		__schedule();
		sub_preempt_count_notrace(PREEMPT_ACTIVE);

		/*
		 * Check again in case we missed a preemption opportunity
		 * between schedule and now.
		 */
		barrier();
	} while (need_resched());
}
EXPORT_SYMBOL(preempt_schedule);

/*
 * this is the entry point to schedule() from kernel preemption
 * off of irq context.
 * Note, that this is called and return with irqs disabled. This will
 * protect us against recursive calling from irq.
 */
asmlinkage void __sched preempt_schedule_irq(void)
{
	struct thread_info *ti = current_thread_info();
	enum ctx_state prev_state;

	/* Catch callers which need to be fixed */
	BUG_ON(ti->preempt_count || !irqs_disabled());

	prev_state = exception_enter();

	do {
		add_preempt_count(PREEMPT_ACTIVE);
		local_irq_enable();
		__schedule();
		local_irq_disable();
		sub_preempt_count(PREEMPT_ACTIVE);

		/*
		 * Check again in case we missed a preemption opportunity
		 * between schedule and now.
		 */
		barrier();
	} while (need_resched());

	exception_exit(prev_state);
}

#endif /* CONFIG_PREEMPT */

int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
			  void *key)
{
	return try_to_wake_up(curr->private, mode, wake_flags);
}
EXPORT_SYMBOL(default_wake_function);

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	wait_queue_t *curr, *next;

	list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
		unsigned flags = curr->flags;

		/* stop once nr_exclusive exclusive waiters were woken */
		if (curr->func(curr, mode, wake_flags, key) &&
				(flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;
	}
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up(wait_queue_head_t *q, unsigned int mode,
			int nr_exclusive, void *key)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_common(q, mode, nr_exclusive, 0, key);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(__wake_up);

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
*/ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr) { __wake_up_common(q, mode, nr, 0, NULL); } EXPORT_SYMBOL_GPL(__wake_up_locked); void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key) { __wake_up_common(q, mode, 1, 0, key); } EXPORT_SYMBOL_GPL(__wake_up_locked_key); /** * __wake_up_sync_key - wake up threads blocked on a waitqueue. * @q: the waitqueue * @mode: which threads * @nr_exclusive: how many wake-one or wake-many threads to wake up * @key: opaque value to be passed to wakeup targets * * The sync wakeup differs that the waker knows that it will schedule * away soon, so while the target thread will be woken up, it will not * be migrated to another CPU - ie. the two threads are 'synchronized' * with each other. This can prevent needless bouncing between CPUs. * * On UP it can prevent extra preemption. * * It may be assumed that this function implies a write memory barrier before * changing the task state if and only if any tasks are woken up. */ void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, void *key) { unsigned long flags; int wake_flags = WF_SYNC; if (unlikely(!q)) return; if (unlikely(!nr_exclusive)) wake_flags = 0; spin_lock_irqsave(&q->lock, flags); __wake_up_common(q, mode, nr_exclusive, wake_flags, key); spin_unlock_irqrestore(&q->lock, flags); } EXPORT_SYMBOL_GPL(__wake_up_sync_key); /* * __wake_up_sync - see __wake_up_sync_key() */ void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) { __wake_up_sync_key(q, mode, nr_exclusive, NULL); } EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ /** * complete: - signals a single thread waiting on this completion * @x: holds the state of this particular completion * * This will wake up a single thread waiting on this completion. Threads will be * awakened in the same order in which they were queued. * * See also complete_all(), wait_for_completion() and related routines. 
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void complete(struct completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->wait.lock, flags);
	x->done++;
	__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
	spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete);

/**
 * complete_all: - signals all threads waiting on this completion
 * @x:  holds the state of this particular completion
 *
 * This will wake up all threads waiting on this particular completion event.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void complete_all(struct completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->wait.lock, flags);
	/* effectively "infinite" so every current and future waiter passes */
	x->done += UINT_MAX/2;
	__wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
	spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete_all);

/*
 * Wait loop shared by all wait_for_completion*() variants.  Called with
 * x->wait.lock held; drops and re-takes it around each @action sleep.
 * Returns remaining @timeout (forced to 1 when it would be 0 on success)
 * or -ERESTARTSYS when interrupted in an interruptible @state.
 */
static inline long __sched
do_wait_for_common(struct completion *x,
		   long (*action)(long), long timeout, int state)
{
	if (!x->done) {
		DECLARE_WAITQUEUE(wait, current);

		__add_wait_queue_tail_exclusive(&x->wait, &wait);
		do {
			if (signal_pending_state(state, current)) {
				timeout = -ERESTARTSYS;
				break;
			}
			__set_current_state(state);
			spin_unlock_irq(&x->wait.lock);
			timeout = action(timeout);
			spin_lock_irq(&x->wait.lock);
		} while (!x->done && timeout);
		__remove_wait_queue(&x->wait, &wait);
		if (!x->done)
			return timeout;
	}
	/* consume one completion; counting semantics for counted completions */
	x->done--;
	return timeout ?: 1;
}

static inline long __sched
__wait_for_common(struct completion *x,
		  long (*action)(long), long timeout, int state)
{
	might_sleep();

	spin_lock_irq(&x->wait.lock);
	timeout = do_wait_for_common(x, action, timeout, state);
	spin_unlock_irq(&x->wait.lock);
	return timeout;
}

static long __sched
wait_for_common(struct completion *x, long timeout, int state)
{
	return __wait_for_common(x, schedule_timeout, timeout, state);
}

/* Same as wait_for_common() but the sleep is accounted as IO wait. */
static long __sched
wait_for_common_io(struct completion *x, long timeout, int state)
{
	return __wait_for_common(x, io_schedule_timeout, timeout, state);
}

/**
 * wait_for_completion: - waits for completion of a task
 * @x:  holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It is NOT
 * interruptible and there is no timeout.
 *
 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
 * and interrupt capability. Also see complete().
 */
void __sched wait_for_completion(struct completion *x)
{
	wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion);

/**
 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. The timeout is in jiffies. It is not
 * interruptible.
 *
 * The return value is 0 if timed out, and positive (at least 1, or number of
 * jiffies left till timeout) if completed.
 */
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_timeout);

/**
 * wait_for_completion_io: - waits for completion of a task
 * @x:  holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It is NOT
 * interruptible and there is no timeout. The caller is accounted as waiting
 * for IO.
*/ void __sched wait_for_completion_io(struct completion *x) { wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); } EXPORT_SYMBOL(wait_for_completion_io); /** * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout) * @x: holds the state of this particular completion * @timeout: timeout value in jiffies * * This waits for either a completion of a specific task to be signaled or for a * specified timeout to expire. The timeout is in jiffies. It is not * interruptible. The caller is accounted as waiting for IO. * * The return value is 0 if timed out, and positive (at least 1, or number of * jiffies left till timeout) if completed. */ unsigned long __sched wait_for_completion_io_timeout(struct completion *x, unsigned long timeout) { return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE); } EXPORT_SYMBOL(wait_for_completion_io_timeout); /** * wait_for_completion_interruptible: - waits for completion of a task (w/intr) * @x: holds the state of this particular completion * * This waits for completion of a specific task to be signaled. It is * interruptible. * * The return value is -ERESTARTSYS if interrupted, 0 if completed. */ int __sched wait_for_completion_interruptible(struct completion *x) { long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); if (t == -ERESTARTSYS) return t; return 0; } EXPORT_SYMBOL(wait_for_completion_interruptible); /** * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr)) * @x: holds the state of this particular completion * @timeout: timeout value in jiffies * * This waits for either a completion of a specific task to be signaled or for a * specified timeout to expire. It is interruptible. The timeout is in jiffies. * * The return value is -ERESTARTSYS if interrupted, 0 if timed out, * positive (at least 1, or number of jiffies left till timeout) if completed. 
*/ long __sched wait_for_completion_interruptible_timeout(struct completion *x, unsigned long timeout) { return wait_for_common(x, timeout, TASK_INTERRUPTIBLE); } EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); /** * wait_for_completion_killable: - waits for completion of a task (killable) * @x: holds the state of this particular completion * * This waits to be signaled for completion of a specific task. It can be * interrupted by a kill signal. * * The return value is -ERESTARTSYS if interrupted, 0 if completed. */ int __sched wait_for_completion_killable(struct completion *x) { long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE); if (t == -ERESTARTSYS) return t; return 0; } EXPORT_SYMBOL(wait_for_completion_killable); /** * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable)) * @x: holds the state of this particular completion * @timeout: timeout value in jiffies * * This waits for either a completion of a specific task to be * signaled or for a specified timeout to expire. It can be * interrupted by a kill signal. The timeout is in jiffies. * * The return value is -ERESTARTSYS if interrupted, 0 if timed out, * positive (at least 1, or number of jiffies left till timeout) if completed. */ long __sched wait_for_completion_killable_timeout(struct completion *x, unsigned long timeout) { return wait_for_common(x, timeout, TASK_KILLABLE); } EXPORT_SYMBOL(wait_for_completion_killable_timeout); /** * try_wait_for_completion - try to decrement a completion without blocking * @x: completion structure * * Returns: 0 if a decrement cannot be done without blocking * 1 if a decrement succeeded. * * If a completion is being used as a counting completion, * attempt to decrement the counter without blocking. This * enables us to avoid waiting if the resource the completion * is protecting is not available. 
 */
bool try_wait_for_completion(struct completion *x)
{
	unsigned long flags;
	int ret = 1;

	/* x->done is protected by the wait-queue spinlock. */
	spin_lock_irqsave(&x->wait.lock, flags);
	if (!x->done)
		ret = 0;
	else
		x->done--;
	spin_unlock_irqrestore(&x->wait.lock, flags);
	return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);

/**
 * completion_done - Test to see if a completion has any waiters
 * @x: completion structure
 *
 * Returns: 0 if there are waiters (wait_for_completion() in progress)
 *          1 if there are no waiters.
 *
 */
bool completion_done(struct completion *x)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&x->wait.lock, flags);
	if (!x->done)
		ret = 0;
	spin_unlock_irqrestore(&x->wait.lock, flags);
	return ret;
}
EXPORT_SYMBOL(completion_done);

/*
 * Common helper for the legacy sleep_on() family: queue the current task
 * on @q in @state, schedule (with optional timeout), then dequeue.
 *
 * NOTE(review): the sleep_on() interfaces are historically race-prone
 * (the wakeup condition is not re-checked under a lock); new code should
 * use wait_event*() instead — kept here for legacy callers.
 */
static long __sched
sleep_on_common(wait_queue_head_t *q, int state, long timeout)
{
	unsigned long flags;
	wait_queue_t wait;

	init_waitqueue_entry(&wait, current);

	__set_current_state(state);

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, &wait);
	spin_unlock(&q->lock);
	timeout = schedule_timeout(timeout);
	spin_lock_irq(&q->lock);
	__remove_wait_queue(q, &wait);
	spin_unlock_irqrestore(&q->lock, flags);

	return timeout;
}

void __sched interruptible_sleep_on(wait_queue_head_t *q)
{
	sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(interruptible_sleep_on);

long __sched
interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
}
EXPORT_SYMBOL(interruptible_sleep_on_timeout);

void __sched sleep_on(wait_queue_head_t *q)
{
	sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(sleep_on);

long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
}
EXPORT_SYMBOL(sleep_on_timeout);

#ifdef CONFIG_RT_MUTEXES

/*
 * rt_mutex_setprio - set the current priority of a task
 * @p: task
 * @prio: prio value (kernel-internal form)
 *
 * This function changes the 'effective' priority of a task.
 It does
 * not touch ->normal_prio like __setscheduler().
 *
 * Used by the rt_mutex code to implement priority inheritance logic.
 */
void rt_mutex_setprio(struct task_struct *p, int prio)
{
	int oldprio, on_rq, running;
	struct rq *rq;
	const struct sched_class *prev_class;

	BUG_ON(prio < 0 || prio > MAX_PRIO);

	rq = __task_rq_lock(p);

	/*
	 * Idle task boosting is a nono in general. There is one
	 * exception, when PREEMPT_RT and NOHZ is active:
	 *
	 * The idle task calls get_next_timer_interrupt() and holds
	 * the timer wheel base->lock on the CPU and another CPU wants
	 * to access the timer (probably to cancel it). We can safely
	 * ignore the boosting request, as the idle CPU runs this code
	 * with interrupts disabled and will complete the lock
	 * protected section without being interrupted. So there is no
	 * real need to boost.
	 */
	if (unlikely(p == rq->idle)) {
		WARN_ON(p != rq->curr);
		WARN_ON(p->pi_blocked_on);
		goto out_unlock;
	}

	trace_sched_pi_setprio(p, prio);
	oldprio = p->prio;
	prev_class = p->sched_class;
	on_rq = p->on_rq;
	running = task_current(rq, p);
	/* Take the task off the runqueue before changing its class/prio. */
	if (on_rq)
		dequeue_task(rq, p, 0);
	if (running)
		p->sched_class->put_prev_task(rq, p);

	if (rt_prio(prio))
		p->sched_class = &rt_sched_class;
	else
		p->sched_class = &fair_sched_class;

	p->prio = prio;

	if (running)
		p->sched_class->set_curr_task(rq);
	/* Requeue at the head when boosted (numerically lower prio). */
	if (on_rq)
		enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);

	check_class_changed(rq, p, prev_class, oldprio);
out_unlock:
	__task_rq_unlock(rq);
}
#endif

void set_user_nice(struct task_struct *p, long nice)
{
	int old_prio, delta, on_rq;
	unsigned long flags;
	struct rq *rq;

	if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
		return;
	/*
	 * We have to be careful, if called from sys_setpriority(),
	 * the task might be in the middle of scheduling on another CPU.
	 */
	rq = task_rq_lock(p, &flags);
	/*
	 * The RT priorities are set via sched_setscheduler(), but we still
	 * allow the 'normal' nice value to be set - but as expected
	 * it won't have any effect on scheduling until the task is
	 * SCHED_FIFO/SCHED_RR:
	 */
	if (task_has_rt_policy(p)) {
		p->static_prio = NICE_TO_PRIO(nice);
		goto out_unlock;
	}
	on_rq = p->on_rq;
	if (on_rq)
		dequeue_task(rq, p, 0);

	p->static_prio = NICE_TO_PRIO(nice);
	set_load_weight(p);
	old_prio = p->prio;
	p->prio = effective_prio(p);
	delta = p->prio - old_prio;

	if (on_rq) {
		enqueue_task(rq, p, 0);
		/*
		 * If the task increased its priority or is running and
		 * lowered its priority, then reschedule its CPU:
		 */
		if (delta < 0 || (delta > 0 && task_running(rq, p)))
			resched_task(rq->curr);
	}
out_unlock:
	task_rq_unlock(rq, p, &flags);
}
EXPORT_SYMBOL(set_user_nice);

/*
 * can_nice - check if a task can reduce its nice value
 * @p: task
 * @nice: nice value
 */
int can_nice(const struct task_struct *p, const int nice)
{
	/* convert nice value [19,-20] to rlimit style value [1,40] */
	int nice_rlim = 20 - nice;

	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
		capable(CAP_SYS_NICE));
}

#ifdef __ARCH_WANT_SYS_NICE

/*
 * sys_nice - change the priority of the current process.
 * @increment: priority increment
 *
 * sys_setpriority is a more generic, but much slower function that
 * does similar things.
 */
SYSCALL_DEFINE1(nice, int, increment)
{
	long nice, retval;

	/*
	 * Setpriority might change our priority at the same moment.
	 * We don't have to worry. Conceptually one call occurs first
	 * and we have a single winner.
	 */
	/* Clamp the increment to the maximum meaningful range [-40, 40]. */
	if (increment < -40)
		increment = -40;
	if (increment > 40)
		increment = 40;

	nice = TASK_NICE(current) + increment;
	if (nice < -20)
		nice = -20;
	if (nice > 19)
		nice = 19;

	/* Lowering nice (raising priority) needs privilege or rlimit room. */
	if (increment < 0 && !can_nice(current, nice))
		return -EPERM;

	retval = security_task_setnice(current, nice);
	if (retval)
		return retval;

	set_user_nice(current, nice);
	return 0;
}

#endif

/**
 * task_prio - return the priority value of a given task.
 * @p: the task in question.
 *
 * This is the priority value as seen by users in /proc.
 * RT tasks are offset by -200. Normal tasks are centered
 * around 0, value goes from -16 to +15.
 */
int task_prio(const struct task_struct *p)
{
	return p->prio - MAX_RT_PRIO;
}

/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 */
int task_nice(const struct task_struct *p)
{
	return TASK_NICE(p);
}
EXPORT_SYMBOL(task_nice);

/**
 * idle_cpu - is a given cpu idle currently?
 * @cpu: the processor in question.
 */
int idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (rq->curr != rq->idle)
		return 0;

	if (rq->nr_running)
		return 0;

#ifdef CONFIG_SMP
	/* Pending remote wakeups mean the cpu is not really idle. */
	if (!llist_empty(&rq->wake_list))
		return 0;
#endif

	return 1;
}

/**
 * idle_task - return the idle task for a given cpu.
 * @cpu: the processor in question.
 */
struct task_struct *idle_task(int cpu)
{
	return cpu_rq(cpu)->idle;
}

/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 */
static struct task_struct *find_process_by_pid(pid_t pid)
{
	/* pid 0 means "the calling task", matching setpriority() semantics. */
	return pid ? find_task_by_vpid(pid) : current;
}

/* Actually do priority change: must hold pi & rq lock. */
static void __setscheduler(struct rq *rq, struct task_struct *p,
			   const struct sched_attr *attr)
{
	int policy = attr->sched_policy;

	if (policy == -1) /* setparam */
		policy = p->policy;

	p->policy = policy;

	if (fair_policy(policy))
		p->static_prio = NICE_TO_PRIO(attr->sched_nice);

	/*
	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
	 * !rt_policy. Always setting this ensures that things like
	 * getparam()/getattr() don't report silly values for !rt tasks.
	 */
	p->rt_priority = attr->sched_priority;

	p->normal_prio = normal_prio(p);
	/* Keep any priority-inheritance boost applied by rt_mutex. */
	p->prio = rt_mutex_getprio(p);

	if (rt_prio(p->prio))
		p->sched_class = &rt_sched_class;
	else
		p->sched_class = &fair_sched_class;

	set_load_weight(p);
}

/*
 * check the target process has a UID that matches the current process's
 */
static bool check_same_owner(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred;
	bool match;

	/* Credentials of another task must be read under RCU. */
	rcu_read_lock();
	pcred = __task_cred(p);
	match = (uid_eq(cred->euid, pcred->euid) ||
		 uid_eq(cred->euid, pcred->uid));
	rcu_read_unlock();
	return match;
}

static int __sched_setscheduler(struct task_struct *p,
				const struct sched_attr *attr,
				bool user)
{
	int retval, oldprio, oldpolicy = -1, on_rq, running;
	int policy = attr->sched_policy;
	unsigned long flags;
	const struct sched_class *prev_class;
	struct rq *rq;
	int reset_on_fork;

	/* may grab non-irq protected spin_locks */
	BUG_ON(in_interrupt());
recheck:
	/* double check policy once rq lock held */
	if (policy < 0) {
		/* policy < 0 means "keep current policy" (setparam path). */
		reset_on_fork = p->sched_reset_on_fork;
		policy = oldpolicy = p->policy;
	} else {
		reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
		policy &= ~SCHED_RESET_ON_FORK;

		if (policy != SCHED_FIFO && policy != SCHED_RR &&
				policy != SCHED_NORMAL && policy != SCHED_BATCH &&
				policy != SCHED_IDLE)
			return -EINVAL;
	}

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are
	 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
	 * SCHED_BATCH and SCHED_IDLE is 0.
	 */
	/* Kernel threads (!p->mm) may use the full MAX_RT_PRIO-1 range. */
	if (attr->sched_priority < 0 ||
	    (p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) ||
	    (!p->mm && attr->sched_priority > MAX_RT_PRIO-1))
		return -EINVAL;
	/* rt policies require a non-zero priority, non-rt require zero. */
	if (rt_policy(policy) != (attr->sched_priority != 0))
		return -EINVAL;

	/*
	 * Allow unprivileged RT tasks to decrease priority:
	 */
	if (user && !capable(CAP_SYS_NICE)) {
		if (fair_policy(policy)) {
			if (!can_nice(p, attr->sched_nice))
				return -EPERM;
		}

		if (rt_policy(policy)) {
			unsigned long rlim_rtprio =
					task_rlimit(p, RLIMIT_RTPRIO);

			/* can't set/change the rt policy */
			if (policy != p->policy && !rlim_rtprio)
				return -EPERM;

			/* can't increase priority */
			if (attr->sched_priority > p->rt_priority &&
			    attr->sched_priority > rlim_rtprio)
				return -EPERM;
		}

		/*
		 * Treat SCHED_IDLE as nice 20. Only allow a switch to
		 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
		 */
		if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
			if (!can_nice(p, TASK_NICE(p)))
				return -EPERM;
		}

		/* can't change other user's priorities */
		if (!check_same_owner(p))
			return -EPERM;

		/* Normal users shall not reset the sched_reset_on_fork flag */
		if (p->sched_reset_on_fork && !reset_on_fork)
			return -EPERM;
	}

	if (user) {
		retval = security_task_setscheduler(p);
		if (retval)
			return retval;
	}

	/*
	 * make sure no PI-waiters arrive (or leave) while we are
	 * changing the priority of the task:
	 *
	 * To be able to change p->policy safely, the appropriate
	 * runqueue lock must be held.
	 */
	rq = task_rq_lock(p, &flags);

	/*
	 * Changing the policy of the stop threads is a very bad idea
	 */
	if (p == rq->stop) {
		task_rq_unlock(rq, p, &flags);
		return -EINVAL;
	}

	/*
	 * If not changing anything there's no need to proceed further:
	 */
	if (unlikely(policy == p->policy)) {
		if (fair_policy(policy) && attr->sched_nice != TASK_NICE(p))
			goto change;
		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
			goto change;

		task_rq_unlock(rq, p, &flags);
		return 0;
	}
change:

#ifdef CONFIG_RT_GROUP_SCHED
	if (user) {
		/*
		 * Do not allow realtime tasks into groups that have no runtime
		 * assigned.
		 */
		if (rt_bandwidth_enabled() && rt_policy(policy) &&
				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
				!task_group_is_autogroup(task_group(p))) {
			task_rq_unlock(rq, p, &flags);
			return -EPERM;
		}
	}
#endif

	/* recheck policy now with rq lock held */
	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
		/* Raced with a concurrent policy change: redo validation. */
		policy = oldpolicy = -1;
		task_rq_unlock(rq, p, &flags);
		goto recheck;
	}
	on_rq = p->on_rq;
	running = task_current(rq, p);
	/* Dequeue before mutating policy/prio, requeue afterwards. */
	if (on_rq)
		dequeue_task(rq, p, 0);
	if (running)
		p->sched_class->put_prev_task(rq, p);

	p->sched_reset_on_fork = reset_on_fork;

	oldprio = p->prio;
	prev_class = p->sched_class;
	__setscheduler(rq, p, attr);

	if (running)
		p->sched_class->set_curr_task(rq);
	if (on_rq)
		enqueue_task(rq, p, 0);

	check_class_changed(rq, p, prev_class, oldprio);
	task_rq_unlock(rq, p, &flags);

	/* Re-evaluate any priority-inheritance chain after the change. */
	rt_mutex_adjust_pi(p);

	return 0;
}

/**
 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * NOTE that the task may be already dead.
 */
int sched_setscheduler(struct task_struct *p, int policy,
		       const struct sched_param *param)
{
	/* Bridge the legacy sched_param interface onto sched_attr. */
	struct sched_attr attr = {
		.sched_policy   = policy,
		.sched_priority = param->sched_priority
	};
	return __sched_setscheduler(p, &attr, true);
}
EXPORT_SYMBOL_GPL(sched_setscheduler);

int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
{
	return __sched_setscheduler(p, attr, true);
}
EXPORT_SYMBOL_GPL(sched_setattr);

/**
 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Just like sched_setscheduler, only don't bother checking if the
 * current context has permission.  For example, this is needed in
 * stop_machine(): we create temporary high priority worker threads,
 * but our caller might not have that capability.
 */
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
			       const struct sched_param *param)
{
	struct sched_attr attr = {
		.sched_policy   = policy,
		.sched_priority = param->sched_priority
	};
	return __sched_setscheduler(p, &attr, false);
}
EXPORT_SYMBOL(sched_setscheduler_nocheck);

static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
	struct sched_param lparam;
	struct task_struct *p;
	int retval;

	if (!param || pid < 0)
		return -EINVAL;
	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
		return -EFAULT;

	rcu_read_lock();
	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (p != NULL)
		retval = sched_setscheduler(p, policy, &lparam);
	rcu_read_unlock();

	return retval;
}

/*
 * Mimics kernel/events/core.c perf_copy_attr().
 */
static int sched_copy_attr(struct sched_attr __user *uattr,
			   struct sched_attr *attr)
{
	u32 size;
	int ret;

	if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
		return -EFAULT;

	/*
	 * zero the full structure, so that a short copy will be nice.
	 */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	if (size > PAGE_SIZE)	/* silly large */
		goto err_size;

	if (!size)		/* abi compat */
		size = SCHED_ATTR_SIZE_VER0;

	if (size < SCHED_ATTR_SIZE_VER0)
		goto err_size;

	/*
	 * If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(*attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(*attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			ret = get_user(val, addr);
			if (ret)
				return ret;
			if (val)
				goto err_size;
		}
		/* Only copy the part of the struct we understand. */
		size = sizeof(*attr);
	}

	ret = copy_from_user(attr, uattr, size);
	if (ret)
		return -EFAULT;

	/*
	 * XXX: do we want to be lenient like existing syscalls; or do we want
	 * to be strict and return an error on out-of-bounds values?
	 */
	attr->sched_nice = clamp(attr->sched_nice, -20, 19);

out:
	/* ret is 0 on the success path, -E2BIG via err_size. */
	return ret;

err_size:
	/* Tell user-space how big a struct we actually accept. */
	put_user(sizeof(*attr), &uattr->size);
	ret = -E2BIG;
	goto out;
}

/**
 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
 * @pid: the pid in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 */
SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
		struct sched_param __user *, param)
{
	/* negative values for policy are not valid */
	if (policy < 0)
		return -EINVAL;

	return do_sched_setscheduler(pid, policy, param);
}

/**
 * sys_sched_setparam - set/change the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the new RT priority.
 */
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
	/* policy == -1 keeps the current policy, changes only the priority. */
	return do_sched_setscheduler(pid, -1, param);
}

/**
 * sys_sched_setattr - same as above, but with extended sched_attr
 * @pid: the pid in question.
 * @attr: structure containing the extended parameters.
 */
SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
			       unsigned int, flags)
{
	struct sched_attr attr;
	struct task_struct *p;
	int retval;

	/* flags is reserved for future extension and must be zero. */
	if (!uattr || pid < 0 || flags)
		return -EINVAL;

	if (sched_copy_attr(uattr, &attr))
		return -EFAULT;

	rcu_read_lock();
	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (p != NULL)
		retval = sched_setattr(p, &attr);
	rcu_read_unlock();

	return retval;
}

/**
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
 */
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
	struct task_struct *p;
	int retval;

	if (pid < 0)
		return -EINVAL;

	retval = -ESRCH;
	rcu_read_lock();
	p = find_process_by_pid(pid);
	if (p) {
		retval = security_task_getscheduler(p);
		if (!retval)
			/* SCHED_RESET_ON_FORK is reported OR-ed into the policy. */
			retval = p->policy
				| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
	}
	rcu_read_unlock();
	return retval;
}

/**
 * sys_sched_getparam - get the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the RT priority.
 */
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
	struct sched_param lp;
	struct task_struct *p;
	int retval;

	if (!param || pid < 0)
		return -EINVAL;

	rcu_read_lock();
	p = find_process_by_pid(pid);
	retval = -ESRCH;
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	/* Snapshot under RCU only; a concurrent setscheduler may race. */
	lp.sched_priority = p->rt_priority;
	rcu_read_unlock();

	/*
	 * This one might sleep, we cannot do it with a spinlock held ...
	 */
	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;

	return retval;

out_unlock:
	rcu_read_unlock();
	return retval;
}

static int sched_read_attr(struct sched_attr __user *uattr,
			   struct sched_attr *attr,
			   unsigned int usize)
{
	int ret;

	if (!access_ok(VERIFY_WRITE, uattr, usize))
		return -EFAULT;

	/*
	 * If we're handed a smaller struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. old
	 * user-space does not get incomplete information.
	 */
	if (usize < sizeof(*attr)) {
		unsigned char *addr;
		unsigned char *end;

		/* Verify the truncated tail of the kernel struct is all zero. */
		addr = (void *)attr + usize;
		end  = (void *)attr + sizeof(*attr);

		for (; addr < end; addr++) {
			if (*addr)
				goto err_size;
		}

		attr->size = usize;
	}

	ret = copy_to_user(uattr, attr, attr->size);
	if (ret)
		return -EFAULT;

out:
	return ret;

err_size:
	ret = -E2BIG;
	goto out;
}

/**
 * sys_sched_getattr - same as above, but with extended "sched_param"
 * @pid: the pid in question.
 * @attr: structure containing the extended parameters.
 * @size: sizeof(attr) for fwd/bwd comp.
 */
SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
		unsigned int, size, unsigned int, flags)
{
	struct sched_attr attr = {
		.size = sizeof(struct sched_attr),
	};
	struct task_struct *p;
	int retval;

	if (!uattr || pid < 0 || size > PAGE_SIZE ||
	    size < SCHED_ATTR_SIZE_VER0 || flags)
		return -EINVAL;

	rcu_read_lock();
	p = find_process_by_pid(pid);
	retval = -ESRCH;
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	attr.sched_policy = p->policy;
	if (task_has_rt_policy(p))
		attr.sched_priority = p->rt_priority;
	else
		attr.sched_nice = TASK_NICE(p);

	rcu_read_unlock();

	retval = sched_read_attr(uattr, &attr, size);
	return retval;

out_unlock:
	rcu_read_unlock();
	return retval;
}

long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	cpumask_var_t cpus_allowed, new_mask;
	struct task_struct *p;
	int retval;

	rcu_read_lock();

	p = find_process_by_pid(pid);
	if (!p) {
		rcu_read_unlock();
		return -ESRCH;
	}

	/* Prevent p going away */
	get_task_struct(p);
	rcu_read_unlock();

	if (p->flags & PF_NO_SETAFFINITY) {
		retval = -EINVAL;
		goto out_put_task;
	}
	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_put_task;
	}
	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}
	retval = -EPERM;
	if (!check_same_owner(p)) {
		rcu_read_lock();
		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
			rcu_read_unlock();
			goto out_unlock;
		}
		rcu_read_unlock();
	}

	retval = security_task_setscheduler(p);
	if (retval)
		goto out_unlock;

	/* Restrict the requested mask to what the task's cpuset allows. */
	cpuset_cpus_allowed(p, cpus_allowed);
	cpumask_and(new_mask, in_mask, cpus_allowed);
again:
	retval = set_cpus_allowed_ptr(p, new_mask);

	if (!retval) {
		cpuset_cpus_allowed(p, cpus_allowed);
		if (!cpumask_subset(new_mask, cpus_allowed)) {
			/*
			 * We must have raced with a concurrent cpuset
			 * update. Just reset the cpus_allowed to the
			 * cpuset's cpus_allowed
			 */
			cpumask_copy(new_mask, cpus_allowed);
			goto again;
		}
	}
out_unlock:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
out_put_task:
	put_task_struct(p);
	return retval;
}

/*
 * Copy a user-supplied cpu bitmask into @new_mask, zero-padding when
 * the user buffer is shorter than the kernel mask and truncating when
 * it is longer.
 */
static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
			     struct cpumask *new_mask)
{
	if (len < cpumask_size())
		cpumask_clear(new_mask);
	else if (len > cpumask_size())
		len = cpumask_size();

	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
}

/**
 * sys_sched_setaffinity - set the cpu affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new cpu mask
 */
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	cpumask_var_t new_mask;
	int retval;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
	if (retval == 0)
		retval = sched_setaffinity(pid, new_mask);
	free_cpumask_var(new_mask);
	return retval;
}

long sched_getaffinity(pid_t pid, struct cpumask *mask)
{
	struct task_struct *p;
	unsigned long flags;
	int retval;

	rcu_read_lock();

	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	/* pi_lock stabilizes cpus_allowed against a concurrent change. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

out_unlock:
	rcu_read_unlock();

	return retval;
}

/**
 *
 sys_sched_getaffinity - get the cpu affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current cpu mask
 */
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;

	/* The user buffer must cover all possible cpus ... */
	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
		return -EINVAL;
	/* ... and be a whole number of unsigned longs. */
	if (len & (sizeof(unsigned long)-1))
		return -EINVAL;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		size_t retlen = min_t(size_t, len, cpumask_size());

		if (copy_to_user(user_mask_ptr, mask, retlen))
			ret = -EFAULT;
		else
			/* On success the syscall returns the copied size. */
			ret = retlen;
	}
	free_cpumask_var(mask);

	return ret;
}

/**
 * sys_sched_yield - yield the current processor to other threads.
 *
 * This function yields the current CPU to other tasks. If there are no
 * other threads running on this CPU then this function will return.
 */
SYSCALL_DEFINE0(sched_yield)
{
	struct rq *rq = this_rq_lock();

	schedstat_inc(rq, yld_count);
	current->sched_class->yield_task(rq);

	/*
	 * Since we are going to call schedule() anyway, there's
	 * no need to preempt or enable interrupts:
	 */
	__release(rq->lock);
	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
	do_raw_spin_unlock(&rq->lock);
	sched_preempt_enable_no_resched();

	schedule();

	return 0;
}

static inline int should_resched(void)
{
	return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
}

static void __cond_resched(void)
{
	/* PREEMPT_ACTIVE keeps us from being dequeued across __schedule(). */
	add_preempt_count(PREEMPT_ACTIVE);
	__schedule();
	sub_preempt_count(PREEMPT_ACTIVE);
}

int __sched _cond_resched(void)
{
	if (should_resched()) {
		__cond_resched();
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_cond_resched);

/*
 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
 * call schedule, and on return reacquire the lock.
 *
 * This works OK both with and without CONFIG_PREEMPT.
 We do strange low-level
 * operations here to prevent schedule() from being called twice (once via
 * spin_unlock(), once by hand).
 */
int __cond_resched_lock(spinlock_t *lock)
{
	int resched = should_resched();
	int ret = 0;

	lockdep_assert_held(lock);

	if (spin_needbreak(lock) || resched) {
		spin_unlock(lock);
		if (resched)
			__cond_resched();
		else
			cpu_relax();
		ret = 1;
		spin_lock(lock);
	}
	return ret;
}
EXPORT_SYMBOL(__cond_resched_lock);

int __sched __cond_resched_softirq(void)
{
	BUG_ON(!in_softirq());

	if (should_resched()) {
		/* Re-enable bottom halves around the reschedule. */
		local_bh_enable();
		__cond_resched();
		local_bh_disable();
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);

/**
 * yield - yield the current processor to other threads.
 *
 * Do not ever use this function, there's a 99% chance you're doing it wrong.
 *
 * The scheduler is at all times free to pick the calling task as the most
 * eligible task to run, if removing the yield() call from your code breaks
 * it, its already broken.
 *
 * Typical broken usage is:
 *
 * while (!event)
 *	yield();
 *
 * where one assumes that yield() will let 'the other' process run that will
 * make event true. If the current task is a SCHED_FIFO task that will never
 * happen. Never use yield() as a progress guarantee!!
 *
 * If you want to use yield() to wait for something, use wait_event().
 * If you want to use yield() to be 'nice' for others, use cond_resched().
 * If you still want to use yield(), do not!
 */
void __sched yield(void)
{
	set_current_state(TASK_RUNNING);
	sys_sched_yield();
}
EXPORT_SYMBOL(yield);

/**
 * yield_to - yield the current processor to another thread in
 * your thread group, or accelerate that thread toward the
 * processor it's on.
 * @p: target task
 * @preempt: whether task preemption is allowed or not
 *
 * It's the caller's job to ensure that the target task struct
 * can't go away on us before we can do any checks.
 *
 * Returns:
 *	true (>0) if we indeed boosted the target task.
 *	false (0) if we failed to boost the target.
 *	-ESRCH if there's no task to yield to.
 */
bool __sched yield_to(struct task_struct *p, bool preempt)
{
	struct task_struct *curr = current;
	struct rq *rq, *p_rq;
	unsigned long flags;
	int yielded = 0;

	local_irq_save(flags);
	rq = this_rq();

again:
	p_rq = task_rq(p);
	/*
	 * If we're the only runnable task on the rq and target rq also
	 * has only one task, there's absolutely no point in yielding.
	 */
	if (rq->nr_running == 1 && p_rq->nr_running == 1) {
		yielded = -ESRCH;
		goto out_irq;
	}

	double_rq_lock(rq, p_rq);
	/* p may have migrated between the lockless read and the lock. */
	while (task_rq(p) != p_rq) {
		double_rq_unlock(rq, p_rq);
		goto again;
	}

	if (!curr->sched_class->yield_to_task)
		goto out_unlock;

	if (curr->sched_class != p->sched_class)
		goto out_unlock;

	if (task_running(p_rq, p) || p->state)
		goto out_unlock;

	yielded = curr->sched_class->yield_to_task(rq, p, preempt);
	if (yielded) {
		schedstat_inc(rq, yld_count);
		/*
		 * Make p's CPU reschedule; pick_next_entity takes care of
		 * fairness.
		 */
		if (preempt && rq != p_rq)
			resched_task(p_rq->curr);
	}

out_unlock:
	double_rq_unlock(rq, p_rq);
out_irq:
	local_irq_restore(flags);

	if (yielded > 0)
		schedule();

	return yielded;
}
EXPORT_SYMBOL_GPL(yield_to);

/*
 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
 * that process accounting knows that this is a task in IO wait state.
 */
void __sched io_schedule(void)
{
	struct rq *rq = raw_rq();

	delayacct_blkio_start();
	atomic_inc(&rq->nr_iowait);
	/* Flush any plugged block requests before we go to sleep. */
	blk_flush_plug(current);
	current->in_iowait = 1;
	schedule();
	current->in_iowait = 0;
	atomic_dec(&rq->nr_iowait);
	delayacct_blkio_end();
}
EXPORT_SYMBOL(io_schedule);

long __sched io_schedule_timeout(long timeout)
{
	struct rq *rq = raw_rq();
	long ret;

	delayacct_blkio_start();
	atomic_inc(&rq->nr_iowait);
	blk_flush_plug(current);
	current->in_iowait = 1;
	ret = schedule_timeout(timeout);
	current->in_iowait = 0;
	atomic_dec(&rq->nr_iowait);
	delayacct_blkio_end();
	return ret;
}
EXPORT_SYMBOL(io_schedule_timeout);

/**
 * sys_sched_get_priority_max - return maximum RT priority.
 * @policy: scheduling class.
 *
 * this syscall returns the maximum rt_priority that can be used
 * by a given scheduling class.
 */
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = MAX_USER_RT_PRIO-1;
		break;
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
		ret = 0;
		break;
	}
	return ret;
}

/**
 * sys_sched_get_priority_min - return minimum RT priority.
 * @policy: scheduling class.
 *
 * this syscall returns the minimum rt_priority that can be used
 * by a given scheduling class.
 */
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = 1;
		break;
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
		ret = 0;
		/* last case: no break needed */
	}
	return ret;
}

/**
 * sys_sched_rr_get_interval - return the default timeslice of a process.
 * @pid: pid of the process.
 * @interval: userspace pointer to the timeslice value.
 *
 * this syscall writes the default timeslice value of a given process
 * into the user-space timespec buffer. A value of '0' means infinity.
 */
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
		struct timespec __user *, interval)
{
	struct task_struct *p;
	unsigned int time_slice;
	unsigned long flags;
	struct rq *rq;
	int retval;
	struct timespec t;

	if (pid < 0)
		return -EINVAL;

	retval = -ESRCH;
	rcu_read_lock();
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	rq = task_rq_lock(p, &flags);
	time_slice = p->sched_class->get_rr_interval(rq, p);
	task_rq_unlock(rq, p, &flags);

	rcu_read_unlock();
	jiffies_to_timespec(time_slice, &t);
	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
	return retval;

out_unlock:
	rcu_read_unlock();
	return retval;
}

static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;

void sched_show_task(struct task_struct *p)
{
	unsigned long free = 0;
	int ppid;
	unsigned state;

	/* Map the lowest set state bit to an index into stat_nam (0 = running). */
	state = p->state ?
 __ffs(p->state) + 1 : 0;
	printk(KERN_INFO "%-15.15s %c", p->comm,
		state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
#if BITS_PER_LONG == 32
	if (state == TASK_RUNNING)
		printk(KERN_CONT " running ");
	else
		printk(KERN_CONT " %08lx ", thread_saved_pc(p));
#else
	if (state == TASK_RUNNING)
		printk(KERN_CONT " running task ");
	else
		printk(KERN_CONT " %016lx ", thread_saved_pc(p));
#endif
#ifdef CONFIG_DEBUG_STACK_USAGE
	free = stack_not_used(p);
#endif
	/* real_parent is RCU-protected; snapshot the ppid under the lock. */
	rcu_read_lock();
	ppid = task_pid_nr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
		task_pid_nr(p), ppid,
		(unsigned long)task_thread_info(p)->flags);

	print_worker_info(KERN_INFO, p);
	show_stack(p, NULL);
}

void show_state_filter(unsigned long state_filter)
{
	struct task_struct *g, *p;

#if BITS_PER_LONG == 32
	printk(KERN_INFO " task PC stack pid father\n");
#else
	printk(KERN_INFO " task PC stack pid father\n");
#endif
	rcu_read_lock();
	do_each_thread(g, p) {
		/*
		 * reset the NMI-timeout, listing all files on a slow
		 * console might take a lot of time:
		 */
		touch_nmi_watchdog();
		if (!state_filter || (p->state & state_filter))
			sched_show_task(p);
	} while_each_thread(g, p);

	touch_all_softlockup_watchdogs();

#ifdef CONFIG_SYSRQ_SCHED_DEBUG
	if (!state_filter)
		sysrq_sched_debug_show();
#endif
	rcu_read_unlock();
	/*
	 * Only show locks if all tasks are dumped:
	 */
	if (!state_filter)
		debug_show_all_locks();
}

void __cpuinit init_idle_bootup_task(struct task_struct *idle)
{
	idle->sched_class = &idle_sched_class;
}

/**
 * init_idle - set up an idle thread for a given CPU
 * @idle: task in question
 * @cpu: cpu the idle task belongs to
 *
 * NOTE: this function does not set the idle thread's NEED_RESCHED
 * flag, to make booting more robust.
 */
void __cpuinit init_idle(struct task_struct *idle, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;
	u64 mark_start;

	raw_spin_lock_irqsave(&rq->lock, flags);

	/* __sched_fork() resets mark_start; save it so it can be restored. */
	mark_start = orig_mark_start(idle);
	__sched_fork(idle);
	/*
	 * Restore idle thread's original mark_start as we rely on it being
	 * correct for maintaining per-cpu counters, curr/prev_runnable_sum.
	 */
	restore_orig_mark_start(idle, mark_start);
	idle->state = TASK_RUNNING;
	idle->se.exec_start = sched_clock();

	do_set_cpus_allowed(idle, cpumask_of(cpu));
	/*
	 * We're having a chicken and egg problem, even though we are
	 * holding rq->lock, the cpu isn't yet set to this cpu so the
	 * lockdep check in task_group() will fail.
	 *
	 * Similar case to sched_fork(). / Alternatively we could
	 * use task_rq_lock() here and obtain the other rq->lock.
	 *
	 * Silence PROVE_RCU
	 */
	rcu_read_lock();
	__set_task_cpu(idle, cpu);
	rcu_read_unlock();

	rq->curr = rq->idle = idle;
#if defined(CONFIG_SMP)
	idle->on_cpu = 1;
#endif
	raw_spin_unlock_irqrestore(&rq->lock, flags);

	/* Set the preempt count _outside_ the spinlocks! */
	task_thread_info(idle)->preempt_count = 0;

	/*
	 * The idle tasks have their own, simple scheduling class:
	 */
	idle->sched_class = &idle_sched_class;
	ftrace_graph_init_idle_task(idle, cpu);
	vtime_init_idle(idle, cpu);
#if defined(CONFIG_SMP)
	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
#endif
}

#ifdef CONFIG_SMP
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
	/* Let the scheduling class react to the affinity change first. */
	if (p->sched_class && p->sched_class->set_cpus_allowed)
		p->sched_class->set_cpus_allowed(p, new_mask);

	cpumask_copy(&p->cpus_allowed, new_mask);
	p->nr_cpus_allowed = cpumask_weight(new_mask);
}

/*
 * This is how migration works:
 *
 * 1) we invoke migration_cpu_stop() on the target CPU using
 *    stop_one_cpu().
 * 2) stopper starts to run (implicitly forcing the migrated thread
 *    off the CPU)
 * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 5) stopper completes and stop_one_cpu() returns and the migration
 *    is done.
 */

/*
 * Change a given task's CPU affinity. Migrate the thread to a
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
 * task must not exit() & deallocate itself prematurely. The
 * call is not atomic; no spinlocks may be held.
 */
int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	unsigned long flags;
	struct rq *rq;
	unsigned int dest_cpu;
	int ret = 0;

	rq = task_rq_lock(p, &flags);

	/* No change requested: nothing to do. */
	if (cpumask_equal(&p->cpus_allowed, new_mask))
		goto out;

	/* Refuse a mask that leaves the task with no active CPU to run on. */
	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
		ret = -EINVAL;
		goto out;
	}

	do_set_cpus_allowed(p, new_mask);

	/* Can the task run on the task's current CPU? If so, we're done */
	if (cpumask_test_cpu(task_cpu(p), new_mask))
		goto out;

	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
	if (p->on_rq) {
		struct migration_arg arg = { p, dest_cpu };
		/* Need help from migration thread: drop lock and wait. */
		task_rq_unlock(rq, p, &flags);
		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
		tlb_migrate_finish(p->mm);
		return 0;
	}
out:
	task_rq_unlock(rq, p, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);

/*
 * Move (not current) task off this cpu, onto dest cpu. We're doing
 * this because either it can't run here any more (set_cpus_allowed()
 * away from this CPU, or CPU going down), or because we're
 * attempting to rebalance this task on exec (sched_exec).
 *
 * So we race with normal scheduler movements, but that's OK, as long
 * as the task is no longer on this CPU.
 *
 * Returns non-zero if task was successfully migrated.
 */
static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
{
	struct rq *rq_dest, *rq_src;
	bool moved = false;
	int ret = 0;

	if (unlikely(!cpu_active(dest_cpu)))
		return ret;

	rq_src = cpu_rq(src_cpu);
	rq_dest = cpu_rq(dest_cpu);

	/* pi_lock then both rq locks: same order as task_rq_lock(). */
	raw_spin_lock(&p->pi_lock);
	double_rq_lock(rq_src, rq_dest);
	/* Already moved. */
	if (task_cpu(p) != src_cpu)
		goto done;
	/* Affinity changed (again). */
	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
		goto fail;

	/*
	 * If we're not on a rq, the next wake-up will ensure we're
	 * placed properly.
	 */
	if (p->on_rq) {
		dequeue_task(rq_src, p, 0);
		set_task_cpu(p, dest_cpu);
		enqueue_task(rq_dest, p, 0);
		check_preempt_curr(rq_dest, p, 0);
		moved = true;
	}
done:
	ret = 1;
fail:
	double_rq_unlock(rq_src, rq_dest);
	raw_spin_unlock(&p->pi_lock);

	/* Vendor (HMP) hook: re-evaluate cpufreq when crossing freq domains. */
	if (moved && !same_freq_domain(src_cpu, dest_cpu)) {
		check_for_freq_change(rq_src);
		check_for_freq_change(rq_dest);
	}

	/* Notify interested listeners (after all locks are dropped). */
	if (moved && task_notify_on_migrate(p)) {
		struct migration_notify_data mnd;

		mnd.src_cpu = src_cpu;
		mnd.dest_cpu = dest_cpu;
		mnd.load = pct_task_load(p);
		atomic_notifier_call_chain(&migration_notifier_head,
					   0, (void *)&mnd);
	}
	return ret;
}

/*
 * migration_cpu_stop - this will be executed by a highprio stopper thread
 * and performs thread migration by bumping thread off CPU then
 * 'pushing' onto another runqueue.
 */
static int migration_cpu_stop(void *data)
{
	struct migration_arg *arg = data;

	/*
	 * The original target cpu might have gone down and we might
	 * be on another cpu but it doesn't matter.
	 */
	local_irq_disable();
	__migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
	local_irq_enable();
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Ensures that the idle task is using init_mm right before its cpu goes
 * offline.
 */
void idle_task_exit(void)
{
	struct mm_struct *mm = current->active_mm;

	BUG_ON(cpu_online(smp_processor_id()));

	if (mm != &init_mm)
		switch_mm(mm, &init_mm, current);
	mmdrop(mm);
}

/*
 * Since this CPU is going 'away' for a while, fold any nr_active delta
 * we might have. Assumes we're called after migrate_tasks() so that the
 * nr_active count is stable.
 *
 * Also see the comment "Global load-average calculations".
 */
static void calc_load_migrate(struct rq *rq)
{
	long delta = calc_load_fold_active(rq);
	if (delta)
		atomic_long_add(delta, &calc_load_tasks);
}

/*
 * Migrate all tasks from the rq, sleeping tasks will be migrated by
 * try_to_wake_up()->select_task_rq().
 *
 * Called with rq->lock held even though we'er in stop_machine() and
 * there's no concurrency possible, we hold the required locks anyway
 * because of lock validation efforts.
 */
static void migrate_tasks(unsigned int dead_cpu)
{
	struct rq *rq = cpu_rq(dead_cpu);
	struct task_struct *next, *stop = rq->stop;
	int dest_cpu;

	/*
	 * Fudge the rq selection such that the below task selection loop
	 * doesn't get stuck on the currently eligible stop task.
	 *
	 * We're currently inside stop_machine() and the rq is either stuck
	 * in the stop_machine_cpu_stop() loop, or we're executing this code,
	 * either way we should never end up calling schedule() until we're
	 * done here.
	 */
	rq->stop = NULL;

	for ( ; ; ) {
		/*
		 * There's this thread running, bail when that's the only
		 * remaining thread.
		 */
		if (rq->nr_running == 1)
			break;

		next = pick_next_task(rq);
		BUG_ON(!next);
		next->sched_class->put_prev_task(rq, next);

		/* Find suitable destination for @next, with force if needed. */
		dest_cpu = select_fallback_rq(dead_cpu, next);
		/*
		 * __migrate_task() takes both rq locks itself, so drop ours
		 * around the call and re-take it for the next iteration.
		 */
		raw_spin_unlock(&rq->lock);

		__migrate_task(next, dead_cpu, dest_cpu);

		raw_spin_lock(&rq->lock);
	}

	rq->stop = stop;
}

#endif /* CONFIG_HOTPLUG_CPU */

#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)

/* /proc/sys/kernel/sched_domain directory skeleton. */
static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};

/* Allocate a zeroed ctl_table array of @n entries (last one terminates). */
static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	struct ctl_table *entry =
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

	return entry;
}

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;

	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}

/* Clamp range for the *_idx sysctls below (see CPU_LOAD_IDX_MAX). */
static int min_load_idx = 0;
static int max_load_idx = CPU_LOAD_IDX_MAX-1;

static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler,
		bool load_idx)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;

	/* load-index entries get the [min_load_idx, max_load_idx] clamp */
	if (load_idx) {
		entry->extra1 = &min_load_idx;
		entry->extra2 = &max_load_idx;
	}
}

/* Build the per-domain sysctl table (12 tunables + terminator). */
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(13);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0], "min_interval", &sd->min_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[1], "max_interval", &sd->max_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[2], "busy_idx",
		&sd->busy_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[9], "cache_nice_tries",
		&sd->cache_nice_tries,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[10], "flags", &sd->flags,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[11], "name", sd->name,
		CORENAME_MAX_SIZE, 0444, proc_dostring, false);
	/* &table[12] is terminator */

	return table;
}

/*
 * Build the "cpuN" sysctl directory: one "domainM" child per sched domain.
 * NOTE(review): the bare 'ctl_table' return type relies on the legacy
 * typedef; modern kernels spell this 'struct ctl_table *' — confirm before
 * forward-porting.
 */
static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}

static struct ctl_table_header *sd_sysctl_header;

static void register_sched_domain_sysctl(void)
{
	int i, cpu_num = num_possible_cpus();
	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
	char buf[32];

	WARN_ON(sd_ctl_dir[0].child);
	sd_ctl_dir[0].child = entry;

	if (entry == NULL)
		return;

	for_each_possible_cpu(i) {
		snprintf(buf, 32, "cpu%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_cpu_table(i);
		entry++;
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}

/* may be called multiple times per register */
static void unregister_sched_domain_sysctl(void)
{
	if (sd_sysctl_header)
		unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
	if (sd_ctl_dir[0].child)
		sd_free_ctl_entry(&sd_ctl_dir[0].child);
}
#else
static void register_sched_domain_sysctl(void)
{
}
static void unregister_sched_domain_sysctl(void)
{
}
#endif

/* Mark @rq online in its root domain and notify the scheduling classes. */
static void set_rq_online(struct rq *rq)
{
	if (!rq->online) {
		const struct sched_class *class;

		cpumask_set_cpu(rq->cpu, rq->rd->online);
		rq->online = 1;

		for_each_class(class) {
			if (class->rq_online)
				class->rq_online(rq);
		}
	}
}

/* Mirror of set_rq_online(): classes are notified before the bit clears. */
static void set_rq_offline(struct rq *rq)
{
	if (rq->online) {
		const struct sched_class *class;

		for_each_class(class) {
			if (class->rq_offline)
				class->rq_offline(rq);
		}

		cpumask_clear_cpu(rq->cpu, rq->rd->online);
		rq->online = 0;
	}
}

/*
 * migration_call - callback that gets triggered when a CPU is added.
 * Here we can start up the necessary migration thread for the new CPU.
 */
static int __cpuinit
migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;
	unsigned long flags;
	struct rq *rq = cpu_rq(cpu);

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_UP_PREPARE:
		raw_spin_lock_irqsave(&rq->lock, flags);
		set_window_start(rq);
		raw_spin_unlock_irqrestore(&rq->lock, flags);
		rq->calc_load_update = calc_load_update;
		account_reset_rq(rq);
		break;

	case CPU_ONLINE:
		/* Update our root-domain */
		raw_spin_lock_irqsave(&rq->lock, flags);
		if (rq->rd) {
			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
			set_rq_online(rq);
		}
		raw_spin_unlock_irqrestore(&rq->lock, flags);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DYING:
		sched_ttwu_pending();
		/* Update our root-domain */
		raw_spin_lock_irqsave(&rq->lock, flags);
		migrate_sync_cpu(cpu);

		if (rq->rd) {
			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
			set_rq_offline(rq);
		}
		migrate_tasks(cpu);
		BUG_ON(rq->nr_running != 1); /* the migration thread */
		raw_spin_unlock_irqrestore(&rq->lock, flags);
		break;

	case CPU_DEAD:
		clear_hmp_request(cpu);
		calc_load_migrate(rq);
		break;
#endif
	}

	update_max_interval();

	return NOTIFY_OK;
}

/*
 * Register at high priority so that task migration (migrate_all_tasks)
 * happens before everything else. This has to be lower priority than
 * the notifier in the perf_event subsystem, though.
 */
static struct notifier_block __cpuinitdata migration_notifier = {
	.notifier_call = migration_call,
	.priority = CPU_PRI_MIGRATION,
};

/* Re-activate a CPU whose offlining was aborted. */
static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
				      unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_FAILED:
		set_cpu_active((long)hcpu, true);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

/* Deactivate a CPU early in the offline path so nothing new lands on it. */
static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		set_cpu_active((long)hcpu, false);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static int __init migration_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err;

	/* Initialize migration for the boot CPU */
	err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
	BUG_ON(err == NOTIFY_BAD);
	migration_call(&migration_notifier, CPU_ONLINE, cpu);
	register_cpu_notifier(&migration_notifier);

	/* Register cpu active notifiers */
	cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
	cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);

	return 0;
}
early_initcall(migration_init);
#endif

#ifdef CONFIG_SMP

static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */

#ifdef CONFIG_SCHED_DEBUG

static __read_mostly int sched_debug_enabled;

/* "sched_debug" on the kernel command line enables domain debug output. */
static int __init sched_debug_setup(char *str)
{
	sched_debug_enabled = 1;

	return 0;
}
early_param("sched_debug", sched_debug_setup);

static inline bool sched_debug(void)
{
	return sched_debug_enabled;
}

/*
 * Print one level of the sched-domain hierarchy for @cpu and sanity-check
 * its group list.  Returns -1 when traversal should stop at this level.
 */
static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
				  struct cpumask *groupmask)
{
	struct sched_group *group = sd->groups;
	char str[256];

	cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
	cpumask_clear(groupmask);

	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);

	if (!(sd->flags & SD_LOAD_BALANCE)) {
		printk("does not load-balance\n");
		if (sd->parent)
			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
					" has parent");
		return -1;
	}
	printk(KERN_CONT "span %s level %s\n", str, sd->name);

	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
		printk(KERN_ERR "ERROR: domain->span does not contain "
				"CPU%d\n", cpu);
	}
	if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
		printk(KERN_ERR "ERROR: domain->groups does not contain"
				" CPU%d\n", cpu);
	}

	printk(KERN_DEBUG "%*s groups:", level + 1, "");
	do {
		if (!group) {
			printk("\n");
			printk(KERN_ERR "ERROR: group is NULL\n");
			break;
		}

		/*
		 * Even though we initialize ->power to something semi-sane,
		 * we leave power_orig unset. This allows us to detect if
		 * domain iteration is still funny without causing /0 traps.
		 */
		if (!group->sgp->power_orig) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: domain->cpu_power not "
					"set\n");
			break;
		}

		if (!cpumask_weight(sched_group_cpus(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: empty group\n");
			break;
		}

		/* Overlapping domains legitimately share CPUs between groups. */
		if (!(sd->flags & SD_OVERLAP) &&
		    cpumask_intersects(groupmask, sched_group_cpus(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: repeated CPUs\n");
			break;
		}

		cpumask_or(groupmask, groupmask, sched_group_cpus(group));

		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));

		printk(KERN_CONT " %s", str);

		if (group->sgp->power != SCHED_POWER_SCALE) {
			printk(KERN_CONT " (cpu_power = %d)",
				group->sgp->power);
		}

		group = group->next;
	} while (group != sd->groups);
	printk(KERN_CONT "\n");

	if (!cpumask_equal(sched_domain_span(sd), groupmask))
		printk(KERN_ERR "ERROR: groups don't span domain->span\n");

	if (sd->parent &&
	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
		printk(KERN_ERR "ERROR: parent span is not a superset "
			"of domain->span\n");
	return 0;
}

/* Walk and print the whole domain hierarchy of @cpu, bottom-up. */
static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
	int level = 0;

	if (!sched_debug_enabled)
		return;

	if (!sd) {
		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
		return;
	}

	printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);

	for (;;) {
		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
			break;
		level++;
		sd = sd->parent;
		if (!sd)
			break;
	}
}
#else /* !CONFIG_SCHED_DEBUG */
# define sched_domain_debug(sd, cpu) do { } while (0)
static inline bool sched_debug(void)
{
	return false;
}
#endif /* CONFIG_SCHED_DEBUG */

/*
 * A domain is degenerate (and can be collapsed out of the hierarchy) when
 * it spans a single CPU or none of its balancing flags need its groups.
 */
static int sd_degenerate(struct sched_domain *sd)
{
	if (cpumask_weight(sched_domain_span(sd)) == 1)
		return 1;

	/* Following flags need at least 2 groups */
	if (sd->flags & (SD_LOAD_BALANCE |
			 SD_BALANCE_NEWIDLE |
			 SD_BALANCE_FORK |
			 SD_BALANCE_EXEC |
			 SD_SHARE_CPUPOWER |
			 SD_SHARE_PKG_RESOURCES)) {
		if (sd->groups != sd->groups->next)
			return 0;
	}

	/* Following flags don't use groups */
	if (sd->flags & (SD_WAKE_AFFINE))
		return 0;

	return 1;
}

/* Can @parent be removed because @sd already covers everything it does? */
static int
sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
{
	unsigned long cflags = sd->flags, pflags = parent->flags;

	if (sd_degenerate(parent))
		return 1;

	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
		return 0;

	/* Flags needing groups don't count if only 1 group in parent */
	if (parent->groups == parent->groups->next) {
		pflags &= ~(SD_LOAD_BALANCE |
				SD_BALANCE_NEWIDLE |
				SD_BALANCE_FORK |
				SD_BALANCE_EXEC |
				SD_SHARE_CPUPOWER |
				SD_SHARE_PKG_RESOURCES);
		if (nr_node_ids == 1)
			pflags &= ~SD_SERIALIZE;
	}
	/* Parent must not provide any flag the child lacks. */
	if (~cflags & pflags)
		return 0;

	return 1;
}

/* RCU callback: release a root_domain and everything it owns. */
static void free_rootdomain(struct rcu_head *rcu)
{
	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);

	cpupri_cleanup(&rd->cpupri);
	free_cpumask_var(rd->rto_mask);
	free_cpumask_var(rd->online);
	free_cpumask_var(rd->span);
	kfree(rd);
}

/* Move @rq from its current root domain (if any) into @rd. */
static void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	if (rq->rd) {
		old_rd = rq->rd;

		if (cpumask_test_cpu(rq->cpu, old_rd->online))
			set_rq_offline(rq);

		cpumask_clear_cpu(rq->cpu, old_rd->span);

		/*
		 * If we dont want to free the old_rt yet then
		 * set old_rd to NULL to skip the freeing later
		 * in this function:
		 */
		if (!atomic_dec_and_test(&old_rd->refcount))
			old_rd = NULL;
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;

	cpumask_set_cpu(rq->cpu, rd->span);
	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
		set_rq_online(rq);

	raw_spin_unlock_irqrestore(&rq->lock, flags);

	/* Last reference dropped above: free the old root domain after RCU. */
	if (old_rd)
		call_rcu_sched(&old_rd->rcu, free_rootdomain);
}

/*
 * Zero @rd and allocate its cpumasks and cpupri state.
 * Returns 0 on success, -ENOMEM after unwinding partial allocations.
 */
static int init_rootdomain(struct root_domain *rd)
{
	memset(rd, 0, sizeof(*rd));

	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
		goto out;
	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
		goto free_span;
	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
		goto free_online;

	if (cpupri_init(&rd->cpupri) != 0)
		goto free_rto_mask;
	return 0;

free_rto_mask:
	free_cpumask_var(rd->rto_mask);
free_online:
	free_cpumask_var(rd->online);
free_span:
	free_cpumask_var(rd->span);
out:
	return -ENOMEM;
}

/*
 * By default the system creates a single root-domain with all cpus as
 * members (mimicking the global state we have today).
 */
struct root_domain def_root_domain;

static void init_defrootdomain(void)
{
	init_rootdomain(&def_root_domain);

	atomic_set(&def_root_domain.refcount, 1);
}

static struct root_domain *alloc_rootdomain(void)
{
	struct root_domain *rd;

	rd = kmalloc(sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return NULL;

	if (init_rootdomain(rd) != 0) {
		kfree(rd);
		return NULL;
	}

	return rd;
}

/*
 * Free a circular group list; sched_group_power structs are refcounted
 * and only released with the last group referencing them (@free_sgp set).
 */
static void free_sched_groups(struct sched_group *sg, int free_sgp)
{
	struct sched_group *tmp, *first;

	if (!sg)
		return;

	first = sg;
	do {
		tmp = sg->next;

		if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
			kfree(sg->sgp);

		kfree(sg);
		sg = tmp;
	} while (sg != first);
}

static void free_sched_domain(struct rcu_head *rcu)
{
	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);

	/*
	 * If its an overlapping domain it has private groups, iterate and
	 * nuke them all.
	 */
	if (sd->flags & SD_OVERLAP) {
		free_sched_groups(sd->groups, 1);
	} else if (atomic_dec_and_test(&sd->groups->ref)) {
		kfree(sd->groups->sgp);
		kfree(sd->groups);
	}
	kfree(sd);
}

/* Defer the actual free until after an RCU grace period. */
static void destroy_sched_domain(struct sched_domain *sd, int cpu)
{
	call_rcu(&sd->rcu, free_sched_domain);
}

static void destroy_sched_domains(struct sched_domain *sd, int cpu)
{
	for (; sd; sd = sd->parent)
		destroy_sched_domain(sd, cpu);
}

/*
 * Keep a special pointer to the highest sched_domain that has
 * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this
 * allows us to avoid some pointer chasing select_idle_sibling().
 *
 * Also keep a unique ID per domain (we use the first cpu number in
 * the cpumask of the domain), this allows us to quickly tell if
 * two cpus are in the same cache domain, see cpus_share_cache().
 */
DEFINE_PER_CPU(struct sched_domain *, sd_llc);
DEFINE_PER_CPU(int, sd_llc_id);

static void update_top_cache_domain(int cpu)
{
	struct sched_domain *sd;
	int id = cpu;

	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
	if (sd)
		id = cpumask_first(sched_domain_span(sd));

	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
	per_cpu(sd_llc_id, cpu) = id;
}

/*
 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
 * hold the hotplug lock.
 */
static void
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct sched_domain *tmp;
	unsigned long next_balance = rq->next_balance;

	/* Remove the sched domains which do not contribute to scheduling.
	 */
	for (tmp = sd; tmp; ) {
		struct sched_domain *parent = tmp->parent;
		if (!parent)
			break;

		/* Splice out a degenerate parent and re-link the hierarchy. */
		if (sd_parent_degenerate(tmp, parent)) {
			tmp->parent = parent->parent;
			if (parent->parent)
				parent->parent->child = tmp;
			destroy_sched_domain(parent, cpu);
		} else
			tmp = tmp->parent;
	}

	/* The base level itself may be degenerate too. */
	if (sd && sd_degenerate(sd)) {
		tmp = sd;
		sd = sd->parent;
		destroy_sched_domain(tmp, cpu);
		if (sd)
			sd->child = NULL;
	}

	/* Pull next_balance forward to the earliest pending domain balance. */
	for (tmp = sd; tmp; ) {
		unsigned long interval;

		interval = msecs_to_jiffies(tmp->balance_interval);
		if (time_after(next_balance, tmp->last_balance + interval))
			next_balance = tmp->last_balance + interval;

		tmp = tmp->parent;
	}

	rq->next_balance = next_balance;

	sched_domain_debug(sd, cpu);

	rq_attach_root(rq, rd);
	tmp = rq->sd;
	rcu_assign_pointer(rq->sd, sd);
	destroy_sched_domains(tmp, cpu);

	update_top_cache_domain(cpu);
}

/* cpus with isolated domains */
static cpumask_var_t cpu_isolated_map;

/* Setup the mask of cpus configured for isolated domains */
static int __init isolated_cpu_setup(char *str)
{
	alloc_bootmem_cpumask_var(&cpu_isolated_map);
	cpulist_parse(str, cpu_isolated_map);
	return 1;
}
__setup("isolcpus=", isolated_cpu_setup);

static const struct cpumask *cpu_cpu_mask(int cpu)
{
	return cpumask_of_node(cpu_to_node(cpu));
}

/* Per-topology-level percpu storage used while building domains. */
struct sd_data {
	struct sched_domain **__percpu sd;
	struct sched_group **__percpu sg;
	struct sched_group_power **__percpu sgp;
};

struct s_data {
	struct sched_domain ** __percpu sd;
	struct root_domain	*rd;
};

/* Allocation progress states for __visit_domain_allocation_hell(). */
enum s_alloc {
	sa_rootdomain,
	sa_sd,
	sa_sd_storage,
	sa_none,
};

struct sched_domain_topology_level;

typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);

#define SDTL_OVERLAP	0x01

struct sched_domain_topology_level {
	sched_domain_init_f init;
	sched_domain_mask_f mask;
	int		    flags;
	int		    numa_level;
	struct sd_data      data;
};

/*
 * Build an iteration mask that can exclude certain CPUs from the upwards
 * domain traversal.
 *
 * Asymmetric node setups can result in situations where the domain tree is of
 * unequal depth, make sure to skip domains that already cover the entire
 * range.
 *
 * In that case build_sched_domains() will have terminated the iteration early
 * and our sibling sd spans will be empty. Domains should always include the
 * cpu they're built on, so check that.
 */
static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
{
	const struct cpumask *span = sched_domain_span(sd);
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	for_each_cpu(i, span) {
		sibling = *per_cpu_ptr(sdd->sd, i);
		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
			continue;

		cpumask_set_cpu(i, sched_group_mask(sg));
	}
}

/*
 * Return the canonical balance cpu for this group, this is the first cpu
 * of this group that's also in the iteration mask.
 */
int group_balance_cpu(struct sched_group *sg)
{
	return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
}

static int
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered = sched_domains_tmpmask;
	struct sd_data *sdd = sd->private;
	struct sched_domain *child;
	int i;

	cpumask_clear(covered);

	for_each_cpu(i, span) {
		struct cpumask *sg_span;

		if (cpumask_test_cpu(i, covered))
			continue;

		child = *per_cpu_ptr(sdd->sd, i);

		/* See the comment near build_group_mask().
		 */
		if (!cpumask_test_cpu(i, sched_domain_span(child)))
			continue;

		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
				GFP_KERNEL, cpu_to_node(cpu));
		if (!sg)
			goto fail;

		sg_span = sched_group_cpus(sg);
		if (child->child) {
			child = child->child;
			cpumask_copy(sg_span, sched_domain_span(child));
		} else
			cpumask_set_cpu(i, sg_span);

		cpumask_or(covered, covered, sg_span);

		/* First reference to this sgp builds its balance mask. */
		sg->sgp = *per_cpu_ptr(sdd->sgp, i);
		if (atomic_inc_return(&sg->sgp->ref) == 1)
			build_group_mask(sd, sg);

		/*
		 * Initialize sgp->power such that even if we mess up the
		 * domains and no possible iteration will get us here, we won't
		 * die on a /0 trap.
		 */
		sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);

		/*
		 * Make sure the first group of this domain contains the
		 * canonical balance cpu. Otherwise the sched_domain iteration
		 * breaks. See update_sg_lb_stats().
		 */
		if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
		    group_balance_cpu(sg) == cpu)
			groups = sg;

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
		last->next = first;
	}
	sd->groups = groups;

	return 0;

fail:
	free_sched_groups(first, 0);
	return -ENOMEM;
}

/*
 * Look up (and optionally return through @sg) the pre-allocated group for
 * @cpu at this level; the representative CPU of the child span is used.
 */
static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
{
	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
	struct sched_domain *child = sd->child;

	if (child)
		cpu = cpumask_first(sched_domain_span(child));

	if (sg) {
		*sg = *per_cpu_ptr(sdd->sg, cpu);
		(*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
		atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
	}

	return cpu;
}

/*
 * build_sched_groups will build a circular linked list of the groups
 * covered by the given span, and will set each group's ->cpumask correctly,
 * and ->cpu_power to 0.
 *
 * Assumes the sched_domain tree is fully constructed
 */
static int
build_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL;
	struct sd_data *sdd = sd->private;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered;
	int i;

	get_group(cpu, sdd, &sd->groups);
	atomic_inc(&sd->groups->ref);

	/* Only the first CPU of the span builds the shared group list. */
	if (cpu != cpumask_first(sched_domain_span(sd)))
		return 0;

	lockdep_assert_held(&sched_domains_mutex);
	covered = sched_domains_tmpmask;

	cpumask_clear(covered);

	for_each_cpu(i, span) {
		struct sched_group *sg;
		int group = get_group(i, sdd, &sg);
		int j;

		if (cpumask_test_cpu(i, covered))
			continue;

		cpumask_clear(sched_group_cpus(sg));
		sg->sgp->power = 0;
		cpumask_setall(sched_group_mask(sg));

		/* Collect every CPU that maps to the same group. */
		for_each_cpu(j, span) {
			if (get_group(j, sdd, NULL) != group)
				continue;

			cpumask_set_cpu(j, covered);
			cpumask_set_cpu(j, sched_group_cpus(sg));
		}

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
	}
	last->next = first;

	return 0;
}

/*
 * Initialize sched groups cpu_power.
 *
 * cpu_power indicates the capacity of sched group, which is used while
 * distributing the load between different sched groups in a sched domain.
 * Typically cpu_power for all the groups in a sched domain will be same unless
 * there are asymmetries in the topology. If there are asymmetries, group
 * having more cpu_power will pickup more load compared to the group having
 * less cpu_power.
 */
static void init_sched_groups_power(int cpu, struct sched_domain *sd)
{
	struct sched_group *sg = sd->groups;

	WARN_ON(!sd || !sg);

	do {
		sg->group_weight = cpumask_weight(sched_group_cpus(sg));
		sg = sg->next;
	} while (sg != sd->groups);

	/* Only the group's canonical balance CPU updates the power figures. */
	if (cpu != group_balance_cpu(sg))
		return;

	update_group_power(sd, cpu);
	atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
}

/* Arch hook: default is "no asymmetric SMT packing". */
int __weak arch_sd_sibling_asym_packing(void)
{
	return 0*SD_ASYM_PACKING;
}

/*
 * Initializers for schedule domains
 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
 */

#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(sd, type)		sd->name = #type
#else
# define SD_INIT_NAME(sd, type)		do { } while (0)
#endif

#define SD_INIT_FUNC(type)						\
static noinline struct sched_domain *					\
sd_init_##type(struct sched_domain_topology_level *tl, int cpu)		\
{									\
	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);	\
	*sd = SD_##type##_INIT;						\
	SD_INIT_NAME(sd, type);						\
	sd->private = &tl->data;					\
	return sd;							\
}

SD_INIT_FUNC(CPU)
#ifdef CONFIG_SCHED_SMT
 SD_INIT_FUNC(SIBLING)
#endif
#ifdef CONFIG_SCHED_MC
 SD_INIT_FUNC(MC)
#endif
#ifdef CONFIG_SCHED_BOOK
 SD_INIT_FUNC(BOOK)
#endif

static int default_relax_domain_level = -1;
int sched_domain_level_max;

/* "relax_domain_level=" boot parameter parser. */
static int __init setup_relax_domain_level(char *str)
{
	if (kstrtoint(str, 0, &default_relax_domain_level))
		pr_warn("Unable to set relax_domain_level\n");

	return 1;
}
__setup("relax_domain_level=", setup_relax_domain_level);

/*
 * Enable or disable newidle/wake balancing on @sd depending on the
 * requested relax level (from @attr or the boot-time default).
 */
static void set_domain_attribute(struct sched_domain *sd,
				 struct sched_domain_attr *attr)
{
	int request;

	if (!attr || attr->relax_domain_level < 0) {
		if (default_relax_domain_level < 0)
			return;
		else
			request = default_relax_domain_level;
	} else
		request = attr->relax_domain_level;
	if (request < sd->level) {
		/* turn off idle balance on this domain */
		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
	} else {
		/* turn on idle balance on this domain */
		sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
	}
}

static void __sdt_free(const struct cpumask *cpu_map);
static int __sdt_alloc(const struct cpumask *cpu_map);

/* Tear down whatever __visit_domain_allocation_hell() managed to allocate. */
static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
				 const struct cpumask *cpu_map)
{
	switch (what) {
	case sa_rootdomain:
		if (!atomic_read(&d->rd->refcount))
			free_rootdomain(&d->rd->rcu); /* fall through */
	case sa_sd:
		free_percpu(d->sd); /* fall through */
	case sa_sd_storage:
		__sdt_free(cpu_map); /* fall through */
	case sa_none:
		break;
	}
}

/*
 * Allocate everything build_sched_domains() needs; the return value tells
 * __free_domain_allocs() how far we got.
 */
static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
						   const struct cpumask *cpu_map)
{
	memset(d, 0, sizeof(*d));

	if (__sdt_alloc(cpu_map))
		return sa_sd_storage;
	d->sd = alloc_percpu(struct sched_domain *);
	if (!d->sd)
		return sa_sd_storage;
	d->rd = alloc_rootdomain();
	if (!d->rd)
		return sa_sd;
	return sa_rootdomain;
}

/*
 * NULL the sd_data elements we've used to build the sched_domain and
 * sched_group structure so that the subsequent __free_domain_allocs()
 * will not free the data we're using.
 */
static void claim_allocations(int cpu, struct sched_domain *sd)
{
	struct sd_data *sdd = sd->private;

	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
	*per_cpu_ptr(sdd->sd, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
		*per_cpu_ptr(sdd->sg, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
		*per_cpu_ptr(sdd->sgp, cpu) = NULL;
}

#ifdef CONFIG_SCHED_SMT
static const struct cpumask *cpu_smt_mask(int cpu)
{
	return topology_thread_cpumask(cpu);
}
#endif

/*
 * Topology list, bottom-up.
*/ static struct sched_domain_topology_level default_topology[] = { #ifdef CONFIG_SCHED_SMT { sd_init_SIBLING, cpu_smt_mask, }, #endif #ifdef CONFIG_SCHED_MC { sd_init_MC, cpu_coregroup_mask, }, #endif #ifdef CONFIG_SCHED_BOOK { sd_init_BOOK, cpu_book_mask, }, #endif { sd_init_CPU, cpu_cpu_mask, }, { NULL, }, }; static struct sched_domain_topology_level *sched_domain_topology = default_topology; #ifdef CONFIG_NUMA static int sched_domains_numa_levels; static int *sched_domains_numa_distance; static struct cpumask ***sched_domains_numa_masks; static int sched_domains_curr_level; static inline int sd_local_flags(int level) { if (sched_domains_numa_distance[level] > RECLAIM_DISTANCE) return 0; return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE; } static struct sched_domain * sd_numa_init(struct sched_domain_topology_level *tl, int cpu) { struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); int level = tl->numa_level; int sd_weight = cpumask_weight( sched_domains_numa_masks[level][cpu_to_node(cpu)]); *sd = (struct sched_domain){ .min_interval = sd_weight, .max_interval = 2*sd_weight, .busy_factor = 32, .imbalance_pct = 125, .cache_nice_tries = 2, .busy_idx = 3, .idle_idx = 2, .newidle_idx = 0, .wake_idx = 0, .forkexec_idx = 0, .flags = 1*SD_LOAD_BALANCE | 1*SD_BALANCE_NEWIDLE | 0*SD_BALANCE_EXEC | 0*SD_BALANCE_FORK | 0*SD_BALANCE_WAKE | 0*SD_WAKE_AFFINE | 0*SD_SHARE_CPUPOWER | 0*SD_SHARE_PKG_RESOURCES | 1*SD_SERIALIZE | 0*SD_PREFER_SIBLING | sd_local_flags(level) , .last_balance = jiffies, .balance_interval = sd_weight, }; SD_INIT_NAME(sd, NUMA); sd->private = &tl->data; /* * Ugly hack to pass state to sd_numa_mask()... 
	 */
	sched_domains_curr_level = tl->numa_level;

	return sd;
}

/*
 * Span of the current NUMA level for @cpu; the level itself is passed
 * out-of-band through sched_domains_curr_level (see sd_numa_init()).
 */
static const struct cpumask *sd_numa_mask(int cpu)
{
	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
}

/*
 * One-shot warning: print @str and dump the full node-distance table
 * the first time a NUMA topology anomaly is detected.
 */
static void sched_numa_warn(const char *str)
{
	static int done = false;
	int i,j;

	if (done)
		return;

	done = true;

	printk(KERN_WARNING "ERROR: %s\n\n", str);

	for (i = 0; i < nr_node_ids; i++) {
		printk(KERN_WARNING " ");
		for (j = 0; j < nr_node_ids; j++)
			printk(KERN_CONT "%02d ", node_distance(i,j));
		printk(KERN_CONT "\n");
	}
	printk(KERN_WARNING "\n");
}

/* Is @distance one of the unique distances recorded during init? */
static bool find_numa_distance(int distance)
{
	int i;

	if (distance == node_distance(0, 0))
		return true;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		if (sched_domains_numa_distance[i] == distance)
			return true;
	}

	return false;
}

/*
 * Discover the unique NUMA distances on this machine, build a cpumask
 * per (level, node) covering all nodes within that distance, and append
 * one topology level per distance to the default topology.  Any
 * allocation failure leaves sched_domains_numa_levels at 0 so partially
 * built arrays are never walked.
 */
static void sched_init_numa(void)
{
	int next_distance, curr_distance = node_distance(0, 0);
	struct sched_domain_topology_level *tl;
	int level = 0;
	int i, j, k;

	sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
	if (!sched_domains_numa_distance)
		return;

	/*
	 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
	 * unique distances in the node_distance() table.
	 *
	 * Assumes node_distance(0,j) includes all distances in
	 * node_distance(i,j) in order to avoid cubic time.
	 */
	next_distance = curr_distance;
	for (i = 0; i < nr_node_ids; i++) {
		for (j = 0; j < nr_node_ids; j++) {
			for (k = 0; k < nr_node_ids; k++) {
				int distance = node_distance(i, k);

				if (distance > curr_distance &&
				    (distance < next_distance ||
				     next_distance == curr_distance))
					next_distance = distance;

				/*
				 * While not a strong assumption it would be nice to know
				 * about cases where if node A is connected to B, B is not
				 * equally connected to A.
				 */
				if (sched_debug() && node_distance(k, i) != distance)
					sched_numa_warn("Node-distance not symmetric");

				if (sched_debug() && i && !find_numa_distance(distance))
					sched_numa_warn("Node-0 not representative");
			}
			if (next_distance != curr_distance) {
				sched_domains_numa_distance[level++] = next_distance;
				sched_domains_numa_levels = level;
				curr_distance = next_distance;
			} else break;
		}

		/*
		 * In case of sched_debug() we verify the above assumption.
		 */
		if (!sched_debug())
			break;
	}

	/*
	 * 'level' contains the number of unique distances, excluding the
	 * identity distance node_distance(i,i).
	 *
	 * The sched_domains_numa_distance[] array includes the actual distance
	 * numbers.
	 */

	/*
	 * Here, we should temporarily reset sched_domains_numa_levels to 0.
	 * If it fails to allocate memory for array sched_domains_numa_masks[][],
	 * the array will contain less then 'level' members. This could be
	 * dangerous when we use it to iterate array sched_domains_numa_masks[][]
	 * in other functions.
	 *
	 * We reset it to 'level' at the end of this function.
	 */
	sched_domains_numa_levels = 0;

	sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
	if (!sched_domains_numa_masks)
		return;

	/*
	 * Now for each level, construct a mask per node which contains all
	 * cpus of nodes that are that many hops away from us.
	 */
	for (i = 0; i < level; i++) {
		sched_domains_numa_masks[i] =
			kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
		if (!sched_domains_numa_masks[i])
			return;

		for (j = 0; j < nr_node_ids; j++) {
			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
			if (!mask)
				return;

			sched_domains_numa_masks[i][j] = mask;

			for (k = 0; k < nr_node_ids; k++) {
				if (node_distance(j, k) > sched_domains_numa_distance[i])
					continue;

				cpumask_or(mask, mask, cpumask_of_node(k));
			}
		}
	}

	tl = kzalloc((ARRAY_SIZE(default_topology) + level) *
			sizeof(struct sched_domain_topology_level), GFP_KERNEL);
	if (!tl)
		return;

	/*
	 * Copy the default topology bits..
	 */
	for (i = 0; default_topology[i].init; i++)
		tl[i] = default_topology[i];

	/*
	 * .. and append 'j' levels of NUMA goodness.
	 */
	for (j = 0; j < level; i++, j++) {
		tl[i] = (struct sched_domain_topology_level){
			.init = sd_numa_init,
			.mask = sd_numa_mask,
			.flags = SDTL_OVERLAP,
			.numa_level = j,
		};
	}

	sched_domain_topology = tl;

	sched_domains_numa_levels = level;
}

/* Mark @cpu in every (level, node) mask whose distance covers its node. */
static void sched_domains_numa_masks_set(int cpu)
{
	int i, j;
	int node = cpu_to_node(cpu);

	for (i = 0; i < sched_domains_numa_levels; i++) {
		for (j = 0; j < nr_node_ids; j++) {
			if (node_distance(j, node) <= sched_domains_numa_distance[i])
				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
		}
	}
}

/* Remove @cpu from every NUMA level mask (offline path). */
static void sched_domains_numa_masks_clear(int cpu)
{
	int i, j;
	for (i = 0; i < sched_domains_numa_levels; i++) {
		for (j = 0; j < nr_node_ids; j++)
			cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
	}
}

/*
 * Update sched_domains_numa_masks[level][node] array when new cpus
 * are onlined.
 */
static int sched_domains_numa_masks_update(struct notifier_block *nfb,
					   unsigned long action,
					   void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		sched_domains_numa_masks_set(cpu);
		break;

	case CPU_DEAD:
		sched_domains_numa_masks_clear(cpu);
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}
#else
static inline void sched_init_numa(void)
{
}

static int sched_domains_numa_masks_update(struct notifier_block *nfb,
					   unsigned long action,
					   void *hcpu)
{
	return 0;
}
#endif /* CONFIG_NUMA */

/*
 * Allocate the per-topology-level sd_data: per-cpu pointer arrays plus,
 * for every cpu in @cpu_map, node-local sched_domain, sched_group and
 * sched_group_power objects (each with a trailing cpumask).  On failure
 * the caller is expected to unwind via __sdt_free().
 */
static int __sdt_alloc(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

	for (tl = sched_domain_topology; tl->init; tl++) {
		struct sd_data *sdd = &tl->data;

		sdd->sd = alloc_percpu(struct sched_domain *);
		if (!sdd->sd)
			return -ENOMEM;

		sdd->sg = alloc_percpu(struct sched_group *);
		if (!sdd->sg)
			return -ENOMEM;

		sdd->sgp = alloc_percpu(struct sched_group_power *);
		if (!sdd->sgp)
			return -ENOMEM;

		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd;
			struct sched_group *sg;
			struct
sched_group_power *sgp;

			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sd)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sd, j) = sd;

			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sg)
				return -ENOMEM;

			/* single-entry circular list until groups are linked */
			sg->next = sg;

			*per_cpu_ptr(sdd->sg, j) = sg;

			sgp = kzalloc_node(sizeof(struct sched_group_power) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sgp)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sgp, j) = sgp;
		}
	}

	return 0;
}

/*
 * Free everything __sdt_alloc() created.  Entries claimed by a live
 * domain tree were NULLed by claim_allocations() and are skipped here;
 * kfree(NULL)/free_percpu(NULL) are no-ops so partial allocs are fine.
 */
static void __sdt_free(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

	for (tl = sched_domain_topology; tl->init; tl++) {
		struct sd_data *sdd = &tl->data;

		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd;

			if (sdd->sd) {
				sd = *per_cpu_ptr(sdd->sd, j);
				if (sd && (sd->flags & SD_OVERLAP))
					free_sched_groups(sd->groups, 0);
				kfree(*per_cpu_ptr(sdd->sd, j));
			}

			if (sdd->sg)
				kfree(*per_cpu_ptr(sdd->sg, j));
			if (sdd->sgp)
				kfree(*per_cpu_ptr(sdd->sgp, j));
		}
		free_percpu(sdd->sd);
		sdd->sd = NULL;
		free_percpu(sdd->sg);
		sdd->sg = NULL;
		free_percpu(sdd->sgp);
		sdd->sgp = NULL;
	}
}

/*
 * Build one sched_domain for topology level @tl on @cpu and link it
 * above @child.  Returns @child unchanged if the level's init hook
 * produced no domain.
 */
struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
		struct s_data *d, const struct cpumask *cpu_map,
		struct sched_domain_attr *attr, struct sched_domain *child,
		int cpu)
{
	struct sched_domain *sd = tl->init(tl, cpu);
	if (!sd)
		return child;

	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
	if (child) {
		sd->level = child->level + 1;
		sched_domain_level_max = max(sched_domain_level_max, sd->level);
		child->parent = sd;
	}
	sd->child = child;
	set_domain_attribute(sd, attr);

	return sd;
}

/*
 * Build sched domains for a given set of cpus and attach the sched domains
 * to the individual cpus
 */
static int build_sched_domains(const struct cpumask *cpu_map,
			       struct sched_domain_attr *attr)
{
	enum s_alloc alloc_state = sa_none;
	struct sched_domain *sd;
	struct s_data d;
	int i, ret = -ENOMEM;

	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
	if (alloc_state != sa_rootdomain)
		goto error;

	/* Set up domains for cpus specified by the cpu_map. */
	for_each_cpu(i, cpu_map) {
		struct sched_domain_topology_level *tl;

		sd = NULL;
		for (tl = sched_domain_topology; tl->init; tl++) {
			sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
			if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
				sd->flags |= SD_OVERLAP;
			/* stop once a level already spans the whole map */
			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
				break;
		}

		/* remember the lowest (leaf) domain for this cpu */
		while (sd->child)
			sd = sd->child;

		*per_cpu_ptr(d.sd, i) = sd;
	}

	/* Build the groups for the domains */
	for_each_cpu(i, cpu_map) {
		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			sd->span_weight = cpumask_weight(sched_domain_span(sd));
			if (sd->flags & SD_OVERLAP) {
				if (build_overlap_sched_groups(sd, i))
					goto error;
			} else {
				if (build_sched_groups(sd, i))
					goto error;
			}
		}
	}

	/* Calculate CPU power for physical packages and nodes */
	for (i = nr_cpumask_bits-1; i >= 0; i--) {
		if (!cpumask_test_cpu(i, cpu_map))
			continue;

		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			claim_allocations(i, sd);
			init_sched_groups_power(i, sd);
		}
	}

	/* Attach the domains */
	rcu_read_lock();
	for_each_cpu(i, cpu_map) {
		sd = *per_cpu_ptr(d.sd, i);
		cpu_attach_domain(sd, d.rd, i);
	}
	rcu_read_unlock();

	ret = 0;
error:
	/* frees only what claim_allocations() did not claim */
	__free_domain_allocs(&d, alloc_state, cpu_map);
	return ret;
}

static cpumask_var_t *doms_cur;	/* current sched domains */
static int ndoms_cur;		/* number of sched domains in 'doms_cur' */
static struct sched_domain_attr *dattr_cur;
				/* attributes of custom domains in 'doms_cur' */

/*
 * Special case: If a kmalloc of a doms_cur partition (array of
 * cpumask) fails, then fallback to a single sched domain,
 * as determined by the single cpumask fallback_doms.
 */
static cpumask_var_t fallback_doms;

/*
 * arch_update_cpu_topology lets virtualized architectures update the
 * cpu core maps. It is supposed to return 1 if the topology changed
 * or 0 if it stayed the same.
 */
int __attribute__((weak)) arch_update_cpu_topology(void)
{
	/* default: topology never changes; archs may override */
	return 0;
}

/*
 * Allocate an array of @ndoms cpumasks for domain partitions; frees
 * everything already allocated and returns NULL on failure.
 */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
{
	int i;
	cpumask_var_t *doms;

	doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
	if (!doms)
		return NULL;
	for (i = 0; i < ndoms; i++) {
		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
			free_sched_domains(doms, i);
			return NULL;
		}
	}
	return doms;
}

/* Counterpart of alloc_sched_domains(): frees @ndoms masks + the array. */
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
{
	unsigned int i;
	for (i = 0; i < ndoms; i++)
		free_cpumask_var(doms[i]);
	kfree(doms);
}

/*
 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
 * For now this just excludes isolated cpus, but could be used to
 * exclude other special cases in the future.
 */
static int init_sched_domains(const struct cpumask *cpu_map)
{
	int err;

	arch_update_cpu_topology();
	ndoms_cur = 1;
	doms_cur = alloc_sched_domains(ndoms_cur);
	if (!doms_cur)
		doms_cur = &fallback_doms;
	cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
	err = build_sched_domains(doms_cur[0], NULL);
	register_sched_domain_sysctl();

	return err;
}

/*
 * Detach sched domains from a group of cpus specified in cpu_map
 * These cpus will now be attached to the NULL domain
 */
static void detach_destroy_domains(const struct cpumask *cpu_map)
{
	int i;

	rcu_read_lock();
	for_each_cpu(i, cpu_map)
		cpu_attach_domain(NULL, &def_root_domain, i);
	rcu_read_unlock();
}

/* handle null as "default" */
static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
			struct sched_domain_attr *new, int idx_new)
{
	struct sched_domain_attr tmp;

	/* fast path */
	if (!new && !cur)
		return 1;

	tmp = SD_ATTR_INIT;
	return !memcmp(cur ? (cur + idx_cur) : &tmp,
			new ? (new + idx_new) : &tmp,
			sizeof(struct sched_domain_attr));
}

/*
 * Partition sched domains as specified by the 'ndoms_new'
 * cpumasks in the array doms_new[] of cpumasks. This compares
 * doms_new[] to the current sched domain partitioning, doms_cur[].
 * It destroys each deleted domain and builds each new domain.
 *
 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
 * The masks don't intersect (don't overlap.) We should setup one
 * sched domain for each mask. CPUs not in any of the cpumasks will
 * not be load balanced. If the same cpumask appears both in the
 * current 'doms_cur' domains and in the new 'doms_new', we can leave
 * it as it is.
 *
 * The passed in 'doms_new' should be allocated using
 * alloc_sched_domains. This routine takes ownership of it and will
 * free_sched_domains it when done with it. If the caller failed the
 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
 * and partition_sched_domains() will fallback to the single partition
 * 'fallback_doms', it also forces the domains to be rebuilt.
 *
 * If doms_new == NULL it will be replaced with cpu_online_mask.
 * ndoms_new == 0 is a special case for destroying existing domains,
 * and it will not create the default domain.
 *
 * Call with hotplug lock held
 */
void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			     struct sched_domain_attr *dattr_new)
{
	int i, j, n;
	int new_topology;

	mutex_lock(&sched_domains_mutex);

	/* always unregister in case we don't destroy any domains */
	unregister_sched_domain_sysctl();

	/* Let architecture update cpu core mappings. */
	new_topology = arch_update_cpu_topology();

	n = doms_new ?
ndoms_new : 0;

	/* Destroy deleted domains */
	for (i = 0; i < ndoms_cur; i++) {
		for (j = 0; j < n && !new_topology; j++) {
			if (cpumask_equal(doms_cur[i], doms_new[j])
			    && dattrs_equal(dattr_cur, i, dattr_new, j))
				goto match1;
		}
		/* no match - a current sched domain not in new doms_new[] */
		detach_destroy_domains(doms_cur[i]);
match1:
		;
	}

	if (doms_new == NULL) {
		/* caller's alloc failed: rebuild a single fallback domain */
		ndoms_cur = 0;
		doms_new = &fallback_doms;
		cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
		WARN_ON_ONCE(dattr_new);
	}

	/* Build new domains */
	for (i = 0; i < ndoms_new; i++) {
		for (j = 0; j < ndoms_cur && !new_topology; j++) {
			if (cpumask_equal(doms_new[i], doms_cur[j])
			    && dattrs_equal(dattr_new, i, dattr_cur, j))
				goto match2;
		}
		/* no match - add a new doms_new */
		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
match2:
		;
	}

	/* Remember the new sched domains */
	if (doms_cur != &fallback_doms)
		free_sched_domains(doms_cur, ndoms_cur);
	kfree(dattr_cur);	/* kfree(NULL) is safe */
	doms_cur = doms_new;
	dattr_cur = dattr_new;
	ndoms_cur = ndoms_new;

	register_sched_domain_sysctl();

	mutex_unlock(&sched_domains_mutex);
}

static int num_cpus_frozen;	/* used to mark begin/end of suspend/resume */

/*
 * Update cpusets according to cpu_active mask. If cpusets are
 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
 * around partition_sched_domains().
 *
 * If we come here as part of a suspend/resume, don't touch cpusets because we
 * want to restore it back to its original state upon resume anyway.
 */
static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
			     void *hcpu)
{
	switch (action) {
	case CPU_ONLINE_FROZEN:
	case CPU_DOWN_FAILED_FROZEN:

		/*
		 * num_cpus_frozen tracks how many CPUs are involved in suspend
		 * resume sequence. As long as this is not the last online
		 * operation in the resume sequence, just build a single sched
		 * domain, ignoring cpusets.
		 */
		num_cpus_frozen--;
		if (likely(num_cpus_frozen)) {
			partition_sched_domains(1, NULL, NULL);
			break;
		}

		/*
		 * This is the last CPU online operation. So fall through and
		 * restore the original sched domains by considering the
		 * cpuset configurations.
		 */

	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		cpuset_update_active_cpus(true);
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

/*
 * Hotplug-down counterpart of cpuset_cpu_active(): during suspend only
 * count frozen CPUs and keep a single sched domain.
 */
static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
			       void *hcpu)
{
	switch (action) {
	case CPU_DOWN_PREPARE:
		cpuset_update_active_cpus(false);
		break;
	case CPU_DOWN_PREPARE_FROZEN:
		num_cpus_frozen++;
		partition_sched_domains(1, NULL, NULL);
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

/*
 * SMP scheduler bring-up: build the initial domain hierarchy, register
 * the hotplug/cpuset notifiers and move init off any isolated CPU.
 */
void __init sched_init_smp(void)
{
	cpumask_var_t non_isolated_cpus;

	/* NOTE(review): vendor (HMP) hook, defined outside this chunk */
	init_cpu_efficiency();

	alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
	alloc_cpumask_var(&fallback_doms, GFP_KERNEL);

	sched_init_numa();

	/*
	 * There's no userspace yet to cause hotplug operations; hence all the
	 * cpu masks are stable and all blatant races in the below code cannot
	 * happen.
	 */
	mutex_lock(&sched_domains_mutex);
	init_sched_domains(cpu_active_mask);
	cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
	if (cpumask_empty(non_isolated_cpus))
		cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
	mutex_unlock(&sched_domains_mutex);

	hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
	hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
	hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);

	init_hrtick();

	/* Move init over to a non-isolated CPU */
	if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
		BUG();
	sched_init_granularity();
	free_cpumask_var(non_isolated_cpus);

	init_sched_rt_class();
}
#else
void __init sched_init_smp(void)
{
	sched_init_granularity();
}
#endif /* CONFIG_SMP */

const_debug unsigned int sysctl_timer_migration = 1;

/* Is @addr inside the scheduler/locking text sections? (for backtraces) */
int in_sched_functions(unsigned long addr)
{
	return in_lock_functions(addr) ||
		(addr >= (unsigned long)__sched_text_start
		&& addr < (unsigned long)__sched_text_end);
}

#ifdef CONFIG_CGROUP_SCHED
/*
 * Default task group.
 * Every task in system belongs to this group at bootup.
 */
struct task_group root_task_group;
LIST_HEAD(task_groups);
#endif

DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);

/*
 * Core scheduler boot-time initialisation: carve out the group-sched
 * pointer arrays, set up default bandwidth, initialise every runqueue,
 * and turn the boot thread into the idle task.
 */
void __init sched_init(void)
{
	int i, j;
	unsigned long alloc_size = 0, ptr;

	/* NOTE(review): vendor HMP flag, defined outside this chunk */
	if (sched_enable_hmp)
		pr_info("HMP scheduling enabled.\n");

	BUG_ON(num_possible_cpus() > BITS_PER_LONG);

#ifdef CONFIG_FAIR_GROUP_SCHED
	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
	alloc_size += num_possible_cpus() * cpumask_size();
#endif
	if (alloc_size) {
		/* one allocation, carved up by bumping 'ptr' */
		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);

#ifdef CONFIG_FAIR_GROUP_SCHED
		root_task_group.se = (struct sched_entity **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		root_task_group.rt_rq = (struct rt_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_CPUMASK_OFFSTACK
		for_each_possible_cpu(i) {
			per_cpu(load_balance_mask, i) = (void *)ptr;
			ptr += cpumask_size();
		}
#endif /* CONFIG_CPUMASK_OFFSTACK */
	}

#ifdef CONFIG_SMP
	init_defrootdomain();
#endif

	init_rt_bandwidth(&def_rt_bandwidth,
			global_rt_period(), global_rt_runtime());

#ifdef CONFIG_RT_GROUP_SCHED
	init_rt_bandwidth(&root_task_group.rt_bandwidth,
			global_rt_period(), global_rt_runtime());
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_CGROUP_SCHED
	list_add(&root_task_group.list, &task_groups);
	INIT_LIST_HEAD(&root_task_group.children);
	INIT_LIST_HEAD(&root_task_group.siblings);
	autogroup_init(&init_task);

#endif /* CONFIG_CGROUP_SCHED */

	for_each_possible_cpu(i) {
		struct rq *rq;

		/* NOTE(review): vendor per-cpu buffers — defined elsewhere */
		per_cpu(dptr, i) = per_cpu(dbuf, i);

		rq = cpu_rq(i);
		raw_spin_lock_init(&rq->lock);
		rq->nr_running = 0;
		rq->calc_load_active = 0;
		rq->calc_load_update = jiffies + LOAD_FREQ;
		init_cfs_rq(&rq->cfs);
		init_rt_rq(&rq->rt, rq);
#ifdef CONFIG_FAIR_GROUP_SCHED
		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
		/*
		 * How much cpu bandwidth does root_task_group get?
		 *
		 * In case of task-groups formed thr' the cgroup filesystem, it
		 * gets 100% of the cpu resources in the system. This overall
		 * system cpu resource is divided among the tasks of
		 * root_task_group and its child task-groups in a fair manner,
		 * based on each entity's (task or task-group's) weight
		 * (se->load.weight).
		 *
		 * In other words, if root_task_group has 10 tasks of weight
		 * 1024) and two child groups A0 and A1 (of weight 1024 each),
		 * then A0's share of the cpu resource is:
		 *
		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
		 *
		 * We achieve this by letting root_task_group's tasks sit
		 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
		 */
		init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
#endif /* CONFIG_FAIR_GROUP_SCHED */

		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
#ifdef CONFIG_RT_GROUP_SCHED
		INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
#endif

		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
			rq->cpu_load[j] = 0;

		rq->last_load_update_tick = jiffies;

#ifdef CONFIG_SMP
		rq->sd = NULL;
		rq->rd = NULL;
		rq->cpu_power = SCHED_POWER_SCALE;
		rq->post_schedule = 0;
		rq->active_balance = 0;
		rq->next_balance = jiffies;
		rq->push_cpu = 0;
		rq->push_task = NULL;
		rq->cpu = i;
		rq->online = 0;
		rq->idle_stamp = 0;
		rq->avg_idle = 2*sysctl_sched_migration_cost;
		rq->cstate = 0;
		rq->wakeup_latency = 0;
		rq->wakeup_energy = 0;
#ifdef CONFIG_SCHED_HMP
		/* NOTE(review): HMP defaults; semantics defined by vendor code */
		rq->cur_freq = 1;
		rq->max_freq = 1;
		rq->min_freq = 1;
		rq->max_possible_freq = 1;
		rq->max_possible_capacity = 0;
		rq->cumulative_runnable_avg = 0;
		rq->efficiency = 1024;
		rq->capacity = 1024;
		rq->load_scale_factor = 1024;
		rq->window_start = 0;
		rq->nr_small_tasks = rq->nr_big_tasks = 0;
		rq->hmp_flags = 0;
#ifdef CONFIG_SCHED_FREQ_INPUT
		rq->old_busy_time = 0;
		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
		rq->notifier_sent = 0;
#endif
#endif
		INIT_LIST_HEAD(&rq->cfs_tasks);

		rq_attach_root(rq, &def_root_domain);
#ifdef CONFIG_NO_HZ_COMMON
		rq->nohz_flags = 0;
#endif
#ifdef CONFIG_NO_HZ_FULL
		rq->last_sched_tick = 0;
#endif
#endif
		init_rq_hrtick(rq);
		atomic_set(&rq->nr_iowait, 0);
	}

	/* NOTE(review): vendor HMP hook */
	set_hmp_defaults();

	set_load_weight(&init_task);

#ifdef CONFIG_PREEMPT_NOTIFIERS
	INIT_HLIST_HEAD(&init_task.preempt_notifiers);
#endif

#ifdef CONFIG_RT_MUTEXES
	plist_head_init(&init_task.pi_waiters);
#endif

	/*
	 * The boot idle thread does lazy MMU switching as well:
	 */
	atomic_inc(&init_mm.mm_count);
	enter_lazy_tlb(&init_mm, current);

	/*
	 * Make us the idle thread. Technically, schedule() should not be
	 * called from this thread, however somewhere below it might be,
	 * but because we are the idle thread, we just pick up running again
	 * when this runqueue becomes "idle".
	 */
	init_idle(current, smp_processor_id());

	calc_load_update = jiffies + LOAD_FREQ;

	/*
	 * During early bootup we pretend to be a normal task:
	 */
	current->sched_class = &fair_sched_class;

#ifdef CONFIG_SMP
	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
	/* May be allocated at isolcpus cmdline parse time */
	if (cpu_isolated_map == NULL)
		zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
	idle_thread_set_boot_cpu();
#endif
	init_sched_fair_class();

	scheduler_running = 1;
}

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
/* True when the preempt count (ignoring PREEMPT_ACTIVE) matches @preempt_offset. */
static inline int preempt_count_equals(int preempt_offset)
{
	int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();

	return (nested == preempt_offset);
}

static int __might_sleep_init_called;
/* Arms __might_sleep() checking once early initcalls have run. */
int __init __might_sleep_init(void)
{
	__might_sleep_init_called = 1;
	return 0;
}
early_initcall(__might_sleep_init);

void __might_sleep(const char *file, int line, int preempt_offset)
{
	static unsigned long prev_jiffy;	/* ratelimiting */

	rcu_sleep_check();	/* WARN_ON_ONCE() by default, no rate limit reqd.
 */
	if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
	    oops_in_progress)
		return;
	if (system_state != SYSTEM_RUNNING &&
	    (!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
		return;
	/* rate-limit: at most one report per HZ */
	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
		return;
	prev_jiffy = jiffies;

	printk(KERN_ERR
		"BUG: sleeping function called from invalid context at %s:%d\n",
			file, line);
	printk(KERN_ERR
		"in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
			in_atomic(), irqs_disabled(),
			current->pid, current->comm);

	debug_show_held_locks(current);
	if (irqs_disabled())
		print_irqtrace_events(current);
	dump_stack();
}
EXPORT_SYMBOL(__might_sleep);
#endif

#ifdef CONFIG_MAGIC_SYSRQ
/*
 * Force @p back to SCHED_NORMAL on @rq, requeueing it and triggering a
 * resched if it was runnable.  Caller holds the task's rq lock.
 */
static void normalize_task(struct rq *rq, struct task_struct *p)
{
	const struct sched_class *prev_class = p->sched_class;
	struct sched_attr attr = {
		.sched_policy = SCHED_NORMAL,
	};
	int old_prio = p->prio;
	int on_rq;

	on_rq = p->on_rq;
	if (on_rq)
		dequeue_task(rq, p, 0);
	__setscheduler(rq, p, &attr);
	if (on_rq) {
		enqueue_task(rq, p, 0);
		resched_task(rq->curr);
	}

	check_class_changed(rq, p, prev_class, old_prio);
}

/*
 * SysRq helper: reset every user task's scheduling state — RT tasks go
 * back to SCHED_NORMAL, negative nice levels back to 0.
 */
void normalize_rt_tasks(void)
{
	struct task_struct *g, *p;
	unsigned long flags;
	struct rq *rq;

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, p) {
		/*
		 * Only normalize user tasks:
		 */
		if (!p->mm)
			continue;

		p->se.exec_start		= 0;
#ifdef CONFIG_SCHEDSTATS
		p->se.statistics.wait_start	= 0;
		p->se.statistics.sleep_start	= 0;
		p->se.statistics.block_start	= 0;
#endif

		if (!rt_task(p)) {
			/*
			 * Renice negative nice level userspace
			 * tasks back to 0:
			 */
			if (TASK_NICE(p) < 0 && p->mm)
				set_user_nice(p, 0);
			continue;
		}

		raw_spin_lock(&p->pi_lock);
		rq = __task_rq_lock(p);

		normalize_task(rq, p);

		__task_rq_unlock(rq);
		raw_spin_unlock(&p->pi_lock);
	} while_each_thread(g, p);

	read_unlock_irqrestore(&tasklist_lock, flags);
}

#endif /* CONFIG_MAGIC_SYSRQ */

#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
/*
 * These functions are only useful for the IA64 MCA handling, or kdb.
 *
 * They can only be called when the whole system has been
 * stopped - every CPU needs to be quiescent, and no scheduling
 * activity can take place. Using them for anything else would
 * be a serious bug, and as a result, they aren't even visible
 * under any other configuration.
 */

/**
 * curr_task - return the current task for a given cpu.
 * @cpu: the processor in question.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 */
struct task_struct *curr_task(int cpu)
{
	return cpu_curr(cpu);
}

#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */

#ifdef CONFIG_IA64
/**
 * set_curr_task - set the current task for a given cpu.
 * @cpu: the processor in question.
 * @p: the task pointer to set.
 *
 * Description: This function must only be used when non-maskable interrupts
 * are serviced on a separate stack. It allows the architecture to switch the
 * notion of the current task on a cpu in a non-blocking manner. This function
 * must be called with all CPU's synchronized, and interrupts disabled, the
 * and caller must save the original value of the current task (see
 * curr_task() above) and restore that value before reenabling interrupts and
 * re-starting the system.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 */
void set_curr_task(int cpu, struct task_struct *p)
{
	cpu_curr(cpu) = p;
}

#endif

#ifdef CONFIG_CGROUP_SCHED
/* task_group_lock serializes the addition/removal of task groups */
static DEFINE_SPINLOCK(task_group_lock);

/* Release all per-class state of @tg, then the group itself. */
static void free_sched_group(struct task_group *tg)
{
	free_fair_sched_group(tg);
	free_rt_sched_group(tg);
	autogroup_free(tg);
	kfree(tg);
}

/* allocate runqueue etc for a new task group */
struct task_group *sched_create_group(struct task_group *parent)
{
	struct task_group *tg;

	tg = kzalloc(sizeof(*tg), GFP_KERNEL);
	if (!tg)
		return ERR_PTR(-ENOMEM);

	if (!alloc_fair_sched_group(tg, parent))
		goto err;

	if (!alloc_rt_sched_group(tg, parent))
		goto err;

	return tg;

err:
	free_sched_group(tg);
	return ERR_PTR(-ENOMEM);
}

/* Link a freshly created @tg under @parent in the group hierarchy. */
void sched_online_group(struct task_group *tg, struct task_group *parent)
{
	unsigned long flags;

	spin_lock_irqsave(&task_group_lock, flags);
	list_add_rcu(&tg->list, &task_groups);

	WARN_ON(!parent); /* root should already exist */

	tg->parent = parent;
	INIT_LIST_HEAD(&tg->children);
	list_add_rcu(&tg->siblings, &parent->children);
	spin_unlock_irqrestore(&task_group_lock, flags);
}

/* rcu callback to free various structures associated with a task group */
static void free_sched_group_rcu(struct rcu_head *rhp)
{
	/* now it should be safe to free those cfs_rqs */
	free_sched_group(container_of(rhp, struct task_group, rcu));
}

/* Destroy runqueue etc associated with a task group */
void sched_destroy_group(struct task_group *tg)
{
	/* wait for possible concurrent references to cfs_rqs complete */
	call_rcu(&tg->rcu, free_sched_group_rcu);
}

/* Unlink @tg from the hierarchy; actual freeing happens via RCU later. */
void sched_offline_group(struct task_group *tg)
{
	unsigned long flags;
	int i;

	/* end participation in shares distribution */
	for_each_possible_cpu(i)
		unregister_fair_sched_group(tg, i);

	spin_lock_irqsave(&task_group_lock, flags);
	list_del_rcu(&tg->list);
	list_del_rcu(&tg->siblings);
	spin_unlock_irqrestore(&task_group_lock, flags);
}

/* change task's runqueue when it moves between groups.
 * The caller of this function should have put the task in its new group
 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
 * reflect its new group.
 */
void sched_move_task(struct task_struct *tsk)
{
	struct task_group *tg;
	int on_rq, running;
	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(tsk, &flags);

	running = task_current(rq, tsk);
	on_rq = tsk->on_rq;

	/* take the task off the rq while we re-parent it */
	if (on_rq)
		dequeue_task(rq, tsk, 0);
	if (unlikely(running))
		tsk->sched_class->put_prev_task(rq, tsk);

	tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
				lockdep_is_held(&tsk->sighand->siglock)),
			  struct task_group, css);
	tg = autogroup_task_group(tsk, tg);
	tsk->sched_task_group = tg;

#ifdef CONFIG_FAIR_GROUP_SCHED
	if (tsk->sched_class->task_move_group)
		tsk->sched_class->task_move_group(tsk, on_rq);
	else
#endif
		set_task_rq(tsk, task_cpu(tsk));

	if (unlikely(running))
		tsk->sched_class->set_curr_task(rq);
	if (on_rq)
		enqueue_task(rq, tsk, 0);

	task_rq_unlock(rq, tsk, &flags);
}
#endif /* CONFIG_CGROUP_SCHED */

#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
/*
 * Express runtime/period as a 20-bit fixed-point ratio; RUNTIME_INF
 * maps to the full scale (1 << 20).
 */
static unsigned long to_ratio(u64 period, u64 runtime)
{
	if (runtime == RUNTIME_INF)
		return 1ULL << 20;

	return div64_u64(runtime << 20, period);
}
#endif

#ifdef CONFIG_RT_GROUP_SCHED
/*
 * Ensure that the real time constraints are schedulable.
 */
static DEFINE_MUTEX(rt_constraints_mutex);

/* Must be called with tasklist_lock held */
static inline int tg_has_rt_tasks(struct task_group *tg)
{
	struct task_struct *g, *p;

	do_each_thread(g, p) {
		if (rt_task(p) && task_rq(p)->rt.tg == tg)
			return 1;
	} while_each_thread(g, p);

	return 0;
}

/* Proposed new bandwidth for one group, threaded through walk_tg_tree(). */
struct rt_schedulable_data {
	struct task_group *tg;
	u64 rt_period;
	u64 rt_runtime;
};

/*
 * walk_tg_tree() callback: validate one group's (possibly updated) RT
 * bandwidth against the global limit and against its children's sum.
 */
static int tg_rt_schedulable(struct task_group *tg, void *data)
{
	struct rt_schedulable_data *d = data;
	struct task_group *child;
	unsigned long total, sum = 0;
	u64 period, runtime;

	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	runtime = tg->rt_bandwidth.rt_runtime;

	/* for the group being changed, use the proposed values instead */
	if (tg == d->tg) {
		period = d->rt_period;
		runtime = d->rt_runtime;
	}

	/*
	 * Cannot have more runtime than the period.
	 */
	if (runtime > period && runtime != RUNTIME_INF)
		return -EINVAL;

	/*
	 * Ensure we don't starve existing RT tasks.
	 */
	if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
		return -EBUSY;

	total = to_ratio(period, runtime);

	/*
	 * Nobody can have more than the global setting allows.
	 */
	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
		return -EINVAL;

	/*
	 * The sum of our children's runtime should not exceed our own.
	 */
	list_for_each_entry_rcu(child, &tg->children, siblings) {
		period = ktime_to_ns(child->rt_bandwidth.rt_period);
		runtime = child->rt_bandwidth.rt_runtime;

		if (child == d->tg) {
			period = d->rt_period;
			runtime = d->rt_runtime;
		}

		sum += to_ratio(period, runtime);
	}

	if (sum > total)
		return -EINVAL;

	return 0;
}

/* Validate @period/@runtime for @tg across the whole group tree. */
static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
	int ret;

	struct rt_schedulable_data data = {
		.tg = tg,
		.rt_period = period,
		.rt_runtime = runtime,
	};

	rcu_read_lock();
	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
	rcu_read_unlock();

	return ret;
}

/*
 * Commit a new RT period/runtime for @tg after schedulability checks,
 * propagating the runtime to every per-cpu rt_rq of the group.
 */
static int tg_set_rt_bandwidth(struct task_group *tg,
		u64 rt_period, u64 rt_runtime)
{
	int i, err = 0;

	mutex_lock(&rt_constraints_mutex);
	read_lock(&tasklist_lock);
	err = __rt_schedulable(tg, rt_period, rt_runtime);
	if (err)
		goto unlock;

	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
	tg->rt_bandwidth.rt_runtime = rt_runtime;

	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = tg->rt_rq[i];

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_runtime;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
	read_unlock(&tasklist_lock);
	mutex_unlock(&rt_constraints_mutex);

	return err;
}

/* cgroup interface: set runtime in usecs (<0 means unlimited). */
static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
	u64 rt_runtime, rt_period;

	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
	if (rt_runtime_us < 0)
		rt_runtime = RUNTIME_INF;

	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}

/* cgroup interface: current runtime in usecs, -1 when unlimited. */
static long sched_group_rt_runtime(struct task_group *tg)
{
	u64 rt_runtime_us;

	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
		return -1;

	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
	do_div(rt_runtime_us, NSEC_PER_USEC);
	return rt_runtime_us;
}

/* cgroup interface: set the RT period in usecs (must be non-zero). */
static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
{
	u64 rt_runtime, rt_period;

	rt_period =
(u64)rt_period_us * NSEC_PER_USEC; rt_runtime = tg->rt_bandwidth.rt_runtime; if (rt_period == 0) return -EINVAL; return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); } static long sched_group_rt_period(struct task_group *tg) { u64 rt_period_us; rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period); do_div(rt_period_us, NSEC_PER_USEC); return rt_period_us; } static int sched_rt_global_constraints(void) { u64 runtime, period; int ret = 0; if (sysctl_sched_rt_period <= 0) return -EINVAL; runtime = global_rt_runtime(); period = global_rt_period(); /* * Sanity check on the sysctl variables. */ if (runtime > period && runtime != RUNTIME_INF) return -EINVAL; mutex_lock(&rt_constraints_mutex); read_lock(&tasklist_lock); ret = __rt_schedulable(NULL, 0, 0); read_unlock(&tasklist_lock); mutex_unlock(&rt_constraints_mutex); return ret; } static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk) { /* Don't accept realtime tasks when there is no way for them to run */ if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) return 0; return 1; } #else /* !CONFIG_RT_GROUP_SCHED */ static int sched_rt_global_constraints(void) { unsigned long flags; int i; if (sysctl_sched_rt_period <= 0) return -EINVAL; /* * There's always some RT tasks in the root group * -- migration, kstopmachine etc.. 
*/ if (sysctl_sched_rt_runtime == 0) return -EBUSY; raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); for_each_possible_cpu(i) { struct rt_rq *rt_rq = &cpu_rq(i)->rt; raw_spin_lock(&rt_rq->rt_runtime_lock); rt_rq->rt_runtime = global_rt_runtime(); raw_spin_unlock(&rt_rq->rt_runtime_lock); } raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); return 0; } #endif /* CONFIG_RT_GROUP_SCHED */ int sched_rr_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int ret; static DEFINE_MUTEX(mutex); mutex_lock(&mutex); ret = proc_dointvec(table, write, buffer, lenp, ppos); /* make sure that internally we keep jiffies */ /* also, writing zero resets timeslice to default */ if (!ret && write) { sched_rr_timeslice = sched_rr_timeslice <= 0 ? RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice); } mutex_unlock(&mutex); return ret; } int sched_rt_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int ret; int old_period, old_runtime; static DEFINE_MUTEX(mutex); mutex_lock(&mutex); old_period = sysctl_sched_rt_period; old_runtime = sysctl_sched_rt_runtime; ret = proc_dointvec(table, write, buffer, lenp, ppos); if (!ret && write) { ret = sched_rt_global_constraints(); if (ret) { sysctl_sched_rt_period = old_period; sysctl_sched_rt_runtime = old_runtime; } else { def_rt_bandwidth.rt_runtime = global_rt_runtime(); def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period()); } } mutex_unlock(&mutex); return ret; } #ifdef CONFIG_CGROUP_SCHED /* return corresponding task_group object of a cgroup */ static inline struct task_group *cgroup_tg(struct cgroup *cgrp) { return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id), struct task_group, css); } static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp) { struct task_group *tg, *parent; if (!cgrp->parent) { /* This is early initialization for the top cgroup */ return &root_task_group.css; } 
parent = cgroup_tg(cgrp->parent); tg = sched_create_group(parent); if (IS_ERR(tg)) return ERR_PTR(-ENOMEM); return &tg->css; } static int cpu_cgroup_css_online(struct cgroup *cgrp) { struct task_group *tg = cgroup_tg(cgrp); struct task_group *parent; if (!cgrp->parent) return 0; parent = cgroup_tg(cgrp->parent); sched_online_group(tg, parent); return 0; } static void cpu_cgroup_css_free(struct cgroup *cgrp) { struct task_group *tg = cgroup_tg(cgrp); sched_destroy_group(tg); } static void cpu_cgroup_css_offline(struct cgroup *cgrp) { struct task_group *tg = cgroup_tg(cgrp); sched_offline_group(tg); } static int cpu_cgroup_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) { struct task_struct *task; cgroup_taskset_for_each(task, cgrp, tset) { #ifdef CONFIG_RT_GROUP_SCHED if (!sched_rt_can_attach(cgroup_tg(cgrp), task)) return -EINVAL; #else /* We don't support RT-tasks being in separate groups */ if (task->sched_class != &fair_sched_class) return -EINVAL; #endif } return 0; } static void cpu_cgroup_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) { struct task_struct *task; cgroup_taskset_for_each(task, cgrp, tset) sched_move_task(task); } static void cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp, struct task_struct *task) { /* * cgroup_exit() is called in the copy_process() failure path. * Ignore this case since the task hasn't ran yet, this avoids * trying to poke a half freed task state from generic code. 
*/ if (!(task->flags & PF_EXITING)) return; sched_move_task(task); } static u64 cpu_notify_on_migrate_read_u64(struct cgroup *cgrp, struct cftype *cft) { struct task_group *tg = cgroup_tg(cgrp); return tg->notify_on_migrate; } static int cpu_notify_on_migrate_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 notify) { struct task_group *tg = cgroup_tg(cgrp); tg->notify_on_migrate = (notify > 0); return 0; } #ifdef CONFIG_FAIR_GROUP_SCHED static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype, u64 shareval) { return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval)); } static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft) { struct task_group *tg = cgroup_tg(cgrp); return (u64) scale_load_down(tg->shares); } #ifdef CONFIG_CFS_BANDWIDTH static DEFINE_MUTEX(cfs_constraints_mutex); const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) { int i, ret = 0, runtime_enabled, runtime_was_enabled; struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; if (tg == &root_task_group) return -EINVAL; /* * Ensure we have at some amount of bandwidth every period. This is * to prevent reaching a state of large arrears when throttled via * entity_tick() resulting in prolonged exit starvation. */ if (quota < min_cfs_quota_period || period < min_cfs_quota_period) return -EINVAL; /* * Likewise, bound things on the otherside by preventing insane quota * periods. This also allows us to normalize in computing quota * feasibility. 
*/ if (period > max_cfs_quota_period) return -EINVAL; mutex_lock(&cfs_constraints_mutex); ret = __cfs_schedulable(tg, period, quota); if (ret) goto out_unlock; runtime_enabled = quota != RUNTIME_INF; runtime_was_enabled = cfs_b->quota != RUNTIME_INF; /* * If we need to toggle cfs_bandwidth_used, off->on must occur * before making related changes, and on->off must occur afterwards */ if (runtime_enabled && !runtime_was_enabled) cfs_bandwidth_usage_inc(); raw_spin_lock_irq(&cfs_b->lock); cfs_b->period = ns_to_ktime(period); cfs_b->quota = quota; __refill_cfs_bandwidth_runtime(cfs_b); /* restart the period timer (if active) to handle new period expiry */ if (runtime_enabled && cfs_b->timer_active) { /* force a reprogram */ cfs_b->timer_active = 0; __start_cfs_bandwidth(cfs_b); } raw_spin_unlock_irq(&cfs_b->lock); for_each_possible_cpu(i) { struct cfs_rq *cfs_rq = tg->cfs_rq[i]; struct rq *rq = cfs_rq->rq; raw_spin_lock_irq(&rq->lock); cfs_rq->runtime_enabled = runtime_enabled; cfs_rq->runtime_remaining = 0; if (cfs_rq->throttled) unthrottle_cfs_rq(cfs_rq); raw_spin_unlock_irq(&rq->lock); } if (runtime_was_enabled && !runtime_enabled) cfs_bandwidth_usage_dec(); out_unlock: mutex_unlock(&cfs_constraints_mutex); return ret; } int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) { u64 quota, period; period = ktime_to_ns(tg->cfs_bandwidth.period); if (cfs_quota_us < 0) quota = RUNTIME_INF; else quota = (u64)cfs_quota_us * NSEC_PER_USEC; return tg_set_cfs_bandwidth(tg, period, quota); } long tg_get_cfs_quota(struct task_group *tg) { u64 quota_us; if (tg->cfs_bandwidth.quota == RUNTIME_INF) return -1; quota_us = tg->cfs_bandwidth.quota; do_div(quota_us, NSEC_PER_USEC); return quota_us; } int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) { u64 quota, period; period = (u64)cfs_period_us * NSEC_PER_USEC; quota = tg->cfs_bandwidth.quota; return tg_set_cfs_bandwidth(tg, period, quota); } long tg_get_cfs_period(struct task_group *tg) { u64 cfs_period_us; 
cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); do_div(cfs_period_us, NSEC_PER_USEC); return cfs_period_us; } static s64 cpu_cfs_quota_read_s64(struct cgroup *cgrp, struct cftype *cft) { return tg_get_cfs_quota(cgroup_tg(cgrp)); } static int cpu_cfs_quota_write_s64(struct cgroup *cgrp, struct cftype *cftype, s64 cfs_quota_us) { return tg_set_cfs_quota(cgroup_tg(cgrp), cfs_quota_us); } static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft) { return tg_get_cfs_period(cgroup_tg(cgrp)); } static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype, u64 cfs_period_us) { return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us); } struct cfs_schedulable_data { struct task_group *tg; u64 period, quota; }; /* * normalize group quota/period to be quota/max_period * note: units are usecs */ static u64 normalize_cfs_quota(struct task_group *tg, struct cfs_schedulable_data *d) { u64 quota, period; if (tg == d->tg) { period = d->period; quota = d->quota; } else { period = tg_get_cfs_period(tg); quota = tg_get_cfs_quota(tg); } /* note: these should typically be equivalent */ if (quota == RUNTIME_INF || quota == -1) return RUNTIME_INF; return to_ratio(period, quota); } static int tg_cfs_schedulable_down(struct task_group *tg, void *data) { struct cfs_schedulable_data *d = data; struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; s64 quota = 0, parent_quota = -1; if (!tg->parent) { quota = RUNTIME_INF; } else { struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; quota = normalize_cfs_quota(tg, d); parent_quota = parent_b->hierarchal_quota; /* * ensure max(child_quota) <= parent_quota, inherit when no * limit is set */ if (quota == RUNTIME_INF) quota = parent_quota; else if (parent_quota != RUNTIME_INF && quota > parent_quota) return -EINVAL; } cfs_b->hierarchal_quota = quota; return 0; } static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) { int ret; struct cfs_schedulable_data data = { .tg = tg, .period = 
period, .quota = quota, }; if (quota != RUNTIME_INF) { do_div(data.period, NSEC_PER_USEC); do_div(data.quota, NSEC_PER_USEC); } rcu_read_lock(); ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data); rcu_read_unlock(); return ret; } static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft, struct cgroup_map_cb *cb) { struct task_group *tg = cgroup_tg(cgrp); struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; cb->fill(cb, "nr_periods", cfs_b->nr_periods); cb->fill(cb, "nr_throttled", cfs_b->nr_throttled); cb->fill(cb, "throttled_time", cfs_b->throttled_time); return 0; } #endif /* CONFIG_CFS_BANDWIDTH */ #endif /* CONFIG_FAIR_GROUP_SCHED */ #ifdef CONFIG_RT_GROUP_SCHED static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft, s64 val) { return sched_group_set_rt_runtime(cgroup_tg(cgrp), val); } static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft) { return sched_group_rt_runtime(cgroup_tg(cgrp)); } static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype, u64 rt_period_us) { return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us); } static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft) { return sched_group_rt_period(cgroup_tg(cgrp)); } #endif /* CONFIG_RT_GROUP_SCHED */ static struct cftype cpu_files[] = { { .name = "notify_on_migrate", .read_u64 = cpu_notify_on_migrate_read_u64, .write_u64 = cpu_notify_on_migrate_write_u64, }, #ifdef CONFIG_FAIR_GROUP_SCHED { .name = "shares", .read_u64 = cpu_shares_read_u64, .write_u64 = cpu_shares_write_u64, }, #endif #ifdef CONFIG_CFS_BANDWIDTH { .name = "cfs_quota_us", .read_s64 = cpu_cfs_quota_read_s64, .write_s64 = cpu_cfs_quota_write_s64, }, { .name = "cfs_period_us", .read_u64 = cpu_cfs_period_read_u64, .write_u64 = cpu_cfs_period_write_u64, }, { .name = "stat", .read_map = cpu_stats_show, }, #endif #ifdef CONFIG_RT_GROUP_SCHED { .name = "rt_runtime_us", .read_s64 = cpu_rt_runtime_read, .write_s64 = cpu_rt_runtime_write, }, { 
.name = "rt_period_us", .read_u64 = cpu_rt_period_read_uint, .write_u64 = cpu_rt_period_write_uint, }, #endif { } /* terminate */ }; struct cgroup_subsys cpu_cgroup_subsys = { .name = "cpu", .css_alloc = cpu_cgroup_css_alloc, .css_free = cpu_cgroup_css_free, .css_online = cpu_cgroup_css_online, .css_offline = cpu_cgroup_css_offline, .can_attach = cpu_cgroup_can_attach, .attach = cpu_cgroup_attach, .allow_attach = subsys_cgroup_allow_attach, .exit = cpu_cgroup_exit, .subsys_id = cpu_cgroup_subsys_id, .base_cftypes = cpu_files, .early_init = 1, }; #endif /* CONFIG_CGROUP_SCHED */ void dump_cpu_task(int cpu) { pr_info("Task dump for CPU %d:\n", cpu); sched_show_task(cpu_curr(cpu)); }
SlimRoms/kernel_cyanogen_msm8916
kernel/sched/core.c
C
gpl-2.0
257,636
/*****************************************************************************
 *
 * This program is free software ; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * $Id: cpu.c 585 2006-01-16 09:48:55Z picard $
 *
 * The Core Pocket Media Player
 * Copyright (c) 2004-2005 Gabor Kovacs
 *
 ****************************************************************************/

#include "../common.h"
#include "cpu.h"

/*
 * Runtime CPU detection: fills a cpudetect structure with the architecture
 * name, vendor/model strings, cache sizes and a CAPS_* bitmask of optional
 * instruction-set extensions (x86 MMX/SSE/SSE2/3DNow!, ARM 5E/XScale/WMMX,
 * MIPS VR41xx variants). The code path is selected at compile time via the
 * ARM / MIPS / SH3 / _M_IX86 macros.
 */

#ifdef ARM
/* Probe helpers implemented outside this file (assembly).
 * NOTE(review): assumed to execute ARMv5E / XScale instructions and report
 * whether they are supported — confirm against their definitions. */
extern int STDCALL CheckARM5E();
extern int STDCALL CheckARMXScale();
#endif

#ifndef SH3
/* Reads the platform CPU identification into p[0..3]
 * (CPUID leaf on x86; id registers on ARM/MIPS). Implemented elsewhere. */
extern void STDCALL GetCpuId(int,uint32_t*);

#if defined(TARGET_SYMBIAN) && !defined(ARM)
#define GetCpuId(a,b) /* not available on this target: output stays zeroed */
#endif

/*
 * SafeGetCpuId: zero the output, then attempt the id read inside a
 * TRY_BEGIN/TRY_END guard so a faulting probe leaves CpuId[] all-zero
 * instead of crashing. The read may require kernel mode on some targets.
 */
static NOINLINE void SafeGetCpuId(int Id, uint32_t* p)
{
	memset(p,0,4*sizeof(uint32_t));
	TRY_BEGIN
	{
		bool_t Mode = KernelMode(1);
		GetCpuId(Id,p);
		KernelMode(Mode);
	}
	TRY_END
}
#endif

/* Convenience accessor: run full detection and return only the CAPS_* mask. */
int CPUCaps()
{
	cpudetect p;
	CPUDetect(&p);
	return p.Caps;
}

/*
 * CPUDetect: populate *p. Always zeroes the structure first, so fields that
 * cannot be determined stay NULL/0. p->Caps is assigned exactly once at the
 * end from the accumulated Caps mask.
 */
void CPUDetect(cpudetect* p)
{
	int Caps = 0;
	uint32_t CpuId[4];

	memset(p,0,sizeof(cpudetect));

#ifdef ARM
	p->Arch = T("ARM");

	SafeGetCpuId(0,CpuId);
	if (CpuId[0])
	{
		/* Id read succeeded: decode the cache sizes from CpuId[1]
		 * (3-bit log2-encoded fields, 512-byte base). */
		p->ICache = 512 << ((CpuId[1] >> 6) & 7);
		p->DCache = 512 << ((CpuId[1] >> 18) & 7);
	}
	else
	{
#if !defined(TARGET_PALMOS) && !defined(TARGET_SYMBIAN)
		// id registers unreadable: probe the features directly by executing
		// instructions (only works if we can catch cpu exceptions)
		TRY_BEGIN
		{
			if (CheckARM5E())
			{
				int XScale;
				Caps |= CAPS_ARM_5E;
				XScale = CheckARMXScale();
				if (XScale)
				{
					/* fixed 32KB I+D caches assumed for XScale cores */
					p->ICache = p->DCache = 32768;
					Caps |= CAPS_ARM_XSCALE;
					if (XScale > 1) /* >1 — presumably a WMMX-capable core; see CheckARMXScale */
						Caps |= CAPS_ARM_WMMX;
				}
			}
		}
		TRY_END
#endif
	}

	/* Top byte of the main id register is the implementer code
	 * (an ASCII letter: 'T' = TI, 'A' = ARM Ltd, 'i' = Intel). */
	if ((CpuId[0] & 0xFF000000) == 0x54000000) //TI
	{
		p->Vendor = T("TI");
		Caps |= CAPS_ARM_GENERAL;
		switch ((CpuId[0] >> 4) & 0xFFF) /* primary part number */
		{
		case 0x915: p->Model = T("915T"); break;
		case 0x925: p->Model = T("925T"); break;
		case 0x926: p->Model = T("926T"); Caps |= CAPS_ARM_5E; break;
		}
	}
	else
	if ((CpuId[0] & 0xFF000000) == 0x41000000) //arm
	{
		Caps |= CAPS_ARM_GENERAL;
		switch ((CpuId[0] >> 4) & 0xFFF)
		{
		case 0x920: p->Model = T("920T"); break;
		case 0x922: p->Model = T("922T"); break;
		case 0x926: p->Model = T("926E"); Caps |= CAPS_ARM_5E; break;
		case 0x940: p->Model = T("940T"); break;
		case 0x946: p->Model = T("946E"); Caps |= CAPS_ARM_5E; break;
		case 0xA22: p->Model = T("1020E"); Caps |= CAPS_ARM_5E; break;
		}
	}
	else
	if ((CpuId[0] & 0xFF000000) == 0x69000000) //intel
	{
		p->Vendor = T("Intel");
		if ((CpuId[0] & 0xFF0000) == 0x050000) //intel arm5e
			Caps |= CAPS_ARM_5E|CAPS_ARM_XSCALE;
		if (((CpuId[0] >> 4) & 0xFFF) == 0xB11)
		{
			p->Model = T("SA1110"); /* StrongARM: no XScale extensions */
		}
		else
		{
			switch ((CpuId[0] >> 13) & 7)
			{
			case 0x2: Caps |= CAPS_ARM_WMMX; break;
			}
			switch ((CpuId[0] >> 4) & 31)
			{
			case 0x10: p->Model = T("PXA25x/26x"); break;
			case 0x11: p->Model = T("PXA27x"); break;
			case 0x12: p->Model = T("PXA210"); break;
			case 0x9: p->Model = T("PXA31x"); break;
			}
		}
	}

#elif defined(MIPS)
	SafeGetCpuId(0,CpuId);
	p->Arch = T("MIPS");
	/* NOTE(review): 0x0c in bits 8..15 is treated as the NEC VR41xx family
	 * implementer id — confirm against the PRId register documentation. */
	if (((CpuId[0] >> 8) & 255) == 0x0c)
	{
		if ((CpuId[0] & 0xF0) == 0x50)
		{
			Caps |= CAPS_MIPS_VR4110;
			p->Model = T("VR411X");
		}
		else
		{
			Caps |= CAPS_MIPS_VR4120;
			if ((CpuId[0] & 0xF0) == 0x80)
				p->Model = T("VR413X");
			else
				p->Model = T("VR412X");
		}
	}
#elif defined(SH3)
	CpuId[0] = 0; // avoid warning
	Caps = CpuId[0]; /* SH3: no optional capabilities detected */
	p->Arch = T("SH3");
#elif defined(_M_IX86)
	p->Arch = T("x86");
	/* CPUID leaf 0: CpuId[1]/[3]/[2] hold the EBX/EDX/ECX vendor string. */
	SafeGetCpuId(0,CpuId);
	if (CpuId[1] == 0x756e6547 && CpuId[3] == 0x49656e69 && CpuId[2] == 0x6c65746e)
	{
		/* "GenuineIntel" */
		p->Vendor = T("Intel");
Intel:
		/* CPUID leaf 1, EDX feature flags: bit 23 MMX, bit 25 SSE, bit 26
		 * SSE2. Other vendors goto here to fall back on the standard flags. */
		SafeGetCpuId(1,CpuId);
		if (CpuId[3] & 0x00800000)
		{
			Caps |= CAPS_X86_MMX;
			if (CpuId[3] & 0x02000000) Caps |= CAPS_X86_MMX2 | CAPS_X86_SSE;
			if (CpuId[3] & 0x04000000) Caps |= CAPS_X86_SSE2;
		}
	}
	else if (CpuId[1] == 0x68747541 && CpuId[3] == 0x69746e65 && CpuId[2] == 0x444d4163)
	{
		/* "AuthenticAMD": use extended leaf 0x80000001 EDX
		 * (bit 31 3DNow!, bit 22 AMD MMX extensions). */
		p->Vendor = T("AMD");
		SafeGetCpuId(0x80000000,CpuId);
		if (CpuId[0] < 0x80000001)
			goto Intel; /* no extended leaves: use standard flags instead */
		SafeGetCpuId(0x80000001,CpuId);
		if (CpuId[3] & 0x00800000)
		{
			Caps |= CAPS_X86_MMX;
			if (CpuId[3] & 0x80000000) Caps |= CAPS_X86_3DNOW;
			if (CpuId[3] & 0x00400000) Caps |= CAPS_X86_MMX2;
		}
	}
	else if (CpuId[1] == 0x746e6543 && CpuId[3] == 0x48727561 && CpuId[2] == 0x736c7561)
	{
		/* "CentaurHauls" (VIA C3) */
		p->Vendor = T("VIA C3");
		SafeGetCpuId(0x80000000,CpuId);
		if (CpuId[0] < 0x80000001)
			goto Intel;
		SafeGetCpuId(0x80000001,CpuId);
		if (CpuId[3] & (1<<31)) Caps |= CAPS_X86_3DNOW;
		if (CpuId[3] & (1<<23)) Caps |= CAPS_X86_MMX;
		if (CpuId[3] & (1<<24)) Caps |= CAPS_X86_MMX2;
	}
	else if (CpuId[1] == 0x69727943 && CpuId[3] == 0x736e4978 && CpuId[2] == 0x64616574)
	{
		/* "CyrixInstead" */
		p->Vendor = T("Cyrix");
		if (CpuId[0] != 2)
			goto Intel;
		SafeGetCpuId(0x80000001,CpuId);
		if (CpuId[3] & 0x00800000)
		{
			Caps |= CAPS_X86_MMX;
			if (CpuId[3] & 0x01000000) Caps |= CAPS_X86_MMX2;
		}
	}
#endif

	p->Caps = Caps;
}
xorkrus/tcpmp-revive
common/cpu/cpu.c
C
gpl-2.0
6,053
/******************************************************************************
 * Product: Adempiere ERP & CRM Smart Business Solution                       *
 * Copyright (C) 1999-2007 ComPiere, Inc. All Rights Reserved.                *
 * This program is free software, you can redistribute it and/or modify it    *
 * under the terms version 2 of the GNU General Public License as published   *
 * by the Free Software Foundation. This program is distributed in the hope   *
 * that it will be useful, but WITHOUT ANY WARRANTY, without even the implied *
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.           *
 * See the GNU General Public License for more details.                       *
 * You should have received a copy of the GNU General Public License along    *
 * with this program, if not, write to the Free Software Foundation, Inc.,    *
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.                     *
 * For the text or an alternative of this public license, you may reach us    *
 * ComPiere, Inc., 2620 Augustine Dr. #245, Santa Clara, CA 95054, USA        *
 * or via info@compiere.org or http://www.compiere.org/license.html           *
 *****************************************************************************/
/** Generated Model - DO NOT CHANGE */
// NOTE(review): produced by the Adempiere model generator; edits other than
// comments would be lost on the next regeneration.
package org.compiere.model;

import java.sql.ResultSet;
import java.util.Properties;
import org.compiere.util.KeyNamePair;

/** Generated Model for C_JobCategory
 *  @author Adempiere (generated)
 *  @version Release 3.8.0 - $Id$ */
public class X_C_JobCategory extends PO implements I_C_JobCategory, I_Persistent
{

	/**
	 *
	 */
	private static final long serialVersionUID = 20150101L;

    /** Standard Constructor */
    public X_C_JobCategory (Properties ctx, int C_JobCategory_ID, String trxName)
    {
      super (ctx, C_JobCategory_ID, trxName);
      /** if (C_JobCategory_ID == 0)
        {
			setC_JobCategory_ID (0);
			setName (null);
        } */
    }

    /** Load Constructor */
    public X_C_JobCategory (Properties ctx, ResultSet rs, String trxName)
    {
      super (ctx, rs, trxName);
    }

    /** AccessLevel
      * @return 2 - Client
      */
    protected int get_AccessLevel()
    {
      // accessLevel is declared on the I_C_JobCategory interface.
      return accessLevel.intValue();
    }

    /** Load Meta Data */
    protected POInfo initPO (Properties ctx)
    {
      POInfo poi = POInfo.getPOInfo (ctx, Table_ID, get_TrxName());
      return poi;
    }

    public String toString()
    {
      StringBuffer sb = new StringBuffer ("X_C_JobCategory[")
        .append(get_ID()).append("]");
      return sb.toString();
    }

	/** Set Position Category.
		@param C_JobCategory_ID
		Job Position Category
	  */
	public void setC_JobCategory_ID (int C_JobCategory_ID)
	{
		// IDs below 1 are stored as SQL NULL; ValueNoCheck because this is
		// the key column.
		if (C_JobCategory_ID < 1)
			set_ValueNoCheck (COLUMNNAME_C_JobCategory_ID, null);
		else
			set_ValueNoCheck (COLUMNNAME_C_JobCategory_ID, Integer.valueOf(C_JobCategory_ID));
	}

	/** Get Position Category.
		@return Job Position Category (0 when the column is NULL)
	  */
	public int getC_JobCategory_ID ()
	{
		Integer ii = (Integer)get_Value(COLUMNNAME_C_JobCategory_ID);
		if (ii == null)
			 return 0;
		return ii.intValue();
	}

	/** Set Description.
		@param Description
		Optional short description of the record
	  */
	public void setDescription (String Description)
	{
		set_Value (COLUMNNAME_Description, Description);
	}

	/** Get Description.
		@return Optional short description of the record
	  */
	public String getDescription ()
	{
		return (String)get_Value(COLUMNNAME_Description);
	}

	/** Set Comment/Help.
		@param Help
		Comment or Hint
	  */
	public void setHelp (String Help)
	{
		set_Value (COLUMNNAME_Help, Help);
	}

	/** Get Comment/Help.
		@return Comment or Hint
	  */
	public String getHelp ()
	{
		return (String)get_Value(COLUMNNAME_Help);
	}

	/** Set Name.
		@param Name
		Alphanumeric identifier of the entity
	  */
	public void setName (String Name)
	{
		set_Value (COLUMNNAME_Name, Name);
	}

	/** Get Name.
		@return Alphanumeric identifier of the entity
	  */
	public String getName ()
	{
		return (String)get_Value(COLUMNNAME_Name);
	}

    /** Get Record ID/ColumnName
        @return ID/ColumnName pair (record ID plus the Name column)
      */
    public KeyNamePair getKeyNamePair()
    {
        return new KeyNamePair(get_ID(), getName());
    }
}
armenrz/adempiere
base/src/org/compiere/model/X_C_JobCategory.java
Java
gpl-2.0
4,312
/* $Id: os_linux_kernel.h 369517 2012-07-01 17:28:57Z file $ */
/*
 * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
 * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307  USA
 */
#ifndef __PJ_COMPAT_OS_LINUX_KERNEL_H__
#define __PJ_COMPAT_OS_LINUX_KERNEL_H__

/**
 * @file os_linux.h
 * @brief Describes Linux operating system specifics.
 *
 * Compile-time configuration of pjlib when built as a Linux kernel
 * module: no user-space headers, no libc malloc, no floating point.
 */
#define PJ_OS_NAME		    "linux-module"

/* Availability of standard/user-space headers. Almost everything is 0
 * because user-space headers cannot be included in kernel code. */
#define PJ_HAS_ARPA_INET_H	    0
#define PJ_HAS_ASSERT_H		    0
#define PJ_HAS_CTYPE_H		    0
#define PJ_HAS_ERRNO_H		    0
#define PJ_HAS_LINUX_SOCKET_H	    1
#define PJ_HAS_MALLOC_H		    0
#define PJ_HAS_NETDB_H		    0
#define PJ_HAS_NETINET_IN_H	    0
#define PJ_HAS_SETJMP_H		    0
#define PJ_HAS_STDARG_H		    1
#define PJ_HAS_STDDEF_H		    0
#define PJ_HAS_STDIO_H		    0
#define PJ_HAS_STDLIB_H		    0
#define PJ_HAS_STRING_H		    0
#define PJ_HAS_SYS_IOCTL_H	    0
#define PJ_HAS_SYS_SELECT_H	    0
#define PJ_HAS_SYS_SOCKET_H	    0
#define PJ_HAS_SYS_TIME_H	    0
#define PJ_HAS_SYS_TIMEB_H	    0
#define PJ_HAS_SYS_TYPES_H	    0
#define PJ_HAS_TIME_H		    0
#define PJ_HAS_UNISTD_H		    0

#define PJ_HAS_MSWSOCK_H	    0
#define PJ_HAS_WINSOCK_H	    0
#define PJ_HAS_WINSOCK2_H	    0

#define PJ_SOCK_HAS_INET_ATON	    0

/* Set 1 if native sockaddr_in has sin_len member.
 * Default: 0
 */
#define PJ_SOCKADDR_HAS_LEN	    0

/* When this macro is set, getsockopt(SOL_SOCKET, SO_ERROR) will return
 * the status of non-blocking connect() operation.
 */
#define PJ_HAS_SO_ERROR		    1

/**
 * If this macro is set, it tells select I/O Queue that select() needs to
 * be given correct value of nfds (i.e. largest fd + 1). This requires
 * select ioqueue to re-scan the descriptors on each registration and
 * unregistration.
 * If this macro is not set, then ioqueue will always give FD_SETSIZE for
 * nfds argument when calling select().
 *
 * Default: 0
 */
#define PJ_SELECT_NEEDS_NFDS	    0

/* Is errno a good way to retrieve OS errors?
 * (probably no for linux kernel)
 * If you answer no here, you'll need to tell pjlib how to get OS
 * error (a compile error will tell you exactly where)
 */
#define PJ_HAS_ERRNO_VAR	    0

/* This value specifies the value set in errno by the OS when a non-blocking
 * socket recv() can not return immediate daata.
 */
#define PJ_BLOCKING_ERROR_VAL	    EAGAIN

/* This value specifies the value set in errno by the OS when a non-blocking
 * socket connect() can not get connected immediately.
 */
#define PJ_BLOCKING_CONNECT_ERROR_VAL	EINPROGRESS

#ifndef PJ_HAS_THREADS
#  define PJ_HAS_THREADS	    (1)
#endif

/*
 * Declare __FD_SETSIZE now before including <linux*>.
 */
#define __FD_SETSIZE		    PJ_IOQUEUE_MAX_HANDLES

#define NULL			    ((void*)0)

#include <linux/module.h>	/* Needed by all modules */
#include <linux/kernel.h>	/* Needed for KERN_INFO */

/* Kernel modules must export their public symbols explicitly. */
#define __PJ_EXPORT_SYMBOL(a)	    EXPORT_SYMBOL(a);

/*
 * Override features.
 */
#define PJ_HAS_FLOATING_POINT	    0
#define PJ_HAS_MALLOC		    0
#define PJ_HAS_SEMAPHORE	    0
#define PJ_HAS_EVENT_OBJ	    0
#define PJ_HAS_HIGH_RES_TIMER	    1
#ifndef PJ_OS_HAS_CHECK_STACK
#   define PJ_OS_HAS_CHECK_STACK    0
#endif
#define PJ_TERM_HAS_COLOR	    0
#define PJ_NATIVE_STRING_IS_UNICODE 0

#define PJ_ATOMIC_VALUE_TYPE	    int

#define PJ_THREAD_DESC_SIZE	    128

/* If 1, use Read/Write mutex emulation for platforms that don't support it */
#define PJ_EMULATE_RWMUTEX	    0

/* If 1, pj_thread_create() should enforce the stack size when creating
 * threads.
 * Default: 0 (let OS decide the thread's stack size).
 */
#define PJ_THREAD_SET_STACK_SIZE    0

/* If 1, pj_thread_create() should allocate stack from the pool supplied.
 * Default: 0 (let OS allocate memory for thread's stack).
 */
#define PJ_THREAD_ALLOCATE_STACK    0

#endif	/* __PJ_COMPAT_OS_LINUX_KERNEL_H__ */
fluentstream/asterisk-p2p
res/pjproject/pjlib/include/pj/compat/os_linux_kernel.h
C
gpl-2.0
4,540
.nav-holder .fusion-navbar-nav > li > ul, .nav-holder .fusion-navbar-nav > li > div, .nav-holder .fusion-navbar-nav .fusion-dropdown-menu ul, .top-menu li > div, .top-menu li > .sub-menu { display: none; } .nav-holder .fusion-navbar-nav li:hover > .sub-menu, .nav-holder .fusion-navbar-nav li:hover > div, .top-menu li:hover > .sub-menu, .top-menu li:hover > div, .top-menu .cart:hover .cart-contents { display: block; } .fusion-animated, .animated { visibility:visible;} .image .image-extras .image-extras-content{} .search input{padding-left:5px;} #header .tagline{margin-top:3px !important;} .has-sidebar .summary.entry-summary{width:318px;} .has-sidebar .woocommerce-tabs .panel { width: 394px; } .woocommerce .social-share li.email{border-right:0;} .star-rating { width: 5.2em !important;} .avada-select-parent .select-arrow, .gravity-select-parent .select-arrow, .wpcf7-select-parent .select-arrow, .select-arrow { background: #fff; } .search input,#searchform input {padding-left:10px;}
ftopolovec/proart
wp-content/themes/Avada/css/ie.css
CSS
gpl-2.0
1,002
import sys def inputText(): input = sys.stdin.readline() return input.strip() def inputChoices(list, backcmd = "b", backtext = "back"): repeat = True while repeat: repeat = False count = 0 for item in list: print count, "-", item count += 1 print backcmd, "-", backtext input = inputText() if input == backcmd: return None action = int(input) if action >= len(list): repeat = True return action
popazerty/beyonwiz-4.1
tools/host_tools/FormatConverter/input.py
Python
gpl-2.0
432
/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef __STORAGE_TEST_H__ #define __STORAGE_TEST_H__ #include <commonlib/sd_mmc_ctrlr.h> #include <device/device.h> #include <device/pci.h> #include <timer.h> #ifdef __SIMPLE_DEVICE__ uint32_t storage_test_init(pci_devfn_t dev, uint32_t *previous_bar, uint16_t *previous_command); void storage_test(uint32_t bar, int full_initialization); void storage_test_complete(pci_devfn_t dev, uint32_t previous_bar, uint16_t previous_command); #else uint32_t storage_test_init(struct device *dev, uint32_t *previous_bar, uint16_t *previous_command); void storage_test(uint32_t bar, int full_initialization); void storage_test_complete(struct device *dev, uint32_t previous_bar, uint16_t previous_command); #endif /* Logging support */ struct log_entry { struct mono_time time; struct mmc_command cmd; int cmd_issued; int ret; uint32_t response_entries; uint32_t response[4]; }; #define LOG_ENTRIES 256 #endif /* __STORAGE_TEST_H__ */
pcengines/coreboot
src/soc/intel/quark/include/soc/storage_test.h
C
gpl-2.0
985
/* * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ package com.oracle.graal.jtt.bytecode; import com.oracle.graal.jtt.*; import org.junit.*; /* */ public class BC_putstatic extends JTTTest { private static int field; public static int test(int a) { field = a; return field; } @Test public void run0() throws Throwable { runTest("test", 0); } @Test public void run1() throws Throwable { runTest("test", 1); } @Test public void run2() throws Throwable { runTest("test", 2); } @Test public void run3() throws Throwable { runTest("test", 3); } @Test public void run4() throws Throwable { runTest("test", -4); } }
arodchen/MaxSim
graal/graal/com.oracle.graal.jtt/src/com/oracle/graal/jtt/bytecode/BC_putstatic.java
Java
gpl-2.0
1,754
/* Copyright (C) 2013-2016, The Regents of The University of Michigan. All rights reserved. This software was developed in the APRIL Robotics Lab under the direction of Edwin Olson, ebolson@umich.edu. This software may be available under alternative licensing terms; contact the address above. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied, of the Regents of The University of Michigan. 
*/ #ifndef _MATHUTIL_H #define _MATHUTIL_H #include <math.h> #include <float.h> #include <stdlib.h> #include <stdint.h> #include <assert.h> #include <string.h> // memcpy #ifdef __cplusplus //extern "C" { #endif #if defined(_MSC_VER) #define inline __inline #endif #ifndef M_TWOPI # define M_TWOPI 6.2831853071795862319959 /* 2*pi */ #endif #ifndef M_PI # define M_PI 3.141592653589793238462643383279502884196 #endif #define to_radians(x) ( (x) * (M_PI / 180.0 )) #define to_degrees(x) ( (x) * (180.0 / M_PI )) #ifndef max #define max(A, B) (A < B ? B : A) #endif #ifndef min #define min(A, B) (A < B ? A : B) #endif /* DEPRECATE, threshold meaningless without context. static inline int dequals(double a, double b) { double thresh = 1e-9; return (fabs(a-b) < thresh); } */ static inline int dequals_mag(double a, double b, double thresh) { return (fabs(a-b) < thresh); } static inline int isq(int v) { return v*v; } static inline float fsq(float v) { return v*v; } static inline double sq(double v) { return v*v; } static inline double sgn(double v) { return (v>=0) ? 
1 : -1; } // random number between [0, 1) static inline float randf() { return ((float) rand()) / (RAND_MAX + 1.0f); } static inline float signed_randf() { return randf()*2 - 1; } // return a random integer between [0, bound) static inline int irand(int bound) { int v = (int) (randf()*bound); if (v == bound) return (bound-1); //assert(v >= 0); //assert(v < bound); return v; } /** Map vin to [0, 2*PI) **/ static inline double mod2pi_positive(double vin) { return vin - M_TWOPI * floor(vin / M_TWOPI); } /** Map vin to [-PI, PI) **/ static inline double mod2pi(double vin) { return mod2pi_positive(vin + M_PI) - M_PI; } /** Return vin such that it is within PI degrees of ref **/ static inline double mod2pi_ref(double ref, double vin) { return ref + mod2pi(vin - ref); } /** Map vin to [0, 360) **/ static inline double mod360_positive(double vin) { return vin - 360 * floor(vin / 360); } /** Map vin to [-180, 180) **/ static inline double mod360(double vin) { return mod360_positive(vin + 180) - 180; } static inline int theta_to_int(double theta, int max) { theta = mod2pi_ref(M_PI, theta); int v = (int) (theta / M_TWOPI * max); if (v == max) v = 0; assert (v >= 0 && v < max); return v; } static inline int imin(int a, int b) { return (a < b) ? a : b; } static inline int imax(int a, int b) { return (a > b) ? a : b; } static inline int64_t imin64(int64_t a, int64_t b) { return (a < b) ? a : b; } static inline int64_t imax64(int64_t a, int64_t b) { return (a > b) ? 
a : b; } static inline int iclamp(int v, int minv, int maxv) { return imax(minv, imin(v, maxv)); } static inline double dclamp(double a, double min, double max) { if (a < min) return min; if (a > max) return max; return a; } static inline int fltcmp (float f1, float f2) { float epsilon = f1-f2; if (epsilon < 0.0) return -1; else if (epsilon > 0.0) return 1; else return 0; } static inline int dblcmp (double d1, double d2) { double epsilon = d1-d2; if (epsilon < 0.0) return -1; else if (epsilon > 0.0) return 1; else return 0; } #ifdef __cplusplus //} #endif #endif
MarcPouliquenInria/visp
3rdparty/apriltag/common/math_util.h
C
gpl-2.0
5,181
/* Hash tables for the CPP library.
   Copyright (C) 1986, 1987, 1989, 1992, 1993, 1994, 1995, 1996, 1998,
   1999, 2000, 2001, 2002 Free Software Foundation, Inc.
   Written by Per Bothner, 1994.
   Based on CCCP program by Paul Rubin, June 1986
   Adapted to ANSI C, Richard Stallman, Jan 1987

This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

In other words, you are welcome to use, share and improve this program.
You are forbidden to forbid anyone else to use, share and improve
what you give them.  Help stamp out software-hoarding!  */

#include "config.h"
#include "system.h"
#include "cpplib.h"
#include "internal.h"

static cpp_hashnode *alloc_node (hash_table *);

/* Return an identifier node for hashtable.c.  Used by cpplib except
   when integrated with the C front ends.  Allocates the node on the
   reader's identifier obstack and zero-fills every field.  */
static cpp_hashnode *
alloc_node (hash_table *table)
{
  cpp_hashnode *node;

  node = obstack_alloc (&table->pfile->hash_ob, sizeof (cpp_hashnode));
  memset (node, 0, sizeof (cpp_hashnode));
  return node;
}

/* Set up the identifier hash table.  Use TABLE if non-null, otherwise
   create our own.  When we create our own, we also take ownership of it
   (our_hashtable), so _cpp_destroy_hashtable knows to free it later.  */
void
_cpp_init_hashtable (cpp_reader *pfile, hash_table *table)
{
  struct spec_nodes *s;

  if (table == NULL)
    {
      pfile->our_hashtable = 1;
      table = ht_create (13);	/* 8K (=2^13) entries.  */
      table->alloc_node = (hashnode (*) (hash_table *)) alloc_node;
      /* Obstack backing the identifier nodes allocated by alloc_node.  */
      _obstack_begin (&pfile->hash_ob, 0, 0,
		      (void *(*) (long)) xmalloc,
		      (void (*) (void *)) free);
    }

  table->pfile = pfile;
  pfile->hash_table = table;

  /* Now we can initialize things that use the hash table.  */
  _cpp_init_directives (pfile);
  _cpp_init_internal_pragmas (pfile);

  /* Cache the nodes for identifiers the preprocessor treats specially,
     so later lookups are pointer comparisons.  */
  s = &pfile->spec_nodes;
  s->n_defined		= cpp_lookup (pfile, DSC("defined"));
  s->n_true		= cpp_lookup (pfile, DSC("true"));
  s->n_false		= cpp_lookup (pfile, DSC("false"));
  s->n__VA_ARGS__       = cpp_lookup (pfile, DSC("__VA_ARGS__"));
  /* NOTE(review): NODE_DIAGNOSTIC presumably makes the lexer warn when
     __VA_ARGS__ appears outside a variadic macro -- confirm in the lexer.  */
  s->n__VA_ARGS__->flags |= NODE_DIAGNOSTIC;
}

/* Tear down the identifier hash table.  Only frees the table and the
   node obstack when this reader created them itself.  */
void
_cpp_destroy_hashtable (cpp_reader *pfile)
{
  if (pfile->our_hashtable)
    {
      ht_destroy (pfile->hash_table);
      obstack_free (&pfile->hash_ob, 0);
    }
}

/* Returns the hash entry for the STR of length LEN, creating one
   if necessary.  */
cpp_hashnode *
cpp_lookup (cpp_reader *pfile, const unsigned char *str, unsigned int len)
{
  /* ht_lookup cannot return NULL.  */
  return CPP_HASHNODE (ht_lookup (pfile->hash_table, str, len, HT_ALLOC));
}

/* Determine whether the str STR, of length LEN, is a defined macro.
   Does not insert STR into the table if it is absent.  */
int
cpp_defined (cpp_reader *pfile, const unsigned char *str, int len)
{
  cpp_hashnode *node;

  node = CPP_HASHNODE (ht_lookup (pfile->hash_table, str, len, HT_NO_INSERT));

  /* If it's of type NT_MACRO, it cannot be poisoned.  */
  return node && node->type == NT_MACRO;
}

/* For all nodes in the hashtable, callback CB with parameters PFILE,
   the node, and V.  */
void
cpp_forall_identifiers (cpp_reader *pfile, cpp_cb cb, void *v)
{
  /* We don't need a proxy since the hash table's identifier comes first
     in cpp_hashnode.  */
  ht_forall (pfile->hash_table, (ht_cb) cb, v);
}
aosm/gcc_40
libcpp/identifiers.c
C
gpl-2.0
3,763
/* SPDX-License-Identifier: LGPL-2.1+ */ #pragma once /*** This file is part of systemd. Copyright 2016 Lennart Poettering systemd is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. systemd is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with systemd; If not, see <http://www.gnu.org/licenses/>. ***/ #include "macro.h" typedef struct LoopDevice LoopDevice; /* Some helpers for setting up loopback block devices */ struct LoopDevice { int fd; int nr; char *node; bool relinquished; }; int loop_device_make(int fd, int open_flags, LoopDevice **ret); int loop_device_make_by_path(const char *path, int open_flags, LoopDevice **ret); LoopDevice* loop_device_unref(LoopDevice *d); DEFINE_TRIVIAL_CLEANUP_FUNC(LoopDevice*, loop_device_unref); void loop_device_relinquish(LoopDevice *d);
Mic92/systemd
src/shared/loop-util.h
C
gpl-2.0
1,293
package com.iot.nero.parent_app.exception;

import com.iot.nero.exception.BaseException;

/**
 * Exception used by the parent_app module to report a client-online
 * condition (exact semantics are defined by the code that throws it).
 *
 * Author: neroyang (nerosoft@outlook.com)
 * Date: 2017/6/30, 10:40 AM
 */
public class ClientOnlineException extends BaseException {

    /**
     * Creates the exception with a detail message only.
     *
     * @param message human-readable description of the error
     */
    public ClientOnlineException(String message) {
        super(message);
    }

    /**
     * Creates the exception with a detail message and the underlying cause.
     *
     * @param message human-readable description of the error
     * @param cause   the exception that triggered this one
     */
    public ClientOnlineException(String message, Throwable cause) {
        super(message, cause);
    }
}
ENERROR/iot_cloud
parent_app/src/main/java/com/iot/nero/parent_app/exception/ClientOnlineException.java
Java
gpl-2.0
438
/*
 * java-gnome, a UI library for writing GTK and GNOME programs from Java!
 *
 * Copyright © 2007-2010 Operational Dynamics Consulting, Pty Ltd
 *
 * The code in this file, and the program it is a part of, is made available
 * to you by its authors as open source software: you can redistribute it
 * and/or modify it under the terms of the GNU General Public License version
 * 2 ("GPL") as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GPL for more details.
 *
 * You should have received a copy of the GPL along with this program. If not,
 * see http://www.gnu.org/licenses/. The authors of this program may be
 * contacted through http://java-gnome.sourceforge.net/.
 *
 * Linking this library statically or dynamically with other modules is making
 * a combined work based on this library. Thus, the terms and conditions of
 * the GPL cover the whole combination. As a special exception (the
 * "Classpath Exception"), the copyright holders of this library give you
 * permission to link this library with independent modules to produce an
 * executable, regardless of the license terms of these independent modules,
 * and to copy and distribute the resulting executable under terms of your
 * choice, provided that you also meet, for each linked independent module,
 * the terms and conditions of the license of that module. An independent
 * module is a module which is not derived from or based on this library. If
 * you modify this library, you may extend the Classpath Exception to your
 * version of the library, but you are not obligated to do so. If you do not
 * wish to do so, delete this exception statement from your version.
 */
package org.gnome.pango;

import org.gnome.glib.Object;

/*
 * FIXME this is a placeholder stub for what will become the public API for
 * this type. Replace this comment with appropriate javadoc including author
 * and since tags. Note that the class may need to be made abstract, implement
 * interfaces, or even have its parent changed. No API stability guarantees
 * are made about this class until it has been reviewed by a hacker and this
 * comment has been replaced.
 */
public class FontFamily extends Object
{
    /*
     * Wraps an existing native object identified by its GObject pointer;
     * presumably invoked only by generated binding code, never by
     * applications directly (standard java-gnome pattern -- confirm once
     * the public API is fleshed out).
     */
    protected FontFamily(long pointer) {
        super(pointer);
    }
}
severinh/java-gnome
src/bindings/org/gnome/pango/FontFamily.java
Java
gpl-2.0
2,442
/* SPDX-License-Identifier: GPL-2.0-only */

#include <cbfs.h>
#include <string.h>
#include <soc/qupv3_config_common.h>
#include <console/console.h>
#include <soc/qup_se_handlers_common.h>
#include <soc/qcom_qup_se.h>
#include <soc/addressmap.h>

/* Cached cbfs mappings of the per-protocol SE firmware blobs, so each
 * firmware is mapped at most once even when several SEs use it. */
static struct elf_se_hdr *fw_list[SE_PROTOCOL_MAX];

/*
 * Load the GENI firmware for 'protocol' into serial engine 'bus' and
 * initialize the engine for the requested transfer 'mode' (GSI, FIFO,
 * CPU_DMA or MIXED). Dies on invalid protocol or missing firmware.
 * Section references ("HPG x.y.z") follow the Qualcomm hardware
 * programming guide.
 */
void qupv3_se_fw_load_and_init(unsigned int bus, unsigned int protocol, unsigned int mode)
{
	uint32_t i;
	uint32_t reg_value;
	const uint8_t *cfg_idx_arr;
	const uint32_t *cfg_val_arr;
	const uint32_t *fw_val_arr;
	struct elf_se_hdr *hdr;
	struct qup_regs *regs = qup[bus].regs;
	static const char * const filename[] = {
		[SE_PROTOCOL_SPI] = "fallback/spi_fw",
		[SE_PROTOCOL_UART] = "fallback/uart_fw",
		[SE_PROTOCOL_I2C] = "fallback/i2c_fw",
	};

	if (protocol >= SE_PROTOCOL_MAX || !filename[protocol])
		die("*ERROR* * INVALID PROTOCOL ***\n");

	if (!fw_list[protocol]) {
		fw_list[protocol] = cbfs_map(filename[protocol], NULL);
		if (!fw_list[protocol])
			die("*ERROR* * cbfs_map failed ***\n");
	}
	hdr = fw_list[protocol];
	/* FIX: terminate assert() with a semicolon (the other asserts in this
	 * file already do); the bare form only compiled because coreboot's
	 * ASSERT macro expands to a brace block. */
	assert(hdr->magic == SEFW_MAGIC_HEADER);

	/* Config register index/value tables and firmware image, all offsets
	 * relative to the blob header. */
	cfg_idx_arr = (const uint8_t *)hdr + hdr->cfg_idx_offset;
	cfg_val_arr = (const uint32_t *)((uint8_t *)hdr + hdr->cfg_val_offset);
	fw_val_arr = (const uint32_t *)((uint8_t *)hdr + hdr->fw_offset);

	/* Unlock SE for FW loading */
	write32(&regs->se_geni_fw_multilock_protns, 0x0);
	write32(&regs->se_geni_fw_multilock_msa, 0x0);

	/* First, ensure GENI FW is disabled */
	write32(&regs->geni_output_ctrl, 0x0);
	clrbits_le32(&regs->geni_dfs_if_cfg, GENI_DFS_IF_CFG_DFS_IF_EN_BMSK);
	setbits_le32(&regs->geni_cgc_ctrl, GENI_CGC_CTRL_PROG_RAM_SCLK_OFF_BMSK
					| GENI_CGC_CTRL_PROG_RAM_HCLK_OFF_BMSK);
	write32(&regs->se_geni_clk_ctrl, 0x0);
	clrbits_le32(&regs->geni_cgc_ctrl, GENI_CGC_CTRL_PROG_RAM_SCLK_OFF_BMSK
					| GENI_CGC_CTRL_PROG_RAM_HCLK_OFF_BMSK);

	/* HPG section 3.1.7.1 */
	if (protocol != SE_PROTOCOL_UART) {
		setbits_le32(&regs->geni_dfs_if_cfg, GENI_DFS_IF_CFG_DFS_IF_EN_BMSK);
		/* configure clock dfsr */
		clock_configure_dfsr(bus);
	}

	/* HPG section 3.1.7.2 */
	/* No Init Required */

	/* HPG section 3.1.7.3 */
	write32(&regs->dma_general_cfg,
		DMA_GENERAL_CFG_AHB_SEC_SLV_CLK_CGC_ON_BMSK
		| DMA_GENERAL_CFG_DMA_AHB_SLV_CLK_CGC_ON_BMSK
		| DMA_GENERAL_CFG_DMA_TX_CLK_CGC_ON_BMSK
		| DMA_GENERAL_CFG_DMA_RX_CLK_CGC_ON_BMSK);
	write32(&regs->geni_cgc_ctrl, DEFAULT_CGC_EN);

	/* HPG section 3.1.7.4 */
	write32(&regs->geni_init_cfg_revision, hdr->cfg_version);
	write32(&regs->geni_s_init_cfg_revision, hdr->cfg_version);
	/* Highest index in the table bounds the register window touched below. */
	assert(cfg_idx_arr[hdr->cfg_size_in_items - 1] * sizeof(uint32_t)
						<= MAX_OFFSET_CFG_REG);
	for (i = 0; i < hdr->cfg_size_in_items; i++)
		write32(&regs->geni_cfg_reg0 + cfg_idx_arr[i], cfg_val_arr[i]);

	/* HPG section 3.1.7.9 */
	/* non-UART configuration, UART driver can configure as desired for UART */
	write32(&regs->geni_rx_rfr_watermark_reg, FIFO_DEPTH - 2);

	/* HPG section 3.1.7.5 */
	/* Don't change any SPI polarity, client driver will handle this */
	setbits_le32(&regs->geni_output_ctrl, DEFAULT_IO_OUTPUT_CTRL_MSK);

	/* HPG section 3.1.7.6 */
	reg_value = read32(&regs->geni_dma_mode_en);
	if (mode == GSI) {
		reg_value |= GENI_DMA_MODE_EN_GENI_DMA_MODE_EN_BMSK;
		write32(&regs->geni_dma_mode_en, reg_value);
		write32(&regs->se_irq_en, 0x0);
		write32(&regs->se_gsi_event_en, SE_GSI_EVENT_EN_BMSK);
	} else if (mode == FIFO) {
		reg_value &= ~GENI_DMA_MODE_EN_GENI_DMA_MODE_EN_BMSK;
		write32(&regs->geni_dma_mode_en, reg_value);
		write32(&regs->se_irq_en, SE_IRQ_EN_RMSK);
		write32(&regs->se_gsi_event_en, 0x0);
	} else if (mode == CPU_DMA) {
		reg_value |= GENI_DMA_MODE_EN_GENI_DMA_MODE_EN_BMSK;
		write32(&regs->geni_dma_mode_en, reg_value);
		write32(&regs->se_irq_en, SE_IRQ_EN_RMSK);
		write32(&regs->se_gsi_event_en, 0x0);
	}

	/* HPG section 3.1.7.7 */
	write32(&regs->geni_m_irq_enable, M_COMMON_GENI_M_IRQ_EN);
	reg_value = S_CMD_OVERRUN_EN | S_ILLEGAL_CMD_EN | S_CMD_CANCEL_EN
		| S_CMD_ABORT_EN | S_GP_IRQ_0_EN | S_GP_IRQ_1_EN
		| S_GP_IRQ_2_EN | S_GP_IRQ_3_EN | S_RX_FIFO_WR_ERR_EN
		| S_RX_FIFO_RD_ERR_EN;
	write32(&regs->geni_s_irq_enable, reg_value);

	/* HPG section 3.1.7.8 */
	/* GPI/DMA mode */
	reg_value = DMA_TX_IRQ_EN_SET_RESET_DONE_EN_SET_BMSK
		| DMA_TX_IRQ_EN_SET_SBE_EN_SET_BMSK
		| DMA_TX_IRQ_EN_SET_DMA_DONE_EN_SET_BMSK;
	write32(&regs->dma_tx_irq_en_set, reg_value);
	reg_value = DMA_RX_IRQ_EN_SET_FLUSH_DONE_EN_SET_BMSK
		| DMA_RX_IRQ_EN_SET_RESET_DONE_EN_SET_BMSK
		| DMA_RX_IRQ_EN_SET_SBE_EN_SET_BMSK
		| DMA_RX_IRQ_EN_SET_DMA_DONE_EN_SET_BMSK;
	write32(&regs->dma_rx_irq_en_set, reg_value);

	/* HPG section 3.1.7.10 */
	/* NOTE(review): '<<' binds tighter than '&', so this evaluates as
	 * fw_version & (0xFF << FW_REV_VERSION_SHFT). Harmless if the shift
	 * is 0, but confirm the intent was not
	 * (fw_version & 0xFF) << FW_REV_VERSION_SHFT. */
	reg_value = (hdr->serial_protocol << FW_REV_PROTOCOL_SHFT)
		| (hdr->fw_version & 0xFF << FW_REV_VERSION_SHFT);
	write32(&regs->se_geni_fw_revision, reg_value);
	reg_value = (hdr->serial_protocol << FW_REV_PROTOCOL_SHFT)
		| (hdr->fw_version & 0xFF << FW_REV_VERSION_SHFT);
	write32(&regs->se_s_fw_revision, reg_value);

	assert(hdr->fw_size_in_items <= SIZE_GENI_FW_RAM);
	memcpy((&regs->se_geni_cfg_ramn), fw_val_arr,
	       hdr->fw_size_in_items * sizeof(uint32_t));

	/* HPG section 3.1.7.12 */
	write32(&regs->geni_force_default_reg, 0x1);
	setbits_le32(&regs->geni_cgc_ctrl, GENI_CGC_CTRL_PROG_RAM_SCLK_OFF_BMSK
					| GENI_CGC_CTRL_PROG_RAM_HCLK_OFF_BMSK);
	setbits_le32(&regs->se_geni_clk_ctrl, GENI_CLK_CTRL_SER_CLK_SEL_BMSK);
	clrbits_le32(&regs->geni_cgc_ctrl, (GENI_CGC_CTRL_PROG_RAM_SCLK_OFF_BMSK
					| GENI_CGC_CTRL_PROG_RAM_HCLK_OFF_BMSK));

	/* HPG section 3.1.7.13 */
	/* GSI/DMA mode */
	setbits_le32(&regs->se_dma_if_en, DMA_IF_EN_DMA_IF_EN_BMSK);

	/* HPG section 3.1.7.14 */
	reg_value = read32(&regs->se_fifo_if_disable);
	if ((mode == MIXED) || (mode == FIFO))
		reg_value &= ~FIFO_IF_DISABLE;
	else
		reg_value |= FIFO_IF_DISABLE;
	write32(&regs->se_fifo_if_disable, reg_value);
	write32(&regs->se_geni_clk_ctrl, 0x1);

	/* Lock SE from FW loading */
	write32(&regs->se_geni_fw_multilock_protns, 0x1);
	write32(&regs->se_geni_fw_multilock_msa, 0x1);
}

/*
 * Load the GSI (DMA engine) firmware into the controller whose register
 * block starts at 'addr': program the IEP register table, copy the firmware
 * into instruction RAM, enable the MCS, and spin until the engine reports
 * its scratch handshake value is <= 1.
 */
void gpi_firmware_load(int addr)
{
	uint32_t i;
	uint32_t regVal = 0;
	struct gsi_fw_hdr *gsi_hdr;
	struct gsi_fw_iep *fwIep;
	struct gsi_fw_iram *fwIRam;
	struct gsi_regs *regs = (struct gsi_regs *)(uintptr_t) addr;
	static const char * const filename = "fallback/gsi_fw";

	/* Assign firmware header base */
	gsi_hdr = cbfs_map(filename, NULL);
	if (!gsi_hdr)
		die("*ERROR* * cbfs_map() failed ***\n");
	/* FIX: terminate assert() with a semicolon (see note above). */
	assert(gsi_hdr->magic == GSI_FW_MAGIC_HEADER);

	/* Assign IEP entry base */
	fwIep = (struct gsi_fw_iep *)(((uint8_t *)gsi_hdr) + gsi_hdr->iep_offset);
	/* Assign firmware IRAM entry base */
	fwIRam = (struct gsi_fw_iram *)(((uint8_t *)gsi_hdr) + gsi_hdr->fw_offset);

	clrbits_le32(&regs->gsi_cgc_ctrl, GSI_CGC_CTRL_REGION_2_HW_CGC_EN_BMSK);
	write32(&regs->gsi_periph_base_lsb, 0);
	write32(&regs->gsi_periph_base_msb, 0);

	/* Load IEP */
	for (i = 0; i < gsi_hdr->iep_size_in_items; i++) {
		/* Check if offset does not exceed GSI address space size */
		if (fwIep[i].offset < GSI_REG_BASE_SIZE)
			write32((void *)&regs->gsi_cfg + fwIep[i].offset,
				fwIep[i].value);
	}

	/* Load firmware in IRAM */
	/* FIX: terminate assert() with a semicolon (see note above). */
	assert((gsi_hdr->fw_size_in_items * 2) < (GSI_INST_RAM_n_MAX_n + 1));
	/* Program Firmware version */
	write32(&regs->gsi_manager_mcs_code_ver, fwIRam->iram_dword0);
	memcpy((&regs->gsi_inst_ramn), (void *)fwIRam,
	       gsi_hdr->fw_size_in_items * GSI_FW_BYTES_PER_LINE);

	setbits_le32(&regs->gsi_mcs_cfg, GSI_MCS_CFG_MCS_ENABLE_BMSK);
	setbits_le32(&regs->gsi_cfg, GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK
				| GSI_CFG_GSI_ENABLE_BMSK);

	/* Kick the generic command and wait for the scratch handshake. */
	write32(&regs->gsi_ee_n_scratch_0_addr, 0x0);
	write32(&regs->ee_n_gsi_ee_generic_cmd, 0x81);
	do {
		regVal = read32(&regs->gsi_ee_n_scratch_0_addr);
	} while (regVal > 1);
}

/* One-time init of a QUP wrapper's common register block (HPG 3.1.2). */
static void qup_common_init(int addr)
{
	struct qupv3_common_reg *qupv3_common;

	/* HPG section 3.1.2 */
	qupv3_common = (struct qupv3_common_reg *)(uintptr_t) addr;
	setbits32(&qupv3_common->qupv3_common_cfg_reg,
		  QUPV3_COMMON_CFG_FAST_SWITCH_TO_HIGH_DISABLE_BMSK);

	/* HPG section 3.1.7.3 */
	setbits32(&qupv3_common->qupv3_se_ahb_m_cfg_reg,
		  QUPV3_SE_AHB_M_CFG_AHB_M_CLK_CGC_ON_BMSK);
}

/* Enable all QUP serial-engine clocks and init both wrapper blocks. */
void qupv3_fw_init(void)
{
	uint8_t i;

	/* Turn on all QUP clocks */
	for (i = 0; i < QUPV3_SE_MAX; i++)
		clock_enable_qup(i);

	qup_common_init(QUP_WRAP0_BASE);
	qup_common_init(QUP_WRAP1_BASE);
}
felixsinger/coreboot
src/soc/qualcomm/common/qupv3_config.c
C
gpl-2.0
8,406
class Project
  # Command object that synchronizes a Project model (and its links, devel
  # project, maintained projects, flags and repositories) with a parsed
  # project-meta XML hash. Raises the errors from Project::Errors
  # (SaveError, ForbiddenError, CycleError, ...) on invalid input.
  class UpdateFromXmlCommand
    include Project::Errors

    # The project being updated.
    attr_reader :project

    def initialize(project)
      @project = project
    end

    # Apply the XML hash to the project. When +force+ is truthy, repositories
    # removed from the XML are deleted even if other repositories still
    # reference them as path element or release target.
    def run(xmlhash, force = nil)
      project.check_write_access!

      # check for raising read access permissions, which can't get ensured atm
      unless project.new_record? || project.disabled_for?('access', nil, nil)
        if FlagHelper.xml_disabled_for?(xmlhash, 'access') && !User.admin_session?
          raise ForbiddenError
        end
      end
      unless project.new_record? || project.disabled_for?('sourceaccess', nil, nil)
        if FlagHelper.xml_disabled_for?(xmlhash, 'sourceaccess') && !User.admin_session?
          raise ForbiddenError
        end
      end

      new_record = project.new_record?
      if ::Configuration.default_access_disabled == true && !new_record
        if project.disabled_for?('access', nil, nil) && !FlagHelper.xml_disabled_for?(xmlhash, 'access') && !User.admin_session?
          raise ForbiddenError
        end
      end

      if project.name != xmlhash['name']
        raise SaveError, "project name mismatch: #{project.name} != #{xmlhash['name']}"
      end

      # Plain scalar attributes come straight from the XML.
      project.title = xmlhash.value('title')
      project.description = xmlhash.value('description')
      project.url = xmlhash.value('url')
      project.remoteurl = xmlhash.value('remoteurl')
      project.remoteproject = xmlhash.value('remoteproject')
      project.kind = xmlhash.value('kind') if xmlhash.value('kind').present?
      project.save!

      update_linked_projects(xmlhash)
      parse_develproject(xmlhash)

      update_maintained_prjs_from_xml(xmlhash)

      project.update_relationships_from_xml(xmlhash)

      #--- update flag group ---#
      project.update_all_flags(xmlhash)
      if ::Configuration.default_access_disabled == true && new_record
        # write a default access disable flag by default in this mode for projects if not defined
        if xmlhash.elements('access').empty?
          project.flags.new(status: 'disable', flag: 'access')
        end
      end

      update_repositories(xmlhash, force)
    end

    private

    # Replace all <link> relations with the ones from the XML, preserving
    # their document order in the 'position' column. Returns the final
    # position counter.
    # rubocop:disable Style/GuardClause
    def update_linked_projects(xmlhash)
      position = 1
      # destroy all current linked projects
      project.linking_to.destroy_all

      # recreate linked projects from xml
      xmlhash.elements('link') do |l|
        link = Project.find_by_name(l['project'])
        if link.nil?
          if Project.find_remote_project(l['project'])
            project.linking_to.create(project: project,
                                      linked_remote_project_name: l['project'],
                                      vrevmode: l['vrevmode'],
                                      position: position)
          else
            raise SaveError, "unable to link against project '#{l['project']}'"
          end
        else
          raise SaveError, 'unable to link against myself' if link == project

          project.linking_to.create!(project: project,
                                     linked_db_project: link,
                                     vrevmode: l['vrevmode'],
                                     position: position)
        end
        position += 1
      end
      position
    end
    # rubocop:enable Style/GuardClause

    # Resolve the <devel project="..."/> element and guard against devel
    # projects that point (directly or transitively) back to this project.
    def parse_develproject(xmlhash)
      project.develproject = nil
      devel = xmlhash['devel']
      if devel
        prj_name = devel['project']
        if prj_name
          begin
            develprj = Project.get_by_name(prj_name)
          rescue UnknownObjectError => e
            raise UnknownObjectError, "Project with name '#{e.message}' not found"
          end
          unless develprj
            raise SaveError, "value of develproject has to be a existing project (project '#{prj_name}' does not exist)"
          end
          if develprj == project
            raise SaveError, 'Devel project can not point to itself'
          end
          project.develproject = develprj
        end
      end

      # cycle detection
      prj = project
      processed = {}

      while prj && prj.develproject
        if processed[prj.name]
          raise CycleError, "There is a cycle in devel definition at #{processed.keys.join(' -- ')}"
        end
        processed[prj.name] = 1
        prj = prj.develproject
        # use the in-memory (possibly unsaved) self when the chain returns here
        prj = project if prj && prj.id == project.id
      end
    end

    # Sync the MaintainedProject relations with the <maintenance> section.
    def update_maintained_prjs_from_xml(xmlhash)
      # First check all current maintained project relations
      olds = {}
      project.maintained_projects.each { |mp| olds[mp.project.name] = mp }

      # Set this project as the maintenance project for all maintained projects found in the XML
      xmlhash.get('maintenance').elements('maintains') do |maintains|
        pn = maintains['project']
        next if olds.delete(pn)
        maintained_project = Project.get_by_name(pn)
        MaintainedProject.create(project: maintained_project, maintenance_project: project)
      end

      # whatever was not re-declared in the XML gets removed
      project.maintained_projects.delete(olds.values)
    end

    # Create/update/delete repositories according to the XML.
    def update_repositories(xmlhash, force)
      fill_repo_cache

      xmlhash.elements('repository') do |repo_xml_hash|
        update_repository_without_path_element(repo_xml_hash)
      end
      # Some repositories might be refered by path elements before they appear in the
      # xml tree. Thus we have 2 iterations. First one goes through all repository
      # elements, second run handles path elements.
      # This can be the case when creating multiple repositories in a project where one
      # repository uses another one, eg. importing an existing config from elsewhere.
      xmlhash.elements('repository') do |repo|
        current_repo = project.repositories.find_by_name(repo['name'])
        update_path_elements(current_repo, repo)
      end

      # delete remaining repositories in @repocache
      @repocache.each do |name, object|
        Rails.logger.debug "offending repo: #{object.inspect}"
        unless force
          # find repositories that link against this one and issue warning if found
          list = PathElement.where(repository_id: object.id)
          check_for_empty_repo_list(list, "Repository #{project.name}/#{name} cannot be deleted because following repos link against it:")
          list = ReleaseTarget.where(target_repository_id: object.id)
          check_for_empty_repo_list(
            list,
            "Repository #{project.name}/#{name} cannot be deleted because following repos define it as release target:/"
          )
        end
        Rails.logger.debug "deleting repository '#{name}'"
        project.repositories.destroy(object)
      end
      # save memory
      @repocache = nil
    end

    # Index the project's local (non-remote) repositories by name; entries
    # still present after processing the XML are deleted afterwards.
    def fill_repo_cache
      @repocache = {}
      project.repositories.each do |repo|
        @repocache[repo.name] = repo if repo.remote_project_name.blank?
      end
    end

    # Create or update a single repository from its XML hash, everything
    # except its path elements (handled in a second pass, see above).
    def update_repository_without_path_element(xml_hash)
      current_repo = @repocache[xml_hash['name']]
      unless current_repo
        Rails.logger.debug "adding repository '#{xml_hash['name']}'"
        current_repo = project.repositories.new(name: xml_hash['name'])
      end
      Rails.logger.debug "modifying repository '#{xml_hash['name']}'"

      update_repository_flags(current_repo, xml_hash)
      update_release_targets(current_repo, xml_hash)
      update_hostsystem(current_repo, xml_hash)
      update_repository_architectures(current_repo, xml_hash)
      update_download_repositories(current_repo, xml_hash)

      current_repo.save!

      # still referenced from the XML, so protect it from deletion
      @repocache.delete(xml_hash['name'])
    end

    # Rebuild the <path> elements of a repository in document order.
    def update_path_elements(current_repo, xml_hash)
      # destroy all current pathelements
      current_repo.path_elements.destroy_all
      return unless xml_hash['path']

      # recreate pathelements from xml
      position = 1
      xml_hash.elements('path') do |path|
        link_repo = Repository.find_by_project_and_name(path['project'], path['repository'])
        if path['project'] == project.name && path['repository'] == xml_hash['name']
          raise SaveError, 'Using same repository as path element is not allowed'
        end
        unless link_repo
          raise SaveError, "unable to walk on path '#{path['project']}/#{path['repository']}'"
        end
        current_repo.path_elements.new(link: link_repo, position: position)
        position += 1
      end

      current_repo.save!
    end

    # Raise SaveError listing all repositories in +list+ unless it is empty.
    def check_for_empty_repo_list(list, error_prefix)
      return if list.empty?
      linking_repos = list.map { |x| x.repository.project.name + '/' + x.repository.name }.join("\n")
      raise SaveError, error_prefix + "\n" + linking_repos
    end

    # Copy the simple per-repository build control attributes.
    def update_repository_flags(current_repo, xml_hash)
      current_repo.rebuild = xml_hash['rebuild']
      current_repo.block = xml_hash['block']
      current_repo.linkedbuild = xml_hash['linkedbuild']
    end

    # Rebuild the <releasetarget> relations of a repository.
    def update_release_targets(current_repo, xml_hash)
      # destroy all current releasetargets
      current_repo.release_targets.destroy_all

      # recreate release targets from xml
      xml_hash.elements('releasetarget') do |release_target|
        # NOTE: the local 'project' deliberately shadows the attr_reader here;
        # inside this block it refers to the release target's project.
        project = Project.find_by(name: release_target['project'])
        repository = release_target['repository']
        trigger = release_target['trigger']
        unless project
          raise SaveError, "Project '#{release_target['project']}' does not exist."
        end
        if project.defines_remote_instance?
          raise SaveError, "Can not use remote repository as release target '#{project}/#{repository}'"
        end
        target_repo = Repository.find_by_project_and_name(project.name, repository)
        raise SaveError, "Unknown target repository '#{project}/#{repository}'" unless target_repo
        current_repo.release_targets.new(target_repository: target_repo, trigger: trigger)
      end
    end

    # Set or clear the <hostsystem> cross-repository reference.
    def update_hostsystem(current_repo, xml_hash)
      if xml_hash.key?('hostsystem')
        target_project = Project.get_by_name(xml_hash['hostsystem']['project'])
        target_repo = target_project.repositories.find_by_name(xml_hash['hostsystem']['repository'])
        if xml_hash['hostsystem']['project'] == project.name && xml_hash['hostsystem']['repository'] == xml_hash['name']
          raise SaveError, 'Using same repository as hostsystem element is not allowed'
        end
        unless target_repo
          raise SaveError, "Unknown target repository '#{xml_hash['hostsystem']['project']}/#{xml_hash['hostsystem']['repository']}'"
        end
        current_repo.hostsystem = target_repo
      else
        current_repo.hostsystem = nil
      end
      current_repo.save! if current_repo.changed?
    end

    # Reject architecture lists that name the same architecture twice.
    def check_for_duplicated_archs!(architectures)
      duplicated_architectures = architectures.uniq.select { |architecture| architectures.count(architecture) > 1 }
      return if duplicated_architectures.empty?
      raise SaveError, "double use of architecture: '#{duplicated_architectures.first}'"
    end

    # Sync the repository's architecture list (and ordering) with the XML.
    def update_repository_architectures(current_repo, xml_hash)
      xml_archs = xml_hash.elements('arch')
      check_for_duplicated_archs!(xml_archs)
      architectures = []
      xml_archs.each_with_index do |archname, position|
        architecture = Architecture.from_cache!(archname)
        current_repo.repository_architectures.find_or_create_by(architecture: architecture).insert_at(position)
        architectures << architecture
      end
      # drop architectures no longer listed in the XML
      current_repo.repository_architectures.where.not(architecture: architectures).delete_all
    end

    # Replace the download-on-demand (<download>) entries of a repository.
    def update_download_repositories(current_repo, xml_hash)
      current_repo.download_repositories.delete_all
      dod_repositories = xml_hash.elements('download').map do |dod|
        dod_attributes = {
          repository: current_repo,
          arch: dod['arch'],
          url: dod['url'],
          repotype: dod['repotype'],
          archfilter: dod['archfilter'],
          pubkey: dod['pubkey']
        }
        if dod['master']
          dod_attributes[:masterurl] = dod['master']['url']
          dod_attributes[:mastersslfingerprint] = dod['master']['sslfingerprint']
        end

        repository = DownloadRepository.new(dod_attributes)
        raise SaveError, repository.errors.full_messages.to_sentence unless repository.valid?
        repository
      end
      current_repo.download_repositories.replace(dod_repositories)
    end
  end
end
Conan-Kudo/open-build-service
src/api/app/models/project/update_from_xml_command.rb
Ruby
gpl-2.0
12,514
/* alignlib - a library for aligning protein sequences $Id: ImplAlignator.cpp,v 1.3 2005/02/24 11:07:25 aheger Exp $ Copyright (C) 2004 Andreas Heger This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <iostream> #include "alignlib_fwd.h" #include "alignlib_interfaces.h" #include "AlignlibDebug.h" #include "ImplMultipleAlignator.h" #include "HelpersAlignandum.h" #include "MultipleAlignator.h" using namespace std; namespace alignlib { //---------------------------------------------------------------------------------------- ImplMultipleAlignator::ImplMultipleAlignator() { debug_func_cerr( 5 ); } ImplMultipleAlignator::~ImplMultipleAlignator() { debug_func_cerr(5); } ImplMultipleAlignator::ImplMultipleAlignator( const ImplMultipleAlignator & src ) : MultipleAlignator(src) { } void ImplMultipleAlignator::align( HMultAlignment & result, const HStringVector & sequences ) const { debug_func_cerr(5); HAlignandumVector tmp(new AlignandumVector()); for (int x = 0; x < sequences->size(); ++x) tmp->push_back( makeSequence((*sequences)[x]) ); align( result, tmp ); } // this method needs to be defined for the redirection from align( HMultAlignment, HStringVector) to work void ImplMultipleAlignator::align( HMultAlignment & result, const HAlignandumVector & sequences ) const { debug_func_cerr(5); } } // namespace alignlib
AndreasHeger/alignlib
cython/alignlib_src/ImplMultipleAlignator.cpp
C++
gpl-2.0
2,027
<?php
# -- BEGIN LICENSE BLOCK ---------------------------------------
#
# This file is part of Dotclear 2.
#
# Copyright (c) 2003-2013 Olivier Meunier & Association Dotclear
# Licensed under the GPL version 2.0 license.
# See LICENSE file or
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
#
# -- END LICENSE BLOCK -----------------------------------------

/**
 * Batch-actions page for the "pages" plugin. Specializes the generic posts
 * actions page: pages-specific titles/links, a pages-only action list, and a
 * pseudo "reorder" action driven by the submitted position values.
 */
class dcPagesActionsPage extends dcPostsActionsPage
{
	public function __construct($core,$uri,$redirect_args=array())
	{
		parent::__construct($core,$uri,$redirect_args);
		# no extra fields carried over on redirect; breadcrumb title is "Pages"
		$this->redirect_fields = array();
		$this->caller_title = __('Pages');
	}

	/** Render an error page with the pages breadcrumb around the message. */
	public function error(Exception $e)
	{
		$this->core->error->add($e->getMessage());
		$this->beginPage(dcPage::breadcrumb(
			array(
				html::escapeHTML($this->core->blog->name) => '',
				__('Pages') => $this->getRedirection(true),
				__('Pages actions') => ''
			))
		);
		$this->endPage();
	}

	/**
	 * Emit the opening HTML for an actions page.
	 * NOTE(review): the literal '</script>' below has no matching opening
	 * tag visible here -- presumably $head is expected to leave a <script>
	 * element open; confirm against the callers before touching it.
	 */
	public function beginPage($breadcrumb='',$head='')
	{
		echo
		'<html><head><title>'.__('Pages').'</title>'.
		dcPage::jsLoad('js/_posts_actions.js').
		$head.
		'</script></head><body>'.
		$breadcrumb;
		echo '<p><a class="back" href="'.$this->getRedirection(true).'">'.__('Back to pages list').'</a></p>';
	}

	/** Close the HTML document opened by beginPage(). */
	public function endPage()
	{
		echo '</body></html>';
	}

	/** Register the standard actions plus "reorder", then let plugins extend. */
	public function loadDefaults()
	{
		DefaultPagesActions::adminPagesActionsPage($this->core,$this);
		$this->actions['reorder']=array('dcPagesActionsPage','doReorderPages');
		$this->core->callBehavior('adminPagesActionsPage',$this->core,$this);
	}

	/** Map the "reorder" submit button to an action and force post_type=page. */
	public function process()
	{
		// fake action for pages reordering
		if (!empty($this->from['reorder'])) {
			$this->from['action']='reorder';
		}
		$this->from['post_type']='page';
		return parent::process();
	}

	/**
	 * Persist the submitted ordering: each post gets post_position = value-1.
	 * Requires publish (or contentadmin) permission; non-contentadmin users
	 * may only reorder their own pages.
	 */
	public static function doReorderPages($core, dcPostsActionsPage $ap, $post)
	{
		foreach($post['order'] as $post_id => $value) {
			if (!$core->auth->check('publish,contentadmin',$core->blog->id))
				throw new Exception(__('You are not allowed to change this entry status'));

			$strReq = "WHERE blog_id = '".$core->con->escape($core->blog->id)."' ".
				"AND post_id ".$core->con->in($post_id);

			#If user can only publish, we need to check the post's owner
			if (!$core->auth->check('contentadmin',$core->blog->id))
				$strReq .= "AND user_id = '".$core->con->escape($core->auth->userID())."' ";

			$cur = $core->con->openCursor($core->prefix.'post');

			$cur->post_position = (integer) $value-1;
			$cur->post_upddt = date('Y-m-d H:i:s');

			$cur->update($strReq);
			$core->blog->triggerBlog();
		}

		dcPage::addSuccessNotice(__('Selected pages have been successfully reordered.'));
		$ap->redirect(false);
	}
}

/**
 * Default action definitions for the pages actions page; each action is
 * gated on the current user's blog permissions.
 */
class DefaultPagesActions
{
	public static function adminPagesActionsPage($core, $ap)
	{
		if ($core->auth->check('publish,contentadmin',$core->blog->id)) {
			$ap->addAction(
				array(__('Status') => array(
					__('Publish') => 'publish',
					__('Unpublish') => 'unpublish',
					__('Schedule') => 'schedule',
					__('Mark as pending') => 'pending'
				)),
				array('dcDefaultPostActions','doChangePostStatus')
			);
		}
		if ($core->auth->check('admin',$core->blog->id)) {
			$ap->addAction(
				array(__('Change') => array(
					__('Change author') => 'author')),
				array('dcDefaultPostActions','doChangePostAuthor')
			);
		}
		if ($core->auth->check('delete,contentadmin',$core->blog->id)) {
			$ap->addAction(
				array(__('Delete') => array(
					__('Delete') => 'delete')),
				array('dcDefaultPostActions','doDeletePost')
			);
		}
	}
}
yurikoneve/UserFrosting_Academic_Dashboard
plugins/pages/class.actionpage.php
PHP
gpl-2.0
3,532
<?php /** * LogoutCommand.php * * Copyright 2003-2013, Moxiecode Systems AB, All rights reserved. */ /** * Command that logs out the user. * * @package MOXMAN_Commands */ class MOXMAN_Commands_LogoutCommand extends MOXMAN_Commands_BaseCommand { /** * Executes the command logic with the specified RPC parameters. * * @param Object $params Command parameters sent from client. * @return Object Result object to be passed back to client. */ public function execute($params) { MOXMAN::getAuthManager()->logout(); return true; } } ?>
coassets/initial-d
sample_project/site_media/tinymce/plugins/moxiemanager/classes/Commands/LogoutCommand.php
PHP
gpl-2.0
556
package SysVStep; use NOCpulse::CommandLineApplicationComponent; @ISA=qw(NOCpulse::CommandLineApplicationComponent); use NOCpulse::LocalCommandShell; use NOCpulse::SetID; use NOCpulse::Object; use Data::Dumper; $NOCpulse::Object::CACHEACCESSORS = 0; $LibMode = 0; # If true, initialization won't try to parse command line switches. # Force Getopt::Long to accept options that aren't prefixed with anything at all. Getopt::Long::Configure('prefix_pattern=--|-|\+|'); sub overview { my $self = shift(); return "This is a System V init step that starts/stops the ".ref($self)." system/service"; } sub instVarDefinitions { my $self = shift(); ### NOTE!! If you do NOT want anything defined here saved between executions, ### be sure to undef it in persist()!!! $self->addInstVar('stopActions',[]); $self->addInstVar('lastAction',''); $self->addInstVar('lastShell'); $self->addInstVar('hbResourceMode'); $self->addInstVar('lastActionErrors',[]); return $self->SUPER::instVarDefinitions(); } sub initialize { my ($self,$switches,@params) = @_; $self->SUPER::initialize(@params); if (! $SysVStep::LibMode ) { my $stream = $self->debugObject->addstream(LEVEL=>SysVStep->ConfigValue('logFileLevel'), FILE=>SysVStep->ConfigValue('logFileName'), APPEND=>1); $stream->timestamps(1); if ($self->commandLineIsValid) { if (! 
$switches->{'hbResourceMode'}) { $stream = $self->debugObject->addstream(LEVEL=>$self->get_debug); $stream->timestamps(1); $self->dprint(1,'Debug level = ',$self->get_debug); $self->dprint(2,'Switches: ',join(',',@ARGV)); } else { $self->debugObject->addstream(LEVEL=>-1); } $self->dprint(3,'Non-LibMode reincarnation'); eval 'require NOCpulse::'.$self->databaseType; return $self->reincarnated($switches); } else { $self->dprint(3,'Not in LibMode, and invalid command line'); $self->printUsage; } } else { $self->dprint(3,'Lib mode reincarnation'); return $self->reincarnated($switches); } return $self; } sub reincarnated { my ($self,$switches) = @_; my $pastLife = ref($self)->loadFromDatabase($self->get_name); my ($key,$value); if ($pastLife) { $self->dprint(3,'Loading self from database'); $pastLife->set_switches($self->get_switches); while (($key,$value) = each(%$switches)) { if ($pastLife->hasSwitch($key)) { $pastLife->switch($key)->set_value($value); } else { $pastLife->set($key,$value); } } return $pastLife; } else { while (($key,$value) = each(%$switches)) { $self->dprint(3,"Setting ghost's $key to $value"); $self->set($key,$value); } return $self; } } sub registerSwitches { my $self = shift(); $self->addSwitch('start', undef, 0, 0, 'Start this step'); $self->addSwitch('stop', undef, 0, 0, 'Stop this step'); $self->addSwitch('force', undef, 0, 0, 'Force start/stop'); $self->addSwitch('restart', undef, 0, 0, 'Restart this step'); $self->addSwitch('status', undef, 0, 0, 'Print status'); $self->addSwitch('install', undef, 0, 0, 'Install SYSV symlinks'); $self->addSwitch('uninstall', undef, 0, 0, 'Uninstall SYSV symlinks'); $self->addSwitch('help', undef, 0, 0, 'Print usage'); $self->addSwitch('debug', '=i', 0, 0, 'Debug level'); $self->addSwitch('simshells', undef, 0, 0, "Simulate (don't run) shell commands"); } sub run { my ($self,$action,@params) = @_; if ($action) { $self->dprint(3,"Got action: $action"); } if (($action eq 'help') or $self->get_help) { 
$self->dprint(3,'RUN: help'); $self->printUsage; } elsif (($action eq 'start') or $self->get_start) { $self->dprint(3,'RUN: start'); $self->startStep; $self->persist; $self->_printStatus(1); return (! $self->isRunning); } elsif (($action eq 'stop') or $self->get_stop) { $self->dprint(3,'RUN: stop'); $self->stopStep; $self->persist; $self->_printStatus(1); return ($self->isRunning); } elsif (($action eq 'restart') or $self->get_restart) { $self->dprint(3,'RUN: restart'); $self->restartStep; $self->persist; return (! $self->isRunning); } elsif (($action eq 'status') or $self->get_status) { $self->dprint(3,'RUN: status'); $self->_printStatus(0); return (! $self->isRunning); } elsif (($action eq 'install') or $self->get_install) { $self->dprint(3,'RUN: install'); $self->installSysVLinks; $self->persist; return 0; } elsif (($action eq 'uninstall') or $self->get_uninstall) { $self->dprint(3,'RUN: uninstall'); $self->uninstallSysVLinks; $self->persist; return 0; } else { $self->dprint(3,'RUN: no valid action'); $self->printUsage; $self->_printStatus(0); } } ########################################## # SysV symlink maintenance sub installSysVLinks { my $self = shift(); if ($self->configValue('runLevels')) { symlink(SysVStep->ConfigValue('sysvStarter'),'/etc/rc.d/init.d/'.ref($self)); my @levels = split(',',$self->configValue('runLevels')); my $startSeq = $self->configValue('startSeq'); my $stopSeq = $self->configValue('stopSeq'); $self->dprint(1,'Installing '.ref($self).' 
for SysV startup in runlevels '.join(',',@levels).", start=$startSeq, stop=$stopSeq"); my $level; foreach $level (@levels) { symlink('/etc/rc.d/init.d/'.ref($self),'/etc/rc.d/rc'.$level.'.d/S'.$startSeq.ref($self)); } my @klevels = (0,1,6); foreach $level (@klevels) { symlink('/etc/rc.d/init.d/'.ref($self),'/etc/rc.d/rc'.$level.'.d/K'.$stopSeq.ref($self)); } local * FILE; open(FILE, '>', SysVStep->ConfigValue('installed').'/'.ref($self)); print FILE join(',',@levels)."\n"; close(FILE); } } sub uninstallSysVLinks { my $self = shift(); unlink('/etc/rc.d/init.d/'.ref($self)); $self->dprint(1,'Uninstalling '.ref($self).' from SysV startup'); my $level; foreach $level (0,1,2,3,4,5,6) { $self->shell('rm /etc/rc.d/rc'.$level.'.d/S*'.ref($self)); $self->shell('rm /etc/rc.d/rc'.$level.'.d/K*'.ref($self)); } $self->clearLastActionErrors; my $filename = SysVStep->ConfigValue('installed').'/'.ref($self); if ( -f $filename ) { unlink($filename) } } ########################################## # Persistence sub databaseType { return SysVStep->ConfigValue('databaseType'); } sub databaseDirectory { return SysVStep->ConfigValue('databaseDirectory'); } sub databaseFilename { my $class = shift(); return $class->databaseDirectory.'/SysVStep'.$class->databaseType->fileExtension; } sub get_name { return ref(shift()); } sub persist { my $self = shift(); my $switches = $self->get_switches; $self->set_switches({}); $self->set_hbResourceMode(undef); my $result = $self->SUPER::persist; $self->set_switches($switches); return $result; } ##################################### # Start/stop logic sub addError { my ($self,@errors) = @_; my $error; foreach $error (@errors) { push(@{$self->get_lastActionErrors},$error); } } sub isTrulyRunning { # Subclasses can override to add additional logic return 1; } sub isStarted { my $self = shift(); # Indicates simply that the step is known to have been started. Does not # indicate whether there were errors in that process. 
return ($self->get_lastAction eq 'start'); } sub isRunning { my $self = shift(); # The return value of this function is intended not only to indicate whether or not # the step has been started, but also the fact that it appeared to start without # errors. The difference is important: It is possible to have started something # that is in fact running, but that is running *incorrectly* due to some problem. # We still want to be able to *stop* a step that is in this state. Ergo the # difference. return ( $self->isStarted && (! $self->hasErrors) && $self->isTrulyRunning); } sub startStep { my $self = shift(); $is_not_subsystem = -f '/etc/rc.d/init.d/'.ref($self); if ((! $self->isStarted) || $self->get_force) { print "\t" unless $is_not_subsystem; print "Starting ", $self->get_name, " ... "; print "\n" if $is_not_subsystem; if ($self->get_force) { $self->clearStopActions; } $self->clearLastActionErrors; $self->startActions; $self->set_lastAction('start'); if ($self->hasErrors) { $self->listErrors; print "[ FAIL ]\n"; } else { print "[ OK ]\n"; } if ($is_not_subsystem) { # This is for RH "rc" script - won't kill if this isn't here local * FILE; open(FILE, '>', '/var/lock/subsys/'.ref($self)); print FILE 'running'; close(FILE); } return $self->hasErrors; } else { print "Starting ", $self->get_name, " ... [ ALREADY RUNNING ]\n" if ($self->get_name != 'InstallSoftwareConfig'); $self->dprint(1,'ALREADY RUNNING'); return 1; } } sub stopStep { my $self = shift(); $is_not_subsystem = -f '/etc/rc.d/init.d/'.ref($self); if ($self->isStarted || $self->get_force) { print "\t" unless $is_not_subsystem; print 'Stopping ', $self->get_name, " ... 
"; print "\n" if $is_not_subsystem; $self->clearLastActionErrors; $self->stopActions; $self->set_lastAction('stop'); if ($self->hasErrors) { $self->listErrors; print "[ FAIL ]\n"; } else { print "[ OK ]\n"; } if ($is_not_subsystem) { # This is for RH "rc" script - won't kill if this isn't here unlink('/var/lock/subsys/'.ref($self)); } return $self->hasErrors; } else { print "Stopping ", $self->get_name, " ... [ ALREADY STOPPED ]\n" if ($self->get_name != 'InstallSoftwareConfig'); $self->dprint(1,'ALREADY STOPPED'); return 1; } } sub restartStep { my $self = shift(); $self->dprint(3,'restartStep: calling stopStep'); $self->stopStep; $self->dprint(3,'restartStep: calling startStep'); $self->startStep; } sub startActions { my $self = shift(); $self->dprint(2,'Called abstract startActions'); } sub clearStopActions { my $self = shift(); $self->dprint(1,'Clearing stop actions'); $self->set_stopActions([]); } sub clearLastActionErrors { my $self = shift(); $self->dprint(1,'Clearing last action errors list'); $self->set_lastActionErrors([]); } sub addShellStopAction { my ($self,$action,$message) = @_; $shell = 'shell("'.$action.'")'; $self->addSelfStopAction($shell,$message); } sub addSelfStopAction { my ($self,$action,$message) = @_; my $selfaction = "\$self->$action"; $self->addStopAction($selfaction,$message); } sub addStopAction { my ($self,$action,$message) = @_; unshift(@{$self->get_stopActions},[$action,$message]); $self->dprint(3,"Added stop action: '$action'"); } sub okToCallStopAction { # Override if you want to be selective about executing a stop step # e.g. MacroSysVStep does this... 
return 1; } sub stopActions { my $self = shift(); my $action; foreach $action (@{$self->get_stopActions}) { my ($code,$message) = @$action; if ($self->okToCallStopAction($code)) { $self->dprint(3,"Executing stop action: '$code'"); if ($message) { $code = "$code || die('$message')"; } eval($code); if ($@) { $self->addError($@); } } else { $self->dprint(0,"NOTICE: Not executing stop action $code"); } } $self->clearStopActions; } ################################################## # General utility sub shell { my ($self,@command) = @_; my $command = join(' ',@command); my $shell = NOCpulse::LocalCommandShell->newInitialized; $self->dprint(1,"shell: '$command'"); $shell->set_probeCommands($command); if ($self->configValue('shellTimeout')) { $shell->set_timeout($self->configValue('shellTimeout')); } $self->dprint(1,'shell timeout: ',$shell->get_timeout); if (! $self->get_simshells) { $shell->execute; if ($shell->get_exit) { $self->addError("ERROR FROM SHELL COMMAND: "); $self->addError("STDOUT: ".$shell->get_stdout); $self->addError("STDERR: ".$shell->get_stderr); $self->addError("EXIT: ".$shell->get_exit); } } $self->set_lastShell($shell); return (! $shell->get_exit); } sub asUserDo { my ($self,$username,$doMe) = @_; $self->dprint(9,"asUserDo($username,$doMe)"); my $identity = NOCpulse::SetID->new( user => $username ); $identity->su(); my $result; $self->dprint(9,"EXECUTING with: ".`id`." $doMe *** USED: uid: $uid "); $result = eval $doMe; $identity->revert(); $self->dprint(9,"RESET to: ".`id`); if ($@) { $self->addError("asUserDo($username,$doMe) failed to eval: $@"); } return $result; } ################################################## # Console/log output sub _printStatus { my ($self,$avoidRedundancy) = @_; if ($self->get_hbResourceMode) { # NOTE!!! This EXACT TEXT is EXTREMELY IMPORTANT # to the heartbeat daemon! DO NOT ALTER IT!! # Heartbeat relies on this text, not an exit level!!! 
# By giving hearbeat the "isRunning" perspective versus # the "isStarted" perspective (see comment in isRunning()), # we make the hearbeat cluster more resillient in the face # of the misconfiguration of a backup node, etc. if ($self->isRunning) { print "running\n" } else { print "stopped\n" } } else { $self->dprint(1,'============ STATUS ==============='); $self->printStatus($avoidRedundancy); $self->dprint(1,'==================================='); } } sub printStatus { my ($self,$avoidRedundancy) = @_; $self->dprint(1,'Last action: ',$self->get_lastAction); if (-f '/etc/rc.d/init.d/'.ref($self)) { $self->dprint(1,'** Installed for SysV startup **'); } elsif ($self->configValue(runLevels)) { $self->dprint(1,'** Can be installed for SysV startup **'); $self->dprint(1,'Run levels: ',$self->configValue('runLevels')); $self->dprint(1,'Start sequence: ',$self->configValue('startSeq')); $self->dprint(1,'Stop sequence: ',$self->configValue('stopSeq')); } if ($self->isStarted) { if ($self->isRunning) { $self->dprint(1,'STARTED and RUNNING'); } else { $self->dprint(0,'WARNING: STARTED BUT *NOT* RUNNING'); } $self->dprint(1,'Stop actions: '); my $action; foreach $action (@{$self->get_stopActions}) { $self->dprint(1,"\t",$action->[0]); } } else { $self->dprint(1,'STOPPED'); } if ($self->hasErrors) { $self->dprint(0,'ERRORS ENCOUNTERED DURING LAST ACTION:'); $self->listErrors; } } sub dprint { my ($self,$level,@message) = @_; $self->SUPER::dprint($level,ref($self),': ',@message,"\n"); } sub hasErrors { return (scalar(@{shift()->get_lastActionErrors})) } sub listErrors { my $self = shift(); my $error; if ($self->hasErrors) { foreach $error (@{$self->get_lastActionErrors}) { $self->dprint(0,"\t!! $error"); } } } ######### Registry related stuff ############# sub registerForInstall { my ($self,$registrant,$traversal) = @_; if ( ! 
defined($traversal)) { $traversal = {}; } if ($registrant) { my $regdir = SysVStep->ConfigValue('registry'); my $filename = $regdir.'/'.ref($self); local * FILE; if ( ! -f $filename ) { open(FILE, '>', $filename); print FILE $registrant."\n"; close(FILE); } else { open(FILE, '<', $filename); chomp(my @keys = <FILE>); close(FILE); my %ary; @ary{@keys} = (1 .. scalar(@keys)); print ref($self)." keys = ".join(',',keys(%ary))."\n"; if ( ! defined($ary{$registrant})) { open(FILE, '>>', $filename); print FILE $registrant."\n"; close(FILE); } } } if (defined($traversal->{ref($self)})) { return; } else { $traversal->{ref($self)} = 1; } } sub registrationList { my $self = shift(); my @result; my $regdir = SysVStep->ConfigValue('registry'); my $filename = $regdir.'/'.ref($self); if ( -f $filename ) { local * FILE; open(FILE, '<', $filename); my $item; foreach $item (<FILE>) { chomp($item); push(@result,$item); } close(FILE); } return \@result; } 1;
dmacvicar/spacewalk
monitoring/SatConfig/general/SysVStep.pm
Perl
gpl-2.0
15,537
/* dispatch.c Network input dispatcher... */ /* * Copyright (c) 1995, 1996, 1997, 1998, 1999 * The Internet Software Consortium. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of The Internet Software Consortium nor the names * of its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE INTERNET SOFTWARE CONSORTIUM AND * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE INTERNET SOFTWARE CONSORTIUM OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * This software has been written for the Internet Software Consortium * by Ted Lemon <mellon@fugue.com> in cooperation with Vixie * Enterprises. To learn more about the Internet Software Consortium, * see ``http://www.vix.com/isc''. To learn more about Vixie * Enterprises, see ``http://www.vix.com''. 
*/ #ifndef lint #ifndef EMBED static char copyright[] = "$Id: dispatch.c,v 1.5 2003/08/22 04:20:40 danield Exp $ Copyright (c) 1995, 1996, 1997, 1998, 1999 The Internet Software Consortium. All rights reserved.\n"; #endif /*!EMBED*/ #endif /* not lint */ #include "dhcpd.h" #include <sys/ioctl.h> struct interface_info *interfaces, *dummy_interfaces, *fallback_interface; struct protocol *protocols; struct timeout *timeouts; int use_relay; static struct timeout *free_timeouts; static int interfaces_invalidated; void (*bootp_packet_handler) PROTO ((struct interface_info *, struct dhcp_packet *, int, unsigned int, struct iaddr *, struct hardware *)); int quiet_interface_discovery; /* Use the SIOCGIFCONF ioctl to get a list of all the attached interfaces. For each interface that's of type INET and not the loopback interface, register that interface with the network I/O software, figure out what subnet it's on, and add it to the list of interfaces. */ void discover_interfaces (state) int state; { struct interface_info *tmp; struct interface_info *last, *next; /*MN - to big char buf [8192]; */ char buf [4096]; struct ifconf ic; struct ifreq ifr; int i; int sock; struct subnet *subnet; struct shared_network *share; struct sockaddr_in foo; int ir; struct ifreq *tif; #ifdef ALIAS_NAMES_PERMUTED char *s; #endif /* Create an unbound datagram socket to do the SIOCGIFADDR ioctl on. */ if ((sock = socket (AF_INET, SOCK_DGRAM, IPPROTO_UDP)) < 0) error ("Can't create addrlist socket"); /* Get the interface configuration information... */ ic.ifc_len = sizeof buf; ic.ifc_ifcu.ifcu_buf = (caddr_t)buf; i = ioctl(sock, SIOCGIFCONF, &ic); if (i < 0) error ("ioctl: SIOCGIFCONF: %m"); /* If we already have a list of interfaces, and we're running as a DHCP server, the interfaces were requested. 
*/ if (interfaces && (state == DISCOVER_SERVER || state == DISCOVER_RELAY || state == DISCOVER_REQUESTED)) ir = 0; else if (state == DISCOVER_UNCONFIGURED) ir = INTERFACE_REQUESTED | INTERFACE_AUTOMATIC; else ir = INTERFACE_REQUESTED; /* Cycle through the list of interfaces looking for IP addresses. */ for (i = 0; i < ic.ifc_len;) { struct ifreq *ifp = (struct ifreq *)((caddr_t)ic.ifc_req + i); #ifdef HAVE_SA_LEN if (ifp -> ifr_addr.sa_len > sizeof (struct sockaddr)) i += (sizeof ifp -> ifr_name) + ifp -> ifr_addr.sa_len; else #endif i += sizeof *ifp; #ifdef ALIAS_NAMES_PERMUTED if ((s = strrchr (ifp -> ifr_name, ':'))) { *s = 0; } #endif #ifdef SKIP_DUMMY_INTERFACES if (!strncmp (ifp -> ifr_name, "dummy", 5)) continue; #endif /* See if this is the sort of interface we want to deal with. */ strcpy (ifr.ifr_name, ifp -> ifr_name); if (ioctl (sock, SIOCGIFFLAGS, &ifr) < 0) error ("Can't get interface flags for %s: %m", ifr.ifr_name); /* Skip loopback, point-to-point and down interfaces, except don't skip down interfaces if we're trying to get a list of configurable interfaces. */ if ((ifr.ifr_flags & IFF_LOOPBACK) || #ifdef HAVE_IFF_POINTOPOINT (ifr.ifr_flags & IFF_POINTOPOINT) || #endif (!(ifr.ifr_flags & IFF_UP) && state != DISCOVER_UNCONFIGURED)) continue; /* See if we've seen an interface that matches this one. */ for (tmp = interfaces; tmp; tmp = tmp -> next) if (!strcmp (tmp -> name, ifp -> ifr_name)) break; /* If there isn't already an interface by this name, allocate one. */ if (!tmp) { tmp = ((struct interface_info *) dmalloc (sizeof *tmp, "discover_interfaces")); if (!tmp) error ("Insufficient memory to %s %s", "record interface", ifp -> ifr_name); strcpy (tmp -> name, ifp -> ifr_name); tmp -> next = interfaces; tmp -> flags = ir; interfaces = tmp; } /* If we have the capability, extract link information and record it in a linked list. 
*/ #ifdef HAVE_AF_LINK if (ifp -> ifr_addr.sa_family == AF_LINK) { struct sockaddr_dl *foo = ((struct sockaddr_dl *) (&ifp -> ifr_addr)); tmp -> hw_address.hlen = foo -> sdl_alen; tmp -> hw_address.htype = HTYPE_ETHER; /* XXX */ memcpy (tmp -> hw_address.haddr, LLADDR (foo), foo -> sdl_alen); } else #endif /* AF_LINK */ if (ifp -> ifr_addr.sa_family == AF_INET) { struct iaddr addr; /* Get a pointer to the address... */ memcpy (&foo, &ifp -> ifr_addr, sizeof ifp -> ifr_addr); /* We don't want the loopback interface. */ if (foo.sin_addr.s_addr == htonl (INADDR_LOOPBACK)) continue; /* If this is the first real IP address we've found, keep a pointer to ifreq structure in which we found it. */ if (!tmp -> ifp) { #ifdef HAVE_SA_LEN int len = ((sizeof ifp -> ifr_name) + ifp -> ifr_addr.sa_len); #else int len = sizeof *ifp; #endif tif = (struct ifreq *)malloc (len); if (!tif) error ("no space to remember ifp."); memcpy (tif, ifp, len); tmp -> ifp = tif; tmp -> primary_address = foo.sin_addr; } /* Grab the address... */ addr.len = 4; memcpy (addr.iabuf, &foo.sin_addr.s_addr, addr.len); /* If there's a registered subnet for this address, connect it together... */ if ((subnet = find_subnet (&addr))) { /* If this interface has multiple aliases on the same subnet, ignore all but the first we encounter. 
*/ if (!subnet -> interface) { subnet -> interface = tmp; subnet -> interface_address = addr; } else if (subnet -> interface != tmp) { warn ("Multiple %s %s: %s %s", "interfaces match the", "same subnet", subnet -> interface -> name, tmp -> name); } share = subnet -> shared_network; if (tmp -> shared_network && tmp -> shared_network != share) { warn ("Interface %s matches %s", tmp -> name, "multiple shared networks"); } else { tmp -> shared_network = share; } if (!share -> interface) { share -> interface = tmp; } else if (share -> interface != tmp) { warn ("Multiple %s %s: %s %s", "interfaces match the", "same shared network", share -> interface -> name, tmp -> name); } } } } #if defined (LINUX_SLASHPROC_DISCOVERY) /* On Linux, interfaces that don't have IP addresses don't show up in the SIOCGIFCONF syscall. We got away with this prior to Linux 2.1 because we would give each interface an IP address of 0.0.0.0 before trying to boot, but that doesn't work after 2.1 because we're using LPF, because we can't configure interfaces with IP addresses of 0.0.0.0 anymore (grumble). This only matters for the DHCP client, of course - the relay agent and server should only care about interfaces that are configured with IP addresses anyway. The PROCDEV_DEVICE (/proc/net/dev) is a kernel-supplied file that, when read, prints a human readable network status. We extract the names of the network devices by skipping the first two lines (which are header) and then parsing off everything up to the colon in each subsequent line - these lines start with the interface name, then a colon, then a bunch of statistics. Yes, Virgina, this is a kludge, but you work with what you have. */ if (state == DISCOVER_UNCONFIGURED) { FILE *proc_dev; char buffer [256]; int skip = 2; proc_dev = fopen (PROCDEV_DEVICE, "r"); if (!proc_dev) error ("%s: %m", PROCDEV_DEVICE); while (fgets (buffer, sizeof buffer, proc_dev)) { char *name = buffer; char *sep; /* Skip the first two blocks, which are header lines. 
*/ if (skip) { --skip; continue; } sep = strrchr (buffer, ':'); if (sep) *sep = '\0'; while (*name == ' ') name++; /* See if we've seen an interface that matches this one. */ for (tmp = interfaces; tmp; tmp = tmp -> next) if (!strcmp (tmp -> name, name)) break; /* If we found one, nothing more to do.. */ if (tmp) continue; /* Otherwise, allocate one. */ tmp = ((struct interface_info *) dmalloc (sizeof *tmp, "discover_interfaces")); if (!tmp) error ("Insufficient memory to %s %s", "record interface", name); memset (tmp, 0, sizeof *tmp); strcpy (tmp -> name, name); tmp -> flags = ir; tmp -> next = interfaces; interfaces = tmp; } fclose (proc_dev); } #endif /* Now cycle through all the interfaces we found, looking for hardware addresses. */ #if defined (HAVE_SIOCGIFHWADDR) && !defined (HAVE_AF_LINK) for (tmp = interfaces; tmp; tmp = tmp -> next) { struct ifreq ifr; struct sockaddr sa; int b, sk; if (!tmp -> ifp) { /* Make up an ifreq structure. */ tif = (struct ifreq *)malloc (sizeof (struct ifreq)); if (!tif) error ("no space to remember ifp."); memset (tif, 0, sizeof (struct ifreq)); strcpy (tif -> ifr_name, tmp -> name); tmp -> ifp = tif; } /* Read the hardware address from this interface. */ ifr = *tmp -> ifp; if (ioctl (sock, SIOCGIFHWADDR, &ifr) < 0) continue; sa = *(struct sockaddr *)&ifr.ifr_hwaddr; switch (sa.sa_family) { #ifdef HAVE_ARPHRD_TUNNEL case ARPHRD_TUNNEL: /* ignore tunnel interfaces. 
*/ #endif #ifdef HAVE_ARPHRD_ROSE case ARPHRD_ROSE: #endif #ifdef HAVE_ARPHRD_LOOPBACK case ARPHRD_LOOPBACK: /* ignore loopback interface */ break; #endif case ARPHRD_ETHER: tmp -> hw_address.hlen = 6; tmp -> hw_address.htype = ARPHRD_ETHER; memcpy (tmp -> hw_address.haddr, sa.sa_data, 6); break; #ifndef HAVE_ARPHRD_IEEE802 # define ARPHRD_IEEE802 HTYPE_IEEE802 #endif case ARPHRD_IEEE802: tmp -> hw_address.hlen = 6; tmp -> hw_address.htype = ARPHRD_IEEE802; memcpy (tmp -> hw_address.haddr, sa.sa_data, 6); break; #ifndef HAVE_ARPHRD_FDDI # define ARPHRD_FDDI HTYPE_FDDI #endif case ARPHRD_FDDI: tmp -> hw_address.hlen = 16; tmp -> hw_address.htype = HTYPE_FDDI; /* XXX */ memcpy (tmp -> hw_address.haddr, sa.sa_data, 16); break; #ifdef HAVE_ARPHRD_METRICOM case ARPHRD_METRICOM: tmp -> hw_address.hlen = 6; tmp -> hw_address.htype = ARPHRD_METRICOM; memcpy (tmp -> hw_address.haddr, sa.sa_data, 6); break; #endif #ifdef HAVE_ARPHRD_AX25 case ARPHRD_AX25: tmp -> hw_address.hlen = 6; tmp -> hw_address.htype = ARPHRD_AX25; memcpy (tmp -> hw_address.haddr, sa.sa_data, 6); break; #endif #ifdef HAVE_ARPHRD_NETROM case ARPHRD_NETROM: tmp -> hw_address.hlen = 6; tmp -> hw_address.htype = ARPHRD_NETROM; memcpy (tmp -> hw_address.haddr, sa.sa_data, 6); break; #endif default: warn ("%s: unknown hardware address type %d", ifr.ifr_name, sa.sa_family); break; } } #endif /* defined (HAVE_SIOCGIFHWADDR) && !defined (HAVE_AF_LINK) */ /* If we're just trying to get a list of interfaces that we might be able to configure, we can quit now. */ if (state == DISCOVER_UNCONFIGURED) return; /* Weed out the interfaces that did not have IP addresses. 
*/ last = (struct interface_info *)0; for (tmp = interfaces; tmp; tmp = next) { next = tmp -> next; if ((tmp -> flags & INTERFACE_AUTOMATIC) && state == DISCOVER_REQUESTED) tmp -> flags &= ~(INTERFACE_AUTOMATIC | INTERFACE_REQUESTED); if (!tmp -> ifp || !(tmp -> flags & INTERFACE_REQUESTED)) { if ((tmp -> flags & INTERFACE_REQUESTED) != ir) error ("%s: not found", tmp -> name); if (!last) interfaces = interfaces -> next; else last -> next = tmp -> next; /* Remember the interface in case we need to know about it later. */ tmp -> next = dummy_interfaces; dummy_interfaces = tmp; continue; } last = tmp; memcpy (&foo, &tmp -> ifp -> ifr_addr, sizeof tmp -> ifp -> ifr_addr); /* We must have a subnet declaration for each interface. */ if (!tmp -> shared_network && (state == DISCOVER_SERVER)) { warn ("No subnet declaration for %s (%s).", tmp -> name, inet_ntoa (foo.sin_addr)); warn ("Please write a subnet declaration in your %s", "dhcpd.conf file for the"); error ("network segment to which interface %s %s", tmp -> name, "is attached."); } /* Find subnets that don't have valid interface addresses... */ for (subnet = (tmp -> shared_network ? tmp -> shared_network -> subnets : (struct subnet *)0); subnet; subnet = subnet -> next_sibling) { if (!subnet -> interface_address.len) { /* Set the interface address for this subnet to the first address we found. */ subnet -> interface_address.len = 4; memcpy (subnet -> interface_address.iabuf, &foo.sin_addr.s_addr, 4); } } /* Register the interface... */ if_register_receive (tmp); if_register_send (tmp); } /* Now register all the remaining interfaces as protocols. 
*/ for (tmp = interfaces; tmp; tmp = tmp -> next) add_protocol (tmp -> name, tmp -> rfdesc, got_one, tmp); close (sock); if (use_relay) maybe_setup_fallback_relay (); else maybe_setup_fallback (); } struct interface_info *setup_fallback () { fallback_interface = ((struct interface_info *) dmalloc (sizeof *fallback_interface, "discover_interfaces")); if (!fallback_interface) error ("Insufficient memory to record fallback interface."); memset (fallback_interface, 0, sizeof *fallback_interface); strcpy (fallback_interface -> name, "fallback"); fallback_interface -> shared_network = new_shared_network ("parse_statement"); if (!fallback_interface -> shared_network) error ("No memory for shared subnet"); memset (fallback_interface -> shared_network, 0, sizeof (struct shared_network)); fallback_interface -> shared_network -> name = "fallback-net"; return fallback_interface; } void reinitialize_interfaces () { struct interface_info *ip; for (ip = interfaces; ip; ip = ip -> next) { if_reinitialize_receive (ip); if_reinitialize_send (ip); } if (fallback_interface) if_reinitialize_send (fallback_interface); interfaces_invalidated = 1; } #ifdef USE_POLL /* Wait for packets to come in using poll(). When a packet comes in, call receive_packet to receive the packet and possibly strip hardware addressing information from it, and then call through the bootp_packet_handler hook to try to do something with it. */ void dispatch () { struct protocol *l; int nfds = 0; struct pollfd *fds; int count; int i; int to_msec; nfds = 0; for (l = protocols; l; l = l -> next) { ++nfds; } fds = (struct pollfd *)malloc ((nfds) * sizeof (struct pollfd)); if (!fds) error ("Can't allocate poll structures."); do { /* Call any expired timeouts, and then if there's still a timeout registered, time out the select call then. 
*/ another: if (timeouts) { struct timeout *t; if (timeouts -> when <= cur_time) { t = timeouts; timeouts = timeouts -> next; (*(t -> func)) (t -> what); t -> next = free_timeouts; free_timeouts = t; goto another; } /* Figure timeout in milliseconds, and check for potential overflow. We assume that integers are 32 bits, which is harmless if they're 64 bits - we'll just get extra timeouts in that case. Lease times would have to be quite long in order for a 32-bit integer to overflow, anyway. */ to_msec = timeouts -> when - cur_time; if (to_msec > 2147483) to_msec = 2147483; to_msec *= 1000; } else to_msec = -1; /* Set up the descriptors to be polled. */ i = 0; for (l = protocols; l; l = l -> next) { fds [i].fd = l -> fd; fds [i].events = POLLIN; fds [i].revents = 0; ++i; } /* Wait for a packet or a timeout... XXX */ count = poll (fds, nfds, to_msec); /* Get the current time... */ GET_TIME (&cur_time); /* Not likely to be transitory... */ if (count < 0) { if (errno == EAGAIN || errno == EINTR) continue; else error ("poll: %m"); } i = 0; for (l = protocols; l; l = l -> next) { if ((fds [i].revents & POLLIN)) { fds [i].revents = 0; if (l -> handler) (*(l -> handler)) (l); if (interfaces_invalidated) break; } ++i; } interfaces_invalidated = 0; } while (1); } #else /* Wait for packets to come in using select(). When one does, call receive_packet to receive the packet and possibly strip hardware addressing information from it, and then call through the bootp_packet_handler hook to try to do something with it. */ void dispatch () { fd_set r, w, x; struct protocol *l; int max = 0; int count; struct timeval tv, *tvp; FD_ZERO (&w); FD_ZERO (&x); do { /* Call any expired timeouts, and then if there's still a timeout registered, time out the select call then. 
*/ another: if (timeouts) { struct timeout *t; if (timeouts -> when <= cur_time) { t = timeouts; timeouts = timeouts -> next; (*(t -> func)) (t -> what); t -> next = free_timeouts; free_timeouts = t; goto another; } tv.tv_sec = timeouts -> when - cur_time; tv.tv_usec = 0; tvp = &tv; } else tvp = (struct timeval *)0; /* Set up the read mask. */ FD_ZERO (&r); for (l = protocols; l; l = l -> next) { FD_SET (l -> fd, &r); if (l -> fd > max) max = l -> fd; } /* Wait for a packet or a timeout... XXX */ count = select (max + 1, &r, &w, &x, tvp); /* Get the current time... */ GET_TIME (&cur_time); /* Not likely to be transitory... */ if (count < 0) error ("select: %m"); for (l = protocols; l; l = l -> next) { if (!FD_ISSET (l -> fd, &r)) continue; if (l -> handler) (*(l -> handler)) (l); if (interfaces_invalidated) break; } interfaces_invalidated = 0; } while (1); } #endif /* USE_POLL */ void got_one (l) struct protocol *l; { struct sockaddr_in from; struct hardware hfrom; struct iaddr ifrom; int result; union { //unsigned char packbuf [4096]; /* Packet input buffer. Must be as large as largest possible MTU. */ /*MN - make smaller*/ unsigned char packbuf [1500]; struct dhcp_packet packet; } u; struct interface_info *ip = l -> local; if (use_relay && !strcmp (ip -> name, "fallback")) { if ((result = receive_fallback (ip, u.packbuf, sizeof u, &from, &hfrom)) < 0) { warn ("receive_packet failed on %s: %m", ip -> name); return; } } else if ((result = receive_packet (ip, u.packbuf, sizeof u, &from, &hfrom)) < 0) { warn ("receive_packet failed on %s: %m", ip -> name); return; } if (result == 0) return; if (bootp_packet_handler) { ifrom.len = 4; memcpy (ifrom.iabuf, &from.sin_addr, ifrom.len); (*bootp_packet_handler) (ip, &u.packet, result, from.sin_port, &ifrom, &hfrom); } } int locate_network (packet) struct packet *packet; { struct iaddr ia; /* If this came through a gateway, find the corresponding subnet... 
*/ if (packet -> raw -> giaddr.s_addr) { struct subnet *subnet; ia.len = 4; memcpy (ia.iabuf, &packet -> raw -> giaddr, 4); subnet = find_subnet (&ia); if (subnet) packet -> shared_network = subnet -> shared_network; else packet -> shared_network = (struct shared_network *)0; } else { packet -> shared_network = packet -> interface -> shared_network; } if (packet -> shared_network) return 1; return 0; } void add_timeout (when, where, what) TIME when; void (*where) PROTO ((void *)); void *what; { struct timeout *t, *q; /* See if this timeout supersedes an existing timeout. */ t = (struct timeout *)0; for (q = timeouts; q; q = q -> next) { if (q -> func == where && q -> what == what) { if (t) t -> next = q -> next; else timeouts = q -> next; break; } t = q; } /* If we didn't supersede a timeout, allocate a timeout structure now. */ if (!q) { if (free_timeouts) { q = free_timeouts; free_timeouts = q -> next; q -> func = where; q -> what = what; } else { q = (struct timeout *)malloc (sizeof (struct timeout)); if (!q) error ("Can't allocate timeout structure!"); q -> func = where; q -> what = what; } } q -> when = when; /* Now sort this timeout into the timeout list. */ /* Beginning of list? */ if (!timeouts || timeouts -> when > q -> when) { q -> next = timeouts; timeouts = q; return; } /* Middle of list? */ for (t = timeouts; t -> next; t = t -> next) { if (t -> next -> when > q -> when) { q -> next = t -> next; t -> next = q; return; } } /* End of list. */ t -> next = q; q -> next = (struct timeout *)0; } void cancel_timeout (where, what) void (*where) PROTO ((void *)); void *what; { struct timeout *t, *q; /* Look for this timeout on the list, and unlink it if we find it. */ t = (struct timeout *)0; for (q = timeouts; q; q = q -> next) { if (q -> func == where && q -> what == what) { if (t) t -> next = q -> next; else timeouts = q -> next; break; } t = q; } /* If we found the timeout, put it on the free list. 
*/ if (q) { q -> next = free_timeouts; free_timeouts = q; } } /* Add a protocol to the list of protocols... */ void add_protocol (name, fd, handler, local) char *name; int fd; void (*handler) PROTO ((struct protocol *)); void *local; { struct protocol *p; p = (struct protocol *)malloc (sizeof *p); if (!p) error ("can't allocate protocol struct for %s", name); p -> fd = fd; p -> handler = handler; p -> local = local; p -> next = protocols; protocols = p; } void remove_protocol (proto) struct protocol *proto; { struct protocol *p, *next, *prev; prev = (struct protocol *)0; for (p = protocols; p; p = next) { next = p -> next; if (p == proto) { if (prev) prev -> next = p -> next; else protocols = p -> next; free (p); } } }
sensysnetworks/uClinux
user/dhcp-isc/common/dispatch.c
C
gpl-2.0
23,971
<?php /** * This file is part of the Carbon package. * * (c) Brian Nesbitt <brian@nesbot.com> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ /** * Authors: * - Ubuntu René Manassé GALEKWA renemanasse@gmail.com */ return require __DIR__.'/ln.php';
matrix-msu/kora
vendor/nesbot/carbon/src/Carbon/Lang/ln_CD.php
PHP
gpl-2.0
344
#ifndef SOPHUS_SE2_HPP #define SOPHUS_SE2_HPP #include "so2.hpp" namespace Sophus { template <class Scalar_, int Options = 0> class SE2; using SE2d = SE2<double>; using SE2f = SE2<float>; } // namespace Sophus namespace Eigen { namespace internal { template <class Scalar_, int Options> struct traits<Sophus::SE2<Scalar_, Options>> { using Scalar = Scalar_; using TranslationType = Sophus::Vector2<Scalar>; using SO2Type = Sophus::SO2<Scalar>; }; template <class Scalar_, int Options> struct traits<Map<Sophus::SE2<Scalar_>, Options>> : traits<Sophus::SE2<Scalar_, Options>> { using Scalar = Scalar_; using TranslationType = Map<Sophus::Vector2<Scalar>, Options>; using SO2Type = Map<Sophus::SO2<Scalar>, Options>; }; template <class Scalar_, int Options> struct traits<Map<Sophus::SE2<Scalar_> const, Options>> : traits<Sophus::SE2<Scalar_, Options> const> { using Scalar = Scalar_; using TranslationType = Map<Sophus::Vector2<Scalar> const, Options>; using SO2Type = Map<Sophus::SO2<Scalar> const, Options>; }; } // namespace internal } // namespace Eigen namespace Sophus { // SE2 base type - implements SE2 class but is storage agnostic. // // SE(2) is the group of rotations and translation in 2d. It is the semi-direct // product of SO(2) and the 2d Euclidean vector space. The class is represented // using a composition of SO2Group for rotation and a 2-vector for translation. // // SE(2) is neither compact, nor a commutative group. // // See SO2Group for more details of the rotation representation in 2d. // template <class Derived> class SE2Base { public: using Scalar = typename Eigen::internal::traits<Derived>::Scalar; using TranslationType = typename Eigen::internal::traits<Derived>::TranslationType; using SO2Type = typename Eigen::internal::traits<Derived>::SO2Type; // Degrees of freedom of manifold, number of dimensions in tangent space // (two for translation, three for rotation). 
static int constexpr DoF = 3; // Number of internal parameters used (tuple for complex, two for // translation). static int constexpr num_parameters = 4; // Group transformations are 3x3 matrices. static int constexpr N = 3; using Transformation = Matrix<Scalar, N, N>; using Point = Vector2<Scalar>; using Tangent = Vector<Scalar, DoF>; using Adjoint = Matrix<Scalar, DoF, DoF>; // Adjoint transformation // // This function return the adjoint transformation ``Ad`` of the group // element ``A`` such that for all ``x`` it holds that // ``hat(Ad_A * x) = A * hat(x) A^{-1}``. See hat-operator below. // SOPHUS_FUNC Adjoint Adj() const { Matrix<Scalar, 2, 2> const& R = so2().matrix(); Transformation res; res.setIdentity(); res.template topLeftCorner<2, 2>() = R; res(0, 2) = translation()[1]; res(1, 2) = -translation()[0]; return res; } // Returns copy of instance casted to NewScalarType. // template <class NewScalarType> SOPHUS_FUNC SE2<NewScalarType> cast() const { return SE2<NewScalarType>(so2().template cast<NewScalarType>(), translation().template cast<NewScalarType>()); } // Returns group inverse. // SOPHUS_FUNC SE2<Scalar> inverse() const { SO2<Scalar> const invR = so2().inverse(); return SE2<Scalar>(invR, invR * (translation() * Scalar(-1))); } // Logarithmic map // // Returns tangent space representation (= twist) of the instance. // SOPHUS_FUNC Tangent log() const { return log(*this); } /** * \brief Normalize SO2 element * * It re-normalizes the SO2 element. */ SOPHUS_FUNC void normalize() { so2().normalize(); } // Returns 3x3 matrix representation of the instance. // // It has the following form: // // | R t | // | o 1 | // // where ``R`` is a 2x2 rotation matrix, ``t`` a translation 2-vector and // ``o`` a 2-column vector of zeros. 
// SOPHUS_FUNC Transformation matrix() const { Transformation homogenious_matrix; homogenious_matrix.template topLeftCorner<2, 3>() = matrix2x3(); homogenious_matrix.row(2) = Matrix<Scalar, 1, 3>(Scalar(0), Scalar(0), Scalar(1)); return homogenious_matrix; } // Returns the significant first two rows of the matrix above. // SOPHUS_FUNC Matrix<Scalar, 2, 3> matrix2x3() const { Matrix<Scalar, 2, 3> matrix; matrix.template topLeftCorner<2, 2>() = rotationMatrix(); matrix.col(2) = translation(); return matrix; } // Assignment operator. // template <class OtherDerived> SOPHUS_FUNC SE2Base<Derived>& operator=(SE2Base<OtherDerived> const& other) { so2() = other.so2(); translation() = other.translation(); return *this; } // Group multiplication, which is rotation concatenation. // SOPHUS_FUNC SE2<Scalar> operator*(SE2<Scalar> const& other) const { SE2<Scalar> result(*this); result *= other; return result; } // Group action on 2-points. // // This function rotates and translates a two dimensional point ``p`` by the // SE(2) element ``bar_T_foo = (bar_R_foo, t_bar)`` (= rigid body // transformation): // // ``p_bar = bar_R_foo * p_foo + t_bar``. // SOPHUS_FUNC Point operator*(Point const& p) const { return so2() * p + translation(); } // In-place group multiplication. // SOPHUS_FUNC SE2Base<Derived>& operator*=(SE2<Scalar> const& other) { translation() += so2() * (other.translation()); so2() *= other.so2(); return *this; } // Returns rotation matrix. // SOPHUS_FUNC Matrix<Scalar, 2, 2> rotationMatrix() const { return so2().matrix(); } // Takes in complex number, and normalizes it. // // Precondition: The complex number must not be close to zero. // SOPHUS_FUNC void setComplex(Sophus::Vector2<Scalar> const& complex) { return so2().setComplex(complex); } // Sets ``so3`` using ``rotation_matrix``. // // Precondition: ``R`` must be orthogonal and ``det(R)=1``. 
// SOPHUS_FUNC void setRotationMatrix(Matrix<Scalar, 2, 2> const& R) { so2().setComplex(Scalar(0.5) * (R(0, 0) + R(1, 1)), Scalar(0.5) * (R(1, 0) - R(0, 1))); } // Mutator of SO3 group. // SOPHUS_FUNC SO2Type& so2() { return static_cast<Derived*>(this)->so2(); } // Accessor of SO3 group. // SOPHUS_FUNC SO2Type const& so2() const { return static_cast<Derived const*>(this)->so2(); } // Mutator of translation vector. // SOPHUS_FUNC TranslationType& translation() { return static_cast<Derived*>(this)->translation(); } // Accessor of translation vector // SOPHUS_FUNC TranslationType const& translation() const { return static_cast<Derived const*>(this)->translation(); } // Accessor of unit complex number. // SOPHUS_FUNC typename Eigen::internal::traits<Derived>::SO2Type::Complex const& unit_complex() const { return so2().unit_complex(); } //////////////////////////////////////////////////////////////////////////// // public static functions //////////////////////////////////////////////////////////////////////////// // Derivative of Lie bracket with respect to first element. // // This function returns ``D_a [a, b]`` with ``D_a`` being the // differential operator with respect to ``a``, ``[a, b]`` being the lie // bracket of the Lie algebra se3. // See ``lieBracket()`` below. // SOPHUS_FUNC static Transformation d_lieBracketab_by_d_a(Tangent const& b) { static Scalar const zero = Scalar(0); Vector2<Scalar> upsilon2 = b.template head<2>(); Scalar theta2 = b[2]; Transformation res; res << zero, theta2, -upsilon2[1], -theta2, zero, upsilon2[0], zero, zero, zero; return res; } // Group exponential // // This functions takes in an element of tangent space (= twist ``a``) and // returns the corresponding element of the group SE(2). // // The first two components of ``a`` represent the translational part // ``upsilon`` in the tangent space of SE(2), while the last three components // of ``a`` represents the rotation vector ``omega``. 
// To be more specific, this function computes ``expmat(hat(a))`` with // ``expmat(.)`` being the matrix exponential and ``hat(.)`` the hat-operator // of SE(2), see below. // SOPHUS_FUNC static SE2<Scalar> exp(Tangent const& a) { Scalar theta = a[2]; SO2<Scalar> so2 = SO2<Scalar>::exp(theta); Scalar sin_theta_by_theta; Scalar one_minus_cos_theta_by_theta; if (std::abs(theta) < Constants<Scalar>::epsilon()) { Scalar theta_sq = theta * theta; sin_theta_by_theta = Scalar(1.) - Scalar(1. / 6.) * theta_sq; one_minus_cos_theta_by_theta = Scalar(0.5) * theta - Scalar(1. / 24.) * theta * theta_sq; } else { sin_theta_by_theta = so2.unit_complex().y() / theta; one_minus_cos_theta_by_theta = (Scalar(1.) - so2.unit_complex().x()) / theta; } Vector2<Scalar> trans( sin_theta_by_theta * a[0] - one_minus_cos_theta_by_theta * a[1], one_minus_cos_theta_by_theta * a[0] + sin_theta_by_theta * a[1]); return SE2<Scalar>(so2, trans); } // Returns the ith infinitesimal generators of SE(2). // // The infinitesimal generators of SE(2) are: // // | 0 0 1 | // G_0 = | 0 0 0 | // | 0 0 0 | // // | 0 0 0 | // G_1 = | 0 0 1 | // | 0 0 0 | // // | 0 -1 0 | // G_2 = | 1 0 0 | // | 0 0 0 | // Precondition: ``i`` must be in 0, 1 or 2. // SOPHUS_FUNC static Transformation generator(int i) { SOPHUS_ENSURE(i >= 0 || i <= 2, "i should be in range [0,2]."); Tangent e; e.setZero(); e[i] = Scalar(1); return hat(e); } // hat-operator // // It takes in the 3-vector representation (= twist) and returns the // corresponding matrix representation of Lie algebra element. // // Formally, the ``hat()`` operator of SE(3) is defined as // // ``hat(.): R^3 -> R^{3x33}, hat(a) = sum_i a_i * G_i`` (for i=0,1,2) // // with ``G_i`` being the ith infinitesimal generator of SE(2). 
// SOPHUS_FUNC static Transformation hat(Tangent const& a) { Transformation Omega; Omega.setZero(); Omega.template topLeftCorner<2, 2>() = SO2<Scalar>::hat(a[2]); Omega.col(2).template head<2>() = a.template head<2>(); return Omega; } // Lie bracket // // It computes the Lie bracket of SE(2). To be more specific, it computes // // ``[omega_1, omega_2]_se2 := vee([hat(omega_1), hat(omega_2)])`` // // with ``[A,B] := AB-BA`` being the matrix commutator, ``hat(.) the // hat-operator and ``vee(.)`` the vee-operator of SE(2). // SOPHUS_FUNC static Tangent lieBracket(Tangent const& a, Tangent const& b) { Vector2<Scalar> upsilon1 = a.template head<2>(); Vector2<Scalar> upsilon2 = b.template head<2>(); Scalar theta1 = a[2]; Scalar theta2 = b[2]; return Tangent(-theta1 * upsilon2[1] + theta2 * upsilon1[1], theta1 * upsilon2[0] - theta2 * upsilon1[0], Scalar(0)); } // Logarithmic map // // Computes the logarithm, the inverse of the group exponential which maps // element of the group (rigid body transformations) to elements of the // tangent space (twist). // // To be specific, this function computes ``vee(logmat(.))`` with // ``logmat(.)`` being the matrix logarithm and ``vee(.)`` the vee-operator // of SE(2). // SOPHUS_FUNC static Tangent log(SE2<Scalar> const& other) { Tangent upsilon_theta; SO2<Scalar> const& so2 = other.so2(); Scalar theta = SO2<Scalar>::log(so2); upsilon_theta[2] = theta; Scalar halftheta = Scalar(0.5) * theta; Scalar halftheta_by_tan_of_halftheta; Vector2<Scalar> const& z = so2.unit_complex(); Scalar real_minus_one = z.x() - Scalar(1.); if (std::abs(real_minus_one) < Constants<Scalar>::epsilon()) { halftheta_by_tan_of_halftheta = Scalar(1.) - Scalar(1. 
/ 12) * theta * theta; } else { halftheta_by_tan_of_halftheta = -(halftheta * z.y()) / (real_minus_one); } Matrix<Scalar, 2, 2> V_inv; V_inv << halftheta_by_tan_of_halftheta, halftheta, -halftheta, halftheta_by_tan_of_halftheta; upsilon_theta.template head<2>() = V_inv * other.translation(); return upsilon_theta; } // vee-operator // // It takes the 3x3-matrix representation ``Omega`` and maps it to the // corresponding 3-vector representation of Lie algebra. // // This is the inverse of the hat-operator, see above. // // Precondition: ``Omega`` must have the following structure: // // | 0 -d a | // | d 0 b | // | 0 0 0 | . // SOPHUS_FUNC static Tangent vee(Transformation const& Omega) { SOPHUS_ENSURE( Omega.row(2).template lpNorm<1>() < Constants<Scalar>::epsilon(), "Omega: \n%", Omega); Tangent upsilon_omega; upsilon_omega.template head<2>() = Omega.col(2).template head<2>(); upsilon_omega[2] = SO2<Scalar>::vee(Omega.template topLeftCorner<2, 2>()); return upsilon_omega; } }; // SE2 default type - Constructors and default storage for SE3 Type. template <class Scalar_, int Options> class SE2 : public SE2Base<SE2<Scalar_, Options>> { using Base = SE2Base<SE2<Scalar_, Options>>; public: using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; EIGEN_MAKE_ALIGNED_OPERATOR_NEW // Default constructor initialize rigid body motion to the identity. 
// SOPHUS_FUNC SE2() : translation_(Vector2<Scalar>::Zero()) {} // Copy constructor // template <class OtherDerived> SOPHUS_FUNC SE2(SE2Base<OtherDerived> const& other) : so2_(other.so2()), translation_(other.translation()) {} // Constructor from SO3 and translation vector // template <class OtherDerived> SOPHUS_FUNC SE2(SO2Base<OtherDerived> const& so2, Point const& translation) : so2_(so2), translation_(translation) {} // Constructor from rotation matrix and translation vector // // Precondition: Rotation matrix needs to be orthogonal with determinant of 1. // SOPHUS_FUNC SE2(typename SO2<Scalar>::Transformation const& rotation_matrix, Point const& translation) : so2_(rotation_matrix), translation_(translation) {} // Constructor from rotation angle and translation vector. // SOPHUS_FUNC SE2(Scalar const& theta, Point const& translation) : so2_(theta), translation_(translation) {} // Constructor from complex number and translation vector // // Precondition: ``complex` must not be close to zero. SOPHUS_FUNC SE2(Vector2<Scalar> const& complex, Point const& translation) : so2_(complex), translation_(translation) {} // Constructor from 3x3 matrix // // Precondition: Rotation matrix needs to be orthogonal with determinant of 1. // The last row must be (0, 0, 1). // SOPHUS_FUNC explicit SE2(Transformation const& T) : so2_(T.template topLeftCorner<2, 2>().eval()), translation_(T.template block<2, 1>(0, 2)) {} // This provides unsafe read/write access to internal data. SO(2) is // represented by a complex number (two parameters). When using direct write // access, the user needs to take care of that the complex number stays // normalized. // SOPHUS_FUNC Scalar* data() { // so2_ and translation_ are layed out sequentially with no padding return so2_.data(); } // Const version of data() above. 
// SOPHUS_FUNC Scalar const* data() const { // so2_ and translation_ are layed out sequentially with no padding return so2_.data(); } // Accessor of SO3 // SOPHUS_FUNC SO2<Scalar>& so2() { return so2_; } // Mutator of SO3 // SOPHUS_FUNC SO2<Scalar> const& so2() const { return so2_; } // Mutator of translation vector // SOPHUS_FUNC Vector2<Scalar>& translation() { return translation_; } // Accessor of translation vector // SOPHUS_FUNC Vector2<Scalar> const& translation() const { return translation_; } protected: Sophus::SO2<Scalar> so2_; Vector2<Scalar> translation_; }; } // end namespace namespace Eigen { // Specialization of Eigen::Map for ``SE2``. // // Allows us to wrap SE2 objects around POD array. template <class Scalar_, int Options> class Map<Sophus::SE2<Scalar_>, Options> : public Sophus::SE2Base<Map<Sophus::SE2<Scalar_>, Options>> { using Base = Sophus::SE2Base<Map<Sophus::SE2<Scalar_>, Options>>; public: using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map) using Base::operator*=; using Base::operator*; SOPHUS_FUNC Map(Scalar* coeffs) : so2_(coeffs), translation_(coeffs + Sophus::SO2<Scalar>::num_parameters) {} // Mutator of SO3 // SOPHUS_FUNC Map<Sophus::SO2<Scalar>, Options>& so2() { return so2_; } // Accessor of SO3 // SOPHUS_FUNC Map<Sophus::SO2<Scalar>, Options> const& so2() const { return so2_; } // Mutator of translation vector // SOPHUS_FUNC Map<Sophus::Vector2<Scalar>, Options>& translation() { return translation_; } // Accessor of translation vector // SOPHUS_FUNC Map<Sophus::Vector2<Scalar>, Options> const& translation() const { return translation_; } protected: Map<Sophus::SO2<Scalar>, Options> so2_; Map<Sophus::Vector2<Scalar>, Options> translation_; }; // Specialization of Eigen::Map for ``SE2 const``. // // Allows us to wrap SE2 objects around POD array. 
template <class Scalar_, int Options> class Map<Sophus::SE2<Scalar_> const, Options> : public Sophus::SE2Base<Map<Sophus::SE2<Scalar_> const, Options>> { using Base = Sophus::SE2Base<Map<Sophus::SE2<Scalar_> const, Options>>; public: using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map) using Base::operator*=; using Base::operator*; SOPHUS_FUNC Map(Scalar const* coeffs) : so2_(coeffs), translation_(coeffs + Sophus::SO2<Scalar>::num_parameters) {} // Accessor of SO3 // SOPHUS_FUNC Map<Sophus::SO2<Scalar> const, Options> const& so2() const { return so2_; } // Accessor of translation vector // SOPHUS_FUNC Map<Sophus::Vector2<Scalar> const, Options> const& translation() const { return translation_; } protected: Map<Sophus::SO2<Scalar> const, Options> const so2_; Map<Sophus::Vector2<Scalar> const, Options> const translation_; }; } #endif
Trexter/invio
include/invio/sophus/se2.hpp
C++
gpl-2.0
18,906
--- name: Bug report about: Create a report to help us improve labels: bug --- <!-- Before filling this issue, please read the wiki (https://github.com/fossology/fossology/wiki) and search if the bug do not already exists in the issues (https://github.com/fossology/fossology/issues). --> ### Description Please describe your issue in few words here. #### How to reproduce Describe the bug and list the steps you used when the issue occurred. #### Screenshots If applicable, add screenshots to help explain your problem. ### Versions * Last commit id on master: * Operating System (lsb_release -a): ### Logs Any logs (if any) generated in #### FOSSology logs Logs generated under /var/log/fossology/fossology.log #### Apache logs Logs generated under /var/log/apache2/error.log #### Job logs Logs generated under Geeky Scan Details (Job history => click on agent's job id)
fossology/fossology
.github/ISSUE_TEMPLATE/bug_report.md
Markdown
gpl-2.0
891
/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _DRAMC_SOC_H_ #define _DRAMC_SOC_H_ enum { CHANNEL_A = 0, CHANNEL_B, CHANNEL_NUM }; enum { GW_PARAM_COARSE = 0, GW_PARAM_FINE, GW_PARAM_NUM }; enum { DUAL_RANKS = 2, CATRAINING_NUM = 10 }; enum { DQ_DATA_WIDTH = 32, DQS_BIT_NUMBER = 8, DQS_NUMBER = (DQ_DATA_WIDTH / DQS_BIT_NUMBER) }; #endif /* _DRAMC_COMMON_H_ */
pcengines/coreboot
src/soc/mediatek/mt8173/include/soc/dramc_soc.h
C
gpl-2.0
386
package # hide from PAUSE DBIx::Class::ResultSetProxy; unless ($INC{"DBIx/Class/DB.pm"}) { warn "IMPORTANT: DBIx::Class::ResultSetProxy is DEPRECATED AND *WILL* BE REMOVED. DO NOT USE.\n"; } use strict; use warnings; use base qw/DBIx::Class/; sub search { shift->resultset_instance->search(@_); } sub search_literal { shift->resultset_instance->search_literal(@_); } sub search_like { shift->resultset_instance->search_like(@_); } sub count { shift->resultset_instance->count(@_); } sub count_literal { shift->resultset_instance->count_literal(@_); } sub find { shift->resultset_instance->find(@_); } sub create { shift->resultset_instance->create(@_); } sub find_or_create { shift->resultset_instance->find_or_create(@_); } sub find_or_new { shift->resultset_instance->find_or_new(@_); } sub update_or_create { shift->resultset_instance->update_or_create(@_); } 1;
mishin/dwimperl-windows
strawberry-perl-5.20.0.1-32bit-portable/perl/vendor/lib/DBIx/Class/ResultSetProxy.pm
Perl
gpl-2.0
997
/* * Copyright (c) 2002, Intel Corporation. All rights reserved. * Created by: julie.n.fleischer REMOVE-THIS AT intel DOT com * This file is licensed under the GPL license. For the full content * of this license, see the COPYING file at the top level of this * source tree. * Test that if value.it_value = 0, the timer is disarmed. Test by * disarming a currently armed timer. * * For this test, signal SIGTOTEST will be used, clock CLOCK_REALTIME * will be used. */ #include <time.h> #include <signal.h> #include <stdio.h> #include <unistd.h> #include <stdlib.h> #include "posixtest.h" #define SIGTOTEST SIGALRM #define TIMEREXPIRE 3 static void handler(int signo PTS_ATTRIBUTE_UNUSED) { printf("Incorrectly in signal handler\n"); printf("Test FAILED\n"); exit(PTS_FAIL); } int main(void) { struct sigevent ev; struct sigaction act; timer_t tid; struct itimerspec its; struct timespec ts; ev.sigev_notify = SIGEV_SIGNAL; ev.sigev_signo = SIGTOTEST; act.sa_handler = handler; act.sa_flags = 0; if (sigemptyset(&act.sa_mask) == -1) { perror("Error calling sigemptyset\n"); return PTS_UNRESOLVED; } if (sigaction(SIGTOTEST, &act, 0) == -1) { perror("Error calling sigaction\n"); return PTS_UNRESOLVED; } if (timer_create(CLOCK_REALTIME, &ev, &tid) != 0) { perror("timer_create() did not return success\n"); return PTS_UNRESOLVED; } /* * First set timer to TIMEREXPIRE */ its.it_interval.tv_sec = 0; its.it_interval.tv_nsec = 0; its.it_value.tv_sec = TIMEREXPIRE; its.it_value.tv_nsec = 0; if (timer_settime(tid, 0, &its, NULL) != 0) { perror("timer_settime() did not return success\n"); return PTS_UNRESOLVED; } /* * Second, set value.it_value = 0 */ its.it_value.tv_sec = 0; its.it_value.tv_nsec = 0; if (timer_settime(tid, 0, &its, NULL) != 0) { perror("timer_settime() did not return success\n"); return PTS_UNRESOLVED; } /* * Ensure sleep for TIMEREXPIRE seconds not interrupted */ ts.tv_sec = TIMEREXPIRE; ts.tv_nsec = 0; if (nanosleep(&ts, NULL) == -1) { printf("nanosleep() interrupted\n"); 
printf("Test FAILED\n"); return PTS_FAIL; } printf("Test PASSED\n"); return PTS_PASS; }
linux-test-project/ltp
testcases/open_posix_testsuite/conformance/interfaces/timer_settime/3-2.c
C
gpl-2.0
2,189
/* * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) * Copyright 2003 PathScale, Inc. * Derived from include/asm-i386/pgtable.h * Licensed under the GPL */ #ifndef __UM_PGTABLE_2LEVEL_H #define __UM_PGTABLE_2LEVEL_H #include <asm-generic/pgtable-nopmd.h> /* PGDIR_SHIFT determines what a third-level page table entry can map */ #define PGDIR_SHIFT 22 #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) /* * entries per page directory level: the i386 is two-level, so * we don't really have any PMD directory physically. */ #define PTRS_PER_PTE 1024 #define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE) #define PTRS_PER_PGD 1024 #define FIRST_USER_ADDRESS 0 #define pte_ERROR(e) \ printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \ pte_val(e)) #define pgd_ERROR(e) \ printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), \ pgd_val(e)) static inline int pgd_newpage(pgd_t pgd) { return 0; } static inline void pgd_mkuptodate(pgd_t pgd) { } #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval)) #define pte_pfn(x) phys_to_pfn(pte_val(x)) #define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot)) #define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot)) #define pmd_page_vaddr(pmd) \ ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) /* * Bits 0 through 3 are taken */ #define PTE_FILE_MAX_BITS 28 #define pte_to_pgoff(pte) (pte_val(pte) >> 4) #define pgoff_to_pte(off) ((pte_t) { ((off) << 4) + _PAGE_FILE }) #endif
milaq/linux-hpc
include/asm-um/pgtable-2level.h
C
gpl-2.0
1,577
/****************************************************************************** * * Copyright( c ) 2009-2012 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. 
* * * Bug Fixes and enhancements for Linux Kernels >= 3.2 * by Benjamin Porter <BenjaminPorter86@gmail.com> * * Project homepage: https://github.com/FreedomBen/rtl8188ce-linux-driver * * * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include "table.h" u32 RTL8192CUPHY_REG_2TARRAY[RTL8192CUPHY_REG_2TARRAY_LENGTH] = { 0x024, 0x0011800f, 0x028, 0x00ffdb83, 0x800, 0x80040002, 0x804, 0x00000003, 0x808, 0x0000fc00, 0x80c, 0x0000000a, 0x810, 0x10000330, 0x814, 0x020c3d10, 0x818, 0x02200385, 0x81c, 0x00000000, 0x820, 0x01000100, 0x824, 0x00390004, 0x828, 0x01000100, 0x82c, 0x00390004, 0x830, 0x27272727, 0x834, 0x27272727, 0x838, 0x27272727, 0x83c, 0x27272727, 0x840, 0x00010000, 0x844, 0x00010000, 0x848, 0x27272727, 0x84c, 0x27272727, 0x850, 0x00000000, 0x854, 0x00000000, 0x858, 0x569a569a, 0x85c, 0x0c1b25a4, 0x860, 0x66e60230, 0x864, 0x061f0130, 0x868, 0x27272727, 0x86c, 0x2b2b2b27, 0x870, 0x07000700, 0x874, 0x22184000, 0x878, 0x08080808, 0x87c, 0x00000000, 0x880, 0xc0083070, 0x884, 0x000004d5, 0x888, 0x00000000, 0x88c, 0xcc0000c0, 0x890, 0x00000800, 0x894, 0xfffffffe, 0x898, 0x40302010, 0x89c, 0x00706050, 0x900, 0x00000000, 0x904, 0x00000023, 0x908, 0x00000000, 0x90c, 0x81121313, 0xa00, 0x00d047c8, 0xa04, 0x80ff000c, 0xa08, 0x8c838300, 0xa0c, 0x2e68120f, 0xa10, 0x9500bb78, 0xa14, 0x11144028, 0xa18, 0x00881117, 0xa1c, 0x89140f00, 0xa20, 0x1a1b0000, 0xa24, 0x090e1317, 0xa28, 0x00000204, 0xa2c, 0x00d30000, 0xa70, 0x101fbf00, 0xa74, 0x00000007, 0xc00, 0x48071d40, 0xc04, 0x03a05633, 0xc08, 0x000000e4, 0xc0c, 0x6c6c6c6c, 0xc10, 0x08800000, 0xc14, 0x40000100, 0xc18, 0x08800000, 0xc1c, 0x40000100, 0xc20, 0x00000000, 0xc24, 0x00000000, 0xc28, 0x00000000, 0xc2c, 0x00000000, 0xc30, 0x69e9ac44, 0xc34, 0x469652cf, 0xc38, 0x49795994, 0xc3c, 0x0a97971c, 0xc40, 0x1f7c403f, 0xc44, 0x000100b7, 0xc48, 0xec020107, 0xc4c, 0x007f037f, 0xc50, 0x69543420, 0xc54, 0x43bc0094, 0xc58, 0x69543420, 0xc5c, 0x433c0094, 0xc60, 
0x00000000, 0xc64, 0x5116848b, 0xc68, 0x47c00bff, 0xc6c, 0x00000036, 0xc70, 0x2c7f000d, 0xc74, 0x2186115b, 0xc78, 0x0000001f, 0xc7c, 0x00b99612, 0xc80, 0x40000100, 0xc84, 0x20f60000, 0xc88, 0x40000100, 0xc8c, 0xa0e40000, 0xc90, 0x00121820, 0xc94, 0x00000000, 0xc98, 0x00121820, 0xc9c, 0x00007f7f, 0xca0, 0x00000000, 0xca4, 0x00000080, 0xca8, 0x00000000, 0xcac, 0x00000000, 0xcb0, 0x00000000, 0xcb4, 0x00000000, 0xcb8, 0x00000000, 0xcbc, 0x28000000, 0xcc0, 0x00000000, 0xcc4, 0x00000000, 0xcc8, 0x00000000, 0xccc, 0x00000000, 0xcd0, 0x00000000, 0xcd4, 0x00000000, 0xcd8, 0x64b22427, 0xcdc, 0x00766932, 0xce0, 0x00222222, 0xce4, 0x00000000, 0xce8, 0x37644302, 0xcec, 0x2f97d40c, 0xd00, 0x00080740, 0xd04, 0x00020403, 0xd08, 0x0000907f, 0xd0c, 0x20010201, 0xd10, 0xa0633333, 0xd14, 0x3333bc43, 0xd18, 0x7a8f5b6b, 0xd2c, 0xcc979975, 0xd30, 0x00000000, 0xd34, 0x80608000, 0xd38, 0x00000000, 0xd3c, 0x00027293, 0xd40, 0x00000000, 0xd44, 0x00000000, 0xd48, 0x00000000, 0xd4c, 0x00000000, 0xd50, 0x6437140a, 0xd54, 0x00000000, 0xd58, 0x00000000, 0xd5c, 0x30032064, 0xd60, 0x4653de68, 0xd64, 0x04518a3c, 0xd68, 0x00002101, 0xd6c, 0x2a201c16, 0xd70, 0x1812362e, 0xd74, 0x322c2220, 0xd78, 0x000e3c24, 0xe00, 0x2a2a2a2a, 0xe04, 0x2a2a2a2a, 0xe08, 0x03902a2a, 0xe10, 0x2a2a2a2a, 0xe14, 0x2a2a2a2a, 0xe18, 0x2a2a2a2a, 0xe1c, 0x2a2a2a2a, 0xe28, 0x00000000, 0xe30, 0x1000dc1f, 0xe34, 0x10008c1f, 0xe38, 0x02140102, 0xe3c, 0x681604c2, 0xe40, 0x01007c00, 0xe44, 0x01004800, 0xe48, 0xfb000000, 0xe4c, 0x000028d1, 0xe50, 0x1000dc1f, 0xe54, 0x10008c1f, 0xe58, 0x02140102, 0xe5c, 0x28160d05, 0xe60, 0x00000010, 0xe68, 0x001b25a4, 0xe6c, 0x63db25a4, 0xe70, 0x63db25a4, 0xe74, 0x0c1b25a4, 0xe78, 0x0c1b25a4, 0xe7c, 0x0c1b25a4, 0xe80, 0x0c1b25a4, 0xe84, 0x63db25a4, 0xe88, 0x0c1b25a4, 0xe8c, 0x63db25a4, 0xed0, 0x63db25a4, 0xed4, 0x63db25a4, 0xed8, 0x63db25a4, 0xedc, 0x001b25a4, 0xee0, 0x001b25a4, 0xeec, 0x6fdb25a4, 0xf14, 0x00000003, 0xf4c, 0x00000000, 0xf00, 0x00000300, }; u32 
RTL8192CUPHY_REG_1TARRAY[RTL8192CUPHY_REG_1TARRAY_LENGTH] = { 0x024, 0x0011800f, 0x028, 0x00ffdb83, 0x800, 0x80040000, 0x804, 0x00000001, 0x808, 0x0000fc00, 0x80c, 0x0000000a, 0x810, 0x10000330, 0x814, 0x020c3d10, 0x818, 0x02200385, 0x81c, 0x00000000, 0x820, 0x01000100, 0x824, 0x00390004, 0x828, 0x00000000, 0x82c, 0x00000000, 0x830, 0x00000000, 0x834, 0x00000000, 0x838, 0x00000000, 0x83c, 0x00000000, 0x840, 0x00010000, 0x844, 0x00000000, 0x848, 0x00000000, 0x84c, 0x00000000, 0x850, 0x00000000, 0x854, 0x00000000, 0x858, 0x569a569a, 0x85c, 0x001b25a4, 0x860, 0x66e60230, 0x864, 0x061f0130, 0x868, 0x00000000, 0x86c, 0x32323200, 0x870, 0x07000700, 0x874, 0x22004000, 0x878, 0x00000808, 0x87c, 0x00000000, 0x880, 0xc0083070, 0x884, 0x000004d5, 0x888, 0x00000000, 0x88c, 0xccc000c0, 0x890, 0x00000800, 0x894, 0xfffffffe, 0x898, 0x40302010, 0x89c, 0x00706050, 0x900, 0x00000000, 0x904, 0x00000023, 0x908, 0x00000000, 0x90c, 0x81121111, 0xa00, 0x00d047c8, 0xa04, 0x80ff000c, 0xa08, 0x8c838300, 0xa0c, 0x2e68120f, 0xa10, 0x9500bb78, 0xa14, 0x11144028, 0xa18, 0x00881117, 0xa1c, 0x89140f00, 0xa20, 0x1a1b0000, 0xa24, 0x090e1317, 0xa28, 0x00000204, 0xa2c, 0x00d30000, 0xa70, 0x101fbf00, 0xa74, 0x00000007, 0xc00, 0x48071d40, 0xc04, 0x03a05611, 0xc08, 0x000000e4, 0xc0c, 0x6c6c6c6c, 0xc10, 0x08800000, 0xc14, 0x40000100, 0xc18, 0x08800000, 0xc1c, 0x40000100, 0xc20, 0x00000000, 0xc24, 0x00000000, 0xc28, 0x00000000, 0xc2c, 0x00000000, 0xc30, 0x69e9ac44, 0xc34, 0x469652cf, 0xc38, 0x49795994, 0xc3c, 0x0a97971c, 0xc40, 0x1f7c403f, 0xc44, 0x000100b7, 0xc48, 0xec020107, 0xc4c, 0x007f037f, 0xc50, 0x69543420, 0xc54, 0x43bc0094, 0xc58, 0x69543420, 0xc5c, 0x433c0094, 0xc60, 0x00000000, 0xc64, 0x5116848b, 0xc68, 0x47c00bff, 0xc6c, 0x00000036, 0xc70, 0x2c7f000d, 0xc74, 0x018610db, 0xc78, 0x0000001f, 0xc7c, 0x00b91612, 0xc80, 0x40000100, 0xc84, 0x20f60000, 0xc88, 0x40000100, 0xc8c, 0x20200000, 0xc90, 0x00121820, 0xc94, 0x00000000, 0xc98, 0x00121820, 0xc9c, 0x00007f7f, 0xca0, 0x00000000, 0xca4, 0x00000080, 
0xca8, 0x00000000, 0xcac, 0x00000000, 0xcb0, 0x00000000, 0xcb4, 0x00000000, 0xcb8, 0x00000000, 0xcbc, 0x28000000, 0xcc0, 0x00000000, 0xcc4, 0x00000000, 0xcc8, 0x00000000, 0xccc, 0x00000000, 0xcd0, 0x00000000, 0xcd4, 0x00000000, 0xcd8, 0x64b22427, 0xcdc, 0x00766932, 0xce0, 0x00222222, 0xce4, 0x00000000, 0xce8, 0x37644302, 0xcec, 0x2f97d40c, 0xd00, 0x00000740, 0xd04, 0x00020401, 0xd08, 0x0000907f, 0xd0c, 0x20010201, 0xd10, 0xa0633333, 0xd14, 0x3333bc43, 0xd18, 0x7a8f5b6b, 0xd2c, 0xcc979975, 0xd30, 0x00000000, 0xd34, 0x80608000, 0xd38, 0x00000000, 0xd3c, 0x00027293, 0xd40, 0x00000000, 0xd44, 0x00000000, 0xd48, 0x00000000, 0xd4c, 0x00000000, 0xd50, 0x6437140a, 0xd54, 0x00000000, 0xd58, 0x00000000, 0xd5c, 0x30032064, 0xd60, 0x4653de68, 0xd64, 0x04518a3c, 0xd68, 0x00002101, 0xd6c, 0x2a201c16, 0xd70, 0x1812362e, 0xd74, 0x322c2220, 0xd78, 0x000e3c24, 0xe00, 0x2a2a2a2a, 0xe04, 0x2a2a2a2a, 0xe08, 0x03902a2a, 0xe10, 0x2a2a2a2a, 0xe14, 0x2a2a2a2a, 0xe18, 0x2a2a2a2a, 0xe1c, 0x2a2a2a2a, 0xe28, 0x00000000, 0xe30, 0x1000dc1f, 0xe34, 0x10008c1f, 0xe38, 0x02140102, 0xe3c, 0x681604c2, 0xe40, 0x01007c00, 0xe44, 0x01004800, 0xe48, 0xfb000000, 0xe4c, 0x000028d1, 0xe50, 0x1000dc1f, 0xe54, 0x10008c1f, 0xe58, 0x02140102, 0xe5c, 0x28160d05, 0xe60, 0x00000008, 0xe68, 0x001b25a4, 0xe6c, 0x631b25a0, 0xe70, 0x631b25a0, 0xe74, 0x081b25a0, 0xe78, 0x081b25a0, 0xe7c, 0x081b25a0, 0xe80, 0x081b25a0, 0xe84, 0x631b25a0, 0xe88, 0x081b25a0, 0xe8c, 0x631b25a0, 0xed0, 0x631b25a0, 0xed4, 0x631b25a0, 0xed8, 0x631b25a0, 0xedc, 0x001b25a0, 0xee0, 0x001b25a0, 0xeec, 0x6b1b25a0, 0xf14, 0x00000003, 0xf4c, 0x00000000, 0xf00, 0x00000300, }; u32 RTL8192CUPHY_REG_ARRAY_PG[RTL8192CUPHY_REG_ARRAY_PGLENGTH] = { 0xe00, 0xffffffff, 0x07090c0c, 0xe04, 0xffffffff, 0x01020405, 0xe08, 0x0000ff00, 0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x0b0c0c0e, 0xe14, 0xffffffff, 0x01030506, 0xe18, 0xffffffff, 0x0b0c0d0e, 0xe1c, 0xffffffff, 0x01030509, 0x830, 0xffffffff, 0x07090c0c, 0x834, 0xffffffff, 0x01020405, 
0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x0b0c0d0e, 0x848, 0xffffffff, 0x01030509, 0x84c, 0xffffffff, 0x0b0c0d0e, 0x868, 0xffffffff, 0x01030509, 0xe00, 0xffffffff, 0x00000000, 0xe04, 0xffffffff, 0x00000000, 0xe08, 0x0000ff00, 0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x00000000, 0xe14, 0xffffffff, 0x00000000, 0xe18, 0xffffffff, 0x00000000, 0xe1c, 0xffffffff, 0x00000000, 0x830, 0xffffffff, 0x00000000, 0x834, 0xffffffff, 0x00000000, 0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x00000000, 0x848, 0xffffffff, 0x00000000, 0x84c, 0xffffffff, 0x00000000, 0x868, 0xffffffff, 0x00000000, 0xe00, 0xffffffff, 0x04040404, 0xe04, 0xffffffff, 0x00020204, 0xe08, 0x0000ff00, 0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x06060606, 0xe14, 0xffffffff, 0x00020406, 0xe18, 0xffffffff, 0x00000000, 0xe1c, 0xffffffff, 0x00000000, 0x830, 0xffffffff, 0x04040404, 0x834, 0xffffffff, 0x00020204, 0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x06060606, 0x848, 0xffffffff, 0x00020406, 0x84c, 0xffffffff, 0x00000000, 0x868, 0xffffffff, 0x00000000, 0xe00, 0xffffffff, 0x00000000, 0xe04, 0xffffffff, 0x00000000, 0xe08, 0x0000ff00, 0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x00000000, 0xe14, 0xffffffff, 0x00000000, 0xe18, 0xffffffff, 0x00000000, 0xe1c, 0xffffffff, 0x00000000, 0x830, 0xffffffff, 0x00000000, 0x834, 0xffffffff, 0x00000000, 0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x00000000, 0x848, 0xffffffff, 0x00000000, 0x84c, 0xffffffff, 0x00000000, 0x868, 0xffffffff, 0x00000000, 0xe00, 0xffffffff, 0x00000000, 0xe04, 0xffffffff, 0x00000000, 0xe08, 0x0000ff00, 0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x00000000, 0xe14, 0xffffffff, 0x00000000, 0xe18, 0xffffffff, 0x00000000, 0xe1c, 0xffffffff, 0x00000000, 0x830, 0xffffffff, 0x00000000, 0x834, 0xffffffff, 0x00000000, 0x838, 
0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x00000000, 0x848, 0xffffffff, 0x00000000, 0x84c, 0xffffffff, 0x00000000, 0x868, 0xffffffff, 0x00000000, 0xe00, 0xffffffff, 0x04040404, 0xe04, 0xffffffff, 0x00020204, 0xe08, 0x0000ff00, 0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x00000000, 0xe14, 0xffffffff, 0x00000000, 0xe18, 0xffffffff, 0x00000000, 0xe1c, 0xffffffff, 0x00000000, 0x830, 0xffffffff, 0x04040404, 0x834, 0xffffffff, 0x00020204, 0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x00000000, 0x848, 0xffffffff, 0x00000000, 0x84c, 0xffffffff, 0x00000000, 0x868, 0xffffffff, 0x00000000, 0xe00, 0xffffffff, 0x00000000, 0xe04, 0xffffffff, 0x00000000, 0xe08, 0x0000ff00, 0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x00000000, 0xe14, 0xffffffff, 0x00000000, 0xe18, 0xffffffff, 0x00000000, 0xe1c, 0xffffffff, 0x00000000, 0x830, 0xffffffff, 0x00000000, 0x834, 0xffffffff, 0x00000000, 0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x00000000, 0x848, 0xffffffff, 0x00000000, 0x84c, 0xffffffff, 0x00000000, 0x868, 0xffffffff, 0x00000000, }; u32 RTL8192CURADIOA_2TARRAY[RTL8192CURADIOA_2TARRAYLENGTH] = { 0x000, 0x00030159, 0x001, 0x00031284, 0x002, 0x00098000, 0x003, 0x00018c63, 0x004, 0x000210e7, 0x009, 0x0002044f, 0x00a, 0x0001adb1, 0x00b, 0x00054867, 0x00c, 0x0008992e, 0x00d, 0x0000e52c, 0x00e, 0x00039ce7, 0x00f, 0x00000451, 0x019, 0x00000000, 0x01a, 0x00010255, 0x01b, 0x00060a00, 0x01c, 0x000fc378, 0x01d, 0x000a1250, 0x01e, 0x0004445f, 0x01f, 0x00080001, 0x020, 0x0000b614, 0x021, 0x0006c000, 0x022, 0x00000000, 0x023, 0x00001558, 0x024, 0x00000060, 0x025, 0x00000483, 0x026, 0x0004f000, 0x027, 0x000ec7d9, 0x028, 0x000577c0, 0x029, 0x00004783, 0x02a, 0x00000001, 0x02b, 0x00021334, 0x02a, 0x00000000, 0x02b, 0x00000054, 0x02a, 0x00000001, 0x02b, 0x00000808, 0x02b, 0x00053333, 0x02c, 0x0000000c, 0x02a, 0x00000002, 0x02b, 0x00000808, 0x02b, 0x0005b333, 
0x02c, 0x0000000d, 0x02a, 0x00000003, 0x02b, 0x00000808, 0x02b, 0x00063333, 0x02c, 0x0000000d, 0x02a, 0x00000004, 0x02b, 0x00000808, 0x02b, 0x0006b333, 0x02c, 0x0000000d, 0x02a, 0x00000005, 0x02b, 0x00000808, 0x02b, 0x00073333, 0x02c, 0x0000000d, 0x02a, 0x00000006, 0x02b, 0x00000709, 0x02b, 0x0005b333, 0x02c, 0x0000000d, 0x02a, 0x00000007, 0x02b, 0x00000709, 0x02b, 0x00063333, 0x02c, 0x0000000d, 0x02a, 0x00000008, 0x02b, 0x0000060a, 0x02b, 0x0004b333, 0x02c, 0x0000000d, 0x02a, 0x00000009, 0x02b, 0x0000060a, 0x02b, 0x00053333, 0x02c, 0x0000000d, 0x02a, 0x0000000a, 0x02b, 0x0000060a, 0x02b, 0x0005b333, 0x02c, 0x0000000d, 0x02a, 0x0000000b, 0x02b, 0x0000060a, 0x02b, 0x00063333, 0x02c, 0x0000000d, 0x02a, 0x0000000c, 0x02b, 0x0000060a, 0x02b, 0x0006b333, 0x02c, 0x0000000d, 0x02a, 0x0000000d, 0x02b, 0x0000060a, 0x02b, 0x00073333, 0x02c, 0x0000000d, 0x02a, 0x0000000e, 0x02b, 0x0000050b, 0x02b, 0x00066666, 0x02c, 0x0000001a, 0x02a, 0x000e0000, 0x010, 0x0004000f, 0x011, 0x000e31fc, 0x010, 0x0006000f, 0x011, 0x000ff9f8, 0x010, 0x0002000f, 0x011, 0x000203f9, 0x010, 0x0003000f, 0x011, 0x000ff500, 0x010, 0x00000000, 0x011, 0x00000000, 0x010, 0x0008000f, 0x011, 0x0003f100, 0x010, 0x0009000f, 0x011, 0x00023100, 0x012, 0x00032000, 0x012, 0x00071000, 0x012, 0x000b0000, 0x012, 0x000fc000, 0x013, 0x000287b3, 0x013, 0x000244b7, 0x013, 0x000204ab, 0x013, 0x0001c49f, 0x013, 0x00018493, 0x013, 0x0001429b, 0x013, 0x00010299, 0x013, 0x0000c29c, 0x013, 0x000081a0, 0x013, 0x000040ac, 0x013, 0x00000020, 0x014, 0x0001944c, 0x014, 0x00059444, 0x014, 0x0009944c, 0x014, 0x000d9444, 0x015, 0x0000f424, 0x015, 0x0004f424, 0x015, 0x0008f424, 0x015, 0x000cf424, 0x016, 0x000e0330, 0x016, 0x000a0330, 0x016, 0x00060330, 0x016, 0x00020330, 0x000, 0x00010159, 0x018, 0x0000f401, 0x0fe, 0x00000000, 0x0fe, 0x00000000, 0x01f, 0x00080003, 0x0fe, 0x00000000, 0x0fe, 0x00000000, 0x01e, 0x00044457, 0x01f, 0x00080000, 0x000, 0x00030159, }; u32 RTL8192CU_RADIOB_2TARRAY[RTL8192CURADIOB_2TARRAYLENGTH] = { 0x000, 
0x00030159, 0x001, 0x00031284, 0x002, 0x00098000, 0x003, 0x00018c63, 0x004, 0x000210e7, 0x009, 0x0002044f, 0x00a, 0x0001adb1, 0x00b, 0x00054867, 0x00c, 0x0008992e, 0x00d, 0x0000e52c, 0x00e, 0x00039ce7, 0x00f, 0x00000451, 0x012, 0x00032000, 0x012, 0x00071000, 0x012, 0x000b0000, 0x012, 0x000fc000, 0x013, 0x000287af, 0x013, 0x000244b7, 0x013, 0x000204ab, 0x013, 0x0001c49f, 0x013, 0x00018493, 0x013, 0x00014297, 0x013, 0x00010295, 0x013, 0x0000c298, 0x013, 0x0000819c, 0x013, 0x000040a8, 0x013, 0x0000001c, 0x014, 0x0001944c, 0x014, 0x00059444, 0x014, 0x0009944c, 0x014, 0x000d9444, 0x015, 0x0000f424, 0x015, 0x0004f424, 0x015, 0x0008f424, 0x015, 0x000cf424, 0x016, 0x000e0330, 0x016, 0x000a0330, 0x016, 0x00060330, 0x016, 0x00020330, }; u32 RTL8192CU_RADIOA_1TARRAY[RTL8192CURADIOA_1TARRAYLENGTH] = { 0x000, 0x00030159, 0x001, 0x00031284, 0x002, 0x00098000, 0x003, 0x00018c63, 0x004, 0x000210e7, 0x009, 0x0002044f, 0x00a, 0x0001adb1, 0x00b, 0x00054867, 0x00c, 0x0008992e, 0x00d, 0x0000e52c, 0x00e, 0x00039ce7, 0x00f, 0x00000451, 0x019, 0x00000000, 0x01a, 0x00010255, 0x01b, 0x00060a00, 0x01c, 0x000fc378, 0x01d, 0x000a1250, 0x01e, 0x0004445f, 0x01f, 0x00080001, 0x020, 0x0000b614, 0x021, 0x0006c000, 0x022, 0x00000000, 0x023, 0x00001558, 0x024, 0x00000060, 0x025, 0x00000483, 0x026, 0x0004f000, 0x027, 0x000ec7d9, 0x028, 0x000577c0, 0x029, 0x00004783, 0x02a, 0x00000001, 0x02b, 0x00021334, 0x02a, 0x00000000, 0x02b, 0x00000054, 0x02a, 0x00000001, 0x02b, 0x00000808, 0x02b, 0x00053333, 0x02c, 0x0000000c, 0x02a, 0x00000002, 0x02b, 0x00000808, 0x02b, 0x0005b333, 0x02c, 0x0000000d, 0x02a, 0x00000003, 0x02b, 0x00000808, 0x02b, 0x00063333, 0x02c, 0x0000000d, 0x02a, 0x00000004, 0x02b, 0x00000808, 0x02b, 0x0006b333, 0x02c, 0x0000000d, 0x02a, 0x00000005, 0x02b, 0x00000808, 0x02b, 0x00073333, 0x02c, 0x0000000d, 0x02a, 0x00000006, 0x02b, 0x00000709, 0x02b, 0x0005b333, 0x02c, 0x0000000d, 0x02a, 0x00000007, 0x02b, 0x00000709, 0x02b, 0x00063333, 0x02c, 0x0000000d, 0x02a, 0x00000008, 0x02b, 0x0000060a, 
0x02b, 0x0004b333, 0x02c, 0x0000000d, 0x02a, 0x00000009, 0x02b, 0x0000060a, 0x02b, 0x00053333, 0x02c, 0x0000000d, 0x02a, 0x0000000a, 0x02b, 0x0000060a, 0x02b, 0x0005b333, 0x02c, 0x0000000d, 0x02a, 0x0000000b, 0x02b, 0x0000060a, 0x02b, 0x00063333, 0x02c, 0x0000000d, 0x02a, 0x0000000c, 0x02b, 0x0000060a, 0x02b, 0x0006b333, 0x02c, 0x0000000d, 0x02a, 0x0000000d, 0x02b, 0x0000060a, 0x02b, 0x00073333, 0x02c, 0x0000000d, 0x02a, 0x0000000e, 0x02b, 0x0000050b, 0x02b, 0x00066666, 0x02c, 0x0000001a, 0x02a, 0x000e0000, 0x010, 0x0004000f, 0x011, 0x000e31fc, 0x010, 0x0006000f, 0x011, 0x000ff9f8, 0x010, 0x0002000f, 0x011, 0x000203f9, 0x010, 0x0003000f, 0x011, 0x000ff500, 0x010, 0x00000000, 0x011, 0x00000000, 0x010, 0x0008000f, 0x011, 0x0003f100, 0x010, 0x0009000f, 0x011, 0x00023100, 0x012, 0x00032000, 0x012, 0x00071000, 0x012, 0x000b0000, 0x012, 0x000fc000, 0x013, 0x000287b3, 0x013, 0x000244b7, 0x013, 0x000204ab, 0x013, 0x0001c49f, 0x013, 0x00018493, 0x013, 0x0001429b, 0x013, 0x00010299, 0x013, 0x0000c29c, 0x013, 0x000081a0, 0x013, 0x000040ac, 0x013, 0x00000020, 0x014, 0x0001944c, 0x014, 0x00059444, 0x014, 0x0009944c, 0x014, 0x000d9444, 0x015, 0x0000f405, 0x015, 0x0004f405, 0x015, 0x0008f405, 0x015, 0x000cf405, 0x016, 0x000e0330, 0x016, 0x000a0330, 0x016, 0x00060330, 0x016, 0x00020330, 0x000, 0x00010159, 0x018, 0x0000f401, 0x0fe, 0x00000000, 0x0fe, 0x00000000, 0x01f, 0x00080003, 0x0fe, 0x00000000, 0x0fe, 0x00000000, 0x01e, 0x00044457, 0x01f, 0x00080000, 0x000, 0x00030159, }; u32 RTL8192CU_RADIOB_1TARRAY[RTL8192CURADIOB_1TARRAYLENGTH] = { 0x0, }; u32 RTL8192CUMAC_2T_ARRAY[RTL8192CUMAC_2T_ARRAYLENGTH] = { 0x420, 0x00000080, 0x423, 0x00000000, 0x430, 0x00000000, 0x431, 0x00000000, 0x432, 0x00000000, 0x433, 0x00000001, 0x434, 0x00000004, 0x435, 0x00000005, 0x436, 0x00000006, 0x437, 0x00000007, 0x438, 0x00000000, 0x439, 0x00000000, 0x43a, 0x00000000, 0x43b, 0x00000001, 0x43c, 0x00000004, 0x43d, 0x00000005, 0x43e, 0x00000006, 0x43f, 0x00000007, 0x440, 0x0000005d, 0x441, 0x00000001, 
0x442, 0x00000000, 0x444, 0x00000015, 0x445, 0x000000f0, 0x446, 0x0000000f, 0x447, 0x00000000, 0x458, 0x00000041, 0x459, 0x000000a8, 0x45a, 0x00000072, 0x45b, 0x000000b9, 0x460, 0x00000066, 0x461, 0x00000066, 0x462, 0x00000008, 0x463, 0x00000003, 0x4c8, 0x000000ff, 0x4c9, 0x00000008, 0x4cc, 0x000000ff, 0x4cd, 0x000000ff, 0x4ce, 0x00000001, 0x500, 0x00000026, 0x501, 0x000000a2, 0x502, 0x0000002f, 0x503, 0x00000000, 0x504, 0x00000028, 0x505, 0x000000a3, 0x506, 0x0000005e, 0x507, 0x00000000, 0x508, 0x0000002b, 0x509, 0x000000a4, 0x50a, 0x0000005e, 0x50b, 0x00000000, 0x50c, 0x0000004f, 0x50d, 0x000000a4, 0x50e, 0x00000000, 0x50f, 0x00000000, 0x512, 0x0000001c, 0x514, 0x0000000a, 0x515, 0x00000010, 0x516, 0x0000000a, 0x517, 0x00000010, 0x51a, 0x00000016, 0x524, 0x0000000f, 0x525, 0x0000004f, 0x546, 0x00000040, 0x547, 0x00000000, 0x550, 0x00000010, 0x551, 0x00000010, 0x559, 0x00000002, 0x55a, 0x00000002, 0x55d, 0x000000ff, 0x605, 0x00000030, 0x608, 0x0000000e, 0x609, 0x0000002a, 0x652, 0x00000020, 0x63c, 0x00000008, 0x63d, 0x00000008, 0x63e, 0x0000000c, 0x63f, 0x0000000c, 0x66e, 0x00000005, 0x700, 0x00000021, 0x701, 0x00000043, 0x702, 0x00000065, 0x703, 0x00000087, 0x708, 0x00000021, 0x709, 0x00000043, 0x70a, 0x00000065, 0x70b, 0x00000087, }; u32 RTL8192CUAGCTAB_2TARRAY[RTL8192CUAGCTAB_2TARRAYLENGTH] = { 0xc78, 0x7b000001, 0xc78, 0x7b010001, 0xc78, 0x7b020001, 0xc78, 0x7b030001, 0xc78, 0x7b040001, 0xc78, 0x7b050001, 0xc78, 0x7a060001, 0xc78, 0x79070001, 0xc78, 0x78080001, 0xc78, 0x77090001, 0xc78, 0x760a0001, 0xc78, 0x750b0001, 0xc78, 0x740c0001, 0xc78, 0x730d0001, 0xc78, 0x720e0001, 0xc78, 0x710f0001, 0xc78, 0x70100001, 0xc78, 0x6f110001, 0xc78, 0x6e120001, 0xc78, 0x6d130001, 0xc78, 0x6c140001, 0xc78, 0x6b150001, 0xc78, 0x6a160001, 0xc78, 0x69170001, 0xc78, 0x68180001, 0xc78, 0x67190001, 0xc78, 0x661a0001, 0xc78, 0x651b0001, 0xc78, 0x641c0001, 0xc78, 0x631d0001, 0xc78, 0x621e0001, 0xc78, 0x611f0001, 0xc78, 0x60200001, 0xc78, 0x49210001, 0xc78, 0x48220001, 0xc78, 
0x47230001, 0xc78, 0x46240001, 0xc78, 0x45250001, 0xc78, 0x44260001, 0xc78, 0x43270001, 0xc78, 0x42280001, 0xc78, 0x41290001, 0xc78, 0x402a0001, 0xc78, 0x262b0001, 0xc78, 0x252c0001, 0xc78, 0x242d0001, 0xc78, 0x232e0001, 0xc78, 0x222f0001, 0xc78, 0x21300001, 0xc78, 0x20310001, 0xc78, 0x06320001, 0xc78, 0x05330001, 0xc78, 0x04340001, 0xc78, 0x03350001, 0xc78, 0x02360001, 0xc78, 0x01370001, 0xc78, 0x00380001, 0xc78, 0x00390001, 0xc78, 0x003a0001, 0xc78, 0x003b0001, 0xc78, 0x003c0001, 0xc78, 0x003d0001, 0xc78, 0x003e0001, 0xc78, 0x003f0001, 0xc78, 0x7b400001, 0xc78, 0x7b410001, 0xc78, 0x7b420001, 0xc78, 0x7b430001, 0xc78, 0x7b440001, 0xc78, 0x7b450001, 0xc78, 0x7a460001, 0xc78, 0x79470001, 0xc78, 0x78480001, 0xc78, 0x77490001, 0xc78, 0x764a0001, 0xc78, 0x754b0001, 0xc78, 0x744c0001, 0xc78, 0x734d0001, 0xc78, 0x724e0001, 0xc78, 0x714f0001, 0xc78, 0x70500001, 0xc78, 0x6f510001, 0xc78, 0x6e520001, 0xc78, 0x6d530001, 0xc78, 0x6c540001, 0xc78, 0x6b550001, 0xc78, 0x6a560001, 0xc78, 0x69570001, 0xc78, 0x68580001, 0xc78, 0x67590001, 0xc78, 0x665a0001, 0xc78, 0x655b0001, 0xc78, 0x645c0001, 0xc78, 0x635d0001, 0xc78, 0x625e0001, 0xc78, 0x615f0001, 0xc78, 0x60600001, 0xc78, 0x49610001, 0xc78, 0x48620001, 0xc78, 0x47630001, 0xc78, 0x46640001, 0xc78, 0x45650001, 0xc78, 0x44660001, 0xc78, 0x43670001, 0xc78, 0x42680001, 0xc78, 0x41690001, 0xc78, 0x406a0001, 0xc78, 0x266b0001, 0xc78, 0x256c0001, 0xc78, 0x246d0001, 0xc78, 0x236e0001, 0xc78, 0x226f0001, 0xc78, 0x21700001, 0xc78, 0x20710001, 0xc78, 0x06720001, 0xc78, 0x05730001, 0xc78, 0x04740001, 0xc78, 0x03750001, 0xc78, 0x02760001, 0xc78, 0x01770001, 0xc78, 0x00780001, 0xc78, 0x00790001, 0xc78, 0x007a0001, 0xc78, 0x007b0001, 0xc78, 0x007c0001, 0xc78, 0x007d0001, 0xc78, 0x007e0001, 0xc78, 0x007f0001, 0xc78, 0x3800001e, 0xc78, 0x3801001e, 0xc78, 0x3802001e, 0xc78, 0x3803001e, 0xc78, 0x3804001e, 0xc78, 0x3805001e, 0xc78, 0x3806001e, 0xc78, 0x3807001e, 0xc78, 0x3808001e, 0xc78, 0x3c09001e, 0xc78, 0x3e0a001e, 0xc78, 0x400b001e, 0xc78, 
0x440c001e, 0xc78, 0x480d001e, 0xc78, 0x4c0e001e, 0xc78, 0x500f001e, 0xc78, 0x5210001e, 0xc78, 0x5611001e, 0xc78, 0x5a12001e, 0xc78, 0x5e13001e, 0xc78, 0x6014001e, 0xc78, 0x6015001e, 0xc78, 0x6016001e, 0xc78, 0x6217001e, 0xc78, 0x6218001e, 0xc78, 0x6219001e, 0xc78, 0x621a001e, 0xc78, 0x621b001e, 0xc78, 0x621c001e, 0xc78, 0x621d001e, 0xc78, 0x621e001e, 0xc78, 0x621f001e, }; u32 RTL8192CUAGCTAB_1TARRAY[RTL8192CUAGCTAB_1TARRAYLENGTH] = { 0xc78, 0x7b000001, 0xc78, 0x7b010001, 0xc78, 0x7b020001, 0xc78, 0x7b030001, 0xc78, 0x7b040001, 0xc78, 0x7b050001, 0xc78, 0x7a060001, 0xc78, 0x79070001, 0xc78, 0x78080001, 0xc78, 0x77090001, 0xc78, 0x760a0001, 0xc78, 0x750b0001, 0xc78, 0x740c0001, 0xc78, 0x730d0001, 0xc78, 0x720e0001, 0xc78, 0x710f0001, 0xc78, 0x70100001, 0xc78, 0x6f110001, 0xc78, 0x6e120001, 0xc78, 0x6d130001, 0xc78, 0x6c140001, 0xc78, 0x6b150001, 0xc78, 0x6a160001, 0xc78, 0x69170001, 0xc78, 0x68180001, 0xc78, 0x67190001, 0xc78, 0x661a0001, 0xc78, 0x651b0001, 0xc78, 0x641c0001, 0xc78, 0x631d0001, 0xc78, 0x621e0001, 0xc78, 0x611f0001, 0xc78, 0x60200001, 0xc78, 0x49210001, 0xc78, 0x48220001, 0xc78, 0x47230001, 0xc78, 0x46240001, 0xc78, 0x45250001, 0xc78, 0x44260001, 0xc78, 0x43270001, 0xc78, 0x42280001, 0xc78, 0x41290001, 0xc78, 0x402a0001, 0xc78, 0x262b0001, 0xc78, 0x252c0001, 0xc78, 0x242d0001, 0xc78, 0x232e0001, 0xc78, 0x222f0001, 0xc78, 0x21300001, 0xc78, 0x20310001, 0xc78, 0x06320001, 0xc78, 0x05330001, 0xc78, 0x04340001, 0xc78, 0x03350001, 0xc78, 0x02360001, 0xc78, 0x01370001, 0xc78, 0x00380001, 0xc78, 0x00390001, 0xc78, 0x003a0001, 0xc78, 0x003b0001, 0xc78, 0x003c0001, 0xc78, 0x003d0001, 0xc78, 0x003e0001, 0xc78, 0x003f0001, 0xc78, 0x7b400001, 0xc78, 0x7b410001, 0xc78, 0x7b420001, 0xc78, 0x7b430001, 0xc78, 0x7b440001, 0xc78, 0x7b450001, 0xc78, 0x7a460001, 0xc78, 0x79470001, 0xc78, 0x78480001, 0xc78, 0x77490001, 0xc78, 0x764a0001, 0xc78, 0x754b0001, 0xc78, 0x744c0001, 0xc78, 0x734d0001, 0xc78, 0x724e0001, 0xc78, 0x714f0001, 0xc78, 0x70500001, 0xc78, 0x6f510001, 
0xc78, 0x6e520001, 0xc78, 0x6d530001, 0xc78, 0x6c540001, 0xc78, 0x6b550001, 0xc78, 0x6a560001, 0xc78, 0x69570001, 0xc78, 0x68580001, 0xc78, 0x67590001, 0xc78, 0x665a0001, 0xc78, 0x655b0001, 0xc78, 0x645c0001, 0xc78, 0x635d0001, 0xc78, 0x625e0001, 0xc78, 0x615f0001, 0xc78, 0x60600001, 0xc78, 0x49610001, 0xc78, 0x48620001, 0xc78, 0x47630001, 0xc78, 0x46640001, 0xc78, 0x45650001, 0xc78, 0x44660001, 0xc78, 0x43670001, 0xc78, 0x42680001, 0xc78, 0x41690001, 0xc78, 0x406a0001, 0xc78, 0x266b0001, 0xc78, 0x256c0001, 0xc78, 0x246d0001, 0xc78, 0x236e0001, 0xc78, 0x226f0001, 0xc78, 0x21700001, 0xc78, 0x20710001, 0xc78, 0x06720001, 0xc78, 0x05730001, 0xc78, 0x04740001, 0xc78, 0x03750001, 0xc78, 0x02760001, 0xc78, 0x01770001, 0xc78, 0x00780001, 0xc78, 0x00790001, 0xc78, 0x007a0001, 0xc78, 0x007b0001, 0xc78, 0x007c0001, 0xc78, 0x007d0001, 0xc78, 0x007e0001, 0xc78, 0x007f0001, 0xc78, 0x3800001e, 0xc78, 0x3801001e, 0xc78, 0x3802001e, 0xc78, 0x3803001e, 0xc78, 0x3804001e, 0xc78, 0x3805001e, 0xc78, 0x3806001e, 0xc78, 0x3807001e, 0xc78, 0x3808001e, 0xc78, 0x3c09001e, 0xc78, 0x3e0a001e, 0xc78, 0x400b001e, 0xc78, 0x440c001e, 0xc78, 0x480d001e, 0xc78, 0x4c0e001e, 0xc78, 0x500f001e, 0xc78, 0x5210001e, 0xc78, 0x5611001e, 0xc78, 0x5a12001e, 0xc78, 0x5e13001e, 0xc78, 0x6014001e, 0xc78, 0x6015001e, 0xc78, 0x6016001e, 0xc78, 0x6217001e, 0xc78, 0x6218001e, 0xc78, 0x6219001e, 0xc78, 0x621a001e, 0xc78, 0x621b001e, 0xc78, 0x621c001e, 0xc78, 0x621d001e, 0xc78, 0x621e001e, 0xc78, 0x621f001e, }; u32 RTL8192CUPHY_REG_1T_HPArray[RTL8192CUPHY_REG_1T_HPArrayLength] = { 0x024, 0x0011800f, 0x028, 0x00ffdb83, 0x040, 0x000c0004, 0x800, 0x80040000, 0x804, 0x00000001, 0x808, 0x0000fc00, 0x80c, 0x0000000a, 0x810, 0x10005388, 0x814, 0x020c3d10, 0x818, 0x02200385, 0x81c, 0x00000000, 0x820, 0x01000100, 0x824, 0x00390204, 0x828, 0x00000000, 0x82c, 0x00000000, 0x830, 0x00000000, 0x834, 0x00000000, 0x838, 0x00000000, 0x83c, 0x00000000, 0x840, 0x00010000, 0x844, 0x00000000, 0x848, 0x00000000, 0x84c, 0x00000000, 0x850, 
0x00000000, 0x854, 0x00000000, 0x858, 0x569a569a, 0x85c, 0x001b25a4, 0x860, 0x66e60230, 0x864, 0x061f0130, 0x868, 0x00000000, 0x86c, 0x20202000, 0x870, 0x03000300, 0x874, 0x22004000, 0x878, 0x00000808, 0x87c, 0x00ffc3f1, 0x880, 0xc0083070, 0x884, 0x000004d5, 0x888, 0x00000000, 0x88c, 0xccc000c0, 0x890, 0x00000800, 0x894, 0xfffffffe, 0x898, 0x40302010, 0x89c, 0x00706050, 0x900, 0x00000000, 0x904, 0x00000023, 0x908, 0x00000000, 0x90c, 0x81121111, 0xa00, 0x00d047c8, 0xa04, 0x80ff000c, 0xa08, 0x8c838300, 0xa0c, 0x2e68120f, 0xa10, 0x9500bb78, 0xa14, 0x11144028, 0xa18, 0x00881117, 0xa1c, 0x89140f00, 0xa20, 0x15160000, 0xa24, 0x070b0f12, 0xa28, 0x00000104, 0xa2c, 0x00d30000, 0xa70, 0x101fbf00, 0xa74, 0x00000007, 0xc00, 0x48071d40, 0xc04, 0x03a05611, 0xc08, 0x000000e4, 0xc0c, 0x6c6c6c6c, 0xc10, 0x08800000, 0xc14, 0x40000100, 0xc18, 0x08800000, 0xc1c, 0x40000100, 0xc20, 0x00000000, 0xc24, 0x00000000, 0xc28, 0x00000000, 0xc2c, 0x00000000, 0xc30, 0x69e9ac44, 0xc34, 0x469652cf, 0xc38, 0x49795994, 0xc3c, 0x0a97971c, 0xc40, 0x1f7c403f, 0xc44, 0x000100b7, 0xc48, 0xec020107, 0xc4c, 0x007f037f, 0xc50, 0x6954342e, 0xc54, 0x43bc0094, 0xc58, 0x6954342f, 0xc5c, 0x433c0094, 0xc60, 0x00000000, 0xc64, 0x5116848b, 0xc68, 0x47c00bff, 0xc6c, 0x00000036, 0xc70, 0x2c46000d, 0xc74, 0x018610db, 0xc78, 0x0000001f, 0xc7c, 0x00b91612, 0xc80, 0x24000090, 0xc84, 0x20f60000, 0xc88, 0x24000090, 0xc8c, 0x20200000, 0xc90, 0x00121820, 0xc94, 0x00000000, 0xc98, 0x00121820, 0xc9c, 0x00007f7f, 0xca0, 0x00000000, 0xca4, 0x00000080, 0xca8, 0x00000000, 0xcac, 0x00000000, 0xcb0, 0x00000000, 0xcb4, 0x00000000, 0xcb8, 0x00000000, 0xcbc, 0x28000000, 0xcc0, 0x00000000, 0xcc4, 0x00000000, 0xcc8, 0x00000000, 0xccc, 0x00000000, 0xcd0, 0x00000000, 0xcd4, 0x00000000, 0xcd8, 0x64b22427, 0xcdc, 0x00766932, 0xce0, 0x00222222, 0xce4, 0x00000000, 0xce8, 0x37644302, 0xcec, 0x2f97d40c, 0xd00, 0x00080740, 0xd04, 0x00020401, 0xd08, 0x0000907f, 0xd0c, 0x20010201, 0xd10, 0xa0633333, 0xd14, 0x3333bc43, 0xd18, 0x7a8f5b6b, 0xd2c, 
0xcc979975, 0xd30, 0x00000000, 0xd34, 0x80608000, 0xd38, 0x00000000, 0xd3c, 0x00027293, 0xd40, 0x00000000, 0xd44, 0x00000000, 0xd48, 0x00000000, 0xd4c, 0x00000000, 0xd50, 0x6437140a, 0xd54, 0x00000000, 0xd58, 0x00000000, 0xd5c, 0x30032064, 0xd60, 0x4653de68, 0xd64, 0x04518a3c, 0xd68, 0x00002101, 0xd6c, 0x2a201c16, 0xd70, 0x1812362e, 0xd74, 0x322c2220, 0xd78, 0x000e3c24, 0xe00, 0x24242424, 0xe04, 0x24242424, 0xe08, 0x03902024, 0xe10, 0x24242424, 0xe14, 0x24242424, 0xe18, 0x24242424, 0xe1c, 0x24242424, 0xe28, 0x00000000, 0xe30, 0x1000dc1f, 0xe34, 0x10008c1f, 0xe38, 0x02140102, 0xe3c, 0x681604c2, 0xe40, 0x01007c00, 0xe44, 0x01004800, 0xe48, 0xfb000000, 0xe4c, 0x000028d1, 0xe50, 0x1000dc1f, 0xe54, 0x10008c1f, 0xe58, 0x02140102, 0xe5c, 0x28160d05, 0xe60, 0x00000008, 0xe68, 0x001b25a4, 0xe6c, 0x631b25a0, 0xe70, 0x631b25a0, 0xe74, 0x081b25a0, 0xe78, 0x081b25a0, 0xe7c, 0x081b25a0, 0xe80, 0x081b25a0, 0xe84, 0x631b25a0, 0xe88, 0x081b25a0, 0xe8c, 0x631b25a0, 0xed0, 0x631b25a0, 0xed4, 0x631b25a0, 0xed8, 0x631b25a0, 0xedc, 0x001b25a0, 0xee0, 0x001b25a0, 0xeec, 0x6b1b25a0, 0xee8, 0x31555448, 0xf14, 0x00000003, 0xf4c, 0x00000000, 0xf00, 0x00000300, }; u32 RTL8192CUPHY_REG_Array_PG_HP[RTL8192CUPHY_REG_Array_PG_HPLength] = { 0xe00, 0xffffffff, 0x06080808, 0xe04, 0xffffffff, 0x00040406, 0xe08, 0x0000ff00, 0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x04060608, 0xe14, 0xffffffff, 0x00020204, 0xe18, 0xffffffff, 0x04060608, 0xe1c, 0xffffffff, 0x00020204, 0x830, 0xffffffff, 0x06080808, 0x834, 0xffffffff, 0x00040406, 0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x04060608, 0x848, 0xffffffff, 0x00020204, 0x84c, 0xffffffff, 0x04060608, 0x868, 0xffffffff, 0x00020204, 0xe00, 0xffffffff, 0x00000000, 0xe04, 0xffffffff, 0x00000000, 0xe08, 0x0000ff00, 0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x00000000, 0xe14, 0xffffffff, 0x00000000, 0xe18, 0xffffffff, 0x00000000, 0xe1c, 0xffffffff, 0x00000000, 0x830, 0xffffffff, 
0x00000000, 0x834, 0xffffffff, 0x00000000, 0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x00000000, 0x848, 0xffffffff, 0x00000000, 0x84c, 0xffffffff, 0x00000000, 0x868, 0xffffffff, 0x00000000, 0xe00, 0xffffffff, 0x00000000, 0xe04, 0xffffffff, 0x00000000, 0xe08, 0x0000ff00, 0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x00000000, 0xe14, 0xffffffff, 0x00000000, 0xe18, 0xffffffff, 0x00000000, 0xe1c, 0xffffffff, 0x00000000, 0x830, 0xffffffff, 0x00000000, 0x834, 0xffffffff, 0x00000000, 0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x00000000, 0x848, 0xffffffff, 0x00000000, 0x84c, 0xffffffff, 0x00000000, 0x868, 0xffffffff, 0x00000000, 0xe00, 0xffffffff, 0x00000000, 0xe04, 0xffffffff, 0x00000000, 0xe08, 0x0000ff00, 0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x00000000, 0xe14, 0xffffffff, 0x00000000, 0xe18, 0xffffffff, 0x00000000, 0xe1c, 0xffffffff, 0x00000000, 0x830, 0xffffffff, 0x00000000, 0x834, 0xffffffff, 0x00000000, 0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x00000000, 0x848, 0xffffffff, 0x00000000, 0x84c, 0xffffffff, 0x00000000, 0x868, 0xffffffff, 0x00000000, 0xe00, 0xffffffff, 0x00000000, 0xe04, 0xffffffff, 0x00000000, 0xe08, 0x0000ff00, 0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x00000000, 0xe14, 0xffffffff, 0x00000000, 0xe18, 0xffffffff, 0x00000000, 0xe1c, 0xffffffff, 0x00000000, 0x830, 0xffffffff, 0x00000000, 0x834, 0xffffffff, 0x00000000, 0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x00000000, 0x848, 0xffffffff, 0x00000000, 0x84c, 0xffffffff, 0x00000000, 0x868, 0xffffffff, 0x00000000, 0xe00, 0xffffffff, 0x00000000, 0xe04, 0xffffffff, 0x00000000, 0xe08, 0x0000ff00, 0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x00000000, 0xe14, 0xffffffff, 0x00000000, 0xe18, 0xffffffff, 0x00000000, 0xe1c, 0xffffffff, 0x00000000, 0x830, 0xffffffff, 0x00000000, 
0x834, 0xffffffff, 0x00000000, 0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x00000000, 0x848, 0xffffffff, 0x00000000, 0x84c, 0xffffffff, 0x00000000, 0x868, 0xffffffff, 0x00000000, 0xe00, 0xffffffff, 0x00000000, 0xe04, 0xffffffff, 0x00000000, 0xe08, 0x0000ff00, 0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x00000000, 0xe14, 0xffffffff, 0x00000000, 0xe18, 0xffffffff, 0x00000000, 0xe1c, 0xffffffff, 0x00000000, 0x830, 0xffffffff, 0x00000000, 0x834, 0xffffffff, 0x00000000, 0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x00000000, 0x848, 0xffffffff, 0x00000000, 0x84c, 0xffffffff, 0x00000000, 0x868, 0xffffffff, 0x00000000, }; u32 RTL8192CURadioA_1T_HPArray[RTL8192CURadioA_1T_HPArrayLength] = { 0x000, 0x00030159, 0x001, 0x00031284, 0x002, 0x00098000, 0x003, 0x00018c63, 0x004, 0x000210e7, 0x009, 0x0002044f, 0x00a, 0x0001adb0, 0x00b, 0x00054867, 0x00c, 0x0008992e, 0x00d, 0x0000e529, 0x00e, 0x00039ce7, 0x00f, 0x00000451, 0x019, 0x00000000, 0x01a, 0x00000255, 0x01b, 0x00060a00, 0x01c, 0x000fc378, 0x01d, 0x000a1250, 0x01e, 0x0004445f, 0x01f, 0x00080001, 0x020, 0x0000b614, 0x021, 0x0006c000, 0x022, 0x0000083c, 0x023, 0x00001558, 0x024, 0x00000060, 0x025, 0x00000483, 0x026, 0x0004f000, 0x027, 0x000ec7d9, 0x028, 0x000977c0, 0x029, 0x00004783, 0x02a, 0x00000001, 0x02b, 0x00021334, 0x02a, 0x00000000, 0x02b, 0x00000054, 0x02a, 0x00000001, 0x02b, 0x00000808, 0x02b, 0x00053333, 0x02c, 0x0000000c, 0x02a, 0x00000002, 0x02b, 0x00000808, 0x02b, 0x0005b333, 0x02c, 0x0000000d, 0x02a, 0x00000003, 0x02b, 0x00000808, 0x02b, 0x00063333, 0x02c, 0x0000000d, 0x02a, 0x00000004, 0x02b, 0x00000808, 0x02b, 0x0006b333, 0x02c, 0x0000000d, 0x02a, 0x00000005, 0x02b, 0x00000808, 0x02b, 0x00073333, 0x02c, 0x0000000d, 0x02a, 0x00000006, 0x02b, 0x00000709, 0x02b, 0x0005b333, 0x02c, 0x0000000d, 0x02a, 0x00000007, 0x02b, 0x00000709, 0x02b, 0x00063333, 0x02c, 0x0000000d, 0x02a, 0x00000008, 0x02b, 0x0000060a, 0x02b, 
0x0004b333, 0x02c, 0x0000000d, 0x02a, 0x00000009, 0x02b, 0x0000060a, 0x02b, 0x00053333, 0x02c, 0x0000000d, 0x02a, 0x0000000a, 0x02b, 0x0000060a, 0x02b, 0x0005b333, 0x02c, 0x0000000d, 0x02a, 0x0000000b, 0x02b, 0x0000060a, 0x02b, 0x00063333, 0x02c, 0x0000000d, 0x02a, 0x0000000c, 0x02b, 0x0000060a, 0x02b, 0x0006b333, 0x02c, 0x0000000d, 0x02a, 0x0000000d, 0x02b, 0x0000060a, 0x02b, 0x00073333, 0x02c, 0x0000000d, 0x02a, 0x0000000e, 0x02b, 0x0000050b, 0x02b, 0x00066666, 0x02c, 0x0000001a, 0x02a, 0x000e0000, 0x010, 0x0004000f, 0x011, 0x000e31fc, 0x010, 0x0006000f, 0x011, 0x000ff9f8, 0x010, 0x0002000f, 0x011, 0x000203f9, 0x010, 0x0003000f, 0x011, 0x000ff500, 0x010, 0x00000000, 0x011, 0x00000000, 0x010, 0x0008000f, 0x011, 0x0003f100, 0x010, 0x0009000f, 0x011, 0x00023100, 0x012, 0x000d8000, 0x012, 0x00090000, 0x012, 0x00051000, 0x012, 0x00012000, 0x013, 0x00028fb4, 0x013, 0x00024fa8, 0x013, 0x000207a4, 0x013, 0x0001c798, 0x013, 0x000183a4, 0x013, 0x00014398, 0x013, 0x000101a4, 0x013, 0x0000c198, 0x013, 0x000080a4, 0x013, 0x00004098, 0x013, 0x00000000, 0x014, 0x0001944c, 0x014, 0x00059444, 0x014, 0x0009944c, 0x014, 0x000d9444, 0x015, 0x0000f405, 0x015, 0x0004f405, 0x015, 0x0008f405, 0x015, 0x000cf405, 0x016, 0x000e0330, 0x016, 0x000a0330, 0x016, 0x00060330, 0x016, 0x00020330, 0x000, 0x00010159, 0x018, 0x0000f401, 0x0fe, 0x00000000, 0x0fe, 0x00000000, 0x01f, 0x00080003, 0x0fe, 0x00000000, 0x0fe, 0x00000000, 0x01e, 0x00044457, 0x01f, 0x00080000, 0x000, 0x00030159, }; u32 Rtl8192CUAGCTAB_1T_HPArray[RTL8192CUAGCTAB_1T_HPArrayLength] = { 0xc78, 0x7b000001, 0xc78, 0x7b010001, 0xc78, 0x7b020001, 0xc78, 0x7b030001, 0xc78, 0x7b040001, 0xc78, 0x7b050001, 0xc78, 0x7b060001, 0xc78, 0x7b070001, 0xc78, 0x7b080001, 0xc78, 0x7a090001, 0xc78, 0x790a0001, 0xc78, 0x780b0001, 0xc78, 0x770c0001, 0xc78, 0x760d0001, 0xc78, 0x750e0001, 0xc78, 0x740f0001, 0xc78, 0x73100001, 0xc78, 0x72110001, 0xc78, 0x71120001, 0xc78, 0x70130001, 0xc78, 0x6f140001, 0xc78, 0x6e150001, 0xc78, 0x6d160001, 0xc78, 
0x6c170001, 0xc78, 0x6b180001, 0xc78, 0x6a190001, 0xc78, 0x691a0001, 0xc78, 0x681b0001, 0xc78, 0x671c0001, 0xc78, 0x661d0001, 0xc78, 0x651e0001, 0xc78, 0x641f0001, 0xc78, 0x63200001, 0xc78, 0x62210001, 0xc78, 0x61220001, 0xc78, 0x60230001, 0xc78, 0x46240001, 0xc78, 0x45250001, 0xc78, 0x44260001, 0xc78, 0x43270001, 0xc78, 0x42280001, 0xc78, 0x41290001, 0xc78, 0x402a0001, 0xc78, 0x262b0001, 0xc78, 0x252c0001, 0xc78, 0x242d0001, 0xc78, 0x232e0001, 0xc78, 0x222f0001, 0xc78, 0x21300001, 0xc78, 0x20310001, 0xc78, 0x06320001, 0xc78, 0x05330001, 0xc78, 0x04340001, 0xc78, 0x03350001, 0xc78, 0x02360001, 0xc78, 0x01370001, 0xc78, 0x00380001, 0xc78, 0x00390001, 0xc78, 0x003a0001, 0xc78, 0x003b0001, 0xc78, 0x003c0001, 0xc78, 0x003d0001, 0xc78, 0x003e0001, 0xc78, 0x003f0001, 0xc78, 0x7b400001, 0xc78, 0x7b410001, 0xc78, 0x7b420001, 0xc78, 0x7b430001, 0xc78, 0x7b440001, 0xc78, 0x7b450001, 0xc78, 0x7b460001, 0xc78, 0x7b470001, 0xc78, 0x7b480001, 0xc78, 0x7a490001, 0xc78, 0x794a0001, 0xc78, 0x784b0001, 0xc78, 0x774c0001, 0xc78, 0x764d0001, 0xc78, 0x754e0001, 0xc78, 0x744f0001, 0xc78, 0x73500001, 0xc78, 0x72510001, 0xc78, 0x71520001, 0xc78, 0x70530001, 0xc78, 0x6f540001, 0xc78, 0x6e550001, 0xc78, 0x6d560001, 0xc78, 0x6c570001, 0xc78, 0x6b580001, 0xc78, 0x6a590001, 0xc78, 0x695a0001, 0xc78, 0x685b0001, 0xc78, 0x675c0001, 0xc78, 0x665d0001, 0xc78, 0x655e0001, 0xc78, 0x645f0001, 0xc78, 0x63600001, 0xc78, 0x62610001, 0xc78, 0x61620001, 0xc78, 0x60630001, 0xc78, 0x46640001, 0xc78, 0x45650001, 0xc78, 0x44660001, 0xc78, 0x43670001, 0xc78, 0x42680001, 0xc78, 0x41690001, 0xc78, 0x406a0001, 0xc78, 0x266b0001, 0xc78, 0x256c0001, 0xc78, 0x246d0001, 0xc78, 0x236e0001, 0xc78, 0x226f0001, 0xc78, 0x21700001, 0xc78, 0x20710001, 0xc78, 0x06720001, 0xc78, 0x05730001, 0xc78, 0x04740001, 0xc78, 0x03750001, 0xc78, 0x02760001, 0xc78, 0x01770001, 0xc78, 0x00780001, 0xc78, 0x00790001, 0xc78, 0x007a0001, 0xc78, 0x007b0001, 0xc78, 0x007c0001, 0xc78, 0x007d0001, 0xc78, 0x007e0001, 0xc78, 0x007f0001, 0xc78, 
0x3800001e, 0xc78, 0x3801001e, 0xc78, 0x3802001e, 0xc78, 0x3803001e, 0xc78, 0x3804001e, 0xc78, 0x3805001e, 0xc78, 0x3806001e, 0xc78, 0x3807001e, 0xc78, 0x3808001e, 0xc78, 0x3c09001e, 0xc78, 0x3e0a001e, 0xc78, 0x400b001e, 0xc78, 0x440c001e, 0xc78, 0x480d001e, 0xc78, 0x4c0e001e, 0xc78, 0x500f001e, 0xc78, 0x5210001e, 0xc78, 0x5611001e, 0xc78, 0x5a12001e, 0xc78, 0x5e13001e, 0xc78, 0x6014001e, 0xc78, 0x6015001e, 0xc78, 0x6016001e, 0xc78, 0x6217001e, 0xc78, 0x6218001e, 0xc78, 0x6219001e, 0xc78, 0x621a001e, 0xc78, 0x621b001e, 0xc78, 0x621c001e, 0xc78, 0x621d001e, 0xc78, 0x621e001e, 0xc78, 0x621f001e, };
alexrao/rtl8188ce-linux-driver
rtl8192cu/table.c
C
gpl-2.0
41,362
/* * uuidparse.c --- Interpret uuid encoded information. This program * violates the UUID abstraction barrier by reaching into the * guts of a UUID. * * Based on libuuid/src/uuid_time.c * Copyright (C) 1998, 1999 Theodore Ts'o. * * All alterations (C) 2017 Sami Kerola * The 3-Clause BSD License * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, and the entire permission notice in its entirety, * including the disclaimer of warranties. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote * products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. 
*/ #include <assert.h> #include <getopt.h> #include <libsmartcols.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <unistd.h> #include <uuid.h> #include "c.h" #include "closestream.h" #include "nls.h" #include "optutils.h" #include "strutils.h" #include "timeutils.h" #include "xalloc.h" /* column IDs */ enum { COL_UUID = 0, COL_VARIANT, COL_TYPE, COL_TIME }; /* column names */ struct colinfo { const char *name; /* header */ double whint; /* width hint (N < 1 is in percent of termwidth) */ int flags; /* SCOLS_FL_* */ const char *help; }; /* columns descriptions */ static const struct colinfo infos[] = { [COL_UUID] = {"UUID", UUID_STR_LEN, 0, N_("unique identifier")}, [COL_VARIANT] = {"VARIANT", 9, 0, N_("variant name")}, [COL_TYPE] = {"TYPE", 10, 0, N_("type name")}, [COL_TIME] = {"TIME", 31, 0, N_("timestamp")} }; static int columns[ARRAY_SIZE(infos) * 2]; static size_t ncolumns; struct control { unsigned int json:1, no_headings:1, raw:1; }; static void __attribute__((__noreturn__)) usage(void) { size_t i; fputs(USAGE_HEADER, stdout); fprintf(stdout, _(" %s [options] <uuid ...>\n"), program_invocation_short_name); fputs(USAGE_OPTIONS, stdout); puts(_(" -J, --json use JSON output format")); puts(_(" -n, --noheadings don't print headings")); puts(_(" -o, --output <list> COLUMNS to display (see below)")); puts(_(" -r, --raw use the raw output format")); printf(USAGE_HELP_OPTIONS(24)); fputs(USAGE_COLUMNS, stdout); for (i = 0; i < ARRAY_SIZE(infos); i++) fprintf(stdout, " %8s %s\n", infos[i].name, _(infos[i].help)); printf(USAGE_MAN_TAIL("uuidparse(1)")); exit(EXIT_SUCCESS); } static int column_name_to_id(const char *name, size_t namesz) { size_t i; assert(name); for (i = 0; i < ARRAY_SIZE(infos); i++) { const char *cn = infos[i].name; if (!strncasecmp(name, cn, namesz) && !*(cn + namesz)) return i; } warnx(_("unknown column: %s"), name); return -1; } static int get_column_id(size_t num) { assert(num < 
ncolumns); assert(columns[num] < (int)ARRAY_SIZE(infos)); return columns[num]; } static const struct colinfo *get_column_info(int num) { return &infos[get_column_id(num)]; } static void fill_table_row(struct libscols_table *tb, char const *const uuid) { static struct libscols_line *ln; size_t i; uuid_t buf; int invalid = 0; int variant = -1, type = -1; assert(tb); assert(uuid); ln = scols_table_new_line(tb, NULL); if (!ln) errx(EXIT_FAILURE, _("failed to allocate output line")); if (uuid_parse(uuid, buf)) invalid = 1; else { variant = uuid_variant(buf); type = uuid_type(buf); } for (i = 0; i < ncolumns; i++) { char *str = NULL; switch (get_column_id(i)) { case COL_UUID: str = xstrdup(uuid); break; case COL_VARIANT: if (invalid) { str = xstrdup(_("invalid")); break; } switch (variant) { case UUID_VARIANT_NCS: str = xstrdup("NCS"); break; case UUID_VARIANT_DCE: str = xstrdup("DCE"); break; case UUID_VARIANT_MICROSOFT: str = xstrdup("Microsoft"); break; default: str = xstrdup(_("other")); } break; case COL_TYPE: if (invalid) { str = xstrdup(_("invalid")); break; } switch (type) { case UUID_TYPE_DCE_NIL: if (uuid_is_null(buf)) str = xstrdup(_("nil")); else str = xstrdup(_("unknown")); break; case UUID_TYPE_DCE_TIME: str = xstrdup(_("time-based")); break; case UUID_TYPE_DCE_SECURITY: str = xstrdup("DCE"); break; case UUID_TYPE_DCE_MD5: str = xstrdup(_("name-based")); break; case UUID_TYPE_DCE_RANDOM: str = xstrdup(_("random")); break; case UUID_TYPE_DCE_SHA1: str = xstrdup(_("sha1-based")); break; default: str = xstrdup(_("unknown")); } break; case COL_TIME: if (invalid) { str = xstrdup(_("invalid")); break; } if (variant == UUID_VARIANT_DCE && type == UUID_TYPE_DCE_TIME) { struct timeval tv; char date_buf[ISO_BUFSIZ]; uuid_time(buf, &tv); strtimeval_iso(&tv, ISO_TIMESTAMP_COMMA, date_buf, sizeof(date_buf)); str = xstrdup(date_buf); } break; default: abort(); } if (str && scols_line_refer_data(ln, i, str)) errx(EXIT_FAILURE, _("failed to add output data")); } } static 
void print_output(struct control const *const ctrl, int argc, char **argv) { struct libscols_table *tb; size_t i; scols_init_debug(0); tb = scols_new_table(); if (!tb) err(EXIT_FAILURE, _("failed to allocate output table")); if (ctrl->json) { scols_table_enable_json(tb, 1); scols_table_set_name(tb, "uuids"); } scols_table_enable_noheadings(tb, ctrl->no_headings); scols_table_enable_raw(tb, ctrl->raw); for (i = 0; i < ncolumns; i++) { const struct colinfo *col = get_column_info(i); if (!scols_table_new_column(tb, col->name, col->whint, col->flags)) err(EXIT_FAILURE, _("failed to initialize output column")); } for (i = 0; i < (size_t) argc; i++) fill_table_row(tb, argv[i]); if (i == 0) { char uuid[UUID_STR_LEN]; while (scanf(" %36[^ \t\n]%*c", uuid) && !feof(stdin)) fill_table_row(tb, uuid); } scols_print_table(tb); scols_unref_table(tb); } int main(int argc, char **argv) { struct control ctrl = { 0 }; char *outarg = NULL; int c; static const struct option longopts[] = { {"json", no_argument, NULL, 'J'}, {"noheadings", no_argument, NULL, 'n'}, {"output", required_argument, NULL, 'o'}, {"raw", no_argument, NULL, 'r'}, {"version", no_argument, NULL, 'V'}, {"help", no_argument, NULL, 'h'}, {NULL, 0, NULL, 0} }; static const ul_excl_t excl[] = { {'J', 'r'}, {0} }; int excl_st[ARRAY_SIZE(excl)] = UL_EXCL_STATUS_INIT; setlocale(LC_ALL, ""); bindtextdomain(PACKAGE, LOCALEDIR); textdomain(PACKAGE); close_stdout_atexit(); while ((c = getopt_long(argc, argv, "Jno:rVh", longopts, NULL)) != -1) { err_exclusive_options(c, longopts, excl, excl_st); switch (c) { case 'J': ctrl.json = 1; break; case 'n': ctrl.no_headings = 1; break; case 'o': outarg = optarg; break; case 'r': ctrl.raw = 1; break; case 'V': print_version(EXIT_SUCCESS); case 'h': usage(); default: errtryhelp(EXIT_FAILURE); } } argc -= optind; argv += optind; columns[ncolumns++] = COL_UUID; columns[ncolumns++] = COL_VARIANT; columns[ncolumns++] = COL_TYPE; columns[ncolumns++] = COL_TIME; if (outarg && 
string_add_to_idarray(outarg, columns, ARRAY_SIZE(columns), &ncolumns, column_name_to_id) < 0) return EXIT_FAILURE; print_output(&ctrl, argc, argv); return EXIT_SUCCESS; }
karelzak/util-linux
misc-utils/uuidparse.c
C
gpl-2.0
8,571
/* * msm8974-thermistor.c - thermistor of H-F Project * * Copyright (C) 2011 Samsung Electrnoics * SangYoung Son <hello.son@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <mach/msm8974-thermistor.h> #include <mach/sec_thermistor.h> #ifdef CONFIG_SEC_THERMISTOR /*Below adc table is same as batt_temp_adc table*/ /* mismatch 8974 */ #if defined(CONFIG_MACH_KS01SKT) || defined(CONFIG_MACH_KS01KTT) || defined(CONFIG_MACH_KS01LGT) || defined(CONFIG_MACH_JACTIVESKT) static struct sec_therm_adc_table temper_table_ap[] = { {27229, 700}, {27271, 690}, {27309, 680}, {27435, 670}, {27534, 660}, {27620, 650}, {27761, 640}, {27834, 630}, {27886, 620}, {27970, 610}, {28106, 600}, {28200, 590}, {28252, 580}, {28339, 570}, {28534, 560}, {28640, 550}, {28794, 540}, {28884, 530}, {28926, 520}, {29091, 510}, {29269, 500}, {29445, 490}, {29620, 480}, {29615, 470}, {29805, 460}, {30015, 450}, {30227, 440}, {30392, 430}, {30567, 420}, {30731, 410}, {30825, 400}, {31060, 390}, {31224, 380}, {31406, 370}, {31595, 360}, {31764, 350}, {31990, 340}, {32111, 330}, {32342, 320}, {32562, 310}, {32705, 300}, {33697, 250}, {34696, 200}, {35682, 150}, {36634, 100}, {37721, 50}, {38541, 0}, {39415, -50}, {40155, -100}, {40730, -150}, {41455, -200}, {41772, -250}, {42149, -300}, }; #elif defined(CONFIG_MACH_HLTESKT) || 
defined(CONFIG_MACH_HLTEKTT) || \ defined(CONFIG_MACH_HLTELGT) || defined(CONFIG_MACH_HLTEDCM) || defined(CONFIG_MACH_HLTEKDI) static struct sec_therm_adc_table temper_table_ap[] = { {26729, 730}, {26784, 720}, {26836, 710}, {26910, 700}, {26976, 690}, {27048, 680}, {27124, 670}, {27227, 660}, {27332, 650}, {27413, 640}, {27522, 630}, {27588, 620}, {27670, 610}, {27835, 600}, {27934, 590}, {28036, 580}, {28125, 570}, {28231, 560}, {28348, 550}, {28463, 540}, {28589, 530}, {28703, 520}, {28830, 510}, {28958, 500}, {29089, 490}, {29212, 480}, {29354, 470}, {29499, 460}, {29648, 450}, {29805, 440}, {29950, 430}, {30109, 420}, {30259, 410}, {30441, 400}, {30600, 390}, {30757, 380}, {30926, 370}, {31077, 360}, {31289, 350}, {31457, 340}, {31663, 330}, {31872, 320}, {32056, 310}, {32292, 300}, {32472, 290}, {32701, 280}, {32915, 270}, {33061, 260}, {33285, 250}, {33468, 240}, {33675, 230}, {33882, 220}, {34092, 210}, {34291, 200}, {34536, 190}, {34725, 180}, {34953, 170}, {35165, 160}, {35348, 150}, {35567, 140}, {35744, 130}, {35997, 120}, {36202, 110}, {36431, 100}, {36649, 90}, {36818, 80}, {37066, 70}, {37222, 60}, {37459, 50}, {37608, 40}, {37841, 30}, {37987, 20}, {38205, 10}, {38416, 0}, {38604, -10}, {38788, -20}, {38959, -30}, {39121, -40}, {39280, -50}, {39431, -60}, {39591, -70}, {39748, -80}, {39895, -90}, {40043, -100}, {40172, -110}, {40307, -120}, {40449, -130}, {40565, -140}, {40712, -150}, {40788, -160}, {40932, -170}, {41010, -180}, {41135, -190}, {41225, -200}, }; #elif defined(CONFIG_MACH_FRESCOLTESKT) || defined(CONFIG_MACH_FRESCOLTEKTT) || \ defined(CONFIG_MACH_FRESCOLTELGT) static struct sec_therm_adc_table temper_table_ap[] = { {26699, 730}, {26751, 720}, {26805, 710}, {26880, 700}, {26935, 690}, {27010, 680}, {27101, 670}, {27200, 660}, {27302, 650}, {27383, 640}, {27502, 630}, {27558, 620}, {27630, 610}, {27801, 600}, {27885, 590}, {27988, 580}, {28078, 570}, {28190, 560}, {28314, 550}, {28426, 540}, {28539, 530}, {28666, 520}, {28791, 510}, 
{28901, 500}, {29049, 490}, {29194, 480}, {29324, 470}, {29475, 460}, {29624, 450}, {29772, 440}, {29930, 430}, {30073, 420}, {30233, 410}, {30400, 400}, {30554, 390}, {30761, 380}, {30930, 370}, {31104, 360}, {31287, 350}, {31473, 340}, {31633, 330}, {31806, 320}, {31984, 310}, {32178, 300}, {32369, 290}, {32569, 280}, {32776, 270}, {32979, 260}, {33191, 250}, {33401, 240}, {33606, 230}, {33819, 220}, {34046, 210}, {34270, 200}, {34480, 190}, {34722, 180}, {34936, 170}, {35144, 160}, {35378, 150}, {35594, 140}, {35800, 130}, {36033, 120}, {36242, 110}, {36448, 100}, {36670, 90}, {36882, 80}, {37082, 70}, {37292, 60}, {37490, 50}, {37678, 40}, {37874, 30}, {38068, 20}, {38238, 10}, {38430, 0}, {38512, -10}, {38639, -20}, {38832, -30}, {38995, -40}, {39177, -50}, {39345, -60}, {39502, -70}, {39650, -80}, {39802, -90}, {39948, -100}, {40091, -110}, {40211, -120}, {40356, -130}, {40483, -140}, {40645, -150}, {40751, -160}, {40902, -170}, {40990, -180}, {41105, -190}, {41195, -200}, }; #elif defined(CONFIG_SEC_F_PROJECT) static struct sec_therm_adc_table temper_table_ap[] = { {25749, 900}, {25773, 890}, {25819, 880}, {25873, 870}, {25910, 860}, {25957, 850}, {25993, 840}, {26045, 830}, {26102, 820}, {26157, 810}, {26169, 800}, {26224, 790}, {26297, 780}, {26341, 770}, {26409, 760}, {26464, 750}, {26532, 740}, {26599, 730}, {26658, 720}, {26712, 710}, {26809, 700}, {26862, 690}, {26943, 680}, {27026, 670}, {27094, 660}, {27211, 650}, {27294, 640}, {27405, 630}, {27487, 620}, {27581, 610}, {27672, 600}, {27769, 590}, {27881, 580}, {28004, 570}, {28109, 560}, {28214, 550}, {28327, 540}, {28448, 530}, {28585, 520}, {28692, 510}, {28815, 500}, {28955, 490}, {29093, 480}, {29250, 470}, {29381, 460}, {29533, 450}, {29670, 440}, {29830, 430}, {29981, 420}, {30182, 410}, {30322, 400}, {30464, 390}, {30632, 380}, {30870, 370}, {31001, 360}, {31210, 350}, {31366, 340}, {31624, 330}, {31748, 320}, {31958, 310}, {32167, 300}, {32374, 290}, {32547, 280}, {32754, 270}, {32958, 260}, 
{33131, 250}, {33395, 240}, {33600, 230}, {33798, 220}, {34007, 210}, {34230, 200}, {34480, 190}, {34730, 180}, {34839, 170}, {35122, 160}, {35324, 150}, {35509, 140}, {35769, 130}, {35925, 120}, {36148, 110}, {36424, 100}, {36561, 90}, {36850, 80}, {37021, 70}, {37180, 60}, {37397, 50}, {37598, 40}, {37787, 30}, {37961, 20}, {38167, 10}, {38349, 0}, {38381, -10}, {38466, -20}, {38604, -30}, {38805, -40}, {38949, -50}, {39161, -60}, {39302, -70}, {39490, -80}, {39615, -90}, {39811, -100}, {39918, -110}, {40072, -120}, {40211, -130}, {40322, -140}, {40453, -150}, {40577, -160}, {40696, -170}, {40801, -180}, {40934, -190}, {41024, -200}, }; #elif defined(CONFIG_SEC_K_PROJECT) || \ defined(CONFIG_SEC_KACTIVE_PROJECT) || defined(CONFIG_SEC_KSPORTS_PROJECT) || \ defined(CONFIG_SEC_S_PROJECT) static struct sec_therm_adc_table temper_table_ap[] = { {25954, 900}, {26005, 890}, {26052, 880}, {26105, 870}, {26151, 860}, {26207, 850}, {26253, 840}, {26302, 830}, {26354, 820}, {26405, 810}, {26454, 800}, {26503, 790}, {26554, 780}, {26602, 770}, {26657, 760}, {26691, 750}, {26757, 740}, {26823, 730}, {26889, 720}, {26955, 710}, {27020, 700}, {27081, 690}, {27142, 680}, {27203, 670}, {27264, 660}, {27327, 650}, {27442, 640}, {27557, 630}, {27672, 620}, {27787, 610}, {27902, 600}, {28004, 590}, {28106, 580}, {28208, 570}, {28310, 560}, {28415, 550}, {28608, 540}, {28801, 530}, {28995, 520}, {28944, 510}, {28893, 500}, {29148, 490}, {29347, 480}, {29546, 470}, {29746, 460}, {29911, 450}, {30076, 440}, {30242, 430}, {30490, 420}, {30738, 410}, {30986, 400}, {31101, 390}, {31216, 380}, {31331, 370}, {31446, 360}, {31561, 350}, {31768, 340}, {31975, 330}, {32182, 320}, {32389, 310}, {32596, 300}, {32962, 290}, {32974, 280}, {32986, 270}, {33744, 260}, {33971, 250}, {34187, 240}, {34403, 230}, {34620, 220}, {34836, 210}, {35052, 200}, {35261, 190}, {35470, 180}, {35679, 170}, {35888, 160}, {36098, 150}, {36317, 140}, {36537, 130}, {36756, 120}, {36976, 110}, {37195, 100}, {37413, 
90}, {37630, 80}, {37848, 70}, {38065, 60}, {38282, 50}, {38458, 40}, {38635, 30}, {38811, 20}, {38987, 10}, {39163, 0}, {39317, -10}, {39470, -20}, {39624, -30}, {39777, -40}, {39931, -50}, {40065, -60}, {40199, -70}, {40333, -80}, {40467, -90}, {40601, -100}, {40728, -110}, {40856, -120}, {40983, -130}, {41110, -140}, {41237, -150}, {41307, -160}, {41378, -170}, {41448, -180}, {41518, -190}, {41588, -200}, }; #elif defined(CONFIG_SEC_PATEK_PROJECT) static struct sec_therm_adc_table temper_table_ap[] = { {25943, 900}, {26002, 890}, {26061, 880}, {26121, 870}, {26181, 860}, {26242, 850}, {26277, 840}, {26313, 830}, {26348, 820}, {26384, 810}, {26420, 800}, {26467, 790}, {26514, 780}, {26562, 770}, {26609, 760}, {26657, 750}, {26728, 740}, {26800, 730}, {26872, 720}, {26944, 710}, {27016, 700}, {27092, 690}, {27168, 680}, {27244, 670}, {27320, 660}, {27396, 650}, {27499, 640}, {27602, 630}, {27705, 620}, {27808, 610}, {27911, 600}, {28058, 590}, {28205, 580}, {28352, 570}, {28499, 560}, {28647, 550}, {28826, 540}, {29005, 530}, {29185, 520}, {29364, 510}, {29544, 500}, {29631, 490}, {29718, 480}, {29805, 470}, {29892, 460}, {29979, 450}, {30179, 440}, {30381, 430}, {30583, 420}, {30785, 410}, {30989, 400}, {31112, 390}, {31236, 380}, {31359, 370}, {31483, 360}, {31607, 350}, {31804, 340}, {32002, 330}, {32199, 320}, {32397, 310}, {32595, 300}, {32783, 290}, {32971, 280}, {33159, 270}, {33347, 260}, {33536, 250}, {33747, 240}, {33958, 230}, {34170, 220}, {34381, 210}, {34593, 200}, {34803, 190}, {35014, 180}, {35224, 170}, {35435, 160}, {35646, 150}, {35859, 140}, {36072, 130}, {36285, 120}, {36498, 110}, {36712, 100}, {36906, 90}, {37101, 80}, {37296, 70}, {37491, 60}, {37686, 50}, {37815, 40}, {37945, 30}, {38074, 20}, {38204, 10}, {38334, 0}, {38506, -10}, {38678, -20}, {28850, -30}, {39022, -40}, {39194, -50}, {39342, -60}, {39490, -70}, {39638, -80}, {39786, -90}, {39935, -100}, {40072, -110}, {40209, -120}, {40347, -130}, {40484, -140}, {40622, -150}, {40730, 
-160}, {40838, -170}, {40946, -180}, {41054, -190}, {41163, -200}, }; #elif defined(CONFIG_MACH_CHAGALL_LTE) || defined(CONFIG_MACH_KLIMT_LTE) static struct sec_therm_adc_table temper_table_ap[] = { {25765, 900}, {25836, 890}, {25907, 880}, {25978, 870}, {26049, 860}, {26120, 850}, {26191, 840}, {26262, 830}, {26333, 820}, {26404, 810}, {26475, 800}, {26549, 790}, {26620, 780}, {26691, 770}, {26762, 760}, {26833, 750}, {26904, 740}, {26975, 730}, {27046, 720}, {27117, 710}, {27188, 700}, {27271, 690}, {27355, 680}, {27438, 670}, {27522, 660}, {27605, 650}, {27721, 640}, {27836, 630}, {27952, 620}, {28067, 610}, {28182, 600}, {28296, 590}, {28409, 580}, {28522, 570}, {28635, 560}, {28748, 550}, {28852, 540}, {28955, 530}, {29058, 520}, {29161, 510}, {29290, 500}, {29410, 490}, {29555, 480}, {29700, 470}, {29845, 460}, {29990, 450}, {30188, 440}, {30386, 430}, {30584, 420}, {30782, 410}, {30981, 400}, {31164, 390}, {31347, 380}, {31530, 370}, {31713, 360}, {31896, 350}, {32081, 340}, {32266, 330}, {32450, 320}, {32635, 310}, {32820, 300}, {33047, 290}, {33274, 280}, {33502, 270}, {33729, 260}, {33956, 250}, {34172, 240}, {34388, 230}, {34605, 220}, {34821, 210}, {35037, 200}, {35246, 190}, {35455, 180}, {35664, 170}, {35873, 160}, {36083, 150}, {36302, 140}, {36522, 130}, {36741, 120}, {36961, 110}, {37180, 100}, {37398, 90}, {37615, 80}, {37833, 70}, {38050, 60}, {38267, 50}, {38443, 40}, {38620, 30}, {38796, 20}, {38972, 10}, {39148, 0}, {39302, -10}, {39455, -20}, {39609, -30}, {39762, -40}, {39916, -50}, {40050, -60}, {40184, -70}, {40318, -80}, {40452, -90}, {40586, -100}, {40713, -110}, {40841, -120}, {40968, -130}, {41095, -140}, {41222, -150}, {41292, -160}, {41363, -170}, {41433, -180}, {41503, -190}, {41573, -200}, }; #else static struct sec_therm_adc_table temper_table_ap[] = { {27188, 700}, {27271, 690}, {27355, 680}, {27438, 670}, {27522, 660}, {27605, 650}, {27721, 640}, {27836, 630}, {27952, 620}, {28067, 610}, {28182, 600}, {28296, 590}, {28409, 580}, 
{28522, 570}, {28635, 560}, {28748, 550}, {28852, 540}, {28955, 530}, {29058, 520}, {29161, 510}, {29290, 500}, {29410, 490}, {29555, 480}, {29700, 470}, {29845, 460}, {29990, 450}, {30188, 440}, {30386, 430}, {30584, 420}, {30782, 410}, {30981, 400}, {31164, 390}, {31347, 380}, {31530, 370}, {31713, 360}, {31896, 350}, {32081, 340}, {32266, 330}, {32450, 320}, {32635, 310}, {32820, 300}, {33047, 290}, {33274, 280}, {33502, 270}, {33729, 260}, {33956, 250}, {34172, 240}, {34388, 230}, {34605, 220}, {34821, 210}, {35037, 200}, {35246, 190}, {35455, 180}, {35664, 170}, {35873, 160}, {36083, 150}, {36302, 140}, {36522, 130}, {36741, 120}, {36961, 110}, {37180, 100}, {37398, 90}, {37615, 80}, {37833, 70}, {38050, 60}, {38267, 50}, {38443, 40}, {38620, 30}, {38796, 20}, {38972, 10}, {39148, 0}, {39302, -10}, {39455, -20}, {39609, -30}, {39762, -40}, {39916, -50}, {40050, -60}, {40184, -70}, {40318, -80}, {40452, -90}, {40586, -100}, {40713, -110}, {40841, -120}, {40968, -130}, {41095, -140}, {41222, -150}, {41292, -160}, {41363, -170}, {41433, -180}, {41503, -190}, {41573, -200}, }; #endif #if defined(CONFIG_MACH_HLTEDCM) || defined(CONFIG_MACH_HLTEKDI) static struct sec_therm_adc_table temper_table_flash[] = { {26135, 850}, {26159, 840}, {26199, 830}, {26241, 820}, {26293, 810}, {26351, 800}, {26391, 790}, {26439, 780}, {26483, 770}, {26536, 760}, {26599, 750}, {26639, 740}, {26729, 730}, {26786, 720}, {26846, 710}, {26915, 700}, {26978, 690}, {27048, 680}, {27134, 670}, {27225, 660}, {27301, 650}, {27412, 640}, {27467, 630}, {27591, 620}, {27679, 610}, {27779, 600}, {27879, 590}, {27982, 580}, {28087, 570}, {28209, 560}, {28345, 550}, {28461, 540}, {28574, 530}, {28702, 520}, {28830, 510}, {28951, 500}, {29098, 490}, {29243, 480}, {29384, 470}, {29542, 460}, {29680, 450}, {29832, 440}, {29996, 430}, {30153, 420}, {30312, 410}, {30477, 400}, {30630, 390}, {30799, 380}, {30961, 370}, {31104, 360}, {31273, 350}, {31455, 340}, {31643, 330}, {31828, 320}, {32024, 310}, 
{32230, 300}, {32425, 290}, {32638, 280}, {32836, 270}, {33088, 260}, {33302, 250}, {33523, 240}, {33732, 230}, {33947, 220}, {34178, 210}, {34397, 200}, {34627, 190}, {34855, 180}, {35087, 170}, {35282, 160}, {35500, 150}, {35715, 140}, {35924, 130}, {36129, 120}, {36351, 110}, {36553, 100}, {36750, 90}, {36954, 80}, {37121, 70}, {37304, 60}, {37501, 50}, {37703, 40}, {37895, 30}, {38090, 20}, {38228, 10}, {38326, 0}, {38411, -10}, {38508, -20}, {38683, -30}, {38894, -40}, {39082, -50}, {39242, -60}, {39393, -70}, {39540, -80}, {39706, -90}, {39868, -100}, {40012, -110}, {40167, -120}, {40311, -130}, {40437, -140}, {40565, -150}, {40693, -160}, {40821, -170}, {40941, -180}, {41052, -190}, {41154, -200}, }; #endif static int get_msm8974_siop_level(int temp) { static int prev_temp = 400; static int prev_level; int level = -1; #if defined(CONFIG_MACH_HLTEDCM) || defined(CONFIG_MACH_HLTEKDI) || defined(CONFIG_MACH_JS01LTEDCM) /* This is only for JPN JF-DCM model, currently the SIOP is not using this function. However, the JPN vendor(DoCoMo) wants to implement the Camera APP shutdown functionality to avoid over-heat damage. For this, only JPN model gives thermistor value from the driver layer to platform layer. In this routine, the "notify_change_of_temperature()" function gives thermistor value and also SIOP value together. This SIOP value is invalid information and could give an effect to SIOP APP. That is why this enforcing return code is added. 
*/ return -1; #endif if (temp > prev_temp) { if (temp >= 540) level = 4; else if (temp >= 530) level = 3; else if (temp >= 480) level = 2; else if (temp >= 440) level = 1; else level = 0; } else { if (temp < 410) level = 0; else if (temp < 440) level = 1; else if (temp < 480) level = 2; else if (temp < 530) level = 3; else level = 4; if (level > prev_level) level = prev_level; } prev_temp = temp; prev_level = level; return level; } static struct sec_therm_platform_data sec_therm_pdata = { .adc_arr_size = ARRAY_SIZE(temper_table_ap), .adc_table = temper_table_ap, .polling_interval = 30 * 1000, /* msecs */ .get_siop_level = get_msm8974_siop_level, #if defined(CONFIG_MACH_HLTEDCM) || defined(CONFIG_MACH_HLTEKDI) || defined(CONFIG_MACH_JS01LTEDCM) || defined(CONFIG_MACH_KLTE_JPN) || \ defined(CONFIG_MACH_CHAGALL_KDI) || defined(CONFIG_MACH_KLIMT_LTE_DCM) #if defined(CONFIG_MACH_HLTEDCM) || defined(CONFIG_MACH_HLTEKDI) .adc_flash_arr_size = ARRAY_SIZE(temper_table_flash), .adc_table_flash = temper_table_flash, #endif .no_polling = 0, #else .no_polling = 1, #endif }; struct platform_device sec_device_thermistor = { .name = "sec-thermistor", .id = -1, .dev.platform_data = &sec_therm_pdata, }; #endif
kyasu/android_kernel_samsung_hltedcm
arch/arm/mach-msm/msm8974-thermistor.c
C
gpl-2.0
18,796
/* * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ package pkg1; import java.io.*; /** * A test class where outer class is package private and the inner class is * protected. * * @author Bhavesh Patel */ class ProtectedInnerClass { protected static class ProInnerClass implements java.io.Serializable { public final int SERIALIZABLE_CONSTANT = 1; /** * @param s ObjectInputStream. * @throws IOException when there is an I/O error. * @serial */ private void readObject(ObjectInputStream s) throws IOException { } /** * @param s ObjectOutputStream. * @throws IOException when there is an I/O error. * @serial */ private void writeObject(ObjectOutputStream s) throws IOException { } } }
reprogrammer/jsr308-langtools
test/com/sun/javadoc/testSerializedForm/pkg1/ProtectedInnerClass.java
Java
gpl-2.0
1,994
/** * @version $Id: template_rtl.css 10571 2008-07-21 01:27:35Z pasamio $ * @package Joomla * @subpackage Accessible-Template-Beez * @copyright Copyright (C) 2005 - 2008 Open Source Matters. All rights reserved. * @license GNU/GPL, see LICENSE.php * Joomla! is free software. This version may have been modified pursuant to the * GNU General Public License, and as distributed it includes or is derivative * of works licensed under the GNU General Public License or other free or open * source software licenses. See COPYRIGHT.php for copyright notices and * details. */ /** * Joomla! 1.5 Beez template RTL css file * * @author Mati Kochen /Angie Radtke * @package Joomla * @since 1.5 * @version 1.0 */ body{ direction: rtl; } p{ text-align: right; } /* Font Size */ #fontsize { right: auto; left: 0; padding: 4px 0; } #fontsize h3 { margin: 0; padding: 0; } #fontsize p { margin: 0 5px 0 0; padding: 0; } #fontsize p a { display: -moz-inline-box; padding: 0 24px 0 0; zoom: 1; } /* Font Size Background Images */ #fontsize p a.larger { background: #fff url(../images/lupe_larger.gif) right no-repeat; } #fontsize p a.smaller { background: #fff url(../images/lupe_smaller.gif) right no-repeat; } #fontsize p a.reset { background: #fff url(../images/lupe_reset.gif) right no-repeat; } #fontsize p a.reset:hover, #fontsize p a.reset:active, #fontsize p a.reset:focus, #fontsize p a.smaller:hover, #fontsize p a.smaller:active, #fontsize p a.smaller:focus, #fontsize p a.larger:hover, #fontsize p a.larger:active, #fontsize p a.larger:focus { background-position:right; } #logo {text-align:right} #logo span{ margin: 0 100px 2px 0 !important; text-align: right; } #header {overflow:hidden } #header ul{ text-align: left; } #header ul li{ background: none; } #header ul li a:link, #header ul li a:visited { border-right: solid 1px #666 !important; display: -moz-inline-box; zoom:1 } body #header ul { padding-top: 3px; margin-bottom: -5px; } #header form{ float: left !important; text-align: right; 
margin: 0 -10px 0 0 !important; } #header form .inputbox { padding:0 2px 0 0; margin-right: 20px !important; } #header form .button{ margin:20px 0 0 10px; padding:0 30px 0 5px; background: #000000 url(../images/pfeil_rtl.gif) no-repeat scroll 100% } #breadcrumbs{ text-align: right !important; margin:0 0 0 -10px !important; } #wrapper { margin:0 21% 0 0; } #main2{ float: right; } #left{ float: right; } #right{ right: auto; left: 0; float: right; } #left h3{ margin:0 5px 0 0; padding:7px 10px 7px 5px; } #right h3{ padding: 0 0 0 5px; } #all{ text-align: right; } p.buttonheading{ text-align: left; } .leading{ padding: 20px 120px 40px 20px; background: #EFDEEA url(../images/biene_rtl.gif) no-repeat scroll right top; } .leading div{ text-align: right; } form.login fieldset{ text-align: right; } form.login label.remember, form.login input.checkbox{ float: right; } form.login .button{ clear: right; background: #CC3399 url(../images/pfeil_rtl.gif) no-repeat scroll right; padding: 0 30px 0 0 !important; } #main ul li, #main2 ul li { padding:0 20px 0 0; text-align: right !important; } /* ie7 fix for list elements with count of articles */ #main ul li a , #main2 ul li a { display:inline-block; } /* end fix */ #main ul, #main2 ul { margin:0 20px 0 0; padding:0 10px 0 0; } #main ul, #main ol { margin:10px 20px 10px 0; padding:0 10px 0 0; } #main .leading .readon, #main2 .leading .readon{ background: #93246F url(../images/pfeil_rtl.gif) no-repeat scroll right; padding: 2px 30px 0 2px; right: auto; left: 0px !important; position: absolute; text-align: right; } #main a.readon:hover, #main2 a.readon:hover, #main a.readon:active, #main2 a.readon:active, #main a.readon:focus, #main2 a.readon:focus { background:#000000 url(../images/pfeil_rtl.gif) no-repeat scroll right; padding:2px 30px 0 2px; } #main a.readon, #main2 a.readon { background:transparent url(../images/pfeil_rtl.gif) no-repeat scroll right; padding:2px 30px 0 2px; display: table-cell; } #right form.poll fieldset label{ 
padding: 0 4px 0 0; } #right form.poll fieldset input{ float:right; } #right ul{ padding: 10px 0 10px 10px; zoom:1 } #left ul{ margin: 0 10px 20px 0; } #left ul li.active a:link, #left ul li.active a:visited { border-left: none; border-right:4px solid #BE7CA9; padding:3px 6px 3px 0; } #left ul li ul { border-width:0 4px 0 1px; } #left ul li.active ul li a:link, #left ul li.active ul li a:visited{ border-left: none; border-right: 0 solid #DD75BB; padding: 3px 20px 3px 4px; } #left ul li ul li ul{ padding: 10px 5px 10px 0; } #left ul li.active ul li.active ul li a:link, #left ul li.active ul li.active ul li a:visited{ background: #FFFFFF url(../images/arrow_rtl.gif) no-repeat scroll right top; margin: 0 15px 0 0; padding:3px 15px 3px 0; } ul.pagination{ overflow:hidden; } ul.pagination li { padding: 2px 5px 2px 5px; float:right ; font-size:1em; width:auto } ul.pagination a { display:inline !important; margin:0 !important; padding:0px !important; } #footer p{ float: left; } #footer p.syndicate{ float: right; } form.user label{ float: right; } /* edit button */ .contentpaneopen_edit{ float: right; } form.editor .publishing .radio label span , form.editor .publishing label{ float: right; } table.contenttoc{ float: left; } #main .blog_more ul, #main2 .blog_more ul{ padding: 10px 0 30px 10px; } #main .blog_more ul li a, #main2 .blog_more ul li a { background:transparent url(../images/pfeil_rtl.gif) no-repeat scroll right center; padding:2px 30px 4px 30px !important; display: table-cell; } #main .blog_more ul li a:active, #main2 .blog_more ul li a:active, #main .blog_more ul li a:focus, #main2 .blog_more ul li a:focus { background:#993399 url(../images/pfeil_rtl.gif) no-repeat scroll right !important; padding:2px 30px 2px 0; } #main .blog_more ul li a:hover,#main2 .blog_more ul li a:hover { background:#939 url(../images/pfeil_rtl.gif) no-repeat right !important; padding:2px 30px 2px 0; } form.search_result, form.registration, form.lost_password, .login_form{ margin: 15px 0 
0 10px; } .search_result .button, form.registration .button { background:#000000 url(../images/pfeil_rtl.gif) no-repeat scroll 100%; padding:0 30px 0 5px; } .phrase{ float: right; margin: 10px 0 10px 10px; } .only { float: right; margin:10px 10px 0 0; } .only input{ float: right; margin:0 0 0 10px; } form.registration label, form.lost_password label, .login_form label{ float: right; } .column2, .column1{ float: right; } .column2{ float: left; } .article_column{ padding: 10px 0 10px 10px; }
shafiqissani/Jewelery-Ecommerce-
templates/beez/css/template_rtl.css
CSS
gpl-2.0
7,175
/*
    JPC: An x86 PC Hardware Emulator for a pure Java Virtual Machine

    Copyright (C) 2012-2013 Ian Preston

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License version 2 as published by
    the Free Software Foundation.

    This program is distributed in the hope that it will be useful, but
    WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    Details (including contact information) can be found at:

    jpc.sourceforge.net
    or the developer website
    sourceforge.net/projects/jpc/

    End of licence header
*/

package org.jpc.emulator.execution.opcodes.vm;

import org.jpc.emulator.execution.*;
import org.jpc.emulator.execution.decoder.*;
import org.jpc.emulator.processor.*;
import org.jpc.emulator.processor.fpu64.*;
import static org.jpc.emulator.processor.Processor.*;

/**
 * FCHS opcode (virtual-8086 mode variant): flips the sign of the FPU
 * stack-top register ST(0).
 */
public class fchs extends Executable
{
    /**
     * Decoder-driven constructor; this opcode carries no operands, so
     * the prefices and input stream are accepted but unused.
     */
    public fchs(int blockStart, int eip, int prefices, PeekableInputStream input)
    {
        super(blockStart, eip);
    }

    /**
     * Executes the instruction: ST(0) := -ST(0).
     * Never branches, so control flow always falls through.
     */
    public Branch execute(Processor cpu)
    {
        cpu.fpu.setST(0, -cpu.fpu.ST(0));
        return Branch.None;
    }

    /** This instruction never alters control flow. */
    public boolean isBranch()
    {
        return false;
    }

    /** Identifies the executable by its class name (used for tracing/debug output). */
    public String toString()
    {
        return this.getClass().getName();
    }
}
ianopolous/JPC
src/org/jpc/emulator/execution/opcodes/vm/fchs.java
Java
gpl-2.0
1,660
Obsoleted Module ================ **This module is obsolete and is no longer developed.** Use the new [yast2-storage-ng](https://github.com/yast/yast-storage-ng) module instead. GIT --- If you want to see the last state, check the [*master_old*](../../tree/master_old) branch. Use the *SLE12-SPx* branches for SLE12 maintenance.
shundhammer/yast-storage
README.md
Markdown
gpl-2.0
342
jQuery(document).ready(function($){
	// NOTE(review): assumes the globals `timings` (per-slide display durations) and
	// `fade_rand` (unique slider instance id) are defined elsewhere on the page —
	// they are referenced below but never declared here. The original hints at this:
	//var timings
	//var fade_rand
	var blocked = false;                            // re-entrancy guard: true while a fade is in progress
	var slide_names = "fade_"+fade_rand+"_slide_";  // element-id prefix of the individual slides
	var current_slide = 0;                          // index of the slide currently shown

	// Arm the first automatic slide change; oneTime/stopTime are presumably from a
	// jQuery timers plugin loaded elsewhere — TODO confirm.
	$(document).oneTime(timings[0],fade_rand+"_timer", function(){
		change_slide(null);
	});

	// Clicking a navigation bullet jumps straight to that slide and cancels the
	// pending auto-advance timer.
	$("#fade_"+fade_rand+" div.fade_bullets_bullets ul li").click(function(){
		var id = $(this).attr("data-id");
		if(id != current_slide){
			$(document).stopTime(fade_rand+"_timer");
			change_slide(id);
		}
	});

	// Cross-fade from the current slide to `next_slide`. A null argument means
	// "advance to the next slide", wrapping back to slide 0 past the last one.
	function change_slide(next_slide){
		if(blocked == false){
			blocked = true;
			if(next_slide != null){
				var nextnum = next_slide;
			}
			else{
				var nextnum = parseInt(current_slide) +1;
			}
			next = $("#"+slide_names+nextnum);
			next_id = next.attr("id");
			if(next_slide == null){
				// No element with that id: ran past the last slide — wrap to slide 0.
				if(typeof(next_id) == "undefined"){
					next = $("#"+slide_names+"0");
					nextnum = 0;
				}
			}
			var current = $("#"+slide_names+current_slide);
			if(current.attr("id") != next.attr("id")){
				// Deactivate the old bullet, fade slides, then activate the new bullet.
				$("#fade_"+fade_rand+" div.fade_bullets_bullets ul li#fade_bullet_"+current_slide).removeClass("active");
				current.fadeOut(500,function(){
					$("#fade_"+fade_rand+" div.fade_bullets_bullets ul li#fade_bullet_"+nextnum).addClass("active");
					next.fadeIn(500, function(){
						current_slide = nextnum;
						blocked = false;
						// Re-arm the auto-advance timer with the new slide's duration.
						$(document).oneTime(timings[current_slide],fade_rand+"_timer", function(){
							change_slide(null);
						});
					});
				});
			}
			else{
				// Target is already the visible slide; just release the guard.
				blocked = false;
			}
		}
	}
});
andrewmanchester2/itsnotadate
wp-content/plugins/wp-jquery-text-and-image-slider/fade-bulleted/fade-bulleted.js
JavaScript
gpl-2.0
1,543
/* * This file is part of CDS Invenio. * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 2009 CERN. * * CDS Invenio is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * CDS Invenio is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CDS Invenio; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. */ /* A Javascript module performing all the operations related to the * HoldingPen BibEdit integration * The functions include * * General event handlers * onHoldingPenPanelRecordIdChanged * * Treatment of the left menu panel * holdingPenPanelAddEntry * holdingPenPanelSetChanges * holdingPenPanelRemoveEntry * holdingPenPanelRemoveChangeSet * holdingPenPanelRemoveEntries * * The chqngeset preview window * showHoldingPenChangesetPreview * onHoldingPenPreviewDataRetreived * onToggleDetailsVisibility * enableChangesetControls * disableChangesetControls * visualizeRetrievedChangeset * * Treatment of the changes previewed in the editor * onHoldingPenChangesetRetrieved * holdingPenPanelApplyChangeSet * visualizeRetrievedChangeset * removeViewedChange * addGeneralControls * onRejectChangeClicked * onAcceptAllChanges * prepareRemoveAllAppliedChanges * onRejectAllChanges * * Preparing the AJAX requests and changing the interface * prepareSubfieldRemovedRequest * prepareFieldRemovedRequest * prepareSubfieldAddedRequest * prepareFieldChangedRequest * prepareFieldAddedRequest * prepareSubfieldChangedRequest * * Functions performing the entire process of applying a change * 
applySubfieldChanged * applySubfieldRemoved * applyFieldRemoved * applySubfieldAdded * applyFieldChanged * applyFieldAdded */ function onHoldingPenPanelRecordIdChanged(recordId){ /** function that should be called when the edited record identifier changed * the functionality consists of reloading the entries using the Ajax call */ holdingPenPanelRemoveEntries(); createReq({recID: recordId, requestType: 'getHoldingPenUpdates'}, holdingPenPanelSetChanges); } /** Holding Pen menu panel */ function holdingPenPanelAddEntry(entry){ /** A function adding a holding pen entry to the interface and connecting appropriate * events with it * * Parameter: * entry - a holding Pen entry descriptor being a tuple * (changeset_number, changeset_datetime) */ changesetNumber = entry[0]; changesetDatetime = entry[1]; isChangesetProcessed = entry[2]; // if the entry has been already processed entry = createHoldingPenPanelEntry(changesetNumber, changesetDatetime); $(entry).appendTo("#bibeditHPChanges"); } function holdingPenPanelSetChanges(data){ /** Setting the Holding Pen panel content. 
* This function cqn be utilised as a Javascript callback * * Parameter: * data - The dictionary containing a 'changes' key under which, a list * of changes is stored */ if (data.recID == gRecID || data.recID == gRecIDLoading) { holdingPenPanelRemoveEntries(); for (var i = 0; i < data['changes'].length; i++) { holdingPenPanelAddEntry(data['changes'][i]); } } } function holdingPenPanelRemoveEntry(changesNum){ /** Function removing an entry representing a Holding Pen changeset * from the Holding Pen BibEdit menu panel * * Parameters: * changesNum - a Holding Pen changeset identifier */ $('#bibeditHoldingPenPanelEntry_' + changesNum).remove(); } function holdingPenPanelRemoveChangeSet(changesNum){ /** Function removing a partivular changeset from the panel in the menu * * Parameters: * changesetNum: the internal Holding Pen changeset identifier */ // removing the control holdingPenPanelRemoveEntry(changesNum) // now removing the changeset from the database // This is an operation that can not be undoed ! 
// TODO: if there is a necessity, undoing should be implemented var undoHandler = 0; var data = { recID: gRecID, requestType: "deleteHoldingPenChangeset", changesetNumber : changesNum, undoRedo : undoHandler } createReq(data, function(json){ updateStatus('report', gRESULT_CODES[json['resultCode']])}); } function holdingPenPanelRemoveEntries(){ /** Function that removes all the entries from the Holding Pen panel */ $("#bibeditHPChanges").empty(); } /*** Functions dealing with the changeset preview */ function showHoldingPenChangesetPreview(changesetNumber, record){ /** Function rendering the changeset preview * * Parameters: * changesetNumber - the internal Holding Pen identifier of the changeset * record - the object representing the record after changes */ newContent = createHoldingPenChangePreview(record); previewBoxId = "holdingPenPreview_" + changesetNumber; previewBoxSelector = "#" + previewBoxId; $(previewBoxSelector).html(newContent); } function onHoldingPenPreviewDataRetreived(json){ /** An event-handler function utilised executed when the data of a * particular changeset arrived, ready to be previewed * * Parameters: * json: a dictionary describing the retrieved changeset. * Must contain the following keys: * 'record' : a new record value * 'changeset_number' : the number of the changeset * downloded */ changesetNumber = json['changeset_number']; record = json['record']; showHoldingPenChangesetPreview(changesetNumber, record); gHoldingPenLoadedChanges[changesetNumber] = record; } function onToggleDetailsVisibility(changesetNumber){ /** Changes the visibility of the change preview. 
Initialises * dowloading the changeset data if necessary * * Parameters: * changesetNumber - the number of the HoldingPen changeset */ hidingClass = 'bibeditHPHiddenElement'; detailsSelector = "#holdingPenPreview_" + changesetNumber; togglingSelector = "#holdingPenToggleDetailsVisibility_" + changesetNumber; if ($(detailsSelector).hasClass(hidingClass)) { // showing the details -> the preview used to be closed if (gHoldingPenLoadedChanges[changesetNumber] == undefined) { // start prealoading the data that will be fileld into the // preview box createReq({ changesetNumber: changesetNumber, requestType: 'getHoldingPenUpdateDetails' }, onHoldingPenPreviewDataRetreived) } else { // showing the preview based on the precached data showHoldingPenChangesetPreview(gHoldingPenLoadedChanges[changesetNumber]); } // Making the DOM layers visible $(detailsSelector).removeClass(hidingClass); $(togglingSelector).text('-'); } else { // The changes preview was visible until now - time to hide it $(detailsSelector).addClass(hidingClass); $(togglingSelector).text('+'); } } function enableChangesetControls(changesetNum){ $("#bibeditHPRemoveChange" + changesetNum).removeClass('bibEditImgCtrlDisabled').addClass('bibEditImgCtrlEnabled').removeAttr("disabled"); $("#bibeditHPApplyChange" + changesetNum).removeClass('bibEditImgCtrlDisabled').addClass('bibEditImgCtrlEnabled').removeAttr("disabled"); } function disableChangesetControls(changesetNum){ $("#bibeditHPRemoveChange" + changesetNum).removeClass('bibEditImgCtrlEnabled').addClass('bibEditImgCtrlDisabled').attr("disabled", "disabled"); $("#bibeditHPApplyChange" + changesetNum).removeClass('bibEditImgCtrlEnabled').addClass('bibEditImgCtrlDisabled').attr("disabled", "disabled"); } function markHpChangesetAsInactive(changesetId){ $("#bibeditHoldingPenPanelEntry_" + changesetId).addClass("bibeditHPPanelEntryDisabled"); disableChangesetControls(changesetId); } function adjustHPChangesetsActivity(){ 
$(".bibeditHPPanelEntry").removeClass("bibeditHPPanelEntryDisabled"); $(".bibeditHPControl").removeClass('bibEditImgCtrlDisabled').addClass("bibEditImgCtrlEnabled").removeAttr("disabled"); // disabling the changes that have for (changesetId in gDisabledHpEntries){ if (gDisabledHpEntries[changesetId] === true){ markHpChangesetAsInactive(changesetId); } } } function prepareUndoVisualizeChangeset(changesetNumber, changesBefore){ /** Preparing the Ajax request data undoing the visualise data request */ // this is handler for undoing the visualization of preloaded Holding Pen changes var tagsToRedraw = {}; var addFieldChangesToRemove = {}; var addFieldChangesToDraw = {}; for (changeInd in gHoldingPenChanges){ tagsToRedraw[gHoldingPenChanges[changeInd].tag] = true; if (gHoldingPenChanges[changeInd].change_type == "field_added"){ addFieldChangesToRemove[changeInd] = true; } } gHoldingPenChanges = changesBefore; for (changeInd in gHoldingPenChanges){ tagsToRedraw[gHoldingPenChanges[changeInd].tag] = true; if (gHoldingPenChanges[changeInd].change_type == "field_added"){ // the changes that are not displayed at the moment but should be if (gHoldingPenChanges[changeInd].applied_change !== true){ addFieldChangesToDraw[changeInd] = true; } } } gDisabledHpEntries[changesetNumber] = false; // now updating the interface for (tag in tagsToRedraw){ redrawFields(tag); } for (changeNo in addFieldChangesToRemove){ removeAddFieldControl(changeNo); } for (changeNo in addFieldChangesToDraw){ addFieldAddedControl(changeNo); } // removing all the field_added changes adjustGeneralHPControlsVisibility(); adjustHPChangesetsActivity(); reColorFields(); var ajaxData = { hpChanges: { toOverride: changesBefore, changesetsToActivate: [changesetNumber] }, requestType: 'otherUpdateRequest', recID: gRecID, undoRedo: "undo" }; return ajaxData; } function visualizeRetrievedChangeset(changesetNumber, newRecordData, isRedo){ // first checking if there are already some changes loaded -> if so, wait var 
canPass = true; for (ind in gHoldingPenChanges){ if (gHoldingPenChanges[ind].applied_change !== true){ // undefined or false canPass = false; } } if (canPass) { var oldChangesList = gHoldingPenChanges; // we want to get rid of some changes that are obviously invalid, // such as removal of the record number var newChangesList = filterChanges(compareRecords(gRecord, newRecordData)); var undoRedo = 0; if (isRedo === true){ // this operation can be performed only on redo or as a genuine operation ! undoRedo = "redo"; } else { undoRedo = prepareUndoHandlerVisualizeChangeset(changesetNumber, oldChangesList, newChangesList); addUndoOperation(undoRedo); } var ajaxData = prepareVisualizeChangeset(changesetNumber, newChangesList, undoRedo); createReq(ajaxData, function(json){ updateStatus('report', gRESULT_CODES[json['resultCode']]); }); } else { alert("Please process the changes already visualised in the interface"); enableChangesetControls(changesetNumber); } } function prepareVisualizeChangeset(changesetNumber, newChangesList, undoHandler){ /** Makes the retrieved changeset visible in the main BibEdit editor * * Parameters: * changesetNumber: the internal Holding Pen number of the changeset * newRecordData: the value of a record after changing * undoHandler: the handler passed directly throught the AJAX call */ gHoldingPenChanges = []; $("#holdingPenPreview_" + changesetNumber).remove(); // now producing the controls allowing to apply the change for (change in newChangesList) { changePos = gHoldingPenChanges.length; gHoldingPenChanges[changePos] = newChangesList[change]; addChangeControl(changePos); } gDisabledHpEntries[changesetNumber] = true; adjustHPChangesetsActivity(); adjustGeneralHPControlsVisibility(); return { hpChanges: { toOverride: gHoldingPenChanges, changesetsToDeactivate : [changesetNumber] }, requestType: 'otherUpdateRequest', recID: gRecID, undoRedo: undoHandler }; } /** Treatment of the changesets applied to the main editor */ function 
onHoldingPenChangesetRetrieved(json){ /** An event-havdler executed when a changeset intended to be applied * is retrieved * * Parameters: * json - The response code. A dictionary. it has to contain following * keys: * 'record' - a new record object * 'changeset_number' - an internal HoldingPen identifier of * the changeset */ newRecordData = json['record']; changesNumber = json['changeset_number']; // processing added and modified fields visualizeRetrievedChangeset(changesNumber, newRecordData); gHoldingPenLoadedChanges[changesNumber] = newRecordData; } function holdingPenPanelApplyChangeSet(changesNum){ /** Applies the changeset of given number to the record * (initialises the retrieving if necessary) * * applying a changeset consists of adding the proposal * buttons in appropriate fields and removing the Holding Pen entry */ if (failInReadOnly()){ return; } disableChangesetControls(changesNum); if (gHoldingPenLoadedChanges[changesNum] == undefined){ createReq({ changesetNumber: changesNum, requestType: 'getHoldingPenUpdateDetails'}, onHoldingPenChangesetRetrieved); }else { // we can apply the changes directly without wawiting for them to be retrieved visualizeRetrievedChangeset(changesNum, gHoldingPenLoadedChanges[changesNum]); } } /** Functions performing the client-side of applying a change and creating an appropriate AJAX request data * The client side operations consist of modifying the client-side model * * Each of these functions takes exactly one parameter being the client-side identifier of the change * and in the same time, the index in global gHoldingPenChanges array */ function prepareHPFieldChangedUndoHandler(changeNo){ var tag = gHoldingPenChanges[changeNo].tag; var fieldPos = gHoldingPenChanges[changeNo].field_position; var oldInd1 = gRecord[tag][fieldPos][1]; var oldInd2 = gRecord[tag][fieldPos][2]; var oldSubfields = gRecord[tag][fieldPos][0]; var oldIsControlField = gRecord[tag][fieldPos][3] != ""; var oldValue = gRecord[tag][fieldPos][3]; var 
newInd1 = gHoldingPenChanges[changeNo].indicators[0]; var newInd2 = gHoldingPenChanges[changeNo].indicators[1]; var newSubfields = gHoldingPenChanges[changeNo].field_content; var newIsControlField = false; var newValue = ""; var origHandler = prepareUndoHandlerChangeField(tag, fieldPos, oldInd1, oldInd2, oldSubfields, oldIsControlField, oldValue, newInd1, newInd2, newSubfields, newIsControlField, newValue); return prepareUndoHandlerApplyHPChange(origHandler, changeNo); } function prepareHPSubfieldRemovedUndoHandler(changeNo){ var tag = gHoldingPenChanges[changeNo].tag; var fieldPos = gHoldingPenChanges[changeNo].field_position; var sfPos = gHoldingPenChanges[changeNo].subfield_position; var toDelete = {}; var sfToDelete = {}; sfToDelete[tag] = {}; sfToDelete[tag][fieldPos] = {}; sfToDelete[tag][fieldPos][sfPos] = gRecord[tag][fieldPos][0][sfPos]; toDelete.fields = {}; toDelete.subfields = sfToDelete; var origHandler = prepareUndoHandlerDeleteFields(toDelete); return prepareUndoHandlerApplyHPChange(origHandler, changeNo); } function prepareSubfieldRemovedRequest(changeNo){ var fieldId = gHoldingPenChanges[changeNo].tag; var fieldPos = gHoldingPenChanges[changeNo].field_position; var sfPos = gHoldingPenChanges[changeNo].subfield_position; var toDelete = {}; toDelete[fieldId] = {}; toDelete[fieldId][fieldPos] = [sfPos]; gRecord[fieldId][fieldPos][0].splice(sfPos, 1); redrawFields(fieldId); return { recID: gRecID, requestType: 'deleteFields', toDelete: toDelete, hpChanges: { toDisable : [changeNo] } }; } function prepareHPFieldRemovedUndoHandler(changeNo){ var tag = gHoldingPenChanges[changeNo].tag; var fieldPos = gHoldingPenChanges[changeNo].field_position; var toDelete = {}; var fToDelete = {} fToDelete[tag] = {}; fToDelete[tag][fieldPos] = gRecord[tag][fieldPos]; toDelete.subfields = {}; toDelete.fields = fToDelete; var origHandler = prepareUndoHandlerDeleteFields(toDelete); return prepareUndoHandlerApplyHPChange(origHandler, changeNo); } function 
prepareFieldRemovedRequest(changeNo){ var fieldId = gHoldingPenChanges[changeNo]["tag"]; var fieldPos = gHoldingPenChanges[changeNo]["field_position"]; var toDelete = {}; toDelete[fieldId] = {}; toDelete[fieldId][fieldPos] = []; gRecord[fieldId].splice(fieldPos, 1); redrawFields(fieldId); return { recID: gRecID, requestType: 'deleteFields', toDelete: toDelete, hpChanges: {toDisable : [changeNo]} }; } function prepareHPSubfieldAddedUndoHandler(changeNo){ var tag = gHoldingPenChanges[changeNo]["tag"]; var fieldPos = gHoldingPenChanges[changeNo]["field_position"]; var sfCode = gHoldingPenChanges[changeNo]["subfield_code"]; var sfValue = gHoldingPenChanges[changeNo]["subfield_content"]; var subfields = [[sfCode, sfValue]]; var origHandler = prepareUndoHandlerAddSubfields(tag, fieldPos, subfields); return prepareUndoHandlerApplyHPChange(origHandler, changeNo); } function prepareSubfieldAddedRequest(changeNo){ var fieldId = gHoldingPenChanges[changeNo]["tag"]; var indicators = gHoldingPenChanges[changeNo]["indicators"]; var fieldPos = gHoldingPenChanges[changeNo]["field_position"]; var sfType = gHoldingPenChanges[changeNo]["subfield_code"]; var content = gHoldingPenChanges[changeNo]["subfield_content"]; gRecord[fieldId][fieldPos][0].push([sfType, content]); return { recID: gRecID, requestType: 'addSubfields', tag: fieldId, fieldPosition: fieldPos, subfields: [[sfType, content]], hpChanges: {toDisable: [changeNo]} }; } function prepareFieldChangedRequest(changeNumber, undoHandler){ var change = gHoldingPenChanges[changeNumber]; var tag = change.tag; var indicators = change.indicators; var ind1 = (indicators[0] == '_') ? ' ' : indicators[0]; var ind2 = (indicators[1] == '_') ? 
' ' : indicators[1]; var fieldPos = change.field_position; var subFields = change.field_content; return performChangeField(tag, fieldPos, ind1, ind2, subFields, false, "", undoHandler); } function getFullFieldContentFromHPChange(changeNo){ /** An auxiliary function allowing us to obrain a full record content based on the HP change entry. The record content might be retrieved from the following types of Holdin Pen changes: subfield_changed: a field that contains only one subfield (the one that has been changed) field_changed: a field containing all the new content field_added: a field containing all the new content Arguemnts: changeNo: a number of the change which we are considering Result: An object containing following properties: tag : a tag of the field ind1, ind2 : indicators of the field isControlField: a boolean value indicating if the resulting field is a Control Field value: a value in case of dealing with a Control Field an empty object is returned in case of passing unsupported type of Holding Pen change */ var chT = gHoldingPenChanges[changeNo]["change_type"]; if (chT != "subfield_changed" && chT != "field_added" && chT != "field_changed"){ return {}; } var indicators = gHoldingPenChanges[changeNo]["indicators"]; var result = {}; result.tag = gHoldingPenChanges[changeNo].tag; result.ind1 = (indicators[0] == '_') ? " " : indicators[0]; result.ind2 = (indicators[1] == '_') ? " " : indicators[1]; if (chT == "subfield_changed"){ result.subfields = [[gHoldingPenChanges[changeNo].subfield_code, gHoldingPenChanges[changeNo].subfield_content]]; } if (chT == "field_added" || chT == "field_changed"){ result.subfields = subfields = gHoldingPenChanges[changeNo]. field_content; } result.isControlField = false; result.value = ""; return result; } function prepareHPFieldAddedUndoHandler(changeNo, fieldPos){ /** A function creating the Undo/Redo handler for applying a change consisting of adding a new field. 
This handler can be only created after the field is really added Arguments: changeNo: a number of the Holding Pen Change fieldPos: a position on which the field has been inserted. */ var r = getFullFieldContentFromHPChange(changeNo); var origHandler = prepareUndoHandlerAddField(r.tag, r.ind1, r.ind2, fieldPos, r.subfields, r.isControlField, r.value); return prepareUndoHandlerApplyHPChange(origHandler, changeNo); } function prepareFieldAddedRequest(changeNo){ /** A function preparing the request of adding a new field, based on the HoldingPen change. This function can be used with following change types: subfield_changed : in the case when we want to add new field instead of getting modifying the existing content field_changed : In the case when we want to add a new field instead of modifying the existing structure field_added : the most regular case of adding a new field Arguments: changeNo: a number of the change associated with the request that is being created. Result: A complete AJAX data related to adding a field based on a Holding Pen change */ var r = getFullFieldContentFromHPChange(changeNo); var position = insertFieldToRecord(gRecord, r.tag, r.ind1, r.ind2, r.subfields); return { recID: gRecID, requestType: "addField", controlfield : r.isControlField, fieldPosition : position, tag: r.tag, ind1: r.ind1, ind2: r.ind2, subfields: r.subfields, value: r.value, hpChanges: {toDisable: [changeNo]} }; } function prepareSubfieldChangedRequest(changeNo){ /** a wrapper around getUpdateSubfieldValueRequestData, providing the values from the change*/ var tag = gHoldingPenChanges[changeNo].tag; var fieldPosition = gHoldingPenChanges[changeNo].field_position; var subfieldIndex = gHoldingPenChanges[changeNo].subfield_position; var subfieldCode = gRecord[tag][fieldPosition][0][subfieldIndex][0]; var value = gHoldingPenChanges[changeNo].subfield_content; gRecord[tag][fieldPosition][0][subfieldIndex][1] = value; return getUpdateSubfieldValueRequestData(tag, fieldPosition, 
subfieldIndex, subfieldCode, value, changeNo); } /*** A set of functions applying differend kinds of changes * All the functions obtain the identifier of a change ( NOT from the HoldingPen but one generated * on the client side, that is the index in gHoldingPenChanges global Javascript array ) */ function applySubfieldChanged(changeNo){ /** Function applying the change of changing the subfield content */ if (failInReadOnly()){ return; } if (gCurrentStatus == "ready") { var tag = gHoldingPenChanges[changeNo].tag; var fieldPos = gHoldingPenChanges[changeNo].field_position; var sfPos = gHoldingPenChanges[changeNo].subfield_position; var content = gHoldingPenChanges[changeNo].subfield_content; var sfCode = gRecord[tag][fieldPos][0][sfPos][0]; var oldContent = gRecord[tag][fieldPos][0][sfPos][1]; gRecord[tag][fieldPos][0][sfPos][1] = content; // changing the local copy var modificationUndoHandler = prepareUndoHandlerChangeSubfield(tag, fieldPos, sfPos, oldContent, content, sfCode, sfCode); var undoHandler = prepareUndoHandlerApplyHPChange(modificationUndoHandler, changeNo); addUndoOperation(undoHandler); updateSubfieldValue(tag, fieldPos, sfPos, gRecord[tag][fieldPos][0][sfPos][0], content, changeNo, undoHandler); removeViewedChange(changeNo); } } function applySubfieldRemoved(changeNo){ /** Function applying the change of removing the subfield */ if (failInReadOnly()){ return; } if (gCurrentStatus == "ready") { var undoHandler = prepareHPSubfieldRemovedUndoHandler(changeNo); var data = prepareSubfieldRemovedRequest(changeNo); data.undoRedo = undoHandler; addUndoOperation(data.undoRedo); removeViewedChange(changeNo); createReq(data, function(json){ updateStatus('report', gRESULT_CODES[json['resultCode']]); }); } } function applyFieldRemoved(changeNo){ /** Function applying the change of removing the field */ if (failInReadOnly()){ return; } if (gCurrentStatus == "ready") { var fieldId = gHoldingPenChanges[changeNo]["tag"]; var indicators = 
gHoldingPenChanges[changeNo]["indicators"]; var fieldPos = gHoldingPenChanges[changeNo]["field_position"]; var undoHandler = prepareHPFieldRemovedUndoHandler(changeNo); var data = prepareFieldRemovedRequest(changeNo); data.undoRedo = undoHandler; addUndoOperation(undoHandler); createReq(data, function(json){ updateStatus('report', gRESULT_CODES[json['resultCode']]); }); // now the position of the fields has changed. We have to fix all teh references inside the gHoldingPenChanges for (change in gHoldingPenChanges) { if ((gHoldingPenChanges[change]["tag"] == fieldId) && (gHoldingPenChanges[change]["indicators"] == indicators)) { if (gHoldingPenChanges[change]["field_position"] > fieldPos) { gHoldingPenChanges[change]["field_position"] -= 1; } if (gHoldingPenChanges[change]["field_position"] == fieldPos) { // there are more changes associated with this field ! They are no more correct // and should be removed... it is also possible to consider transforming them into add field // change, but seems to be an unnecessary effort gHoldingPenChanges[change].applied_change = true; } } } removeViewedChange(changeNo); // includes redrawing the controls } } function applySubfieldAdded(changeNo){ /** Function applying the change of adding the subfield */ if (failInReadOnly()){ return; } if (gCurrentStatus == "ready") { var undoHandler = prepareHPSubfieldAddedUndoHandler(changeNo); var data = prepareSubfieldAddedRequest(changeNo); data.undoRedo = undoHandler; addUndoOperation(undoHandler); createReq(data, function(json){ updateStatus('report', gRESULT_CODES[json['resultCode']]) }); removeViewedChange(changeNo); // automatic redrawing ! 
} } function applyFieldChanged(changeNumber){ /** Function applying the change of changing the field content */ if (failInReadOnly()){ return; } if (gCurrentStatus == "ready") { var undoHandler = prepareHPFieldChangedUndoHandler(changeNumber); addUndoOperation(undoHandler); var data = prepareFieldChangedRequest(changeNumber, undoHandler); createReq(data, function(json){ updateStatus('report', gRESULT_CODES[json['resultCode']]) }); removeViewedChange(changeNumber); // redrawing included in this call } } function applyFieldAdded(changeNo){ /** Function applying the change of adding the field */ if (failInReadOnly()){ return; } if (gCurrentStatus == "ready") { var data = prepareFieldAddedRequest(changeNo); var undoHandler = prepareHPFieldAddedUndoHandler(changeNo, data.fieldPosition); addUndoOperation(undoHandler); data.undoRedo = undoHandler; createReq(data, function(json){ updateStatus('report', gRESULT_CODES[json['resultCode']]) }); // now adding appropriate controls to the interface removeViewedChange(changeNo); redrawFields(fieldId); reColorFields(); } } /*** Manipulations on changes previewed in the editor */ function updateInterfaceAfterChangeModification(changeNo){ tag = gHoldingPenChanges[changeNo]["tag"]; redrawFields(tag, true); // redraw the controls - skipping the field added controls reColorFields(); // in case of add_field change being reactivated/activated, we have to display the interface if (gHoldingPenChanges[changeNo].change_type == "field_added"){ if (gHoldingPenChanges[changeNo].applied_change == undefined || gHoldingPenChanges[changeNo].applied_change !== true){ addFieldAddedControl(changeNo); } else { // in case of the add_field action, the controls have to be removed in a different manner - // they are not part of the main table removeAddFieldControl(changeNo); } } adjustGeneralHPControlsVisibility(); } function revertViewedChange(changeNo){ /** Reverts a Holding Pen change that has been marked as removed before Parameters: changeNo - The 
change index in local changes array changeType - type of a current change (to override) */ gHoldingPenChanges[changeNo].applied_change = false; updateInterfaceAfterChangeModification(changeNo); } function removeViewedChange(changeNo){ /** Function removing the control of a given change * * Parameters: * changeNo - a client-side identifier of the change */ gHoldingPenChanges[changeNo].applied_change = true; updateInterfaceAfterChangeModification(changeNo); } function addGeneralControls(){ /** If necessary, creates the panel containing the general controls that allow * to accept or reject all teh viewed changes */ if ($("#bibeditHoldingPenGC").length == 0){ panel = createGeneralControlsPanel(); $("#bibEditContent").prepend(panel); } } function adjustGeneralHPControlsVisibility(){ /** Function adjusting the visibility of the general Holding Pen changes bar. This bar is responsible of applying or rejecting all the visualized changes at once */ var shouldDisplay = false; for (changeInd in gHoldingPenChanges){ if (gHoldingPenChanges[changeInd].applied_change !== true){ shouldDisplay = true; } } if (shouldDisplay){ addGeneralControls(); } else { $("#bibeditHoldingPenGC").remove(); } } function refreshChangesControls(){ /** Redrawing all the changes controls */ removeAllChangeControls(); var tagsToRedraw = {}; for (changeInd in gHoldingPenChanges){ if (gHoldingPenChanges[changeInd].applied_change !== true){ addChangeControl(changeInd); tagsToRedraw[gHoldingPenChanges[changeInd].tag] = true; } } for (tag in tagsToRedraw){ redrawFields(tag); } adjustHPChangesetsActivity(); } function prepareHPRejectChangeUndoHandler(changeNo){ var origHandler = prepareUndoHandlerEmpty(); return prepareUndoHandlerApplyHPChange(origHandler, changeNo); } function onRejectChangeClicked(changeNo){ /** An event handler fired when user requests to reject the change that has been proposed * by the user interface*/ var undoHandler = prepareHPRejectChangeUndoHandler(changeNo); 
  addUndoOperation(undoHandler);
  removeViewedChange(changeNo);
  createReq({ requestType : "otherUpdateRequest",
              hpChanges : { toDisable: [changeNo]},
              recID : gRecID,
              undoRedo: undoHandler }, function(json){
    updateStatus('report', gRESULT_CODES[json['resultCode']])
  });
}

function aggregateHoldingPenChanges(){
  /** Aggregates the Holding Pen changes into different categories.
      Returns an object with the following fields:
        changesAddModify : numbers of changes modifying or adding fields/subfields
        changesRemoveField : numbers of field removal changes
        changesRemoveSubfield : numbers of subfield removal changes */
  var result = {};
  result.changesAddModify= [];
  result.changesRemoveField = [];
  result.changesRemoveSubfield = [];
  for (changeNum in gHoldingPenChanges){
    changeNumInt = parseInt(changeNum); // NOTE(review): implicit globals (missing "var")
    changeType = gHoldingPenChanges[changeNum].change_type;
    if ( changeType == "field_added" ||
         changeType == "subfield_changed" ||
         changeType == "subfield_added" ||
         changeType == "field_changed"){
      result.changesAddModify.push(changeNumInt);
    }
    if ( changeType == "field_removed"){
      result.changesRemoveField.push(changeNumInt);
    }
    if ( changeType == "subfield_removed"){
      result.changesRemoveSubfield.push(changeNumInt);
    }
  }
  return result;
}

function acceptAddModifyChanges(changeNumbers){
  /** A helper function. Applies a list of add/modify Holding Pen changes
      Returns an object with the following subfields
        ajaxData : a list of Ajax requests data
        undoHandlers : a list of undo handlers
        tagsToRedraw : a dictionary of tags affected by the changes and needing to be redrawn.
Every entry is of the form "tag" : true */ var result = {}; result.ajaxData = []; result.undoHandlers = []; result.tagsToRedraw = {}; for (changePos in changeNumbers) { var changeNum = changeNumbers[changePos]; var changeType = gHoldingPenChanges[changeNum].change_type; result.tagsToRedraw[gHoldingPenChanges[changeNum].tag] = true; if ( changeType == "field_added"){ var changeData = prepareFieldAddedRequest(changeNum); var undoHandler = prepareHPFieldAddedUndoHandler(changeNum, changeData.fieldPosition); result.ajaxData.push(changeData); result.undoHandlers.push(undoHandler); } if (changeType == "subfield_changed"){ var tag = gHoldingPenChanges[changeNum].tag; var fieldPos = gHoldingPenChanges[changeNum].field_position; var sfPos = gHoldingPenChanges[changeNum].subfield_position; var content = gHoldingPenChanges[changeNum].subfield_content; var sfCode = gRecord[tag][fieldPos][0][sfPos][0]; var oldContent = gRecord[tag][fieldPos][0][sfPos][1]; var modificationUndoHandler = prepareUndoHandlerChangeSubfield(tag, fieldPos, sfPos, oldContent, content, sfCode, sfCode); var undoHandler = prepareUndoHandlerApplyHPChange(modificationUndoHandler, changeNum); var changeData = prepareSubfieldChangedRequest(changeNum); result.ajaxData.push(changeData); result.undoHandlers.push(undoHandler); } if ( changeType == "subfield_added"){ var undoHandler = prepareHPSubfieldAddedUndoHandler(changeNo); var changeData = prepareSubfieldAddedRequest(changeNum); result.undoHandlers.push(undoHandler); result.ajaxData.push(changeData); } if ( changeType == "field_changed"){ var undoHandler = prepareHPFieldChangedUndoHandler(changeNum); var changeData = prepareFieldChangedRequest(changeNum, 0); result.undoHandlers.push(undoHandler); result.ajaxData.push(changeData); } } return result; } function acceptRemoveFieldChanges(changeNumbers){ /** A function applying all the field removal changes. 
      Returns an object having the following subfields:
        ajaxData: a list of ajax objects
        undoHandlers: a list of undo handlers associated with the removals
        tagsToRedraw: a dictionary of tags affected by the changes and needing
          to be redrawn. Every entry is of the form "tag" : true */
  var result = {};
  result.ajaxData = [];
  result.undoHandlers = [];
  result.tagsToRedraw = {};
  /** First we have to sort the removals in the order of descending indices
      in order to make subsequent removals harmless to each other */
  // NOTE(review): sort() reorders the caller's array in place, and val1/val2
  // leak to the global scope (missing "var")
  var changesRemoveFieldNumbersSorted = changeNumbers.sort(
    function (a, b){
      val1 = gHoldingPenChanges[a].field_position;
      val2 = gHoldingPenChanges[b].field_position;
      if (val1 < val2)
        return 1;
      else{
        if (val1 == val2)
          return 0;
        else
          return -1;
      }
  });
  /** Now we can proceed with applying the changes in a given order */
  for (changePos in changesRemoveFieldNumbersSorted){
    var changeNum = changesRemoveFieldNumbersSorted[changePos];
    var undoHandler = prepareHPFieldRemovedUndoHandler(changeNum);
    var changeData = prepareFieldRemovedRequest(changeNum);
    result.tagsToRedraw[gHoldingPenChanges[changeNum].tag] = true;
    result.undoHandlers.push(undoHandler);
    result.ajaxData.push(changeData);
  }
  return result;
}

function acceptRemoveSubfieldChanges(changeNumbers){
  /** A function applying all the subfield removal changes.
      Returns an object having the following subfields:
        ajaxData: a list of ajax objects
        undoHandlers: a list of undo handlers associated with the removals
        tagsToRedraw: a dictionary of tags affected by the changes and needing
          to be redrawn. Every entry is of the form "tag" : true
  */
  var result = {};
  result.undoHandlers = [];
  result.ajaxData = [];
  result.tagsToRedraw = {};
  /** First we sort all the changes by the decreasing subfield index in order to make the subsequent changes harmless to each other.
Subfield positions associated with the changes not being applied yet, should be always valid, which means that every time we have to remove a subfield with the highest index */ var changesRemoveSubfieldNumbersSorted = changeNumbers.sort( function (a, b){ val1 = gHoldingPenChanges[a].subfield_position; val2 = gHoldingPenChanges[b].subfield_position; if (val1 < val2) return 1; else{ if (val1 == val2) return 0; else return -1; } }); /** Now we can proceed with the removals in the appropriate order*/ for (changePos in changesRemoveSubfieldNumbersSorted){ var changeNum = changesRemoveSubfieldNumbersSorted[changePos]; var undoHandler = prepareHPSubfieldRemovedUndoHandler(changeNum); var changeData = prepareSubfieldRemovedRequest(changeNum); undoHandlers.push(undoHandler); changesRemove.push(changeData); result.tagsToRedraw[gHoldingPenChanges[changeNum].tag] = true; } return result; } function onAcceptAllChanges(){ /** Applying all the changes visualised in the editor. */ /** Changes have to be ordered by their type. First we process the modifications of the content and adding new fields and subfields. Such changes do not modify the numeration of other fields/subfields and so, the indices of fields/subfields stored in other changes remain valid */ var chNumbers = aggregateHoldingPenChanges(); /** First we add the addField requests, as they do not change the numbers of existing fields and subields. Subsequents field/subfield removals will be possible. An opposite order (first removals and then adding, would break the record structure */ var resAddUpdate = acceptAddModifyChanges(chNumbers.changesAddModify); /** Next we can proceed with the subfields removal. Application of such changes implies modification of the subfield indices. Field positions remain untouched */ var resRemoveSubfields = acceptRemoveSubfieldChanges( chNumbers.changesRemoveSubfield); /** Finally, we can proceed with removal of the fields. 
Doing so, changes the field numbers */ var resRemoveFields = acceptRemoveFieldChanges( chNumbers.changesRemoveSubfield); /** Now we remove all the changes visulaized in the interface */ var removeAllChangesUndoHandler = prepareUndoHandlerRemoveAllHPChanges( gHoldingPenChanges); var removeAllChangesAjaxData = prepareRemoveAllAppliedChanges(); /** updating the user interface after all the changes being finished in the cliens side model */ var collectiveTagsToRedraw = {}; for (tag in resAddUpdate.tagsToRedraw){ collectiveTagsToRedraw[tag] = true; } for (tag in resRemoveFields.tagsToRedraw){ collectiveTagsToRedraw[tag] = true; } for (tag in resRemoveSubfields.tagsToRedraw){ collectiveTagsToRedraw[tag] = true; } for (tag in collectiveTagsToRedraw){ redrawFields(tag); } adjustGeneralHPControlsVisibility(); reColorFields(); /** At this point, all the changes to the browser interface are finished. The only remaining activity is combining the AJAX request into one big, preparing the bulk undo/redo handler and passing the request to the server side of BibEdit */ var collectiveAjaxData = resAddUpdate.ajaxData.concat( resRemoveSubfields.ajaxData.concat( resRemoveFields.ajaxData.concat( [removeAllChangesAjaxData]))); var collectiveUndoHandlers = resAddUpdate.undoHandlers.concat( resRemoveSubfields.undoHandlers.concat( resRemoveFields.undoHandlers.concat( [removeAllChangesUndoHandler]))); collectiveUndoHandlers.reverse(); var finalUndoHandler = prepareUndoHandlerBulkOperation(collectiveUndoHandlers, "apply all changes"); addUndoOperation(finalUndoHandler); var optArgs = { undoRedo: finalUndoHandler }; createBulkReq(collectiveAjaxData, function(json){ updateStatus('report', gRESULT_CODES[json['resultCode']]) }, optArgs); } function prepareRemoveAllAppliedChanges(){ /**Removing all the changes together with their user interface controls. 
     in order to avoid multiple redrawing of the same fields, the changes are
     grouped by the tag (because the tag is drawn at once); the requests for
     adding the changes are treated separately */
  gHoldingPenChanges = [];
  removeAllChangeControls();
  return {recID: gRecID, requestType: "otherUpdateRequest", hpChanges: {toOverride : []}};
}

function onRejectAllChanges(){
  /** Rejects all the considered Holding Pen changes at once: clears the
      client-side list of changes, notifies the server and registers a single
      undo operation able to restore the whole set */
  var undoHandler = prepareUndoHandlerRemoveAllHPChanges(gHoldingPenChanges);
  addUndoOperation(undoHandler);
  var ajaxData = prepareRemoveAllAppliedChanges();
  ajaxData.undoRedo = undoHandler;
  createReq(ajaxData, function(json){
    updateStatus('report', gRESULT_CODES[json['resultCode']])});
  reColorFields();
}
pombredanne/invenio-old
modules/bibedit/lib/bibedit_holdingpen.js
JavaScript
gpl-2.0
43,195
/*
  KeePass Password Safe - The Open-Source Password Manager
  Copyright (C) 2003-2015 Dominik Reichl <dominik.reichl@t-online.de>

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/

using System;
using System.Collections.Generic;
using System.Diagnostics;

using KeePassLib.Interfaces;

namespace KeePassLib
{
	/// <summary>
	/// Represents an object that has been deleted.
	/// </summary>
	public sealed class PwDeletedObject : IDeepCloneable<PwDeletedObject>
	{
		// PwUuid.Zero acts as a sentinel until a real UUID is assigned.
		private PwUuid m_uuid = PwUuid.Zero;

		/// <summary>
		/// UUID of the entry that has been deleted.
		/// </summary>
		/// <exception cref="ArgumentNullException">Thrown when set to null.</exception>
		public PwUuid Uuid
		{
			get { return m_uuid; }
			set
			{
				if(value == null) throw new ArgumentNullException("value");
				m_uuid = value;
			}
		}

		private DateTime m_dtDeletionTime = PwDefs.DtDefaultNow;

		/// <summary>
		/// The date/time when the entry has been deleted.
		/// </summary>
		public DateTime DeletionTime
		{
			get { return m_dtDeletionTime; }
			set { m_dtDeletionTime = value; }
		}

		/// <summary>
		/// Construct a new <c>PwDeletedObject</c> object.
		/// </summary>
		public PwDeletedObject()
		{
		}

		/// <summary>
		/// Construct a new <c>PwDeletedObject</c> object with the given
		/// UUID and deletion time.
		/// </summary>
		/// <exception cref="ArgumentNullException">Thrown when <paramref name="uuid"/> is null.</exception>
		public PwDeletedObject(PwUuid uuid, DateTime dtDeletionTime)
		{
			if(uuid == null) throw new ArgumentNullException("uuid");

			m_uuid = uuid;
			m_dtDeletionTime = dtDeletionTime;
		}

		/// <summary>
		/// Clone the object.
		/// </summary>
		/// <returns>Value copy of the current object.</returns>
		public PwDeletedObject CloneDeep()
		{
			PwDeletedObject pdo = new PwDeletedObject();

			pdo.m_uuid = m_uuid; // PwUuid objects are immutable
			pdo.m_dtDeletionTime = m_dtDeletionTime;

			return pdo;
		}
	}
}
haro-freezd/KeePass
KeePassLib/PwDeletedObject.cs
C#
gpl-2.0
2,295
/*
 * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */


package org.graalvm.compiler.jtt.jdk;

import java.util.EnumMap;

import org.junit.Test;

import org.graalvm.compiler.jtt.JTTTest;

/*
 * JTT unit test exercising EnumMap.get() where the key is obtained
 * dynamically via Enum.values()[i].
 */
public class EnumMap01 extends JTTTest {

    // Statically populated map used by all test runs.
    private static final EnumMap<Enum, String> map = new EnumMap<>(Enum.class);

    static {
        map.put(Enum.A, "A");
        map.put(Enum.B, "B");
        map.put(Enum.C, "C");
    }

    // Looks up the i-th enum constant; expected results are "A", "B", "C".
    public static String test(int i) {
        return map.get(Enum.values()[i]);
    }

    private enum Enum {
        A,
        B,
        C
    }

    @Test
    public void run0() throws Throwable {
        runTest("test", 0);
    }

    @Test
    public void run1() throws Throwable {
        runTest("test", 1);
    }

    @Test
    public void run2() throws Throwable {
        runTest("test", 2);
    }

}
md-5/jdk10
src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/jdk/EnumMap01.java
Java
gpl-2.0
1,845
/* Copyright_License {

  XCSoar Glide Computer - http://www.xcsoar.org/
  Copyright (C) 2000-2013 The XCSoar Project
  A detailed list of copyright holders can be found in the file "AUTHORS".

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License
  as published by the Free Software Foundation; either version 2
  of the License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
}
*/

#ifndef XCSOAR_DIALOGS_PLANES_HPP
#define XCSOAR_DIALOGS_PLANES_HPP

struct Plane;

/** Shows the modal dialog listing the configured planes. */
void
dlgPlanesShowModal();

/** Shows the modal details dialog for the given plane, editing it in place.
    NOTE(review): the bool return presumably reports whether the plane was
    modified/accepted - confirm against call sites. */
bool
dlgPlaneDetailsShowModal(Plane &plane);

/** Shows the modal polar dialog for the given plane, editing it in place.
    NOTE(review): see return-value note above. */
bool
dlgPlanePolarShowModal(Plane &plane);

#endif
onkelhotte/test
src/Dialogs/Plane/PlaneDialogs.hpp
C++
gpl-2.0
1,109
<!DOCTYPE html> <!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]--> <!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]--> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <title>thermo command &mdash; LIGGGHTS v3.X documentation</title> <link rel="stylesheet" href="_static/css/theme.css" type="text/css" /> <link rel="top" title="LIGGGHTS v3.X documentation" href="index.html"/> <script src="_static/js/modernizr.min.js"></script> </head> <body class="wy-body-for-nav" role="document"> <div class="wy-grid-for-nav"> <nav data-toggle="wy-nav-shift" class="wy-nav-side"> <div class="wy-side-scroll"> <div class="wy-side-nav-search"> <a href="Manual.html" class="icon icon-home"> LIGGGHTS </a> <div class="version"> v3.X </div> <div role="search"> <form id="rtd-search-form" class="wy-form" action="search.html" method="get"> <input type="text" name="q" placeholder="Search docs" /> <input type="hidden" name="check_keywords" value="yes" /> <input type="hidden" name="area" value="default" /> </form> </div> </div> <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation"> <ul> <li class="toctree-l1"><a class="reference internal" href="Section_intro.html">1. Introduction</a></li> <li class="toctree-l1"><a class="reference internal" href="Section_start.html">2. Getting Started</a></li> <li class="toctree-l1"><a class="reference internal" href="Section_input_script.html">3. Input Script</a></li> <li class="toctree-l1"><a class="reference internal" href="Section_commands.html">4. Commands</a></li> <li class="toctree-l1"><a class="reference internal" href="Section_gran_models.html">5. Contact models</a></li> <li class="toctree-l1"><a class="reference internal" href="Section_mesh_modules.html">6. Mesh modules</a></li> <li class="toctree-l1"><a class="reference internal" href="Section_packages.html">7. 
Packages</a></li> <li class="toctree-l1"><a class="reference internal" href="Section_howto.html">8. How-to discussions</a></li> <li class="toctree-l1"><a class="reference internal" href="Section_modify.html">9. Modifying &amp; extending LIGGGHTS(R)-PUBLIC</a></li> <li class="toctree-l1"><a class="reference internal" href="Section_python.html">10. Python interface to LIGGGHTS(R)-PUBLIC</a></li> <li class="toctree-l1"><a class="reference internal" href="Section_errors.html">11. Errors</a></li> </ul> </div> </div> </nav> <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap"> <nav class="wy-nav-top" role="navigation" aria-label="top navigation"> <i data-toggle="wy-nav-top" class="fa fa-bars"></i> <a href="Manual.html">LIGGGHTS</a> </nav> <div class="wy-nav-content"> <div class="rst-content"> <div role="navigation" aria-label="breadcrumbs navigation"> <ul class="wy-breadcrumbs"> <li><a href="Manual.html">Docs</a> &raquo;</li> <li>thermo command</li> <li class="wy-breadcrumbs-aside"> <a href="_sources/thermo.txt" rel="nofollow"> View page source</a> <a href="http://www.cfdem.com"> Website</a> <a href="Section_commands.html#comm" rel="nofollow"> Commands</a> </li> </ul> <hr/> </div> <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article"> <div itemprop="articleBody"> <div class="section" id="thermo-command"> <span id="index-0"></span><h1>thermo command<a class="headerlink" href="#thermo-command" title="Permalink to this headline">¶</a></h1> <div class="section" id="syntax"> <h2>Syntax<a class="headerlink" href="#syntax" title="Permalink to this headline">¶</a></h2> <div class="highlight-python"><div class="highlight"><pre>thermo N </pre></div> </div> <ul class="simple"> <li>N = output thermodynamics every N timesteps</li> <li>N can be a variable (see below)</li> </ul> </div> <div class="section" id="examples"> <h2>Examples<a class="headerlink" href="#examples" title="Permalink to this headline">¶</a></h2> <div 
class="highlight-python"><div class="highlight"><pre>thermo 100 </pre></div> </div> </div> <div class="section" id="description"> <h2>Description<a class="headerlink" href="#description" title="Permalink to this headline">¶</a></h2> <p>Compute and print thermodynamic info (e.g. temperature, energy, pressure) on timesteps that are a multiple of N and at the beginning and end of a simulation. A value of 0 will only print thermodynamics at the beginning and end.</p> <p>The content and format of what is printed is controlled by the <a class="reference internal" href="thermo_style.html"><em>thermo_style</em></a> and <a class="reference internal" href="thermo_modify.html"><em>thermo_modify</em></a> commands.</p> <p>Instead of a numeric value, N can be specifed as an <a class="reference internal" href="variable.html"><em>equal-style variable</em></a>, which should be specified as v_name, where name is the variable name. In this case, the variable is evaluated at the beginning of a run to determine the next timestep at which thermodynamic info will be written out. On that timestep, the variable will be evaluated again to determine the next timestep, etc. Thus the variable should return timestep values. See the stagger() and logfreq() and stride() math functions for <a class="reference internal" href="variable.html"><em>equal-style variables</em></a>, as examples of useful functions to use in this context. 
Other similar math functions could easily be added as options for <a class="reference internal" href="variable.html"><em>equal-style variables</em></a>.</p> <p>For example, the following commands will output thermodynamic info at timesteps 0,10,20,30,100,200,300,1000,2000,etc:</p> <div class="highlight-python"><div class="highlight"><pre>variable s equal logfreq(10,3,10) thermo v_s </pre></div> </div> </div> <div class="section" id="restrictions"> <h2>Restrictions<a class="headerlink" href="#restrictions" title="Permalink to this headline">¶</a></h2> <blockquote> <div>none</div></blockquote> </div> <div class="section" id="related-commands"> <h2>Related commands<a class="headerlink" href="#related-commands" title="Permalink to this headline">¶</a></h2> <p><a class="reference internal" href="thermo_style.html"><em>thermo_style</em></a>, <a class="reference internal" href="thermo_modify.html"><em>thermo_modify</em></a></p> </div> <div class="section" id="default"> <h2>Default<a class="headerlink" href="#default" title="Permalink to this headline">¶</a></h2> <div class="highlight-python"><div class="highlight"><pre>thermo 0 </pre></div> </div> </div> </div> </div> <div class="articleComments"> </div> </div> <footer> <hr/> <div role="contentinfo"> <p> &copy; Copyright 2016, DCS Computing GmbH, JKU Linz and Sandia Corporation. </p> </div> Built with <a href="http://sphinx-doc.org/">Sphinx</a> using a <a href="https://github.com/snide/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>. 
</footer> </div> </div> </section> </div> <script type="text/javascript"> var DOCUMENTATION_OPTIONS = { URL_ROOT:'./', VERSION:'v3.X', LANGUAGE:'None', COLLAPSE_INDEX:false, FILE_SUFFIX:'.html', HAS_SOURCE: true, SOURCELINK_SUFFIX: '' }; </script> <script type="text/javascript" src="_static/jquery.js"></script> <script type="text/javascript" src="_static/underscore.js"></script> <script type="text/javascript" src="_static/doctools.js"></script> <script type="text/javascript" src="_static/js/theme.js"></script> <script type="text/javascript"> jQuery(function () { SphinxRtdTheme.StickyNav.enable(); }); </script> </body> </html>
aaigner/LIGGGHTS-PUBLIC
doc/thermo.html
HTML
gpl-2.0
8,675
//require <xatajax.ui.tk.js> /** * The ComponentListener interface that should be implemented by any * object that wishes to register to receive notifications from * Components when child components are added or removed. * * @created Feb. 7, 2011 * @author Steve Hannah <steve@weblite.ca> */ (function(){ var ComponentEvent = XataJax.ui.tk.ComponentEvent; /** * Register the Public API */ XataJax.ui.tk.ComponentListener = ComponentListener; /** * Implementation Details Below this line ========= */ var $ = jQuery; /** * An interface for objects that wish to be notified of changes to a component. * @override-params any * * @constructor */ function ComponentListener(o){ XataJax.publicAPI(this, publicAPI); if ( typeof(o) == 'object' ){ $.extend(this, o); } } var publicAPI = { beforeChildAdded: beforeChildAdded, beforeChildRemoved: beforeChildRemoved, childAdded: childAdded, childRemoved: childRemoved, beforeUpdate: beforeUpdate, afterUpdate: afterUpdate }; ComponentListener.prototype = publicAPI; ComponentListener.constructor = ComponentListener; /** * Method called before a child is added to a component. This gives an * opportunity for the listener to veto the add by throwing an exception. * * @param ComponentEvent event * @throws XataJax.Exception */ function beforeChildAdded(event){} /** * Method called before a child is removed from the compoennt. This * gives an opportunity for the listener to veto the removal by * throwing an exception. * * @param {ComponentEvent} event * @throws XataJax.Exception */ function beforeChildRemoved(event){} /** * Method called after a child component has successfully been added to * the component. * * @param ComponentEvent event */ function childAdded(event){} /** * Method called after a child component has successfully been removed from * the component. * * @param ComponentEvent event */ function childRemoved(event){} function beforeUpdate(event){} function afterUpdate(event){} })();
shannah/xataface
modules/XataJax/js/xatajax.ui.tk/ComponentListener.js
JavaScript
gpl-2.0
2,119
<?php

/**
 * WikiScripts library module exposing template/transclusion related
 * functions (parse, arg, named_args, numbered_args, is_transcluded)
 * to interpreted scripts.
 */
class WSTemplateLibrary extends WSLibraryModuleBase {
	/**
	 * Function table consumed by the interpreter. Each entry maps the
	 * script-visible name to [ method name, numeric flag ].
	 * NOTE(review): the numeric element is presumably the minimum argument
	 * count - confirm against WSLibraryModuleBase.
	 */
	public function getFunctions() {
		return array(
			'parse' => array( 'parse', 1 ),
			'arg' => array( 'arg', 1 ),
			'named_args' => array( 'namedArgs', 0 ),
			'numbered_args' => array( 'numberedArgs', 0 ),
			'is_transcluded' => array( 'isTranscluded', 0 ),
		);
	}

	/**
	 * Preprocesses the given wikitext in the current frame (expanding
	 * templates/variables) and returns the result as a string.
	 */
	public function parse( $args, $context, $line ) {
		$text = $args[0]->toString();
		// Push into stack for tracking purposes
		$context->mInterpreter->mCallStack->addParse( $text );
		// Imitate OT_PREPROCESS
		$oldOT = $context->mParser->mOutputType;
		$context->mParser->setOutputType( Parser::OT_PREPROCESS );
		// FIXME: is that legit way to do this?
		$parsed = $context->mParser->replaceVariables( $text, $context->mFrame );
		$parsed = $context->mParser->mStripState->unstripBoth( $parsed );
		$context->mParser->setOutputType( $oldOT );
		$context->mInterpreter->mCallStack->pop();
		return new WSData( WSData::DString, $parsed );
	}

	/**
	 * Returns the value of the named template argument, or the optional
	 * second argument (default: empty WSData) when it is not set.
	 */
	public function arg( $args, $context, $line ) {
		$argName = $args[0]->toString();
		$default = isset( $args[1] ) ? $args[1] : new WSData();
		if( $context->mFrame->getArgument( $argName ) === false )
			return $default;
		else
			return new WSData( WSData::DString, $context->mFrame->getArgument( $argName ) );
	}

	/** Returns all named template arguments of the current frame. */
	public function namedArgs( $args, $context, $line ) {
		return WSData::newFromPHPVar( $context->mFrame->getNamedArguments() );
	}

	/** Returns all numbered template arguments of the current frame. */
	public function numberedArgs( $args, $context, $line ) {
		return WSData::newFromPHPVar( $context->mFrame->getNumberedArguments() );
	}

	/** Returns true when the current frame is a template transclusion. */
	public function isTranscluded( $args, $context, $line ) {
		return new WSData( WSData::DBool, $context->mFrame->isTemplate() );
	}
}
SuriyaaKudoIsc/wikia-app-test
extensions/WikiScripts/interpreter/lib/Template.php
PHP
gpl-2.0
1,698
/*
 * $Id$
 *
 * Copyright 2008 Glencoe Software, Inc. All rights reserved.
 * Use is subject to license terms supplied in LICENSE.txt
 *
 */

#include <omero/fixture.h>
#include <omero/callbacks.h>
#include <omero/all.h>
#include <string>
#include <map>

using namespace std;
using namespace omero;
using namespace omero::api;
using namespace omero::api::_cpp_delete;
using namespace omero::callbacks;
using namespace omero::model;
using namespace omero::rtypes;
using namespace omero::sys;

// Integration test: saves a minimal Image, queues a delete command for it
// and polls the returned handle until the deletion finishes.
TEST(DeleteTest, testSimpleDelete ) {
    Fixture f;
    client_ptr c = f.login();
    ServiceFactoryPrx sf = c->getSession();

    IQueryPrx iquery = sf->getQueryService();
    IUpdatePrx iupdate = sf->getUpdateService();
    IDeletePrx idelete = sf->getDeleteService();

    // Create and persist a throw-away image to delete.
    ImagePtr image = new ImageI();
    image->setName( rstring("testSimpleDelete") );
    image->setAcquisitionDate( rtime(0) );
    image = ImagePtr::dynamicCast( iupdate->saveAndReturnObject( image ) );

    // Build a single delete command targeting the image by type/id.
    std::map<string, string> options;
    DeleteCommands dcs;
    DeleteCommand dc;
    dc.type = "/Image";
    dc.id = image->getId()->getValue();
    dc.options = options;
    dcs.push_back(dc);

    // Queue the delete and wait for completion: up to 10 polls, 500 ms apart.
    DeleteHandlePrx handle = idelete->queueDelete( dcs );
    DeleteCallbackIPtr cb = new DeleteCallbackI(c->getObjectAdapter(), handle);
    cb->loop(10, 500);
}
rleigh-dundee/openmicroscopy
components/tools/OmeroCpp/test/integration/delete.cpp
C++
gpl-2.0
1,340
-- Create the parameter class.
local class = require 'pl.class'
local Parameter = require 'parameter'
local Parameter_UINT16 = class(Parameter)

--- A test parameter holding an unsigned 16 bit integer value.
function Parameter_UINT16:_init(strOwner, strName, strHelp, tLogWriter, strLogLevel)
  self:super(strOwner, strName, strHelp, tLogWriter, strLogLevel)
end

--- Validate a raw value as an unsigned 16 bit integer.
-- @param tValue the raw value (usually a string) to validate
-- @return fIsValid, tValidatedValue, strMessage - on success the validated
--         number and nil message; on failure false, nil and an error text
function Parameter_UINT16:__validate(tValue)
  local fIsValid = false
  local tValidatedValue
  local strMessage = nil

  local ulValue = tonumber(tValue)
  if ulValue==nil then
    strMessage = string.format('The value %s could not be converted to a number.', tostring(tValue))
  elseif ulValue % 1 ~= 0 then
    -- Bug fix: reject fractional inputs such as "1.5"; tonumber() converts
    -- them successfully, but they are not valid UINT16 values.
    strMessage = string.format('The value %s is not an integer.', tostring(tValue))
  elseif ulValue<0 or ulValue>0xffff then
    strMessage = string.format('The value %d is not in the allowed range of [0,65535].', ulValue)
  else
    fIsValid = true
    tValidatedValue = ulValue
  end

  return fIsValid, tValidatedValue, strMessage
end

return Parameter_UINT16
muhkuh-sys/muhkuh_tester
lua/parameter_uint16.lua
Lua
gpl-2.0
863
/* * Copyright (c) 2012-2015 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * This file was originally distributed by Qualcomm Atheros, Inc. * under proprietary terms before Copyright ownership was assigned * to the Linux Foundation. */ /*=========================================================================== W L A N _ Q C T _ T L . C OVERVIEW: This software unit holds the implementation of the WLAN Transport Layer. The functions externalized by this module are to be called ONLY by other WLAN modules that properly register with the Transport Layer initially. DEPENDENCIES: Are listed for each API below. ===========================================================================*/ /*=========================================================================== EDIT HISTORY FOR FILE This section contains comments describing changes made to the module. Notice that changes are listed in reverse chronological order. $Header$$DateTime$$Author$ when who what, where, why ---------- --- -------------------------------------------------------- 2010-07-13 c_shinde Fixed an issue where WAPI rekeying was failing because WAI frame sent out during rekeying had the protected bit set to 1. 
2010-05-06 rnair Changed name of variable from usLlcType to usEtherType Changed function name from GetLLCType to GetEtherType Fixed 802.3 to 802.11 frame translation issue where two bytes of the LLC header was getting overwritten in the non-Qos path 2010-05-06 rnair RxAuth path fix for modifying the header before ether type is retreived (Detected while testing rekeying in WAPI Volans) 2010-02-19 bad Fixed 802.11 to 802.3 ft issues with WAPI 2010-02-19 rnair WAPI: If frame is a WAI frame in TxConn and TxAuth, TL does frame translation. 2010-02-01 rnair WAPI: Fixed a bug where the value of ucIsWapiSta was not being set in the TL control block in the RegisterSTA func. 2010-01-08 lti Added TL Data Caching 2009-11-04 rnair WAPI: Moving common functionality to a seperate function called WLANTL_GetLLCType 2009-10-15 rnair WAPI: Featurizing WAPI code 2009-10-09 rnair WAPI: Modifications to authenticated state handling of Rx data 2009-10-06 rnair Adding support for WAPI 2009-09-22 lti Add deregistration API for management client 2009-07-16 rnair Temporary fix to let TL fetch packets when multiple peers exist in an IBSS 2009-06-10 lti Fix for checking TID value of meta info on TX - prevent memory overwrite Fix for properly checking the sta id for resuming trigger frame generation 2009-05-14 lti Fix for sending out trigger frames 2009-05-15 lti Addr3 filtering 2009-04-13 lti Assert if packet larger then allowed Drop packet that fails flatten 2009-04-02 lti Performance fixes for TL 2009-02-19 lti Added fix for LLC management on Rx Connect 2009-01-16 lti Replaced peek data with extract data for non BD opertions Extracted frame control in Tl and pass to HAL for frame type evaluation 2009-02-02 sch Add handoff support 2008-12-09 lti Fixes for AMSS compilation Removed assert on receive when there is no station 2008-12-02 lti Fix fo trigger frame generation 2008-10-31 lti Fix fo TL tx suspend 2008-10-01 lti Merged in fixes from reordering Disabled part of UAPSD functionality in TL 
(will be re-enabled once UAPSD is tested) Fix for UAPSD multiple enable 2008-08-10 lti Fixes following UAPSD testing Fixed infinite loop on mask computation when STA no reg 2008-08-06 lti Fixes after QOS unit testing 2008-08-06 lti Added QOS support 2008-07-23 lti Fix for vos packet draining 2008-07-17 lti Fix for data type value Added frame translation code in TL Avoid returning failure to PE in case previous frame is still pending; fail previous and cache new one for tx Get frames returning boolean true if more frames are pending 2008-07-03 lti Fixes following pre-integration testing 2008-06-26 lti Fixes following unit testing Added alloc and free for TL context Using atomic set u8 instead of u32 2008-05-16 lti Created module ===========================================================================*/ /*---------------------------------------------------------------------------- * Include Files * -------------------------------------------------------------------------*/ #include "wlan_qct_tl.h" #include "wlan_qct_wda.h" #include "wlan_qct_tli.h" #include "wlan_qct_tli_ba.h" #include "wlan_qct_tl_hosupport.h" #include "vos_types.h" #include "vos_trace.h" #include "wlan_qct_tl_trace.h" #include "tlDebug.h" #include "cfgApi.h" #ifdef FEATURE_WLAN_WAPI /*Included to access WDI_RxBdType */ #include "wlan_qct_wdi_bd.h" #endif /*Enables debugging behavior in TL*/ #define TL_DEBUG /*Enables debugging FC control frame in TL*/ //#define TL_DEBUG_FC //#define WLAN_SOFTAP_FLOWCTRL_EN //#define BTAMP_TEST #ifdef TL_DEBUG_FC #include <wlan_qct_pal_status.h> #include <wlan_qct_pal_device.h> // wpalReadRegister #endif /*---------------------------------------------------------------------------- * Preprocessor Definitions and Constants * -------------------------------------------------------------------------*/ /*LLC header value*/ static v_U8_t WLANTL_LLC_HEADER[] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00 }; #ifdef FEATURE_WLAN_ESE /*Aironet SNAP header value*/ static v_U8_t 
WLANTL_AIRONET_SNAP_HEADER[] = {0xAA, 0xAA, 0x03, 0x00, 0x40, 0x96, 0x00, 0x00 }; #endif //FEATURE_WLAN_ESE /*BT-AMP packet LLC OUI value*/ const v_U8_t WLANTL_BT_AMP_OUI[] = {0x00, 0x19, 0x58 }; #ifdef WLAN_FEATURE_LINK_LAYER_STATS extern const v_U8_t WLANTL_TID_2_AC[WLAN_MAX_TID]; #endif #define WLANTL_MAX_SNR_DATA_SAMPLES 20 #ifdef VOLANS_PERF #define WLANTL_BD_PDU_INTERRUPT_ENABLE_THRESHOLD 120 #define WLANTL_BD_PDU_INTERRUPT_GET_THRESHOLD 120 /* TL BD/PDU threshold to enable interrupt */ int bdPduInterruptEnableThreshold = WLANTL_BD_PDU_INTERRUPT_ENABLE_THRESHOLD; int bdPduInterruptGetThreshold = WLANTL_BD_PDU_INTERRUPT_GET_THRESHOLD; #endif /* VOLANS_PERF */ /*-----------------------------------* | Type(2b) | Sub-type(4b) | *-----------------------------------*/ #define WLANTL_IS_DATA_FRAME(_type_sub) \ ( WLANTL_DATA_FRAME_TYPE == ( (_type_sub) & 0x30 )) #define WLANTL_IS_QOS_DATA_FRAME(_type_sub) \ (( WLANTL_DATA_FRAME_TYPE == ( (_type_sub) & 0x30 )) && \ ( WLANTL_80211_DATA_QOS_SUBTYPE == ( (_type_sub) & 0xF ))) #define WLANTL_IS_MGMT_FRAME(_type_sub) \ ( WLANTL_MGMT_FRAME_TYPE == ( (_type_sub) & 0x30 )) #define WLANTL_IS_MGMT_ACTION_FRAME(_type_sub) \ (( WLANTL_MGMT_FRAME_TYPE == ( (_type_sub) & 0x30 )) && \ ( ( WLANTL_80211_MGMT_ACTION_SUBTYPE == ( (_type_sub) & 0xF )) || \ ( WLANTL_80211_MGMT_ACTION_NO_ACK_SUBTYPE == ( (_type_sub) & 0xF )))) #define WLANTL_IS_PROBE_REQ(_type_sub) \ ( WLANTL_MGMT_PROBE_REQ_FRAME_TYPE == ( (_type_sub) & 0x3F )) #define WLANTL_IS_CTRL_FRAME(_type_sub) \ ( WLANTL_CTRL_FRAME_TYPE == ( (_type_sub) & 0x30 )) #ifdef FEATURE_WLAN_TDLS #define WLANTL_IS_TDLS_FRAME(_eth_type) \ ( WLANTL_LLC_TDLS_TYPE == ( _eth_type)) #endif /*MAX Allowed len processed by TL - MAx MTU + 802.3 header + BD+DXE+XTL*/ #define WLANTL_MAX_ALLOWED_LEN (1514 + 100) #define WLANTL_DATA_FLOW_MASK 0x0F //some flow_control define //LWM mode will be enabled for this station if the egress/ingress falls below this ratio #define WLANTL_LWM_EGRESS_INGRESS_THRESHOLD 
(0.75)

//Get enough sample to do the LWM related calculation
#define WLANTL_LWM_INGRESS_SAMPLE_THRESHOLD (64)

//Maximal on-fly packet per station in LWM mode
#define WLANTL_STA_BMU_THRESHOLD_MAX (256)

#define WLANTL_AC_MASK (0x7)
#define WLANTL_STAID_OFFSET (0x6)

/* UINT32 type endian swap */
#define SWAP_ENDIAN_UINT32(a)          ((a) = ((a) >> 0x18 ) |(((a) & 0xFF0000) >> 0x08) | \
                                       (((a) & 0xFF00) << 0x08)  | (((a) & 0xFF) << 0x18))

/* Maximum value of SNR that can be calculated by the HW */
#define WLANTL_MAX_HW_SNR 35

/* Modes for per-STA ARP rate handling (see arpRate in the STA client) */
#define DISABLE_ARP_TOGGLE 0
#define ENABLE_ARP_TOGGLE  1
#define SEND_ARP_ON_WQ5    2

/*----------------------------------------------------------------------------
 *  Type Declarations
 * -------------------------------------------------------------------------*/
#define TL_LITTLE_BIT_ENDIAN

/* 802.11 Frame Control field, mapped as bit-fields.
   NOTE(review): bit-field ordering within a byte is implementation-defined
   (see TL_LITTLE_BIT_ENDIAN above) — this layout assumes the compiler packs
   LSB-first; confirm for any new toolchain. */
typedef struct
{
  v_U8_t protVer :2;
  v_U8_t type :2;
  v_U8_t subType :4;

  v_U8_t toDS :1;
  v_U8_t fromDS :1;
  v_U8_t moreFrag :1;
  v_U8_t retry :1;
  v_U8_t powerMgmt :1;
  v_U8_t moreData :1;
  v_U8_t wep :1;
  v_U8_t order :1;
} WLANTL_MACFCType;

/* 802.11 header */
typedef struct
{
 /* Frame control field */
 WLANTL_MACFCType  wFrmCtrl;

 /* Duration ID */
 v_U16_t  usDurationId;

 /* Address 1 field  */
 v_U8_t   vA1[VOS_MAC_ADDR_SIZE];

 /* Address 2 field */
 v_U8_t   vA2[VOS_MAC_ADDR_SIZE];

 /* Address 3 field */
 v_U8_t   vA3[VOS_MAC_ADDR_SIZE];

 /* Sequence control field */
 v_U16_t  usSeqCtrl;

// Find the size of the mandatory header size.
// NOTE: this #define is deliberately placed inside the struct so it covers
// only the members declared above it (FC, duration, A1-A3, seq ctrl); the
// A4 and QoS fields below are optional and excluded from the mandatory size.
#define WLAN80211_MANDATORY_HEADER_SIZE \
    (sizeof(WLANTL_MACFCType) + sizeof(v_U16_t) + \
    (3 * (sizeof(v_U8_t) * VOS_MAC_ADDR_SIZE)) +  \
    sizeof(v_U16_t))

 /* Optional A4 address */
 v_U8_t   optvA4[VOS_MAC_ADDR_SIZE];

 /* Optional QOS control field */
 v_U16_t  usQosCtrl;
}WLANTL_80211HeaderType;

/* 802.3 header */
typedef struct
{
 /* Destination address field */
 v_U8_t   vDA[VOS_MAC_ADDR_SIZE];

 /* Source address field */
 v_U8_t   vSA[VOS_MAC_ADDR_SIZE];

 /* Length field */
 v_U16_t  usLenType;
}WLANTL_8023HeaderType;

/*----------------------------------------------------------------------------
 * Global Data Definitions
 * -------------------------------------------------------------------------*/
#define WLAN_TL_INVALID_U_SIG 255
#define WLAN_TL_INVALID_B_SIG 255

#define ENTER() VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, \
    "Enter:%s", __func__)

/* Collapse the per-queue AC mask array of _pSTA into a single bit mask,
   one bit per TX queue (i is a scratch loop variable supplied by caller) */
#define WLAN_TL_AC_ARRAY_2_MASK( _pSTA, _ucACMask, i ) \
  do\
  {\
    _ucACMask = 0; \
    for ( i = 0; i < WLANTL_NUM_TX_QUEUES; i++ ) \
    { \
      if ( 0 != (_pSTA)->aucACMask[i] ) \
      { \
        _ucACMask |= ( 1 << i ); \
      } \
    } \
  } while (0);

/*----------------------------------------------------------------------------
 * Static Variable Definitions
 * -------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
 * Static Function Declarations and Definitions
 * -------------------------------------------------------------------------*/
/* Extracts the EtherType/LLC type from a received frame; defined later in
   this file. */
static VOS_STATUS
WLANTL_GetEtherType
(
  v_U8_t               * aucBDHeader,
  vos_pkt_t            * vosDataBuff,
  v_U8_t                 ucMPDUHLen,
  v_U16_t              * usEtherType
);

/*----------------------------------------------------------------------------
 * Externalized Function Definitions
* -------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
 * Function Declarations and Documentation
 * -------------------------------------------------------------------------*/
/*==========================================================================

  FUNCTION    WLANTL_FreeClientMemory

  DESCRIPTION
    It frees up the memory allocated to all the STA clients in TLCB block
    Can be called inside Close, Stop or when some FAULT occurs

  DEPENDENCIES

  PARAMETERS

    IN
    pClientSTA:    Pointer to the global client pointer array

  RETURN VALUE

  SIDE EFFECTS
    Every entry of the array is NULL-ed after being freed, so the function
    is safe to call more than once on the same array.

============================================================================*/
void WLANTL_FreeClientMemory
(WLANTL_STAClientType* pClientSTA[WLAN_MAX_STA_COUNT])
{
    v_U32_t i = 0;
    for(i =0; i < WLAN_MAX_STA_COUNT; i++)
    {
        if( NULL != pClientSTA[i] )
        {
            vos_mem_free(pClientSTA[i]);
        }
        /* NULL out regardless so stale pointers cannot be reused */
        pClientSTA[i] = NULL;
    }
    return;
}

/*==========================================================================

  FUNCTION    WLANTL_Open

  DESCRIPTION
    Called by HDD at driver initialization. TL will initialize all its
    internal resources and will wait for the call to start to register
    with the other modules.

  DEPENDENCIES

  PARAMETERS

    IN
    pvosGCtx:       pointer to the global vos context; a handle to TL's
                    control block can be extracted from its context
    pTLConfig:      TL Configuration

  RETURN VALUE
    The result code associated with performing the operation

    VOS_STATUS_E_FAULT:  pointer to TL cb is NULL ; access would cause a
                         page fault
    VOS_STATUS_SUCCESS:  Everything is good :)

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_Open
(
  v_PVOID_t               pvosGCtx,
  WLANTL_ConfigInfoType*  pTLConfig
)
{
   WLANTL_CbType*  pTLCb = NULL;
   v_U8_t          ucIndex;
   tHalHandle      smeContext;
   v_U32_t i = 0;
#if defined WLAN_FEATURE_NEIGHBOR_ROAMING
   VOS_STATUS      status = VOS_STATUS_SUCCESS;
#endif
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Sanity check
    Extract TL control block
   ------------------------------------------------------------------------*/
  ENTER();
  /* NOTE(review): vos_alloc_context() return status is not checked; a
     failure is only caught indirectly by the NULL check on pTLCb below. */
  vos_alloc_context( pvosGCtx, VOS_MODULE_ID_TL, (void*)&pTLCb,
                     sizeof(WLANTL_CbType));
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if (( NULL == pTLCb ) || ( NULL == pTLConfig ) )
  {
    /* NOTE(review): if pTLCb was allocated but pTLConfig is NULL, this
       early return leaks the freshly allocated TL context — TODO confirm
       whether callers guarantee pTLConfig != NULL. */
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_FATAL,
               "WLAN TL: Invalid input pointer on WLANTL_Open TL %p Config %p", pTLCb, pTLConfig ));
    return VOS_STATUS_E_FAULT;
  }

  /* Set the default log level to VOS_TRACE_LEVEL_ERROR */
  vos_trace_setLevel(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR);

  smeContext = vos_get_context(VOS_MODULE_ID_SME, pvosGCtx);
  if ( NULL == smeContext )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
            "%s: Invalid smeContext", __func__));
    return VOS_STATUS_E_FAULT;
  }

  /* Zero out the memory so we are OK, when CleanCB is called.*/
  vos_mem_zero((v_VOID_t *)pTLCb, sizeof(WLANTL_CbType));

  /*------------------------------------------------------------------------
    Clean up TL control block, initialize all values
   ------------------------------------------------------------------------*/
  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
           "WLAN TL:WLANTL_Open"));

  /* Pre-allocate client blocks only for the first WLAN_NON32_STA_COUNT
     entries; the remaining slots stay NULL and are allocated on demand in
     WLANTL_RegisterSTAClient(). */
  for ( i =0; i<WLAN_MAX_STA_COUNT; i++ )
  {
     if ( i < WLAN_NON32_STA_COUNT )
     {
        pTLCb->atlSTAClients[i] = vos_mem_malloc(sizeof(WLANTL_STAClientType));
        /* Allocating memory for LEGACY STA COUNT so as to avoid regression
           issues. */
        if ( NULL == pTLCb->atlSTAClients[i] )
        {
           TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
                  "WLAN TL: StaClient allocation failed"));
           /* Unwind everything allocated so far before bailing out */
           WLANTL_FreeClientMemory(pTLCb->atlSTAClients);
           vos_free_context(pvosGCtx, VOS_MODULE_ID_TL, pTLCb);
           return VOS_STATUS_E_FAULT;
        }
        vos_mem_zero((v_VOID_t *) pTLCb->atlSTAClients[i],
                     sizeof(WLANTL_STAClientType));
     }
     else
     {
        pTLCb->atlSTAClients[i] = NULL;
     }
  }

  pTLCb->reorderBufferPool = vos_mem_vmalloc(sizeof(WLANTL_REORDER_BUFFER_T) *
                                             WLANTL_MAX_BA_SESSION);
  if (NULL == pTLCb->reorderBufferPool)
  {
    TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
           "WLAN TL: Reorder buffer allocation failed"));
    WLANTL_FreeClientMemory(pTLCb->atlSTAClients);
    vos_free_context(pvosGCtx, VOS_MODULE_ID_TL, pTLCb);
    return VOS_STATUS_E_FAULT;
  }

  vos_mem_zero((v_VOID_t *)pTLCb->reorderBufferPool,
               sizeof(WLANTL_REORDER_BUFFER_T) * WLANTL_MAX_BA_SESSION);

  WLANTL_CleanCB(pTLCb, 0 /*do not empty*/);

  /* Copy the per-queue AC weights and per-AC reorder aging times from the
     supplied configuration */
  for ( ucIndex = 0; ucIndex < WLANTL_NUM_TX_QUEUES ; ucIndex++)
  {
    pTLCb->tlConfigInfo.ucAcWeights[ucIndex] = pTLConfig->ucAcWeights[ucIndex];
  }

  for ( ucIndex = 0; ucIndex < WLANTL_MAX_AC ; ucIndex++)
  {
    pTLCb->tlConfigInfo.ucReorderAgingTime[ucIndex] =
                                       pTLConfig->ucReorderAgingTime[ucIndex];
  }

  // scheduling init to be the last one of previous round
  pTLCb->uCurServedAC = WLANTL_AC_BK;
  pTLCb->ucCurLeftWeight = 1;
  pTLCb->ucCurrentSTA = WLAN_MAX_STA_COUNT-1;

#if 0
  //flow control field init
  vos_mem_zero(&pTLCb->tlFCInfo, sizeof(tFcTxParams_type));
  //bit 0: set (Bd/pdu count) bit 1: set (request station PS change notification)
  pTLCb->tlFCInfo.fcConfig = 0x1;
#endif

  pTLCb->vosTxFCBuf = NULL;
  pTLCb->tlConfigInfo.uMinFramesProcThres = pTLConfig->uMinFramesProcThres;

#ifdef FEATURE_WLAN_TDLS
  pTLCb->ucTdlsPeerCount = 0;
#endif

  pTLCb->tlConfigInfo.uDelayedTriggerFrmInt = pTLConfig->uDelayedTriggerFrmInt;

  /*------------------------------------------------------------------------
    Allocate internal resources
   ------------------------------------------------------------------------*/
  vos_pkt_get_packet(&pTLCb->vosDummyBuf, VOS_PKT_TYPE_RX_RAW, 1, 1,
                     1/*true*/,NULL, NULL);

  WLANTL_InitBAReorderBuffer(pvosGCtx);

#if defined WLAN_FEATURE_NEIGHBOR_ROAMING
  /* Initialize Handoff support modue
   * RSSI measure and Traffic state monitoring */
  status = WLANTL_HSInit(pvosGCtx);
  if(!VOS_IS_STATUS_SUCCESS(status))
  {
    TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
           "Handoff support module init fail"));
    WLANTL_FreeClientMemory(pTLCb->atlSTAClients);
    vos_mem_vfree(pTLCb->reorderBufferPool);
    vos_free_context(pvosGCtx, VOS_MODULE_ID_TL, pTLCb);
    return status;
  }
#endif

  pTLCb->isBMPS = VOS_FALSE;
  pmcRegisterDeviceStateUpdateInd( smeContext,
                                   WLANTL_PowerStateChangedCB, pvosGCtx );

  return VOS_STATUS_SUCCESS;
}/* WLANTL_Open */

/*==========================================================================

  FUNCTION    WLANTL_Start

  DESCRIPTION
    Called by HDD as part of the overall start procedure. TL will use this
    call to register with BAL as a transport layer entity.

  DEPENDENCIES

  PARAMETERS

    IN
    pvosGCtx:       pointer to the global vos context; a handle to TL's
                    control block can be extracted from its context

  RETURN VALUE
    The result code associated with performing the operation

    VOS_STATUS_E_FAULT:  pointer to TL cb is NULL ; access would cause a
                         page fault
    VOS_STATUS_SUCCESS:  Everything is good :)

    Other codes can be returned as a result of a BAL failure; see BAL API
    for more info

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_Start
(
  v_PVOID_t  pvosGCtx
)
{
   WLANTL_CbType*  pTLCb      = NULL;
   v_U32_t         uResCount = WDA_TLI_MIN_RES_DATA;
   VOS_STATUS      vosStatus;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Sanity check
    Extract TL control block
   ------------------------------------------------------------------------*/
  ENTER();
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
          "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_Start"));
    return VOS_STATUS_E_FAULT;
  }

  /*------------------------------------------------------------------------
    Register with WDA as transport layer client
    Request resources for tx from bus
   ------------------------------------------------------------------------*/
  /* NOTE(review): the "WLAN TL:WLAN TL:" prefix below is duplicated in the
     original log string; kept byte-identical here since external tooling
     may grep for it. */
  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
        "WLAN TL:WLAN TL:WLANTL_Start"));

  tlTraceInit();
  vosStatus = WDA_DS_Register( pvosGCtx,
                          WLANTL_TxComp,
                          WLANTL_RxFrames,
                          WLANTL_GetFrames,
                          WLANTL_ResourceCB,
                          WDA_TLI_MIN_RES_DATA,
                          pvosGCtx,
                          &uResCount );

  if ( VOS_STATUS_SUCCESS != vosStatus )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
          "WLAN TL:TL failed to register with BAL/WDA, Err: %d",
           vosStatus));
    return vosStatus;
  }

  /* Enable transmission */
  vos_atomic_set_U8( &pTLCb->ucTxSuspended, 0);

  pTLCb->uResCount = uResCount;
  return VOS_STATUS_SUCCESS;
}/* WLANTL_Start */
/*========================================================================== FUNCTION WLANTL_Stop DESCRIPTION Called by HDD to stop operation in TL, before close. TL will suspend all frame transfer operation and will wait for the close request to clean up its resources. DEPENDENCIES PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to TL's control block can be extracted from its context RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to TL cb is NULL ; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANTL_Stop ( v_PVOID_t pvosGCtx ) { WLANTL_CbType* pTLCb = NULL; v_U8_t ucIndex; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Sanity check Extract TL control block ------------------------------------------------------------------------*/ ENTER(); pTLCb = VOS_GET_TL_CB(pvosGCtx); if ( NULL == pTLCb ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_ChangeSTAState")); return VOS_STATUS_E_FAULT; } /*------------------------------------------------------------------------ Stop TL and empty Station list ------------------------------------------------------------------------*/ TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL:WLANTL_Stop")); /* Disable transmission */ vos_atomic_set_U8( &pTLCb->ucTxSuspended, 1); if ( NULL != pTLCb->tlMgmtFrmClient.vosPendingDataBuff ) { vos_pkt_return_packet(pTLCb->tlMgmtFrmClient.vosPendingDataBuff); pTLCb->tlMgmtFrmClient.vosPendingDataBuff = NULL; } if ( NULL != pTLCb->tlBAPClient.vosPendingDataBuff ) { vos_pkt_return_packet(pTLCb->tlBAPClient.vosPendingDataBuff); pTLCb->tlBAPClient.vosPendingDataBuff = NULL; } #if defined WLAN_FEATURE_NEIGHBOR_ROAMING 
if(VOS_STATUS_SUCCESS != WLANTL_HSStop(pvosGCtx)) { TLLOGW(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN, "Handoff Support module stop fail")); } #endif /*------------------------------------------------------------------------- Clean client stations -------------------------------------------------------------------------*/ for ( ucIndex = 0; ucIndex < WLAN_MAX_STA_COUNT; ucIndex++) { if ( NULL != pTLCb->atlSTAClients[ucIndex] ) { WLANTL_CleanSTA(pTLCb->atlSTAClients[ucIndex], 1 /*empty all queues*/); } } return VOS_STATUS_SUCCESS; }/* WLANTL_Stop */ /*========================================================================== FUNCTION WLANTL_Close DESCRIPTION Called by HDD during general driver close procedure. TL will clean up all the internal resources. DEPENDENCIES PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to TL's control block can be extracted from its context RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to TL cb is NULL ; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANTL_Close ( v_PVOID_t pvosGCtx ) { WLANTL_CbType* pTLCb = NULL; tHalHandle smeContext; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Sanity check Extract TL control block ------------------------------------------------------------------------*/ ENTER(); pTLCb = VOS_GET_TL_CB(pvosGCtx); if ( NULL == pTLCb ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_ChangeSTAState")); return VOS_STATUS_E_FAULT; } /*------------------------------------------------------------------------ Deregister from PMC ------------------------------------------------------------------------*/ smeContext = vos_get_context(VOS_MODULE_ID_SME, 
pvosGCtx); if ( NULL == smeContext ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "%s: Invalid smeContext", __func__)); // continue so that we can cleanup as much as possible } else { pmcDeregisterDeviceStateUpdateInd( smeContext, WLANTL_PowerStateChangedCB ); } #if defined WLAN_FEATURE_NEIGHBOR_ROAMING if(VOS_STATUS_SUCCESS != WLANTL_HSDeInit(pvosGCtx)) { TLLOGW(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN, "Handoff Support module DeInit fail")); } #endif /*------------------------------------------------------------------------ Cleanup TL control block. ------------------------------------------------------------------------*/ TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL: WLANTL_Close")); WLANTL_CleanCB(pTLCb, 1 /* empty queues/lists/pkts if any*/); WLANTL_FreeClientMemory(pTLCb->atlSTAClients); vos_mem_vfree(pTLCb->reorderBufferPool); /*------------------------------------------------------------------------ Free TL context from VOSS global ------------------------------------------------------------------------*/ vos_free_context(pvosGCtx, VOS_MODULE_ID_TL, pTLCb); return VOS_STATUS_SUCCESS; }/* WLANTL_Close */ /*---------------------------------------------------------------------------- INTERACTION WITH HDD ---------------------------------------------------------------------------*/ /*========================================================================== FUNCTION WLANTL_ConfigureSwFrameTXXlationForAll DESCRIPTION Function to disable/enable frame translation for all association stations. DEPENDENCIES PARAMETERS IN pvosGCtx: VOS context EnableFrameXlation TRUE means enable SW translation for all stations. . RETURN VALUE void. 
============================================================================*/ void WLANTL_ConfigureSwFrameTXXlationForAll ( v_PVOID_t pvosGCtx, v_BOOL_t enableFrameXlation ) { v_U8_t ucIndex; /*------------------------------------------------------------------------ Extract TL control block ------------------------------------------------------------------------*/ WLANTL_CbType* pTLCb = VOS_GET_TL_CB(pvosGCtx); WLANTL_STAClientType* pClientSTA = NULL; if ( NULL == pTLCb ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL pointer from pvosGCtx on " "WLANTL_ConfigureSwFrameTXXlationForAll")); return; } TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, "WLANTL_ConfigureSwFrameTXXlationForAll: Configure SW frameXlation %d", enableFrameXlation)); for ( ucIndex = 0; ucIndex < WLAN_MAX_TID; ucIndex++) { pClientSTA = pTLCb->atlSTAClients[ucIndex]; if ( NULL != pClientSTA && 0 != pClientSTA->ucExists ) { #ifdef WLAN_SOFTAP_VSTA_FEATURE // if this station was not allocated resources to perform HW-based // TX frame translation then force SW-based TX frame translation // otherwise use the frame translation supplied by the client if (!WDA_IsHwFrameTxTranslationCapable(pvosGCtx, ucIndex)) { pClientSTA->wSTADesc.ucSwFrameTXXlation = 1; } else #endif pClientSTA->wSTADesc.ucSwFrameTXXlation = enableFrameXlation; } } } /*=========================================================================== FUNCTION WLANTL_StartForwarding DESCRIPTION This function is used to ask serialization through TX thread of the cached frame forwarding (if statation has been registered in the mean while) or flushing (if station has not been registered by the time) In case of forwarding, upper layer is only required to call WLANTL_RegisterSTAClient() and doesn't need to call this function explicitly. TL will handle this inside WLANTL_RegisterSTAClient(). 
In case of flushing, upper layer is required to call this function explicitly DEPENDENCIES TL must have been initialized before this gets called. PARAMETERS ucSTAId: station id RETURN VALUE The result code associated with performing the operation Please check return values of vos_tx_mq_serialize. SIDE EFFECTS If TL was asked to perform WLANTL_CacheSTAFrame() in WLANTL_RxFrames(), either WLANTL_RegisterSTAClient() or this function must be called within reasonable time. Otherwise, TL will keep cached vos buffer until one of this function is called, and may end up with system buffer exhasution. It's an upper layer's responsibility to call this function in case of flushing ============================================================================*/ VOS_STATUS WLANTL_StartForwarding ( v_U8_t ucSTAId, v_U8_t ucUcastSig, v_U8_t ucBcastSig ) { vos_msg_t sMessage; v_U32_t uData; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ /* Signal the OS to serialize our event */ VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, "Serializing TL Start Forwarding Cached for control STA %d", ucSTAId ); vos_mem_zero( &sMessage, sizeof(vos_msg_t) ); uData = ucSTAId | (ucUcastSig << 8 ) | (ucBcastSig << 16); sMessage.bodyval = uData; sMessage.type = WLANTL_RX_FWD_CACHED; return vos_rx_mq_serialize(VOS_MQ_ID_TL, &sMessage); } /* WLANTL_StartForwarding() */ /*=========================================================================== FUNCTION WLANTL_EnableCaching DESCRIPTION This function is used to enable caching only when assoc/reassoc req is send. that is cache packets only for such STA ID. DEPENDENCIES TL must have been initialized before this gets called. 
PARAMETERS staId: station id RETURN VALUE none ============================================================================*/ void WLANTL_EnableCaching(v_U8_t staId) { v_PVOID_t pvosGCtx= vos_get_global_context(VOS_MODULE_ID_TL,NULL); WLANTL_CbType* pTLCb = VOS_GET_TL_CB(pvosGCtx); if ( NULL == pTLCb ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL pointer from pvosGCtx on " "WLANTL_EnableCaching")); return; } pTLCb->atlSTAClients[staId]->enableCaching = 1; } /*=========================================================================== FUNCTION WLANTL_AssocFailed DESCRIPTION This function is used by PE to notify TL that cache needs to flushed' when association is not successfully completed Internally, TL post a message to TX_Thread to serialize the request to keep lock-free mechanism. DEPENDENCIES TL must have been initialized before this gets called. PARAMETERS ucSTAId: station id RETURN VALUE none SIDE EFFECTS There may be race condition that PE call this API and send another association request immediately with same staId before TX_thread can process the message. To avoid this, we might need PE to wait for TX_thread process the message, but this is not currently implemented. 
============================================================================*/
void WLANTL_AssocFailed(v_U8_t staId)
{
  // flushing frames and forwarding frames uses the same message
  // the only difference is what happens when the message is processed
  // if the STA exist, the frames will be forwarded
  // and if it doesn't exist, the frames will be flushed
  // in this case we know it won't exist so the DPU index signature values
  // don't matter (hence the 0,0 below)
  MTRACE(vos_trace(VOS_MODULE_ID_TL, TRACE_CODE_TL_ASSOC_FAILED,
                   staId, 0));
  if(!VOS_IS_STATUS_SUCCESS(WLANTL_StartForwarding(staId,0,0)))
  {
    /* Best-effort: log and continue; the cached frames stay queued until a
       later forwarding/flush request succeeds */
    VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
       " %s fails to start forwarding (staId %d)", __func__, staId);
  }
}

/*===========================================================================

  FUNCTION  WLANTL_Finish_ULA

  DESCRIPTION
     This function is used by HDD to notify TL to finish Upper layer
     authentication incase the last EAPOL packet is pending in the TL queue.
     To avoid the race condition between sme set key and the last EAPOL packet
     the HDD module calls this function just before calling the sme_RoamSetKey.

  DEPENDENCIES

     TL must have been initialized before this gets called.

  PARAMETERS

   callbackRoutine:   HDD Callback function.
   callbackContext :  HDD userdata context.

  RETURN VALUE

   VOS_STATUS_SUCCESS/VOS_STATUS_FAILURE

  SIDE EFFECTS

============================================================================*/
/* Thin pass-through to WDA; kept in TL so HDD only talks to the TL API. */
VOS_STATUS WLANTL_Finish_ULA( void (*callbackRoutine) (void *callbackContext),
                              void *callbackContext)
{
   return WDA_DS_FinishULA( callbackRoutine, callbackContext);
}

/*===========================================================================

  FUNCTION    WLANTL_RegisterSTAClient

  DESCRIPTION

    This function is used by HDD to register as a client for data services
    with TL. HDD will call this API for each new station that it adds,
    thus having the flexibility of registering different callback for each
    STA it services.

  DEPENDENCIES

    TL must have been initialized before this gets called.
    Restriction: Main thread will have higher priority that Tx and Rx
    threads thus guaranteeing that a station will be added before
    any data can be received for it. (This enables TL to be lock free)

  PARAMETERS

   pvosGCtx:        pointer to the global vos context; a handle to TL's
                    control block can be extracted from its context
   pfnStARx:        function pointer to the receive packet handler from HDD
   pfnSTATxComp:    function pointer to the transmit complete confirmation
                    handler from HDD
   pfnSTAFetchPkt:  function pointer to the packet retrieval routine in HDD
   wSTADescType:    STA Descriptor, contains information related to the
                    new added STA

  RETURN VALUE

    The result code associated with performing the operation

    VOS_STATUS_E_INVAL:  Input parameters are invalid
    VOS_STATUS_E_FAULT:  Station ID is outside array boundaries or pointer
                         to TL cb is NULL ; access would cause a page fault
    VOS_STATUS_E_EXISTS: Station was already registered
    VOS_STATUS_SUCCESS:  Everything is good :)

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_RegisterSTAClient
(
  v_PVOID_t                 pvosGCtx,
  WLANTL_STARxCBType        pfnSTARx,
  WLANTL_TxCompCBType       pfnSTATxComp,
  WLANTL_STAFetchPktCBType  pfnSTAFetchPkt,
  WLAN_STADescType*         pwSTADescType,
  v_S7_t                    rssi
)
{
  WLANTL_CbType*        pTLCb      = NULL;
  WLANTL_STAClientType* pClientSTA = NULL;
  v_U8_t   ucTid = 0;/*Local variable to clear previous replay counters of STA on all TIDs*/
  v_U32_t  istoggleArpEnb = 0;
  tpAniSirGlobal pMac;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Sanity check: descriptor and the two mandatory callbacks must be
    supplied; pfnSTATxComp is optional (TL has a default Tx complete cb).
   ------------------------------------------------------------------------*/
  ENTER();
  if (( NULL == pwSTADescType ) || ( NULL == pfnSTARx ) ||
      ( NULL == pfnSTAFetchPkt ))
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
               "WLAN TL:Invalid parameter sent on WLANTL_RegisterSTAClient"));
    return VOS_STATUS_E_INVAL;
  }

  if ( WLANTL_STA_ID_INVALID( pwSTADescType->ucSTAId ) )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
       "WLAN TL:Invalid station id requested on WLANTL_RegisterSTAClient"));
    return VOS_STATUS_E_FAULT;
  }

  /*------------------------------------------------------------------------
    Extract TL control block
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
      "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_RegisterSTAClient"));
    return VOS_STATUS_E_FAULT;
  }

  /* Allocate the per-STA client struct lazily on first registration; the
     slot is retained (not freed) on de-registration and reused here. */
  //Code for checking and allocating memory for new STA
  if ( NULL == pTLCb->atlSTAClients[pwSTADescType->ucSTAId] ){
      pTLCb->atlSTAClients[pwSTADescType->ucSTAId] = vos_mem_malloc(sizeof(WLANTL_STAClientType));
      if ( NULL == pTLCb->atlSTAClients[pwSTADescType->ucSTAId] ){
          TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
              "WLAN TL: STA Client memory allocation failed in WLANTL_RegisterSTAClient"));
          return VOS_STATUS_E_FAILURE;
      }
      TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO,
          "WLAN TL: STA Client memory allocation in WLANTL_RegisterSTAClient"));
      vos_mem_zero((v_VOID_t *) pTLCb->atlSTAClients[pwSTADescType->ucSTAId],sizeof(WLANTL_STAClientType));
  }

  //Assigning the pointer to local variable for easy access in future
  pClientSTA = pTLCb->atlSTAClients[pwSTADescType->ucSTAId];
  if ( 0 != pClientSTA->ucExists )
  {
    /* ucExists doubles as a registration refcount/diagnostic counter */
    pClientSTA->ucExists++;
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
      "WLAN TL:Station was already registered on WLANTL_RegisterSTAClient"));
    return VOS_STATUS_E_EXISTS;
  }

  /*------------------------------------------------------------------------
    Register station with TL
    Trace carries STA id plus the low 4 MAC bytes packed into one word.
   ------------------------------------------------------------------------*/
  MTRACE(vos_trace(VOS_MODULE_ID_TL, TRACE_CODE_TL_REGISTER_STA_CLIENT,
         pwSTADescType->ucSTAId, (unsigned )
         (*(pwSTADescType->vSTAMACAddress.bytes+2)<<24 |
          *(pwSTADescType->vSTAMACAddress.bytes+3)<<16 |
          *(pwSTADescType->vSTAMACAddress.bytes+4)<<8 |
          *(pwSTADescType->vSTAMACAddress.bytes+5))));
  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
             "WLAN TL:Registering STA Client ID: %d", pwSTADescType->ucSTAId ));

  pClientSTA->pfnSTARx       = pfnSTARx;
  pClientSTA->pfnSTAFetchPkt = pfnSTAFetchPkt;

  /* Only register if different from NULL - TL default Tx Comp Cb will
    release the vos packet */
  if ( NULL != pfnSTATxComp )
  {
    pClientSTA->pfnSTATxComp   = pfnSTATxComp;
  }

  pClientSTA->tlState  = WLANTL_STA_INIT;
  pClientSTA->tlPri    = WLANTL_STA_PRI_NORMAL;
  pClientSTA->wSTADesc.ucSTAId  = pwSTADescType->ucSTAId;
  pClientSTA->ptkInstalled = 0;

  /* Read the ARP-toggle cfg item from PE (best effort: skipped if the PE
     context is unavailable, leaving istoggleArpEnb at its default of 0). */
  pMac = vos_get_context(VOS_MODULE_ID_PE, pvosGCtx);
  if ( NULL != pMac )
  {
    wlan_cfgGetInt(pMac, WNI_CFG_TOGGLE_ARP_BDRATES, &istoggleArpEnb);
  }
  pClientSTA->arpRate = istoggleArpEnb ? ENABLE_ARP_TOGGLE : DISABLE_ARP_TOGGLE;
  pClientSTA->arpOnWQ5 = istoggleArpEnb == SEND_ARP_ON_WQ5;

  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
             "WLAN TL:Registering STA Client ID: %d with UC %d and BC %d toggleArp :%hhu",
             pwSTADescType->ucSTAId, pwSTADescType->ucUcastSig,
             pwSTADescType->ucBcastSig, pClientSTA->arpRate));

  pClientSTA->wSTADesc.wSTAType = pwSTADescType->wSTAType;

  pClientSTA->wSTADesc.ucQosEnabled = pwSTADescType->ucQosEnabled;

  pClientSTA->wSTADesc.ucAddRmvLLC = pwSTADescType->ucAddRmvLLC;

  pClientSTA->wSTADesc.ucProtectedFrame = pwSTADescType->ucProtectedFrame;

#ifdef FEATURE_WLAN_ESE
  pClientSTA->wSTADesc.ucIsEseSta = pwSTADescType->ucIsEseSta;
  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
             "WLAN TL:Registering STA Client ID: %d QoS %d Add LLC %d ProtFrame %d EseSta %d",
             pwSTADescType->ucSTAId, pwSTADescType->ucQosEnabled,
             pwSTADescType->ucAddRmvLLC, pwSTADescType->ucProtectedFrame,
             pwSTADescType->ucIsEseSta));
#else
  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
             "WLAN TL:Registering STA Client ID: %d QoS %d Add LLC %d ProtFrame %d",
             pwSTADescType->ucSTAId, pwSTADescType->ucQosEnabled,
             pwSTADescType->ucAddRmvLLC, pwSTADescType->ucProtectedFrame));
#endif //FEATURE_WLAN_ESE

#ifdef WLAN_SOFTAP_VSTA_FEATURE
  // if this station was not allocated resources to perform HW-based
  // TX frame translation then force SW-based TX frame translation
  // otherwise use the frame translation supplied by the client
  if (!WDA_IsHwFrameTxTranslationCapable(pvosGCtx, pwSTADescType->ucSTAId)
      || ( WLAN_STA_BT_AMP == pwSTADescType->wSTAType))
  {
      pwSTADescType->ucSwFrameTXXlation = 1;
  }
#endif

  pClientSTA->wSTADesc.ucSwFrameTXXlation = pwSTADescType->ucSwFrameTXXlation;
  pClientSTA->wSTADesc.ucSwFrameRXXlation = pwSTADescType->ucSwFrameRXXlation;
#ifdef FEATURE_WLAN_WAPI
  pClientSTA->wSTADesc.ucIsWapiSta = pwSTADescType->ucIsWapiSta;
#endif /* FEATURE_WLAN_WAPI */

  vos_copy_macaddr( &pClientSTA->wSTADesc.vSTAMACAddress,
                    &pwSTADescType->vSTAMACAddress);

  vos_copy_macaddr( &pClientSTA->wSTADesc.vBSSIDforIBSS,
                    &pwSTADescType->vBSSIDforIBSS);

  vos_copy_macaddr( &pClientSTA->wSTADesc.vSelfMACAddress,
                    &pwSTADescType->vSelfMACAddress);

  /* In volans release L replay check is done at TL */
  pClientSTA->ucIsReplayCheckValid = pwSTADescType->ucIsReplayCheckValid;
  pClientSTA->ulTotalReplayPacketsDetected =  0;
  /*Clear replay counters of the STA on all TIDs*/
  for(ucTid = 0; ucTid < WLANTL_MAX_TID ; ucTid++)
  {
    pClientSTA->ullReplayCounter[ucTid] =  0;
  }

  /*--------------------------------------------------------------------
      Set the AC for the registered station to the highest priority AC
      Even if this AC is not supported by the station, correction will be
      made in the main TL loop after the supported mask is properly
      updated in the pending packets call
    --------------------------------------------------------------------*/
  pClientSTA->ucCurrentAC     = WLANTL_AC_HIGH_PRIO;
  pClientSTA->ucCurrentWeight = 0;
  pClientSTA->ucServicedAC    = WLANTL_AC_BK;
  pClientSTA->ucEapolPktPending = 0;

  vos_mem_zero( pClientSTA->aucACMask,
                sizeof(pClientSTA->aucACMask));

  vos_mem_zero( &pClientSTA->wUAPSDInfo,
                sizeof(pClientSTA->wUAPSDInfo));

  /*--------------------------------------------------------------------
    Reordering info and AMSDU de-aggregation
   --------------------------------------------------------------------*/
  vos_mem_zero( pClientSTA->atlBAReorderInfo,
     sizeof(pClientSTA->atlBAReorderInfo[0])* WLAN_MAX_TID);

  vos_mem_zero( pClientSTA->aucMPDUHeader, WLANTL_MPDU_HEADER_LEN);

  pClientSTA->ucMPDUHeaderLen   = 0;
  pClientSTA->vosAMSDUChain     = NULL;
  pClientSTA->vosAMSDUChainRoot = NULL;

  /* Reorder LOCK
   * During handle normal RX frame within RX thread,
   * if MC thread try to preempt, ADDBA, DELBA, TIMER
   * Context should be protected from race */
  for (ucTid = 0; ucTid < WLAN_MAX_TID ; ucTid++)
  {
    if (!VOS_IS_STATUS_SUCCESS(
        vos_lock_init(&pClientSTA->atlBAReorderInfo[ucTid].reorderLock)))
    {
      /* NOTE(review): on a mid-loop failure the locks already initialized
         are not destroyed before returning — confirm whether cleanup is
         deferred to WLANTL_ClearSTAClient in this error path */
      TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
            "Lock Init Fail"));
      return VOS_STATUS_E_FAILURE;
    }
  }

  /*--------------------------------------------------------------------
    Stats info
   --------------------------------------------------------------------*/
  /* NOTE(review): auTxCount is zeroed using the element size of
     auRxCount[0]; assumes both arrays share the same element type —
     confirm against the WLANTL_STAClientType declaration */
  vos_mem_zero( pClientSTA->auRxCount,
      sizeof(pClientSTA->auRxCount[0])* WLAN_MAX_TID);

  vos_mem_zero( pClientSTA->auTxCount,
      sizeof(pClientSTA->auRxCount[0])* WLAN_MAX_TID);
  /* Initial RSSI is always reported as zero because TL doesnt have enough
     data to calculate RSSI. So to avoid reporting zero, we are initializing
     RSSI with RSSI saved in BssDescription during scanning. */
  pClientSTA->rssiAvg = rssi;
  pClientSTA->rssiAvgBmps = rssi;
#ifdef FEATURE_WLAN_TDLS
  if(WLAN_STA_TDLS == pClientSTA->wSTADesc.wSTAType)
  {
      /* If client is TDLS, use TDLS specific alpha */
      pClientSTA->rssiAlpha = WLANTL_HO_TDLS_ALPHA;
  }
  else
  {
      pClientSTA->rssiAlpha = WLANTL_HO_DEFAULT_ALPHA;
  }
#else
  pClientSTA->rssiAlpha = WLANTL_HO_DEFAULT_ALPHA;
#endif /* FEATURE_WLAN_TDLS */

#ifdef WLAN_FEATURE_LINK_LAYER_STATS
  pClientSTA->rssiDataAlpha = WLANTL_HO_DEFAULT_ALPHA;
  /* Fixed AC-to-slot mapping for per-interface link-layer statistics */
  pClientSTA->interfaceStats.accessCategoryStats[0].ac = WLANTL_AC_BK;
  pClientSTA->interfaceStats.accessCategoryStats[1].ac = WLANTL_AC_BE;
  pClientSTA->interfaceStats.accessCategoryStats[2].ac = WLANTL_AC_VI;
  pClientSTA->interfaceStats.accessCategoryStats[3].ac = WLANTL_AC_VO;
#endif
  /*Tx not suspended and station fully registered*/
  vos_atomic_set_U8( &pClientSTA->ucTxSuspended, 0);

  /* Used until multiple station support will be added*/
  pTLCb->ucRegisteredStaId = pwSTADescType->ucSTAId;

  /* Save the BAP station ID for future usage */
  if ( WLAN_STA_BT_AMP == pwSTADescType->wSTAType )
  {
    pTLCb->tlBAPClient.ucBAPSTAId = pwSTADescType->ucSTAId;
  }

  /*------------------------------------------------------------------------
    Statistics info
    -----------------------------------------------------------------------*/
  memset(&pClientSTA->trafficStatistics, 0, sizeof(WLANTL_TRANSFER_STA_TYPE));

  /*------------------------------------------------------------------------
    Start with the state suggested by client caller
    -----------------------------------------------------------------------*/
  pClientSTA->tlState = pwSTADescType->ucInitState;

  /*-----------------------------------------------------------------------
    After all the init is complete we can mark the existance flag
    ----------------------------------------------------------------------*/
  pClientSTA->ucExists++;

  //flow control fields init
  pClientSTA->ucLwmModeEnabled = FALSE;
  pClientSTA->ucLwmEventReported = FALSE;
  pClientSTA->bmuMemConsumed = 0;
  pClientSTA->uIngress_length = 0;
  pClientSTA->uBuffThresholdMax = WLANTL_STA_BMU_THRESHOLD_MAX;

  pClientSTA->uLwmThreshold = WLANTL_STA_BMU_THRESHOLD_MAX / 3;

  //@@@ HDDSOFTAP does not queue unregistered packet for now
  if ( WLAN_STA_SOFTAP != pwSTADescType->wSTAType )
  {
    /*------------------------------------------------------------------------
      Forward received frames while STA was not yet registered
    -  ----------------------------------------------------------------------*/
    if(!VOS_IS_STATUS_SUCCESS(WLANTL_StartForwarding( pwSTADescType->ucSTAId,
                              pwSTADescType->ucUcastSig,
                              pwSTADescType->ucBcastSig)))
    {
      VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
                " %s fails to start forwarding", __func__);
    }
#ifdef FEATURE_WLAN_TDLS
    if( WLAN_STA_TDLS == pwSTADescType->wSTAType )
      pTLCb->ucTdlsPeerCount++;
#endif
  }
  return VOS_STATUS_SUCCESS;
}/* WLANTL_RegisterSTAClient */

/*===========================================================================

  FUNCTION    WLANTL_ClearSTAClient

  DESCRIPTION

    HDD will call this API when it no longer needs data services for the
    particular station.

  DEPENDENCIES

    A station must have been registered before the clear registration is
    called.
  PARAMETERS

   pvosGCtx:       pointer to the global vos context; a handle to TL's
                   control block can be extracted from its context
   ucSTAId:        identifier for the STA to be cleared

  RETURN VALUE

    The result code associated with performing the operation

    VOS_STATUS_E_FAULT:  Station ID is outside array boundaries or pointer
                         to TL cb is NULL ; access would cause a page fault
    VOS_STATUS_E_EXISTS: Station was not registered
    VOS_STATUS_SUCCESS:  Everything is good :)

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_ClearSTAClient
(
  v_PVOID_t        pvosGCtx,
  v_U8_t           ucSTAId
)
{
  WLANTL_CbType*  pTLCb = NULL;
  v_U8_t ucIndex;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  ENTER();

  /*------------------------------------------------------------------------
    Sanity check
   ------------------------------------------------------------------------*/
  if ( WLANTL_STA_ID_INVALID( ucSTAId ) )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
       "WLAN TL:Invalid station id requested on WLANTL_ClearSTAClient"));
    return VOS_STATUS_E_FAULT;
  }

  /*------------------------------------------------------------------------
    Extract TL control block
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
       "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_ClearSTAClient"));
    return VOS_STATUS_E_FAULT;
  }

  if ( NULL == pTLCb->atlSTAClients[ucSTAId] )
  {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
          "WLAN TL:Client Memory was not allocated on %s", __func__));
      return VOS_STATUS_E_FAILURE;
  }

  if ( 0 == pTLCb->atlSTAClients[ucSTAId]->ucExists )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
     "WLAN TL:Station was not previously registered on WLANTL_ClearSTAClient"));
    /* Clean packets cached for the STA */
    WLANTL_StartForwarding(ucSTAId,0,0);
    return VOS_STATUS_E_EXISTS;
  }

  /* Delete BA sessions on all TID's */
  for (ucIndex = 0; ucIndex < WLAN_MAX_TID ; ucIndex++)
  {
     WLANTL_BaSessionDel(pvosGCtx, ucSTAId, ucIndex);
     /* Destroy the per-TID reorder lock created at registration */
     vos_lock_destroy(&pTLCb->atlSTAClients[ucSTAId]->atlBAReorderInfo[ucIndex].reorderLock);
  }

#ifdef FEATURE_WLAN_TDLS
  /* decrement ucTdlsPeerCount only if it is non-zero */
  if(WLAN_STA_TDLS == pTLCb->atlSTAClients[ucSTAId]->wSTADesc.wSTAType
      && pTLCb->ucTdlsPeerCount)
    pTLCb->ucTdlsPeerCount--;
#endif

  /*------------------------------------------------------------------------
    Clear station
   ------------------------------------------------------------------------*/
  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
             "WLAN TL:Clearing STA Client ID: %d", ucSTAId ));
  WLANTL_CleanSTA(pTLCb->atlSTAClients[ucSTAId], 1 /*empty packets*/);

  /* Reset handoff-support history so a later association starts fresh */
  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
             "WLAN TL:Clearing STA Reset History RSSI and Region number"));
  pTLCb->hoSupport.currentHOState.historyRSSI = 0;
  pTLCb->hoSupport.currentHOState.regionNumber = 0;

  return VOS_STATUS_SUCCESS;
}/* WLANTL_ClearSTAClient */

/*===========================================================================

  FUNCTION    WLANTL_ChangeSTAState

  DESCRIPTION

    HDD will make this notification whenever a change occurs in the
    connectivity state of a particular STA.

  DEPENDENCIES

    A station must have been registered before the change state can be
    called.

    RESTRICTION: A station is being notified as authenticated before the
                 keys are installed in HW. This way if a frame is received
                 before the keys are installed DPU will drop that frame.

    Main thread has higher priority that Tx and Rx threads thus guaranteeing
    the following:
        - a station will be in assoc state in TL before TL receives any data
          for it

  PARAMETERS

   pvosGCtx:       pointer to the global vos context; a handle to TL's
                   control block can be extracted from its context
   ucSTAId:        identifier for the STA that is pending transmission
   tlSTAState:     the new state of the connection to the given station

  RETURN VALUE

    The result code associated with performing the operation

    VOS_STATUS_E_INVAL:  Input parameters are invalid
    VOS_STATUS_E_FAULT:  Station ID is outside array boundaries or pointer
                         to TL cb is NULL ; access would cause a page fault
    VOS_STATUS_E_EXISTS: Station was not registered
    VOS_STATUS_SUCCESS:  Everything is good :)

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_ChangeSTAState
(
  v_PVOID_t             pvosGCtx,
  v_U8_t                ucSTAId,
  WLANTL_STAStateType   tlSTAState
)
{
  WLANTL_CbType*  pTLCb = NULL;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Sanity check
   ------------------------------------------------------------------------*/
  if ( tlSTAState >= WLANTL_STA_MAX_STATE )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
       "WLAN TL:Invalid parameter sent on WLANTL_ChangeSTAState"));
    return VOS_STATUS_E_INVAL;
  }

  if ( WLANTL_STA_ID_INVALID( ucSTAId ) )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
       "WLAN TL:Invalid station id requested on WLANTL_ChangeSTAState"));
    return VOS_STATUS_E_FAULT;
  }

  /*------------------------------------------------------------------------
    Extract TL control block and check existance
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
      "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_ChangeSTAState"));
    return VOS_STATUS_E_FAULT;
  }

  if ( NULL == pTLCb->atlSTAClients[ucSTAId] )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL:Client Memory was not allocated on %s", __func__));
    return VOS_STATUS_E_FAILURE;
  }

  if ( 0 == pTLCb->atlSTAClients[ucSTAId]->ucExists )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
    "WLAN TL:Station was not previously registered on WLANTL_ChangeSTAState"));
    return VOS_STATUS_E_EXISTS;
  }

  /*------------------------------------------------------------------------
    Change STA state
    No need to lock this operation, see restrictions above
   ------------------------------------------------------------------------*/
  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
            "WLAN TL:Changing state for STA Client ID: %d from %d to %d",
            ucSTAId, pTLCb->atlSTAClients[ucSTAId]->tlState, tlSTAState));

  MTRACE(vos_trace(VOS_MODULE_ID_TL, TRACE_CODE_TL_STA_STATE,
                ucSTAId,tlSTAState ));

  pTLCb->atlSTAClients[ucSTAId]->tlState = tlSTAState;

  return VOS_STATUS_SUCCESS;
}/* WLANTL_ChangeSTAState */

/*===========================================================================

  FUNCTION    WLANTL_UpdateTdlsSTAClient

  DESCRIPTION

    HDD will call this API when ENABLE_LINK happens and HDD want to
    register QoS or other params for TDLS peers.

  DEPENDENCIES

    A station must have been registered before the WMM/QOS registration is
    called.
  PARAMETERS

   pvosGCtx:      pointer to the global vos context; a handle to TL's
                  control block can be extracted from its context
   wSTADescType:  STA Descriptor, contains information related to the
                  new added STA

  RETURN VALUE

    The result code associated with performing the operation

    VOS_STATUS_E_FAULT:  Station ID is outside array boundaries or pointer
                         to TL cb is NULL ; access would cause a page fault
    VOS_STATUS_E_EXISTS: Station was not registered
    VOS_STATUS_SUCCESS:  Everything is good :)

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_UpdateTdlsSTAClient
(
  v_PVOID_t                 pvosGCtx,
  WLAN_STADescType*         pwSTADescType
)
{
  WLANTL_CbType*        pTLCb = NULL;
  WLANTL_STAClientType* pClientSTA = NULL;

  /*------------------------------------------------------------------------
    Extract TL control block; also bounds-check the STA id before indexing
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb || ( WLAN_MAX_STA_COUNT <= pwSTADescType->ucSTAId))
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
     "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_UpdateTdlsSTAClient"));
    return VOS_STATUS_E_FAULT;
  }

  pClientSTA = pTLCb->atlSTAClients[pwSTADescType->ucSTAId];
  if ((NULL == pClientSTA) || 0 == pClientSTA->ucExists)
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
     "WLAN TL:Station not exists"));
    return VOS_STATUS_E_FAILURE;
  }

  /* Only the QoS-enabled flag is refreshed for a TDLS peer on ENABLE_LINK */
  pClientSTA->wSTADesc.ucQosEnabled = pwSTADescType->ucQosEnabled;
  TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO,
             "WLAN TL: %s: ucQosEnabled of pwSTADescType: %d"
             "pClientSTA->wSTADesc: %d",
             __func__, pwSTADescType->ucQosEnabled,
             pClientSTA->wSTADesc.ucQosEnabled));

  return VOS_STATUS_SUCCESS;
}

/*
 * Register the monitor-mode RX callback with TL.
 * Returns VOS_STATUS_E_FAULT if the TL control block cannot be obtained.
 */
VOS_STATUS WLANTL_SetMonRxCbk(v_PVOID_t pvosGCtx, WLANTL_MonRxCBType pfnMonRx)
{
   WLANTL_CbType* pTLCb = NULL ;

   pTLCb = VOS_GET_TL_CB(pvosGCtx);
   if ( NULL == pTLCb )
   {
    /* NOTE(review): log text names WLANTL_RegisterSTAClient — appears
       copy-pasted from that function; left unchanged (runtime string) */
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
      "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_RegisterSTAClient"));
    return VOS_STATUS_E_FAULT;
   }
   pTLCb->pfnMonRx = pfnMonRx;
   return VOS_STATUS_SUCCESS;
}

/*
 * Set whether TL should perform frame-format conversion (monitor mode
 * related flag in the TL control block). Silently returns if the TL
 * control block cannot be obtained.
 */
void WLANTL_SetIsConversionReq(v_PVOID_t pvosGCtx, v_BOOL_t isConversionReq)
{
   WLANTL_CbType* pTLCb = NULL ;

   pTLCb = VOS_GET_TL_CB(pvosGCtx);
   if ( NULL == pTLCb )
   {
    /* NOTE(review): log text names WLANTL_RegisterSTAClient — appears
       copy-pasted; left unchanged (runtime string) */
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
      "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_RegisterSTAClient"));
    return;
   }
   pTLCb->isConversionReq = isConversionReq;
   return;
}

/*===========================================================================

  FUNCTION    WLANTL_STAPtkInstalled

  DESCRIPTION

    HDD will make this notification whenever PTK is installed for the STA

  DEPENDENCIES

    A station must have been registered before the change state can be
    called.

  PARAMETERS

   pvosGCtx:       pointer to the global vos context; a handle to TL's
                   control block can be extracted from its context
   ucSTAId:        identifier for the STA for which Pairwise key is
                   installed

  RETURN VALUE

    The result code associated with performing the operation

    VOS_STATUS_E_FAULT:  Station ID is outside array boundaries or pointer
                         to TL cb is NULL ; access would cause a page fault
    VOS_STATUS_E_EXISTS: Station was not registered
    VOS_STATUS_SUCCESS:  Everything is good :)

  SIDE EFFECTS

============================================================================*/
VOS_STATUS WLANTL_STAPtkInstalled
(
  v_PVOID_t             pvosGCtx,
  v_U8_t                ucSTAId
)
{
  WLANTL_CbType*  pTLCb = NULL;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Sanity check
   ------------------------------------------------------------------------*/
  if ( WLANTL_STA_ID_INVALID( ucSTAId ) )
  {
    /* NOTE(review): log text names WLANTL_ChangeSTAState — appears
       copy-pasted; left unchanged (runtime string) */
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
       "WLAN TL:Invalid station id requested on WLANTL_ChangeSTAState"));
    return VOS_STATUS_E_FAULT;
  }

  /*------------------------------------------------------------------------
    Extract TL control block and check existance
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);

  if ( NULL == pTLCb )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        FL("WLAN TL:Invalid TL pointer from pvosGCtx")));
    return VOS_STATUS_E_FAULT;
  }
  if ( NULL == pTLCb->atlSTAClients[ucSTAId] )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        FL("WLAN TL:Client Memory was not allocated")));
    return VOS_STATUS_E_FAILURE;
  }

  if ( 0 == pTLCb->atlSTAClients[ucSTAId]->ucExists )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        FL("WLAN TL:Station was not previously registered")));
    return VOS_STATUS_E_EXISTS;
  }

  /* Mark the pairwise key as installed; DPU will stop dropping frames */
  pTLCb->atlSTAClients[ucSTAId]->ptkInstalled = 1;

  return VOS_STATUS_SUCCESS;
}/* WLANTL_STAPtkInstalled */

/*===========================================================================

  FUNCTION    WLANTL_GetSTAState

  DESCRIPTION

    Returns connectivity state of a particular STA.

  DEPENDENCIES

    A station must have been registered before its state can be retrieved.
  PARAMETERS

    IN
    pvosGCtx:       pointer to the global vos context; a handle to TL's
                    control block can be extracted from its context
    ucSTAId:        identifier of the station

    OUT
    ptlSTAState:    the current state of the connection to the given station

  RETURN VALUE

    The result code associated with performing the operation

    VOS_STATUS_E_INVAL:  Input parameters are invalid
    VOS_STATUS_E_FAULT:  Station ID is outside array boundaries or pointer
                         to TL cb is NULL ; access would cause a page fault
    VOS_STATUS_E_EXISTS: Station was not registered
    VOS_STATUS_SUCCESS:  Everything is good :)

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_GetSTAState
(
  v_PVOID_t             pvosGCtx,
  v_U8_t                ucSTAId,
  WLANTL_STAStateType   *ptlSTAState
)
{
  WLANTL_CbType*  pTLCb = NULL;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Sanity check
   ------------------------------------------------------------------------*/
  if ( NULL == ptlSTAState )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
       "WLAN TL:Invalid parameter sent on WLANTL_GetSTAState"));
    return VOS_STATUS_E_INVAL;
  }

  if ( WLANTL_STA_ID_INVALID( ucSTAId ) )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
       "WLAN TL:Invalid station id requested on WLANTL_GetSTAState"));
    return VOS_STATUS_E_FAULT;
  }

  /*------------------------------------------------------------------------
    Extract TL control block and check existance
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
      "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_GetSTAState"));
    return VOS_STATUS_E_FAULT;
  }

  if ( NULL == pTLCb->atlSTAClients[ucSTAId] )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL:Client Memory was not allocated on %s", __func__));
    return VOS_STATUS_E_FAILURE;
  }

  if ( 0 == pTLCb->atlSTAClients[ucSTAId]->ucExists )
  {
    /* WARN (not ERROR): callers may legitimately query before register */
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
      "WLAN TL:Station was not previously registered on WLANTL_GetSTAState"));
    return VOS_STATUS_E_EXISTS;
  }

  /*------------------------------------------------------------------------
    Get STA state
   ------------------------------------------------------------------------*/
  *ptlSTAState = pTLCb->atlSTAClients[ucSTAId]->tlState;

  return VOS_STATUS_SUCCESS;
}/* WLANTL_GetSTAState */

/*==========================================================================

  FUNCTION   WLANTL_UpdateSTABssIdforIBSS

  DESCRIPTION
    HDD will call this API to update the BSSID for this Station.

  DEPENDENCIES
    The HDD Should registered the staID with TL before calling this function.

  PARAMETERS

    IN
    pvosGCtx:    Pointer to the global vos context; a handle to TL's
                    or WDA's control block can be extracted from its context
    IN
    ucSTAId       The Station ID for Bssid to be updated
    IN
    pBssid          BSSID to be updated

  RETURN VALUE
      The result code associated with performing the operation

      VOS_STATUS_E_INVAL:  Input parameters are invalid
      VOS_STATUS_E_FAULT:  Station ID is outside array boundaries or pointer
                           to TL cb is NULL ; access would cause a page fault
      VOS_STATUS_E_EXISTS: Station was not registered
      VOS_STATUS_SUCCESS:  Everything is good :)

    SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_UpdateSTABssIdforIBSS
(
  v_PVOID_t             pvosGCtx,
  v_U8_t                ucSTAId,
  v_U8_t               *pBssid
)
{
  WLANTL_CbType*  pTLCb = NULL;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Sanity check
   ------------------------------------------------------------------------*/
  if ( WLANTL_STA_ID_INVALID( ucSTAId ) )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
       "WLAN TL:Invalid station id requested %s", __func__));
    return VOS_STATUS_E_FAULT;
  }

  /*------------------------------------------------------------------------
    Extract TL control block and check existance
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
      "WLAN TL:Invalid TL pointer from pvosGCtx %s", __func__));
    return VOS_STATUS_E_FAULT;
  }

  if ( NULL == pTLCb->atlSTAClients[ucSTAId] )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL:Client Memory was not allocated on %s", __func__));
    return VOS_STATUS_E_FAILURE;
  }

  if ( 0 == pTLCb->atlSTAClients[ucSTAId]->ucExists )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
      "WLAN TL:Station was not previously registered %s", __func__));
    return VOS_STATUS_E_EXISTS;
  }

  /*------------------------------------------------------------------------
    Update the IBSS BSSID (fixed MAC-address-sized copy)
   ------------------------------------------------------------------------*/
  vos_mem_copy( &pTLCb->atlSTAClients[ucSTAId]->wSTADesc.vBSSIDforIBSS,
      pBssid, sizeof(v_MACADDR_t));

  return VOS_STATUS_SUCCESS;
}

/*===========================================================================

  FUNCTION    WLANTL_STAPktPending

  DESCRIPTION

    HDD will call this API when a packet is pending transmission in its
    queues.

  DEPENDENCIES

    A station must have been registered before the packet pending
    notification can be sent.

    RESTRICTION: TL will not count packets for pending notification.
                 HDD is expected to send the notification only when
                 non-empty event gets triggered. Worst case scenario
                 is that TL might end up making a call when Hdds
                 queues are actually empty.
  PARAMETERS

    pvosGCtx:    pointer to the global vos context; a handle to TL's
                 control block can be extracted from its context
    ucSTAId:     identifier for the STA that is pending transmission

  RETURN VALUE

    The result code associated with performing the operation

    VOS_STATUS_E_INVAL:  Input parameters are invalid
    VOS_STATUS_E_FAULT:  Station ID is outside array boundaries or pointer
                         to TL cb is NULL ; access would cause a page fault
    VOS_STATUS_E_EXISTS: Station was not registered
    VOS_STATUS_SUCCESS:  Everything is good :)

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_STAPktPending
(
  v_PVOID_t            pvosGCtx,
  v_U8_t               ucSTAId,
  WLANTL_ACEnumType    ucAc
)
{
  WLANTL_CbType*        pTLCb = NULL;
  WLANTL_STAClientType* pClientSTA = NULL;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO,
        "WLAN TL:Packet pending indication for STA: %d AC: %d", ucSTAId, ucAc);

  /*------------------------------------------------------------------------
    Sanity check
   ------------------------------------------------------------------------*/
  if ( WLANTL_STA_ID_INVALID( ucSTAId ) )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
       "WLAN TL:Invalid station id requested on WLANTL_STAPktPending"));
    return VOS_STATUS_E_FAULT;
  }

  /*------------------------------------------------------------------------
    Extract TL control block and check existance
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
      "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_STAPktPending"));
    return VOS_STATUS_E_FAULT;
  }

  pClientSTA = pTLCb->atlSTAClients[ucSTAId];

  if ( NULL == pClientSTA )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL:Client Memory was not allocated on %s", __func__));
    return VOS_STATUS_E_FAILURE;
  }

  if ( 0 == pClientSTA->ucExists )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
   "WLAN TL:Station was not previously registered on WLANTL_STAPktPending"));
    return VOS_STATUS_E_EXISTS;
  }

  /*---------------------------------------------------------------------
    Temporary fix to enable TL to fetch packets when multiple peers join
    an IBSS. To fix CR177301. Needs to go away when the actual fix of
    going through all STA's in round robin fashion gets merged in from
    BT AMP branch.
   --------------------------------------------------------------------*/
  pTLCb->ucRegisteredStaId = ucSTAId;

  if( WLANTL_STA_CONNECTED == pClientSTA->tlState )
  { /* EAPOL_HI_PRIORITY : need to find out whether EAPOL is pending before
       WLANTL_FetchPacket()/WLANTL_TxConn() is called.
       change STA_AUTHENTICATED != tlState to CONNECTED == tlState
       to make sure TL is indeed waiting for EAPOL.
       Just in the case when STA got disconnected shortly after
       connectection */
    pClientSTA->ucEapolPktPending = 1;

    MTRACE(vos_trace(VOS_MODULE_ID_TL, TRACE_CODE_TL_EAPOL_PKT_PENDING,
                     ucSTAId, ucAc));
    VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
      "WLAN TL:Packet pending indication for STA: %d AC: %d State: %d",
               ucSTAId, ucAc, pClientSTA->tlState);
  }

  /*-----------------------------------------------------------------------
    Enable this AC in the AC mask in order for TL to start servicing it
    Set packet pending flag
    To avoid race condition, serialize the updation of AC and AC mask
    through WLANTL_TX_STAID_AC_IND message.
  -----------------------------------------------------------------------*/

    pClientSTA->aucACMask[ucAc] = 1;

    vos_atomic_set_U8( &pClientSTA->ucPktPending, 1);

    /*------------------------------------------------------------------------
      Check if there are enough resources for transmission and tx is not
      suspended.
     ------------------------------------------------------------------------*/
    if (( pTLCb->uResCount >=  WDA_TLI_MIN_RES_DATA ) &&
        ( 0 == pTLCb->ucTxSuspended ))
    {
      TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
                    "Issuing Xmit start request to BAL"));
      WDA_DS_StartXmit(pvosGCtx);
    }
    else
    {
      /*---------------------------------------------------------------------
        No error code is sent because TL will resume tx autonomously if
        resources become available or tx gets resumed
        ---------------------------------------------------------------------*/
      VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
        "WLAN TL:Request to send but condition not met. Res: %d,Suspend: %d",
            pTLCb->uResCount, pTLCb->ucTxSuspended );
    }

  return VOS_STATUS_SUCCESS;
}/* WLANTL_STAPktPending */

/*==========================================================================

  FUNCTION    WLANTL_SetSTAPriority

  DESCRIPTION

    TL exposes this API to allow upper layers a rough control over the
    priority of transmission for a given station when supporting multiple
    connections.

  DEPENDENCIES

    A station must have been registered before the change in priority can be
    called.

  PARAMETERS

    pvosGCtx:       pointer to the global vos context; a handle to TL's
                    control block can be extracted from its context
    ucSTAId:        identifier for the STA that has to change priority

  RETURN VALUE

    The result code associated with performing the operation

    VOS_STATUS_E_INVAL:  Input parameters are invalid
    VOS_STATUS_E_FAULT:  Station ID is outside array boundaries or pointer
                         to TL cb is NULL ; access would cause a page fault
    VOS_STATUS_E_EXISTS: Station was not registered
    VOS_STATUS_SUCCESS:  Everything is good :)

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_SetSTAPriority
(
  v_PVOID_t            pvosGCtx,
  v_U8_t               ucSTAId,
  WLANTL_STAPriorityType   tlSTAPri
)
{
  WLANTL_CbType*        pTLCb = NULL;
  WLANTL_STAClientType* pClientSTA = NULL;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Sanity check
   ------------------------------------------------------------------------*/
  if ( WLANTL_STA_ID_INVALID( ucSTAId ) )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
       "WLAN TL:Invalid station id requested on WLANTL_SetSTAPriority"));
    return VOS_STATUS_E_FAULT;
  }

  /*------------------------------------------------------------------------
    Extract TL control block
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
      "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_SetSTAPriority"));
    return VOS_STATUS_E_FAULT;
  }

  pClientSTA = pTLCb->atlSTAClients[ucSTAId];

  if ( NULL == pClientSTA )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL:Client Memory was not allocated on %s", __func__));
    return VOS_STATUS_E_FAILURE;
  }

  if ( 0 == pClientSTA->ucExists )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
    "WLAN TL:Station was not previously registered on WLANTL_SetSTAPriority"));
    return VOS_STATUS_E_EXISTS;
  }

  /*------------------------------------------------------------------------
    Re-analize if lock is needed when adding multiple stations
   ------------------------------------------------------------------------*/
  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
             "WLAN TL:Changing state for STA Pri ID: %d from %d to %d",
             ucSTAId, pClientSTA->tlPri, tlSTAPri));
  pClientSTA->tlPri = tlSTAPri;

  return VOS_STATUS_SUCCESS;
}/* WLANTL_SetSTAPriority */


/*----------------------------------------------------------------------------
    INTERACTION WITH BAP
 ---------------------------------------------------------------------------*/

/*==========================================================================

  FUNCTION    WLANTL_RegisterBAPClient

  DESCRIPTION
    Called by SME to register itself as client for non-data BT-AMP packets.

  DEPENDENCIES
    TL must be initialized before this function can be called.

  PARAMETERS

    IN
    pvosGCtx:       pointer to the global vos context; a handle to TL's
                    or SME's control block can be extracted from its context
    pfnTlBAPRxFrm:  pointer to the receive processing routine for non-data
                    BT-AMP packets
    pfnFlushOpCompleteCb:
                    pointer to the call back function, for the Flush
                    operation completion.
RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_INVAL: Input parameters are invalid VOS_STATUS_E_FAULT: Station ID is outside array boundaries or pointer to TL cb is NULL ; access would cause a page fault VOS_STATUS_E_EXISTS: BAL client was already registered VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANTL_RegisterBAPClient ( v_PVOID_t pvosGCtx, WLANTL_BAPRxCBType pfnTlBAPRxFrm, WLANTL_FlushOpCompCBType pfnFlushOpCompleteCb ) { WLANTL_CbType* pTLCb = NULL; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Sanity check ------------------------------------------------------------------------*/ if ( NULL == pfnTlBAPRxFrm ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid parameter sent on WLANTL_RegisterBAPClient")); return VOS_STATUS_E_INVAL; } if ( NULL == pfnFlushOpCompleteCb ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "Invalid Flush Complete Cb parameter sent on WLANTL_RegisterBAPClient")); return VOS_STATUS_E_INVAL; } /*------------------------------------------------------------------------ Extract TL control block ------------------------------------------------------------------------*/ pTLCb = VOS_GET_TL_CB(pvosGCtx); if ( NULL == pTLCb ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_RegisterBAPClient")); return VOS_STATUS_E_FAULT; } /*------------------------------------------------------------------------ Make sure this is the first registration attempt ------------------------------------------------------------------------*/ if ( 0 != pTLCb->tlBAPClient.ucExists ) { pTLCb->tlBAPClient.ucExists++; TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:BAP client was already registered")); return 
VOS_STATUS_E_EXISTS; } /*------------------------------------------------------------------------ Register station with TL ------------------------------------------------------------------------*/ TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL:Registering BAP Client" )); pTLCb->tlBAPClient.ucExists++; if ( NULL != pfnTlBAPRxFrm ) { pTLCb->tlBAPClient.pfnTlBAPRx = pfnTlBAPRxFrm; } pTLCb->tlBAPClient.pfnFlushOpCompleteCb = pfnFlushOpCompleteCb; pTLCb->tlBAPClient.vosPendingDataBuff = NULL; return VOS_STATUS_SUCCESS; }/* WLANTL_RegisterBAPClient */ /*========================================================================== FUNCTION WLANTL_TxBAPFrm DESCRIPTION BAP calls this when it wants to send a frame to the module DEPENDENCIES BAP must be registered with TL before this function can be called. RESTRICTION: BAP CANNOT push any packets to TL until it did not receive a tx complete from the previous packet, that means BAP sends one packet, wait for tx complete and then sends another one If BAP sends another packet before TL manages to process the previously sent packet call will end in failure PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to TL's or BAP's control block can be extracted from its context vosDataBuff: pointer to the vOSS buffer containing the packet to be transmitted pMetaInfo: meta information about the packet pfnTlBAPTxComp: pointer to a transmit complete routine for notifying the result of the operation over the bus RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to TL cb is NULL ; access would cause a page fault VOS_STATUS_E_EXISTS: BAL client was not yet registered VOS_STATUS_E_BUSY: The previous BT-AMP packet was not yet transmitted VOS_STATUS_SUCCESS: Everything is good :) Other failure messages may be returned from the BD header handling routines, please check apropriate API for more info. 
  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_TxBAPFrm
(
  v_PVOID_t               pvosGCtx,
  vos_pkt_t*              vosDataBuff,
  WLANTL_MetaInfoType*    pMetaInfo,
  WLANTL_TxCompCBType     pfnTlBAPTxComp
)
{
  WLANTL_CbType*  pTLCb = NULL;
  VOS_STATUS      vosStatus = VOS_STATUS_SUCCESS;
  v_MACADDR_t     vDestMacAddr;
  v_U16_t         usPktLen;
  v_U8_t          ucStaId = 0;
  v_U8_t          extraHeadSpace = 0;
  v_U8_t          ucWDSEnabled = 0;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Sanity check
    Extract TL control block
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
             "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_TxBAPFrm"));
    return VOS_STATUS_E_FAULT;
  }

  /*------------------------------------------------------------------------
    Ensure that BAP client was registered previously
   ------------------------------------------------------------------------*/
  if (( 0 == pTLCb->tlBAPClient.ucExists ) ||
      ( WLANTL_STA_ID_INVALID(pTLCb->tlBAPClient.ucBAPSTAId) ))
  {
    /* NOTE(review): ucExists is incremented even though the client is NOT
       registered here; looks like a counting quirk inherited from the
       registration path — confirm before changing. */
    pTLCb->tlBAPClient.ucExists++;
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
             "WLAN TL:BAP client not register on WLANTL_TxBAPFrm"));
    return VOS_STATUS_E_EXISTS;
  }

  /*------------------------------------------------------------------------
    Check if any BT-AMP Frm is pending
    (only one BT-AMP frame may be in flight at a time, per the RESTRICTION
    in the function header)
   ------------------------------------------------------------------------*/
  if ( NULL != pTLCb->tlBAPClient.vosPendingDataBuff )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL:BT-AMP Frame already pending tx in TL on WLANTL_TxBAPFrm"));
    return VOS_STATUS_E_BUSY;
  }

  /*------------------------------------------------------------------------
    Save buffer and notify BAL; no lock is needed if the above restriction
    is met
    Save the tx complete fnct pointer as tl specific data in the vos buffer
   ------------------------------------------------------------------------*/

  /*------------------------------------------------------------------------
    Translate 802.3 frame to 802.11
   ------------------------------------------------------------------------*/
  ucStaId = pTLCb->tlBAPClient.ucBAPSTAId;
  if ( NULL == pTLCb->atlSTAClients[ucStaId] )
  {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
          "WLAN TL:Client Memory was not allocated on %s", __func__));
      return VOS_STATUS_E_FAILURE;
  }
  if (( 0 == pMetaInfo->ucDisableFrmXtl ) &&
      ( 0 != pTLCb->atlSTAClients[ucStaId]->wSTADesc.ucSwFrameTXXlation ))
  {
    /* NOTE(review): &vosStatus is passed as the second argument AND used
       as the return target — the helper apparently writes a secondary
       status through this pointer; confirm against its prototype. */
    vosStatus = WLANTL_Translate8023To80211Header( vosDataBuff, &vosStatus,
                                                   pTLCb, &ucStaId,
                                                   pMetaInfo, &ucWDSEnabled,
                                                   &extraHeadSpace);
    if ( VOS_STATUS_SUCCESS != vosStatus )
    {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
                "WLAN TL:Error when translating header WLANTL_TxBAPFrm"));
      return vosStatus;
    }

    /* Header is now 802.11; prevent a second translation downstream. */
    pMetaInfo->ucDisableFrmXtl = 1;
  }

  /*-------------------------------------------------------------------------
    Call HAL to fill BD header
   -------------------------------------------------------------------------*/

  /* Adding Type, SubType which was missing for EAPOL from BAP */
  pMetaInfo->ucType |= (WLANTL_80211_DATA_TYPE << 4);
  pMetaInfo->ucType |= (WLANTL_80211_DATA_QOS_SUBTYPE);

  vosStatus = WDA_DS_BuildTxPacketInfo( pvosGCtx, vosDataBuff , &vDestMacAddr,
                  pMetaInfo->ucDisableFrmXtl, &usPktLen,
                  pTLCb->atlSTAClients[ucStaId]->wSTADesc.ucQosEnabled,
                  ucWDSEnabled, extraHeadSpace, pMetaInfo->ucType,
                  &pTLCb->atlSTAClients[ucStaId]->wSTADesc.vSelfMACAddress,
                  pMetaInfo->ucTID, 0 /* No ACK */, pMetaInfo->usTimeStamp,
                  pMetaInfo->ucIsEapol || pMetaInfo->ucIsWai, pMetaInfo->ucUP,
                  pMetaInfo->ucTxBdToken);

  if ( VOS_STATUS_SUCCESS != vosStatus )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
              "WLAN TL:Failed while building TX header %d", vosStatus));
    return vosStatus;
  }

  /* Stash the caller's tx-complete callback (or a default no-op handler)
     inside the packet so the DMA completion path can find it. */
  if ( NULL != pfnTlBAPTxComp )
  {
    vos_pkt_set_user_data_ptr( vosDataBuff, VOS_PKT_USER_DATA_ID_TL,
                               (v_PVOID_t)pfnTlBAPTxComp);
  }
  else
  {
    vos_pkt_set_user_data_ptr( vosDataBuff, VOS_PKT_USER_DATA_ID_TL,
                               (v_PVOID_t)WLANTL_TxCompDefaultCb);
  }

  /* Publish the pending buffer atomically; the fetch path consumes it. */
  vos_atomic_set( (uintptr_t*)&pTLCb->tlBAPClient.vosPendingDataBuff,
                      (uintptr_t)vosDataBuff);

  /*------------------------------------------------------------------------
    Check if thre are enough resources for transmission and tx is not
    suspended.
   ------------------------------------------------------------------------*/
  if (( pTLCb->uResCount >=  WDA_TLI_MIN_RES_BAP ) &&
      ( 0 == pTLCb->ucTxSuspended ))
  {
    WDA_DS_StartXmit(pvosGCtx);
  }
  else
  {
    /*---------------------------------------------------------------------
      No error code is sent because TL will resume tx autonomously if
      resources become available or tx gets resumed
     ---------------------------------------------------------------------*/
    TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
        "WLAN TL:Request to send from BAP but condition not met.Res: %d,"
        "Suspend: %d", pTLCb->uResCount, pTLCb->ucTxSuspended ));
  }

  return VOS_STATUS_SUCCESS;
}/* WLANTL_TxBAPFrm */


/*----------------------------------------------------------------------------
    INTERACTION WITH SME
 ---------------------------------------------------------------------------*/

/*==========================================================================

  FUNCTION    WLANTL_GetRssi

  DESCRIPTION
    TL will extract the RSSI information from every data packet from the
    ongoing traffic and will store it. It will provide the result to SME
    upon request.

  DEPENDENCIES

    WARNING: the read and write of this value will not be protected
             by locks, therefore the information obtained after a read
             might not always be consistent.
PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to TL's or SME's control block can be extracted from its context ucSTAId: station identifier for the requested value OUT puRssi: the average value of the RSSI RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_INVAL: Input parameters are invalid VOS_STATUS_E_FAULT: Station ID is outside array boundaries or pointer to TL cb is NULL ; access would cause a page fault VOS_STATUS_E_EXISTS: STA was not yet registered VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANTL_GetRssi ( v_PVOID_t pvosGCtx, v_U8_t ucSTAId, v_S7_t* pRssi ) { WLANTL_CbType* pTLCb = NULL; WLANTL_STAClientType* pClientSTA = NULL; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Sanity check ------------------------------------------------------------------------*/ if ( NULL == pRssi ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid parameter sent on WLANTL_GetRssi")); return VOS_STATUS_E_INVAL; } if ( WLANTL_STA_ID_INVALID( ucSTAId ) ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid station id requested on WLANTL_GetRssi")); return VOS_STATUS_E_FAULT; } /*------------------------------------------------------------------------ Extract TL control block and check existance ------------------------------------------------------------------------*/ pTLCb = VOS_GET_TL_CB(pvosGCtx); if ( NULL == pTLCb ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_GetRssi")); return VOS_STATUS_E_FAULT; } pClientSTA = pTLCb->atlSTAClients[ucSTAId]; if ( NULL == pClientSTA ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Client Memory was not allocated on %s", __func__)); return 
VOS_STATUS_E_FAILURE; } if ( 0 == pClientSTA->ucExists ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Station was not previously registered on WLANTL_GetRssi")); return VOS_STATUS_E_EXISTS; } /*------------------------------------------------------------------------ Copy will not be locked; please read restriction ------------------------------------------------------------------------*/ if(pTLCb->isBMPS || IS_ACTIVEMODE_OFFLOAD_FEATURE_ENABLE) { *pRssi = pClientSTA->rssiAvgBmps; /* Check If RSSI is zero because we are reading rssAvgBmps updated by HAL in previous GetStatsRequest. It may be updated as zero by Hal because EnterBmps might not have happend by that time. Hence reading the most recent Rssi calcluated by TL*/ if(0 == *pRssi) { *pRssi = pClientSTA->rssiAvg; } } else { *pRssi = pClientSTA->rssiAvg; } TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL:WLANTL_GetRssi for STA: %d RSSI: %d%s", ucSTAId, *pRssi, pTLCb->isBMPS ? " in BMPS" : "")); return VOS_STATUS_SUCCESS; }/* WLANTL_GetRssi */ /*========================================================================== FUNCTION WLANTL_GetSnr DESCRIPTION TL will extract the SNR information from every data packet from the ongoing traffic and will store it. It will provide the result to SME upon request. DEPENDENCIES WARNING: the read and write of this value will not be protected by locks, therefore the information obtained after a read might not always be consistent. 
PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to TL's or SME's control block can be extracted from its context ucSTAId: station identifier for the requested value OUT pSnr: the average value of the SNR RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_INVAL: Input parameters are invalid VOS_STATUS_E_FAULT: Station ID is outside array boundaries or pointer to TL cb is NULL ; access would cause a page fault VOS_STATUS_E_EXISTS: STA was not yet registered VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANTL_GetSnr ( tANI_U8 ucSTAId, tANI_S8* pSnr ) { WLANTL_CbType* pTLCb = NULL; WLANTL_STAClientType* pClientSTA = NULL; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Sanity check ------------------------------------------------------------------------*/ if (NULL == pSnr) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid parameter sent on %s", __func__)); return VOS_STATUS_E_INVAL; } if (WLANTL_STA_ID_INVALID(ucSTAId)) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid station id requested on %s", __func__)); return VOS_STATUS_E_FAULT; } /*------------------------------------------------------------------------ Extract TL control block and check existance ------------------------------------------------------------------------*/ pTLCb = VOS_GET_TL_CB(vos_get_global_context(VOS_MODULE_ID_TL, NULL)); if (NULL == pTLCb) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL pointer from pvosGCtx on %s", __func__)); return VOS_STATUS_E_FAULT; } pClientSTA = pTLCb->atlSTAClients[ucSTAId]; if (NULL == pClientSTA) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Client Memory was not allocated on %s", __func__)); return 
VOS_STATUS_E_FAILURE; } if (0 == pClientSTA->ucExists) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Station was not previously registered on %s", __func__)); return VOS_STATUS_E_EXISTS; } /*------------------------------------------------------------------------ Copy will not be locked; please read restriction ------------------------------------------------------------------------*/ if (pTLCb->isBMPS) { *pSnr = pClientSTA->snrAvgBmps; } else { /* SNR is averaged over WLANTL_MAX_SNR_DATA_SAMPLES, if there are not enough * data samples (snridx) to calculate the average then return the * average for the window of prevoius 20 packets. And if there aren't * enough samples and the average for previous window of 20 packets is * not available then return a predefined value * * NOTE: the SNR_HACK_BMPS value is defined to 127, documents from HW * team reveal that the SNR value has a ceiling well below 127 dBm, * so if SNR has value of 127 the userspace applications can know that * the SNR has not been computed yet because enough data was not * available for SNR calculation */ if (pClientSTA->snrIdx > (WLANTL_MAX_SNR_DATA_SAMPLES/2) || !(pClientSTA->prevSnrAvg)) { *pSnr = pClientSTA->snrSum / pClientSTA->snrIdx; } else if (pClientSTA->prevSnrAvg) { *pSnr = pClientSTA->prevSnrAvg; } else { *pSnr = SNR_HACK_BMPS; } } VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL:WLANTL_GetSnr for STA: %d SNR: %d%s", ucSTAId, *pSnr, pTLCb->isBMPS ? " in BMPS" : ""); return VOS_STATUS_SUCCESS; }/* WLANTL_GetSnr */ /*========================================================================== FUNCTION WLANTL_GetLinkQuality DESCRIPTION TL will extract the SNR information from every data packet from the ongoing traffic and will store it. It will provide the result to SME upon request. DEPENDENCIES WARNING: the read and write of this value will not be protected by locks, therefore the information obtained after a read might not always be consistent. 
PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to TL's or SME's control block can be extracted from its context ucSTAId: station identifier for the requested value OUT puLinkQuality: the average value of the SNR RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_INVAL: Input parameters are invalid VOS_STATUS_E_FAULT: Station ID is outside array boundaries or pointer to TL cb is NULL ; access would cause a page fault VOS_STATUS_E_EXISTS: STA was not yet registered VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANTL_GetLinkQuality ( v_PVOID_t pvosGCtx, v_U8_t ucSTAId, v_U32_t* puLinkQuality ) { WLANTL_CbType* pTLCb = NULL; WLANTL_STAClientType* pClientSTA = NULL; /*------------------------------------------------------------------------ Sanity check ------------------------------------------------------------------------*/ if ( NULL == puLinkQuality ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "Invalid parameter sent on WLANTL_GetLinkQuality")); return VOS_STATUS_E_INVAL; } if ( WLANTL_STA_ID_INVALID( ucSTAId ) ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "Invalid station id requested on WLANTL_GetLinkQuality")); return VOS_STATUS_E_FAULT; } /*------------------------------------------------------------------------ Extract TL control block and check existance ------------------------------------------------------------------------*/ pTLCb = VOS_GET_TL_CB(pvosGCtx); if ( NULL == pTLCb ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "Invalid TL pointer from pvosGCtx on WLANTL_GetLinkQuality")); return VOS_STATUS_E_FAULT; } pClientSTA = pTLCb->atlSTAClients[ucSTAId]; if ( NULL == pClientSTA ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Client Memory was not allocated on %s", __func__)); return VOS_STATUS_E_FAILURE; } if ( 0 == 
pClientSTA->ucExists ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "Station was not previously registered on WLANTL_GetLinkQuality")); return VOS_STATUS_E_EXISTS; } /*------------------------------------------------------------------------ Copy will not be locked; please read restriction ------------------------------------------------------------------------*/ *puLinkQuality = pClientSTA->uLinkQualityAvg; TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLANTL_GetLinkQuality for STA: %d LinkQuality: %d", ucSTAId, *puLinkQuality)); return VOS_STATUS_SUCCESS; }/* WLANTL_GetLinkQuality */ /*========================================================================== FUNCTION WLANTL_FlushStaTID DESCRIPTION TL provides this API as an interface to SME (BAP) layer. TL inturn posts a message to HAL. This API is called by the SME inorder to perform a flush operation. DEPENDENCIES PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to TL's or SME's control block can be extracted from its context ucSTAId: station identifier for the requested value ucTid: Tspec ID for the new BA session OUT The response for this post is received in the main thread, via a response message from HAL to TL. 
RETURN VALUE VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANTL_FlushStaTID ( v_PVOID_t pvosGCtx, v_U8_t ucSTAId, v_U8_t ucTid ) { WLANTL_CbType* pTLCb = NULL; tpFlushACReq FlushACReqPtr = NULL; vos_msg_t vosMessage; if ( WLANTL_STA_ID_INVALID( ucSTAId ) ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "Invalid station id requested on WLANTL_FlushStaTID")); return VOS_STATUS_E_FAULT; } /*------------------------------------------------------------------------ Extract TL control block and check existance ------------------------------------------------------------------------*/ pTLCb = VOS_GET_TL_CB(pvosGCtx); if ( NULL == pTLCb ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "Invalid TL pointer from pvosGCtx on WLANTL_FlushStaTID")); return VOS_STATUS_E_FAULT; } if ( NULL == pTLCb->atlSTAClients[ucSTAId] ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Client Memory was not allocated on %s", __func__)); return VOS_STATUS_E_FAILURE; } if ( 0 == pTLCb->atlSTAClients[ucSTAId]->ucExists ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "Station was not previously registered on WLANTL_FlushStaTID")); return VOS_STATUS_E_EXISTS; } /*------------------------------------------------------------------------ We need to post a message with the STA, TID value to HAL. 
HAL performs the flush ------------------------------------------------------------------------*/ FlushACReqPtr = vos_mem_malloc(sizeof(tFlushACReq)); if ( NULL == FlushACReqPtr ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL: fatal failure, cannot allocate Flush Req structure")); VOS_ASSERT(0); return VOS_STATUS_E_NOMEM; } // Start constructing the message for HAL FlushACReqPtr->mesgType = SIR_TL_HAL_FLUSH_AC_REQ; FlushACReqPtr->mesgLen = sizeof(tFlushACReq); FlushACReqPtr->mesgLen = sizeof(tFlushACReq); FlushACReqPtr->ucSTAId = ucSTAId; FlushACReqPtr->ucTid = ucTid; vosMessage.type = WDA_TL_FLUSH_AC_REQ; vosMessage.bodyptr = (void *)FlushACReqPtr; vos_mq_post_message(VOS_MQ_ID_WDA, &vosMessage); return VOS_STATUS_SUCCESS; } /*---------------------------------------------------------------------------- INTERACTION WITH PE ---------------------------------------------------------------------------*/ /*========================================================================== FUNCTION WLANTL_updateSpoofMacAddr DESCRIPTION Called by HDD to update macaddr DEPENDENCIES TL must be initialized before this API can be called. 
PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to TL's control block can be extracted from its context spoofMacAddr: spoofed mac adderess selfMacAddr: self Mac Address RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_INVAL: Input parameters are invalid VOS_STATUS_E_FAULT: pointer to TL cb is NULL ; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANTL_updateSpoofMacAddr ( v_PVOID_t pvosGCtx, v_MACADDR_t* spoofMacAddr, v_MACADDR_t* selfMacAddr ) { WLANTL_CbType* pTLCb = NULL; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Extract TL control block ------------------------------------------------------------------------*/ pTLCb = VOS_GET_TL_CB(pvosGCtx); if ( NULL == pTLCb ) { VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_ChangeSTAState"); return VOS_STATUS_E_FAULT; } vos_mem_copy(pTLCb->spoofMacAddr.selfMac.bytes, selfMacAddr, VOS_MAC_ADDRESS_LEN); vos_mem_copy(pTLCb->spoofMacAddr.spoofMac.bytes, spoofMacAddr, VOS_MAC_ADDRESS_LEN); VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_LOW, "TL: SelfSTA mac Addr for current Scan "MAC_ADDRESS_STR, MAC_ADDR_ARRAY(pTLCb->spoofMacAddr.selfMac.bytes)); return VOS_STATUS_SUCCESS; }/* WLANTL_updateSpoofMacAddr */ /*========================================================================== FUNCTION WLANTL_RegisterMgmtFrmClient DESCRIPTION Called by PE to register as a client for management frames delivery. DEPENDENCIES TL must be initialized before this API can be called. 
PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to TL's control block can be extracted from its context pfnTlMgmtFrmRx: pointer to the receive processing routine for management frames RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_INVAL: Input parameters are invalid VOS_STATUS_E_FAULT: pointer to TL cb is NULL ; access would cause a page fault VOS_STATUS_E_EXISTS: Mgmt Frame client was already registered VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANTL_RegisterMgmtFrmClient ( v_PVOID_t pvosGCtx, WLANTL_MgmtFrmRxCBType pfnTlMgmtFrmRx ) { WLANTL_CbType* pTLCb = NULL; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Sanity check ------------------------------------------------------------------------*/ if ( NULL == pfnTlMgmtFrmRx ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid parameter sent on WLANTL_RegisterMgmtFrmClient")); return VOS_STATUS_E_INVAL; } /*------------------------------------------------------------------------ Extract TL control block ------------------------------------------------------------------------*/ pTLCb = VOS_GET_TL_CB(pvosGCtx); if ( NULL == pTLCb ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_ChangeSTAState")); return VOS_STATUS_E_FAULT; } /*------------------------------------------------------------------------ Make sure this is the first registration attempt ------------------------------------------------------------------------*/ if ( 0 != pTLCb->tlMgmtFrmClient.ucExists ) { pTLCb->tlMgmtFrmClient.ucExists++; TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Management frame client was already registered")); return VOS_STATUS_E_EXISTS; } 
/*------------------------------------------------------------------------ Register station with TL ------------------------------------------------------------------------*/ TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL:Registering Management Frame Client" )); pTLCb->tlMgmtFrmClient.ucExists++; if ( NULL != pfnTlMgmtFrmRx ) { pTLCb->tlMgmtFrmClient.pfnTlMgmtFrmRx = pfnTlMgmtFrmRx; } pTLCb->tlMgmtFrmClient.vosPendingDataBuff = NULL; return VOS_STATUS_SUCCESS; }/* WLANTL_RegisterMgmtFrmClient */ /*========================================================================== FUNCTION WLANTL_DeRegisterMgmtFrmClient DESCRIPTION Called by PE to deregister as a client for management frames delivery. DEPENDENCIES TL must be initialized before this API can be called. PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to TL's control block can be extracted from its context RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to TL cb is NULL ; access would cause a page fault VOS_STATUS_E_EXISTS: Mgmt Frame client was never registered VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANTL_DeRegisterMgmtFrmClient ( v_PVOID_t pvosGCtx ) { WLANTL_CbType* pTLCb = NULL; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Extract TL control block ------------------------------------------------------------------------*/ pTLCb = VOS_GET_TL_CB(pvosGCtx); if ( NULL == pTLCb ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_ChangeSTAState")); return VOS_STATUS_E_FAULT; } /*------------------------------------------------------------------------ Make sure this is the first registration attempt 
------------------------------------------------------------------------*/ if ( 0 == pTLCb->tlMgmtFrmClient.ucExists ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Management frame client was never registered")); return VOS_STATUS_E_EXISTS; } /*------------------------------------------------------------------------ Clear registration with TL ------------------------------------------------------------------------*/ TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL:Deregistering Management Frame Client" )); pTLCb->tlMgmtFrmClient.pfnTlMgmtFrmRx = WLANTL_MgmtFrmRxDefaultCb; if ( NULL != pTLCb->tlMgmtFrmClient.vosPendingDataBuff) { TLLOGW(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN, "WLAN TL:Management cache buffer not empty on deregistering" " - dropping packet" )); vos_pkt_return_packet(pTLCb->tlMgmtFrmClient.vosPendingDataBuff); pTLCb->tlMgmtFrmClient.vosPendingDataBuff = NULL; } pTLCb->tlMgmtFrmClient.ucExists = 0; return VOS_STATUS_SUCCESS; }/* WLANTL_RegisterMgmtFrmClient */ /*========================================================================== FUNCTION WLANTL_TxMgmtFrm DESCRIPTION Called by PE when it want to send out a management frame. HAL will also use this API for the few frames it sends out, they are not management frames howevere it is accepted that an exception will be allowed ONLY for the usage of HAL. Generic data frames SHOULD NOT travel through this function. DEPENDENCIES TL must be initialized before this API can be called. RESTRICTION: If PE sends another packet before TL manages to process the previously sent packet call will end in failure Frames comming through here must be 802.11 frames, frame translation in UMA will be automatically disabled. 
PARAMETERS IN pvosGCtx: pointer to the global vos context;a handle to TL's control block can be extracted from its context vosFrmBuf: pointer to a vOSS buffer containing the management frame to be transmitted usFrmLen: the length of the frame to be transmitted; information is already included in the vOSS buffer wFrmType: the type of the frame being transmitted tid: tid used to transmit this frame pfnCompTxFunc: function pointer to the transmit complete routine pvBDHeader: pointer to the BD header, if NULL it means it was not yet constructed and it lies within TL's responsibility to do so; if not NULL it is expected that it was already packed inside the vos packet ucAckResponse: flag notifying it an interrupt is needed for the acknowledgement received when the frame is sent out the air and ; the interrupt will be processed by HAL, only one such frame can be pending in the system at one time. RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_INVAL: Input parameters are invalid VOS_STATUS_E_FAULT: pointer to TL cb is NULL ; access would cause a page fault VOS_STATUS_E_EXISTS: Mgmt Frame client was not yet registered VOS_STATUS_E_BUSY: The previous Mgmt packet was not yet transmitted VOS_STATUS_SUCCESS: Everything is good :) Other failure messages may be returned from the BD header handling routines, please check apropriate API for more info. 
  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_TxMgmtFrm
(
  v_PVOID_t            pvosGCtx,
  vos_pkt_t*           vosFrmBuf,
  v_U16_t              usFrmLen,
  v_U8_t               wFrmType,
  v_U8_t               ucTid,
  WLANTL_TxCompCBType  pfnCompTxFunc,
  v_PVOID_t            pvBDHeader,
  v_U32_t              ucAckResponse,
  v_U32_t              ucTxBdToken
)
{
  WLANTL_CbType*  pTLCb = NULL;
  v_MACADDR_t     vDestMacAddr;
  VOS_STATUS      vosStatus = VOS_STATUS_SUCCESS;
  v_U16_t         usPktLen;
  v_U32_t         usTimeStamp = 0;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Sanity check: the frame buffer is dereferenced before anything else
   ------------------------------------------------------------------------*/
  if ( NULL == vosFrmBuf )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
               "WLAN TL:Invalid parameter sent on WLANTL_TxMgmtFrm"));
    return VOS_STATUS_E_INVAL;
  }

  /*------------------------------------------------------------------------
    Extract TL control block
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
    TLLOGP(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_FATAL,
          "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_TxMgmtFrm"));
    return VOS_STATUS_E_FAULT;
  }

  /*------------------------------------------------------------------------
    Ensure that management frame client was previously registered
   ------------------------------------------------------------------------*/
  if ( 0 == pTLCb->tlMgmtFrmClient.ucExists )
  {
    /* NOTE(review): ucExists is incremented on this failure path, so the
       fatal log below fires only once per missing registration - confirm
       this is intentional before changing */
    pTLCb->tlMgmtFrmClient.ucExists++;
    TLLOGP(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_FATAL,
          "WLAN TL:Management Frame client not register on WLANTL_TxMgmtFrm"));
    return VOS_STATUS_E_EXISTS;
  }

  /*------------------------------------------------------------------------
    Check if any Mgmt Frm is pending: TL holds at most one management frame
    at a time (see RESTRICTION in the function header)
   ------------------------------------------------------------------------*/
  //vosTempBuff = pTLCb->tlMgmtFrmClient.vosPendingDataBuff;
  if ( NULL != pTLCb->tlMgmtFrmClient.vosPendingDataBuff )
  {
    TLLOGP(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_FATAL,
        "WLAN TL:Management Frame already pending tx in TL: failing old one"));

    /*Failing the tx for the previous packet enqued by PE*/
    //vos_atomic_set( (uintptr_t*)&pTLCb->tlMgmtFrmClient.vosPendingDataBuff,
    //                (uintptr_t)NULL);

    //vos_pkt_get_user_data_ptr( vosTempBuff, VOS_PKT_USER_DATA_ID_TL,
    //                           (v_PVOID_t)&pfnTxComp);

    /*it should never be NULL - default handler should be registered if none*/
    //if ( NULL == pfnTxComp )
    //{
    //  VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
    //             "NULL pointer to Tx Complete on WLANTL_TxMgmtFrm");
    //  VOS_ASSERT(0);
    //  return VOS_STATUS_E_FAULT;
    //}

    //pfnTxComp( pvosGCtx, vosTempBuff, VOS_STATUS_E_RESOURCES );
    //return VOS_STATUS_E_BUSY;

    //pfnCompTxFunc( pvosGCtx, vosFrmBuf, VOS_STATUS_E_RESOURCES);
    return VOS_STATUS_E_RESOURCES;
  }

  /*------------------------------------------------------------------------
    Check if BD header was build, if not construct
   ------------------------------------------------------------------------*/
  if ( NULL == pvBDHeader )
  {
    v_MACADDR_t*   pvAddr2MacAddr;
    v_U8_t         uQosHdr = VOS_FALSE;

    /* Get address 2 of Mangement Frame to give to WLANHAL_FillTxBd */
    vosStatus = vos_pkt_peek_data( vosFrmBuf,
                                   WLANTL_MAC_ADDR_ALIGN(1) + VOS_MAC_ADDR_SIZE,
                                   (v_PVOID_t)&pvAddr2MacAddr,
                                   VOS_MAC_ADDR_SIZE);
    if ( VOS_STATUS_SUCCESS != vosStatus )
    {
      TLLOGP(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_FATAL,
            "WLAN TL:Failed while attempting to get addr2 %d", vosStatus));
      return vosStatus;
    }

    /* ESE IAPP/TDLS Frame which are data frames but technically used
     * for management functionality comes through route.
     */
    if (WLANTL_IS_QOS_DATA_FRAME(wFrmType))
    {
      uQosHdr = VOS_TRUE;
    }

    if (WLANTL_IS_PROBE_REQ(wFrmType))
    {
      /* When MAC spoofing is active the probe request carries the spoofed
         address; substitute the real self MAC so the station-index lookup
         in WDA succeeds */
      if (VOS_TRUE == vos_mem_compare((v_VOID_t*) pvAddr2MacAddr,
          (v_VOID_t*) &pTLCb->spoofMacAddr.spoofMac, VOS_MAC_ADDRESS_LEN))
      {
        pvAddr2MacAddr = (v_PVOID_t)pTLCb->spoofMacAddr.selfMac.bytes;
        VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO,
          "TL: using self sta addr to get staidx for spoofed probe req "
           MAC_ADDRESS_STR, MAC_ADDR_ARRAY(pvAddr2MacAddr->bytes));
      }
    }

    /*----------------------------------------------------------------------
      Call WDA to build TX header
     ----------------------------------------------------------------------*/
    vosStatus = WDA_DS_BuildTxPacketInfo( pvosGCtx, vosFrmBuf ,
                  &vDestMacAddr, 1 /* always 802.11 frames*/,
                  &usPktLen, uQosHdr /*qos not enabled !!!*/,
                  0 /* WDS off */, 0, wFrmType, pvAddr2MacAddr, ucTid,
                  ucAckResponse, usTimeStamp, 0, 0, ucTxBdToken);

    if ( !VOS_IS_STATUS_SUCCESS(vosStatus) )
    {
      TLLOGP(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_FATAL,
            "WLAN TL:Failed while attempting to build TX header %d", vosStatus));
      return vosStatus;
    }
  }/* if BD header not present */

  /*------------------------------------------------------------------------
    Save buffer and notify BAL; no lock is needed if the above restriction
    is met
    Save the tx complete fnct pointer as tl specific data in the vos buffer
   ------------------------------------------------------------------------*/
  if ( NULL != pfnCompTxFunc )
  {
    vos_pkt_set_user_data_ptr( vosFrmBuf, VOS_PKT_USER_DATA_ID_TL,
                               (v_PVOID_t)pfnCompTxFunc);
  }
  else
  {
    /* Default completion handler so the pointer fetched on tx-complete is
       never NULL */
    vos_pkt_set_user_data_ptr( vosFrmBuf, VOS_PKT_USER_DATA_ID_TL,
                               (v_PVOID_t)WLANTL_TxCompDefaultCb);
  }

  vos_atomic_set( (uintptr_t*)&pTLCb->tlMgmtFrmClient.vosPendingDataBuff,
                  (uintptr_t)vosFrmBuf);

  /*------------------------------------------------------------------------
    Check if there are enough resources for transmission and tx is not
    suspended.
   ------------------------------------------------------------------------*/
  if ( pTLCb->uResCount >= WDA_TLI_MIN_RES_MF )
  {
    TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
              "WLAN TL:Issuing Xmit start request to BAL for MGMT"));
    vosStatus = WDA_DS_StartXmit(pvosGCtx);
    if(VOS_STATUS_SUCCESS != vosStatus)
    {
       TLLOGP(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_FATAL,
              "WLAN TL:WDA_DS_StartXmit fails. vosStatus %d", vosStatus));
       /* Drop the cached reference so a later send is not rejected as
          "already pending" */
       vos_atomic_set( (uintptr_t*)&pTLCb->tlMgmtFrmClient.vosPendingDataBuff,0);
    }
    return vosStatus;
  }
  else
  {
    /*---------------------------------------------------------------------
      No error code is sent because TL will resume tx autonomously if
      resources become available or tx gets resumed
     ---------------------------------------------------------------------*/
    TLLOGW(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
        "WLAN TL:Request to send for Mgmt Frm but condition not met. Res: %d",
               pTLCb->uResCount));
  }

  return VOS_STATUS_SUCCESS;
}/* WLANTL_TxMgmtFrm */


/*----------------------------------------------------------------------------
    INTERACTION WITH HAL
 ---------------------------------------------------------------------------*/

/*==========================================================================

  FUNCTION    WLANTL_ResetNotification

  DESCRIPTION
    HAL notifies TL when the module is being reset.
    Currently not used.
DEPENDENCIES PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to TL's control block can be extracted from its context RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to TL cb is NULL ; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANTL_ResetNotification ( v_PVOID_t pvosGCtx ) { WLANTL_CbType* pTLCb = NULL; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Sanity check Extract TL control block ------------------------------------------------------------------------*/ pTLCb = VOS_GET_TL_CB(pvosGCtx); if ( NULL == pTLCb ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_ResetNotification")); return VOS_STATUS_E_FAULT; } WLANTL_CleanCB(pTLCb, 1 /*empty all queues and pending packets*/); return VOS_STATUS_SUCCESS; }/* WLANTL_ResetNotification */ /*========================================================================== FUNCTION WLANTL_SuspendDataTx DESCRIPTION HAL calls this API when it wishes to suspend transmission for a particular STA. DEPENDENCIES The STA for which the request is made must be first registered with TL by HDD. 
    RESTRICTION: In case of a suspend, the flag write and read will not be
    locked: worst case scenario one more packet can get through before the
    flag gets updated (we can make this write atomic as well to guarantee
    consistency)

  PARAMETERS

   IN
   pvosGCtx:       pointer to the global vos context; a handle to TL's
                   control block can be extracted from its context
   pucSTAId:       identifier of the station for which the request is made;
                   a value of NULL assumes suspend on all active station
   pfnSuspendTxCB: pointer to the suspend result notification in case the
                   call is asynchronous

  RETURN VALUE
    The result code associated with performing the operation

    VOS_STATUS_E_FAULT:  Station ID is outside array boundaries or pointer
                         to TL cb is NULL ; access would cause a page fault
    VOS_STATUS_E_EXISTS: Station was not registered
    VOS_STATUS_SUCCESS:  Everything is good :)

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_SuspendDataTx
(
  v_PVOID_t             pvosGCtx,
  v_U8_t*               pucSTAId,
  WLANTL_SuspendCBType  pfnSuspendTx
)
{
  WLANTL_CbType* pTLCb = NULL;
  vos_msg_t      vosMsg;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Sanity check
    Extract TL control block
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_SuspendDataTx"));
    return VOS_STATUS_E_FAULT;
  }

  /*------------------------------------------------------------------------
    Check the type of request: generic suspend, or per station suspend
   ------------------------------------------------------------------------*/
  if (NULL == pucSTAId)
  {
    /* General Suspend Request received */
    TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
              "WLAN TL:General suspend requested"));
    /* See RESTRICTION above: the flag is written without a lock */
    vos_atomic_set_U8( &pTLCb->ucTxSuspended, 1);
    /* WLAN_MAX_STA_COUNT in "reserved" marks an all-stations suspend for
       the serialized handler */
    vosMsg.reserved = WLAN_MAX_STA_COUNT;
  }
  else
  {
    if ( WLANTL_STA_ID_INVALID( *pucSTAId ) )
    {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
         "WLAN TL:Invalid station id %d requested on WLANTL_SuspendDataTx",
             *pucSTAId));
      return VOS_STATUS_E_FAULT;
    }

    if ( NULL == pTLCb->atlSTAClients[*pucSTAId] )
    {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
          "WLAN TL:Invalid pTLCb->atlSTAClients pointer for STA Id :%d on "
          "WLANTL_SuspendDataTx", *pucSTAId));
      return VOS_STATUS_E_FAULT;
    }

    if ( 0 == pTLCb->atlSTAClients[*pucSTAId]->ucExists )
    {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
 "WLAN TL:Station %d was not previously registered on WLANTL_SuspendDataTx",
             *pucSTAId));
      return VOS_STATUS_E_EXISTS;
    }

    TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
           "WLAN TL:Suspend request for station: %d", *pucSTAId));
    vos_atomic_set_U8( &pTLCb->atlSTAClients[*pucSTAId]->ucTxSuspended, 1);
    vosMsg.reserved = *pucSTAId;
  }

  /*------------------------------------------------------------------------
    Serialize request through TX thread
   ------------------------------------------------------------------------*/
  vosMsg.type    = WLANTL_TX_SIG_SUSPEND;
  vosMsg.bodyptr = (v_PVOID_t)pfnSuspendTx;
  MTRACE(vos_trace(VOS_MODULE_ID_TL, TRACE_CODE_TL_SUSPEND_DATA_TX,
                   vosMsg.reserved , 0 ));

  if(!VOS_IS_STATUS_SUCCESS(vos_tx_mq_serialize( VOS_MQ_ID_TL, &vosMsg)))
  {
    /* Post failure is logged but not propagated: the suspend flag is
       already set above, only the async notification may be lost */
    VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
              " %s fails to post message", __func__);
  }

  return VOS_STATUS_SUCCESS;
}/* WLANTL_SuspendDataTx */

/*==========================================================================

  FUNCTION    WLANTL_ResumeDataTx

  DESCRIPTION
    Called by HAL to resume data transmission for a given STA.
    WARNING: If a station was individually suspended a global resume will
    not resume that station

  DEPENDENCIES

  PARAMETERS

   IN
   pvosGCtx:       pointer to the global vos context; a handle to TL's
                   control block can be extracted from its context
   pucSTAId:       identifier of the station which is being resumed; NULL
                   translates into global resume

  RETURN VALUE
    The result code associated with performing the operation

    VOS_STATUS_E_FAULT:  Station ID is outside array boundaries or pointer
                         to TL cb is NULL ; access would cause a page fault
    VOS_STATUS_E_EXISTS: Station was not registered
    VOS_STATUS_SUCCESS:  Everything is good :)

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_ResumeDataTx
(
  v_PVOID_t      pvosGCtx,
  v_U8_t*        pucSTAId
)
{
  WLANTL_CbType* pTLCb = NULL;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Sanity check
    Extract TL control block
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_ResumeDataTx"));
    return VOS_STATUS_E_FAULT;
  }

  /*------------------------------------------------------------------------
    Check to see the type of resume
   ------------------------------------------------------------------------*/
  if ( NULL == pucSTAId )
  {
    MTRACE(vos_trace(VOS_MODULE_ID_TL, TRACE_CODE_TL_RESUME_DATA_TX,
                     41 , 0 ));
    TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
              "WLAN TL:General resume requested"));
    vos_atomic_set_U8( &pTLCb->ucTxSuspended, 0);
  }
  else
  {
    MTRACE(vos_trace(VOS_MODULE_ID_TL, TRACE_CODE_TL_RESUME_DATA_TX,
                     *pucSTAId , 0 ));
    if ( WLANTL_STA_ID_INVALID( *pucSTAId ))
    {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
          "WLAN TL:Invalid station id %d requested on WLANTL_ResumeDataTx",
             *pucSTAId));
      return VOS_STATUS_E_FAULT;
    }

    if ( NULL == pTLCb->atlSTAClients[*pucSTAId] )
    {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
          "WLAN TL:Invalid pTLCb->atlSTAClients pointer for STA Id :%d on "
          "WLANTL_ResumeDataTx", *pucSTAId));
      return VOS_STATUS_E_FAULT;
    }

    if ( 0 == pTLCb->atlSTAClients[*pucSTAId]->ucExists )
    {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
  "WLAN TL:Station %d was not previously registered on WLANTL_ResumeDataTx",
             *pucSTAId));
      return VOS_STATUS_E_EXISTS;
    }

    TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
           "WLAN TL:Resume request for station: %d", *pucSTAId));
    vos_atomic_set_U8( &pTLCb->atlSTAClients[*pucSTAId]->ucTxSuspended, 0);
  }

  /*------------------------------------------------------------------------
    Resuming transmission: kick the xmit path only when resources are
    available and no global suspend is still in effect
   ------------------------------------------------------------------------*/
  if (( pTLCb->uResCount >=  WDA_TLI_MIN_RES_MF ) &&
      ( 0 == pTLCb->ucTxSuspended ))
  {
    TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
              "WLAN TL:Resuming transmission"));
    return WDA_DS_StartXmit(pvosGCtx);
  }

  return VOS_STATUS_SUCCESS;
}/* WLANTL_ResumeDataTx */

/*==========================================================================

  FUNCTION    WLANTL_SuspendCB

  DESCRIPTION
    Callback function for serializing Suspend signal through Tx thread

  DEPENDENCIES
    Just notify HAL that suspend in TL is complete.
PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to TL's control block can be extracted from its context pUserData: user data sent with the callback RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_INVAL: invalid input parameters VOS_STATUS_E_FAULT: pointer to TL cb is NULL ; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANTL_SuspendCB ( v_PVOID_t pvosGCtx, WLANTL_SuspendCBType pfnSuspendCB, v_U16_t usReserved ) { WLANTL_CbType* pTLCb = NULL; v_U8_t ucSTAId = (v_U8_t)usReserved; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Sanity check ------------------------------------------------------------------------*/ if ( NULL == pfnSuspendCB ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, "WLAN TL: No Call back processing requested WLANTL_SuspendCB")); return VOS_STATUS_SUCCESS; } /*------------------------------------------------------------------------ Extract TL control block ------------------------------------------------------------------------*/ pTLCb = VOS_GET_TL_CB(pvosGCtx); if ( NULL == pTLCb ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_SuspendCB")); return VOS_STATUS_E_FAULT; } if ( WLANTL_STA_ID_INVALID( ucSTAId ) ) { pfnSuspendCB(pvosGCtx, NULL, VOS_STATUS_SUCCESS); } else { pfnSuspendCB(pvosGCtx, &ucSTAId, VOS_STATUS_SUCCESS); } return VOS_STATUS_SUCCESS; }/*WLANTL_SuspendCB*/ /*---------------------------------------------------------------------------- CLIENT INDEPENDENT INTERFACE ---------------------------------------------------------------------------*/ /*========================================================================== FUNCTION WLANTL_GetTxPktCount DESCRIPTION TL will 
provide the number of transmitted packets counted per STA per TID. DEPENDENCIES PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to TL's control block can be extracted from its context ucSTAId: identifier of the station ucTid: identifier of the tspec OUT puTxPktCount: the number of packets tx packet for this STA and TID RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_INVAL: Input parameters are invalid VOS_STATUS_E_FAULT: Station ID is outside array boundaries or pointer to TL cb is NULL ; access would cause a page fault VOS_STATUS_E_EXISTS: Station was not registered VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANTL_GetTxPktCount ( v_PVOID_t pvosGCtx, v_U8_t ucSTAId, v_U8_t ucTid, v_U32_t* puTxPktCount ) { WLANTL_CbType* pTLCb = NULL; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Sanity check ------------------------------------------------------------------------*/ if ( NULL == puTxPktCount ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid parameter sent on WLANTL_GetTxPktCount")); return VOS_STATUS_E_INVAL; } if ( WLANTL_STA_ID_INVALID( ucSTAId ) || WLANTL_TID_INVALID( ucTid) ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid station id %d/tid %d requested on WLANTL_GetTxPktCount", ucSTAId, ucTid)); return VOS_STATUS_E_FAULT; } /*------------------------------------------------------------------------ Extract TL control block and check if station exists ------------------------------------------------------------------------*/ pTLCb = VOS_GET_TL_CB(pvosGCtx); if ( NULL == pTLCb ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_GetTxPktCount")); return VOS_STATUS_E_FAULT; } if ( NULL 
== pTLCb->atlSTAClients[ucSTAId] ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Client Memory was not allocated on %s", __func__)); return VOS_STATUS_E_FAILURE; } if ( 0 == pTLCb->atlSTAClients[ucSTAId]->ucExists ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Station was not previously registered on WLANTL_GetTxPktCount %d", ucSTAId)); return VOS_STATUS_E_EXISTS; } /*------------------------------------------------------------------------ Return data ------------------------------------------------------------------------*/ //VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_MED, // "WLAN TL:Requested tx packet count for STA: %d, TID: %d", // ucSTAId, ucTid); *puTxPktCount = pTLCb->atlSTAClients[ucSTAId]->auTxCount[ucTid]; return VOS_STATUS_SUCCESS; }/* WLANTL_GetTxPktCount */ /*========================================================================== FUNCTION WLANTL_GetRxPktCount DESCRIPTION TL will provide the number of received packets counted per STA per TID. 
  DEPENDENCIES

  PARAMETERS

   IN
   pvosGCtx:       pointer to the global vos context; a handle to TL's
                   control block can be extracted from its context
   ucSTAId:        identifier of the station
   ucTid:          identifier of the tspec

   OUT
   puRxPktCount:   the number of rx packets for this STA and TID

  RETURN VALUE
    The result code associated with performing the operation

    VOS_STATUS_E_INVAL:  Input parameters are invalid
    VOS_STATUS_E_FAULT:  Station ID is outside array boundaries or pointer
                         to TL cb is NULL ; access would cause a page fault
    VOS_STATUS_E_EXISTS: Station was not registered
    VOS_STATUS_SUCCESS:  Everything is good :)

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_GetRxPktCount
(
  v_PVOID_t      pvosGCtx,
  v_U8_t         ucSTAId,
  v_U8_t         ucTid,
  v_U32_t*       puRxPktCount
)
{
  WLANTL_CbType*        pTLCb      = NULL;
  WLANTL_STAClientType* pClientSTA = NULL;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Sanity check
   ------------------------------------------------------------------------*/
  if ( NULL == puRxPktCount )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
          "WLAN TL:Invalid parameter sent on WLANTL_GetRxPktCount"));
    return VOS_STATUS_E_INVAL;
  }

  if ( WLANTL_STA_ID_INVALID( ucSTAId ) || WLANTL_TID_INVALID( ucTid) )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
     "WLAN TL:Invalid station id %d/tid %d requested on WLANTL_GetRxPktCount",
           ucSTAId, ucTid));
    return VOS_STATUS_E_FAULT;
  }

  /*------------------------------------------------------------------------
    Extract TL control block and existance
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
         "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_GetRxPktCount"));
    return VOS_STATUS_E_FAULT;
  }

  pClientSTA = pTLCb->atlSTAClients[ucSTAId];

  if ( NULL == pClientSTA )
  {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
          "WLAN TL:Client Memory was not allocated on %s", __func__));
      return VOS_STATUS_E_FAILURE;
  }

  if ( 0 == pClientSTA->ucExists )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
    "WLAN TL:Station was not previously registered on WLANTL_GetRxPktCount"));
    return VOS_STATUS_E_EXISTS;
  }

  /*------------------------------------------------------------------------
    Return data
   ------------------------------------------------------------------------*/
  TLLOG3(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_MED,
            "WLAN TL:Requested rx packet count for STA: %d, TID: %d",
             ucSTAId, ucTid));
  *puRxPktCount = pClientSTA->auRxCount[ucTid];

  return VOS_STATUS_SUCCESS;
}/* WLANTL_GetRxPktCount */

VOS_STATUS WLANTL_TxFCFrame
(
  v_PVOID_t       pvosGCtx
);

/*==========================================================================

  FUNCTION    WLANTL_IsEAPOLPending

  DESCRIPTION
    HDD calls this function when hdd_tx_timeout occurs. This checks
    whether EAPOL is pending.

  DEPENDENCIES
    HDD must have registered with TL at least one STA before this function
    can be called.
PARAMETERS IN pvosGCtx: pointer to the global vos context RETURN VALUE The result code associated with performing the operation Success : Indicates EAPOL frame is pending and sta is in connected state Failure : EAPOL frame is not pending SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANTL_IsEAPOLPending ( v_PVOID_t pvosGCtx ) { WLANTL_CbType* pTLCb = NULL; v_U32_t i = 0; /*------------------------------------------------------------------------ Sanity check Extract TL control block ------------------------------------------------------------------------*/ pTLCb = VOS_GET_TL_CB(pvosGCtx); if (NULL == pTLCb) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL pointer for pvosGCtx")); return VOS_STATUS_E_FAILURE; } /*--------------------------------------------------------------------- Check to see if there was any EAPOL packet is pending *--------------------------------------------------------------------*/ for ( i = 0; i < WLAN_MAX_STA_COUNT; i++) { if ((NULL != pTLCb->atlSTAClients[i]) && (pTLCb->atlSTAClients[i]->ucExists) && (0 == pTLCb->atlSTAClients[i]->ucTxSuspended) && (WLANTL_STA_CONNECTED == pTLCb->atlSTAClients[i]->tlState) && (pTLCb->atlSTAClients[i]->ucPktPending) ) return VOS_STATUS_SUCCESS; } return VOS_STATUS_E_FAILURE; } /*============================================================================ TL INTERNAL API DEFINITION ============================================================================*/ /*========================================================================== FUNCTION WLANTL_GetFrames DESCRIPTION BAL calls this function at the request of the lower bus interface. When this request is being received TL will retrieve packets from HDD in accordance with the priority rules and the count supplied by BAL. DEPENDENCIES HDD must have registered with TL at least one STA before this function can be called. 
PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to TL's or BAL's control block can be extracted from its context uSize: maximum size accepted by the lower layer uFlowMask TX flow control mask for Prima. Each bit is defined as WDA_TXFlowEnumType OUT vosDataBuff: it will contain a pointer to the first buffer supplied by TL, if there is more than one packet supplied, TL will chain them through vOSS buffers RETURN VALUE The result code associated with performing the operation 1 or more: number of required resources if there are still frames to fetch 0 : error or HDD queues are drained SIDE EFFECTS NOTE Featurized uFlowMask. If we want to remove featurization, we need to change BAL on Volans. ============================================================================*/ v_U32_t WLANTL_GetFrames ( v_PVOID_t pvosGCtx, vos_pkt_t **ppFrameDataBuff, v_U32_t uSize, v_U8_t uFlowMask, v_BOOL_t* pbUrgent ) { vos_pkt_t** pvosDataBuff = (vos_pkt_t**)ppFrameDataBuff; WLANTL_CbType* pTLCb = NULL; WLANTL_STAClientType* pClientSTA = NULL; v_U32_t uRemaining = uSize; vos_pkt_t* vosRoot; vos_pkt_t* vosTempBuf; WLANTL_STAFuncType pfnSTAFsm; v_U16_t usPktLen; v_U32_t uResLen; v_U8_t ucSTAId; v_U8_t ucAC; vos_pkt_t* vosDataBuff; v_U32_t uTotalPktLen; v_U32_t i=0; v_U32_t j=0; v_U32_t ucResult = 0; VOS_STATUS vosStatus; WLANTL_STAEventType wSTAEvent; tBssSystemRole systemRole; tpAniSirGlobal pMac; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Sanity check Extract TL control block ------------------------------------------------------------------------*/ pTLCb = VOS_GET_TL_CB(pvosGCtx); if (( NULL == pTLCb ) || ( NULL == pvosDataBuff )) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_ChangeSTAState")); return ucResult; } pMac = vos_get_context(VOS_MODULE_ID_PE, pvosGCtx); if ( NULL == pMac ) { 
TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "%s: Invalid pMac", __func__)); return ucResult; } vosDataBuff = pTLCb->vosDummyBuf; /* Just to avoid checking for NULL at each iteration */ pTLCb->uResCount = uSize; /*----------------------------------------------------------------------- Save the root as we will walk this chain as we fill it -----------------------------------------------------------------------*/ vosRoot = vosDataBuff; /*----------------------------------------------------------------------- There is still data - until FSM function says otherwise -----------------------------------------------------------------------*/ pTLCb->bUrgent = FALSE; while (( pTLCb->tlConfigInfo.uMinFramesProcThres < pTLCb->uResCount ) && ( 0 < uRemaining )) { systemRole = wdaGetGlobalSystemRole(pMac); #ifdef WLAN_SOFTAP_FLOWCTRL_EN /* FIXME: The code has been disabled since it is creating issues in power save */ if (eSYSTEM_AP_ROLE == systemRole) { if (pTLCb->done_once == 0 && NULL == pTLCb->vosTxFCBuf) { WLANTL_TxFCFrame (pvosGCtx); pTLCb->done_once ++; } } if ( NULL != pTLCb->vosTxFCBuf ) { //there is flow control packet waiting to be sent WDA_TLI_PROCESS_FRAME_LEN( pTLCb->vosTxFCBuf, usPktLen, uResLen, uTotalPktLen); if ( ( pTLCb->uResCount > uResLen ) && ( uRemaining > uTotalPktLen ) && ( uFlowMask & ( 1 << WDA_TXFLOW_FC ) ) ) { TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL:Chaining FC frame first on GetFrame")); vos_pkt_chain_packet( vosDataBuff, pTLCb->vosTxFCBuf, 1 /*true*/ ); vos_atomic_set( (uintptr_t*)&pTLCb->vosTxFCBuf, (uintptr_t) NULL); /*FC frames cannot be delayed*/ pTLCb->bUrgent = TRUE; /*Update remaining len from SSC */ uRemaining -= (usPktLen + WDA_DXE_HEADER_SIZE); /*Update resource count */ pTLCb->uResCount -= uResLen; } else { TLLOGW(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN, "WLAN TL:send fc out of source %s", __func__)); ucResult = ( pTLCb->uResCount > uResLen )?VOS_TRUE:VOS_FALSE; break; /* Out of 
resources or reached max len */ } } else #endif //WLAN_SOFTAP_FLOWCTRL_EN if (( NULL != pTLCb->tlMgmtFrmClient.vosPendingDataBuff ) && ( uFlowMask & ( 1 << WDA_TXFLOW_MGMT ) ) ) { WDA_TLI_PROCESS_FRAME_LEN( pTLCb->tlMgmtFrmClient.vosPendingDataBuff, usPktLen, uResLen, uTotalPktLen); if (usPktLen > WLANTL_MAX_ALLOWED_LEN) { usPktLen = WLANTL_MAX_ALLOWED_LEN; VOS_ASSERT(0); } if ( ( pTLCb->uResCount > uResLen ) && ( uRemaining > uTotalPktLen ) && ( uFlowMask & ( 1 << WDA_TXFLOW_MGMT ) ) ) { TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL:Chaining management frame on GetFrame")); vos_pkt_chain_packet( vosDataBuff, pTLCb->tlMgmtFrmClient.vosPendingDataBuff, 1 /*true*/ ); vos_atomic_set( (uintptr_t*)&pTLCb->tlMgmtFrmClient. vosPendingDataBuff, (uintptr_t)NULL); /*management frames cannot be delayed*/ pTLCb->bUrgent = TRUE; /*Update remaining len from SSC */ uRemaining -= (usPktLen + WDA_DXE_HEADER_SIZE); /*Update resource count */ pTLCb->uResCount -= uResLen; } else { ucResult = ( pTLCb->uResCount > uResLen )?VOS_TRUE:VOS_FALSE; break; /* Out of resources or reached max len */ } } else if (( pTLCb->tlBAPClient.vosPendingDataBuff ) && ( WDA_TLI_MIN_RES_BAP <= pTLCb->uResCount ) && ( 0 == pTLCb->ucTxSuspended ) ) { WDA_TLI_PROCESS_FRAME_LEN( pTLCb->tlBAPClient.vosPendingDataBuff, usPktLen, uResLen, uTotalPktLen); if (usPktLen > WLANTL_MAX_ALLOWED_LEN) { usPktLen = WLANTL_MAX_ALLOWED_LEN; VOS_ASSERT(0); } if ( ( pTLCb->uResCount > (uResLen + WDA_TLI_MIN_RES_MF ) ) && ( uRemaining > uTotalPktLen )) { TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL:Chaining BT-AMP frame on GetFrame")); vos_pkt_chain_packet( vosDataBuff, pTLCb->tlBAPClient.vosPendingDataBuff, 1 /*true*/ ); /*BAP frames cannot be delayed*/ pTLCb->bUrgent = TRUE; vos_atomic_set( (uintptr_t*)&pTLCb->tlBAPClient.vosPendingDataBuff, (uintptr_t) NULL); /*Update remaining len from SSC */ uRemaining -= (usPktLen + WDA_DXE_HEADER_SIZE); /*Update resource count */ 
pTLCb->uResCount -= uResLen; } else { ucResult = uResLen + WDA_TLI_MIN_RES_MF; break; /* Out of resources or reached max len */ } } /* note: this feature implemented only after WLAN_INGETRATED_SOC */ /* search 'EAPOL_HI_PRIORITY' will show EAPOL HI_PRIORITY change in TL and WDI by default, EAPOL will be treated as higher priority, which means use mgmt_pool and DXE_TX_HI prority channel. this is introduced to address EAPOL failure under high background traffic with multi-channel concurrent mode. But this change works in SCC or standalone, too. see CR#387009 and WCNSOS-8 */ else if (( WDA_TLI_MIN_RES_MF <= pTLCb->uResCount )&& ( 0 == pTLCb->ucTxSuspended ) && ( uFlowMask & ( 1 << WDA_TXFLOW_MGMT ) ) ) { vosTempBuf = NULL; /*--------------------------------------------------------------------- Check to see if there was any EAPOL packet is pending *--------------------------------------------------------------------*/ for ( i = 0; i < WLAN_MAX_STA_COUNT; i++) { if ((NULL != pTLCb->atlSTAClients[i]) && (pTLCb->atlSTAClients[i]->ucExists) && (0 == pTLCb->atlSTAClients[i]->ucTxSuspended) && (WLANTL_STA_CONNECTED == pTLCb->atlSTAClients[i]->tlState) && (pTLCb->atlSTAClients[i]->ucPktPending) ) break; } if (i >= WLAN_MAX_STA_COUNT) { /* No More to Serve Exit Get Frames */ break; } /* Serve EAPOL frame with HI_FLOW_MASK */ ucSTAId = i; pClientSTA = pTLCb->atlSTAClients[ucSTAId]; MTRACE(vos_trace(VOS_MODULE_ID_TL, TRACE_CODE_TL_GET_FRAMES_EAPOL, ucSTAId, pClientSTA->tlState)); if (pClientSTA->wSTADesc.wSTAType == WLAN_STA_INFRA) { if(0 != pClientSTA->aucACMask[WLANTL_AC_HIGH_PRIO]) { pClientSTA->ucCurrentAC = WLANTL_AC_HIGH_PRIO; pTLCb->uCurServedAC = WLANTL_AC_HIGH_PRIO; } else break; } else { for (j = WLANTL_MAX_AC ; j > 0; j--) { if (0 != pClientSTA->aucACMask[j-1]) { pClientSTA->ucCurrentAC = j-1; pTLCb->uCurServedAC = j-1; break; } } } wSTAEvent = WLANTL_TX_EVENT; pfnSTAFsm = tlSTAFsm[pClientSTA->tlState]. 
pfnSTATbl[wSTAEvent]; if ( NULL != pfnSTAFsm ) { pClientSTA->ucNoMoreData = 0; vosStatus = pfnSTAFsm( pvosGCtx, ucSTAId, &vosTempBuf, VOS_FALSE); if (( VOS_STATUS_SUCCESS != vosStatus ) && ( NULL != vosTempBuf )) { pClientSTA->pfnSTATxComp( pvosGCtx, vosTempBuf, vosStatus ); vosTempBuf = NULL; break; }/* status success*/ } if (NULL != vosTempBuf) { WDA_TLI_PROCESS_FRAME_LEN( vosTempBuf, usPktLen, uResLen, uTotalPktLen); if (usPktLen > WLANTL_MAX_ALLOWED_LEN) { usPktLen = WLANTL_MAX_ALLOWED_LEN; VOS_ASSERT(0); } TLLOG4(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_LOW, "WLAN TL:Resources needed by frame: %d", uResLen)); if ( ( pTLCb->uResCount >= (uResLen + WDA_TLI_MIN_RES_MF ) ) && ( uRemaining > uTotalPktLen ) ) { TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL:Chaining data frame on GetFrame")); vos_pkt_chain_packet( vosDataBuff, vosTempBuf, 1 /*true*/ ); /*EAPOL frame cannot be delayed*/ pTLCb->bUrgent = TRUE; vosTempBuf = NULL; /*Update remaining len from SSC */ uRemaining -= (usPktLen + WDA_DXE_HEADER_SIZE); /*Update resource count */ pTLCb->uResCount -= uResLen; //fow control update pClientSTA->uIngress_length += uResLen; pClientSTA->uBuffThresholdMax = (pClientSTA->uBuffThresholdMax >= uResLen) ? 
(pClientSTA->uBuffThresholdMax - uResLen) : 0; pClientSTA->ucEapolPktPending = 0; VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL:GetFrames STA: %d EAPOLPktPending %d", ucSTAId, pClientSTA->ucEapolPktPending); } } else { // no EAPOL frames exit Get frames TLLOG2(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL:GetFrames STA: %d, no EAPOL frame, continue.", ucSTAId)); continue; } } else if (( WDA_TLI_MIN_RES_DATA <= pTLCb->uResCount ) && ( 0 == pTLCb->ucTxSuspended ) && ( uFlowMask & WLANTL_DATA_FLOW_MASK)) { /*--------------------------------------------------------------------- Check to see if there was any packet left behind previously due to size constraints ---------------------------------------------------------------------*/ vosTempBuf = NULL; if ( NULL != pTLCb->vosTempBuf ) { vosTempBuf = pTLCb->vosTempBuf; pTLCb->vosTempBuf = NULL; ucSTAId = pTLCb->ucCachedSTAId; ucAC = pTLCb->ucCachedAC; if ( NULL == pTLCb->atlSTAClients[ucSTAId] ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Client Memory was not allocated on %s", __func__)); continue; } pTLCb->atlSTAClients[ucSTAId]->ucNoMoreData = 0; pClientSTA = pTLCb->atlSTAClients[ucSTAId]; TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL:Chaining cached data frame on GetFrame")); } else { WLAN_TLGetNextTxIds( pvosGCtx, &ucSTAId); if (ucSTAId >= WLAN_MAX_STA_COUNT) { /* Packets start coming in even after insmod Without * starting Hostapd or Interface being up * During which cases STAID is invaled and hence the check. 
HalMsg_ScnaComplete Triggers */ break; } /* ucCurrentAC should have correct AC to be served by calling WLAN_TLGetNextTxIds */ pClientSTA = pTLCb->atlSTAClients[ucSTAId]; if ( NULL == pClientSTA ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Client Memory was not allocated on %s", __func__)); continue; } ucAC = pClientSTA->ucCurrentAC; pClientSTA->ucNoMoreData = 1; TLLOG4(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_LOW, "WLAN TL: %s get one data frame, station ID %d ", __func__, ucSTAId)); /*------------------------------------------------------------------- Check to see that STA is valid and tx is not suspended -------------------------------------------------------------------*/ if ( ( ! WLANTL_STA_ID_INVALID( ucSTAId ) ) && ( 0 == pClientSTA->ucTxSuspended ) && ( 0 == pClientSTA->fcStaTxDisabled) ) { TLLOG4(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_LOW, "WLAN TL: %s sta id valid and not suspended ",__func__)); wSTAEvent = WLANTL_TX_EVENT; pfnSTAFsm = tlSTAFsm[pClientSTA->tlState]. pfnSTATbl[wSTAEvent]; if ( NULL != pfnSTAFsm ) { pClientSTA->ucNoMoreData = 0; vosStatus = pfnSTAFsm( pvosGCtx, ucSTAId, &vosTempBuf, VOS_FALSE); if (( VOS_STATUS_SUCCESS != vosStatus ) && ( NULL != vosTempBuf )) { pClientSTA->pfnSTATxComp( pvosGCtx, vosTempBuf, vosStatus ); vosTempBuf = NULL; }/* status success*/ }/*NULL function state*/ }/* valid STA id and ! suspended*/ else { if ( ! 
WLANTL_STA_ID_INVALID( ucSTAId ) ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Not fetching frame because suspended for sta ID %d", ucSTAId)); } } }/* data */ if ( NULL != vosTempBuf ) { WDA_TLI_PROCESS_FRAME_LEN( vosTempBuf, usPktLen, uResLen, uTotalPktLen); if (usPktLen > WLANTL_MAX_ALLOWED_LEN) { usPktLen = WLANTL_MAX_ALLOWED_LEN; VOS_ASSERT(0); } TLLOG4(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_LOW, "WLAN TL:Resources needed by frame: %d", uResLen)); if ( ( pTLCb->uResCount >= (uResLen + WDA_TLI_MIN_RES_BAP ) ) && ( uRemaining > uTotalPktLen ) && ( uFlowMask & WLANTL_DATA_FLOW_MASK ) ) { TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL:Chaining data frame on GetFrame")); vos_pkt_chain_packet( vosDataBuff, vosTempBuf, 1 /*true*/ ); vosTempBuf = NULL; /*Update remaining len from SSC */ uRemaining -= (usPktLen + WDA_DXE_HEADER_SIZE); /*Update resource count */ pTLCb->uResCount -= uResLen; //fow control update pClientSTA->uIngress_length += uResLen; pClientSTA->uBuffThresholdMax = (pClientSTA->uBuffThresholdMax >= uResLen) ? 
(pClientSTA->uBuffThresholdMax - uResLen) : 0; } else { /* Store this for later tx - already fetched from HDD */ pTLCb->vosTempBuf = vosTempBuf; pTLCb->ucCachedSTAId = ucSTAId; pTLCb->ucCachedAC = ucAC; ucResult = uResLen + WDA_TLI_MIN_RES_BAP; TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "min %d res required by TL.", ucResult )); break; /* Out of resources or reached max len */ } } else { for ( i = 0; i < WLAN_MAX_STA_COUNT; i++) { if (NULL != pTLCb->atlSTAClients[i] && (pTLCb->atlSTAClients[i]->ucExists) && (pTLCb->atlSTAClients[i]->ucPktPending)) { /* There is station to be Served */ break; } } if (i >= WLAN_MAX_STA_COUNT) { /* No More to Serve Exit Get Frames */ break; } else { /* More to be Served */ continue; } } } else { TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL:Returning from GetFrame: resources = %d suspended = %d", pTLCb->uResCount, pTLCb->ucTxSuspended)); /* TL is starving even when DXE is not in low resource condition Return min resource number required and Let DXE deceide what to do */ if(( 0 == pTLCb->ucTxSuspended ) && ( uFlowMask & WLANTL_DATA_FLOW_MASK ) ) { TLLOG1(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, "WLAN TL:Returning from GetFrame: resources = %d", pTLCb->uResCount)); ucResult = WDA_TLI_MIN_RES_DATA; } break; /*out of min data resources*/ } pTLCb->usPendingTxCompleteCount++; /* Move data buffer up one packet */ vos_pkt_walk_packet_chain( vosDataBuff, &vosDataBuff, 0/*false*/ ); } /*---------------------------------------------------------------------- Packet chain starts at root + 1 ----------------------------------------------------------------------*/ vos_pkt_walk_packet_chain( vosRoot, &vosDataBuff, 1/*true*/ ); *pvosDataBuff = vosDataBuff; if (pbUrgent) { *pbUrgent = pTLCb->bUrgent; } else { VOS_ASSERT( pbUrgent ); } return ucResult; }/* WLANTL_GetFrames */ /*========================================================================== FUNCTION WLANTL_TxComp DESCRIPTION It is being 
  called by BAL upon asynchronous notification of the packet or packets
  being sent over the bus.

  DEPENDENCIES

  Tx complete cannot be called without a previous transmit.

  PARAMETERS

   IN
   pvosGCtx:       pointer to the global vos context; a handle to TL's
                   or BAL's control block can be extracted from its context
   vosDataBuff:    it will contain a pointer to the first buffer for which
                   the BAL report is being made; if there is more than one
                   packet they will be chained using vOSS buffers.
   wTxStatus:      the status of the transmitted packet, see above chapter
                   on HDD interaction for a list of possible values

  RETURN VALUE
  The result code associated with performing the operation

    VOS_STATUS_E_INVAL:   Input parameters are invalid
    VOS_STATUS_E_FAULT:   pointer to TL cb is NULL; access would cause a
                          page fault
    VOS_STATUS_E_EXISTS:  Station was not registered
    VOS_STATUS_SUCCESS:   Everything is good :)

  SIDE EFFECTS
    Invokes the per-packet TL completion callback stored in each packet's
    VOS user data and decrements the pending-completion counter once per
    packet handed back.

============================================================================*/
VOS_STATUS
WLANTL_TxComp
(
  v_PVOID_t       pvosGCtx,
  vos_pkt_t      *pFrameDataBuff,
  VOS_STATUS      wTxStatus
)
{
  vos_pkt_t*           vosDataBuff = (vos_pkt_t*)pFrameDataBuff;
  WLANTL_CbType*       pTLCb     = NULL;
  WLANTL_TxCompCBType  pfnTxComp = NULL;
  VOS_STATUS           vosStatus = VOS_STATUS_SUCCESS;
  vos_pkt_t*           vosTempTx = NULL;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Sanity check
   ------------------------------------------------------------------------*/
  if ( NULL == vosDataBuff )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
               "WLAN TL:Extraneous NULL data pointer on WLANTL_TxComp"));
    return VOS_STATUS_E_INVAL;
  }

  /*------------------------------------------------------------------------
    Extract TL control block
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_TxComp"));
    return VOS_STATUS_E_FAULT;
  }

  /* Walk the chained packet list; stop when there are no more pending
     completions, the chain ends, or un-chaining fails. */
  while ((0 < pTLCb->usPendingTxCompleteCount) &&
         ( VOS_STATUS_SUCCESS == vosStatus ) &&
         ( NULL !=  vosDataBuff))
  {
    vos_pkt_get_user_data_ptr( vosDataBuff, VOS_PKT_USER_DATA_ID_TL,
                               (v_PVOID_t)&pfnTxComp);

    /*it should never be NULL - default handler should be registered if none*/
    if ( NULL == pfnTxComp )
    {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
            "WLAN TL:NULL pointer to Tx Complete on WLANTL_TxComp"));
      VOS_ASSERT(0);
      return VOS_STATUS_E_FAULT;
    }

    TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
        "WLAN TL:Calling Tx complete for pkt %p in function %p",
        vosDataBuff, pfnTxComp));

    vosTempTx = vosDataBuff;
    /* Advance to the next packet BEFORE invoking the callback: the
       callback receives (and may consume) the current packet. */
    vosStatus = vos_pkt_walk_packet_chain( vosDataBuff,
                                           &vosDataBuff, 1/*true*/);

    pfnTxComp( pvosGCtx, vosTempTx, wTxStatus );

    pTLCb->usPendingTxCompleteCount--;
  }

  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
            "WLAN TL: current TL values are: resources = %d "
            "pTLCb->usPendingTxCompleteCount = %d",
            pTLCb->uResCount, pTLCb->usPendingTxCompleteCount));

  return VOS_STATUS_SUCCESS;
}/* WLANTL_TxComp */

/*==========================================================================

  FUNCTION    WLANTL_CacheSTAFrame

  DESCRIPTION
    Internal utility function for caching incoming data frames that do
    not have a registered station yet.

  DEPENDENCIES
    TL must be initialized before this function gets called.
    In order to benefit from this caching, the components must ensure that
    they will only register with TL at the moment when they are fully setup
    and ready to receive incoming data

  PARAMETERS

   IN
   pTLCb:        TL control block
   ucSTAId:      station id
   vosTempBuff:  the data packet
   uDPUSig:      DPU signature of the incoming packet
   bBcast:       true if packet had the MC/BC bit set
   ucFrmType:    frame type; control frames are never cached

  RETURN VALUE
  The result code associated with performing the operation

    VOS_STATUS_E_FAULT:   pointer to TL cb is NULL or STA Id invalid; access
                          would cause a page fault
    VOS_STATUS_SUCCESS:   Everything is good :)

  SIDE EFFECTS
    May take ownership of vosTempBuff (chained into the station cache) or
    return it to VOSS (dropped).

============================================================================*/
static VOS_STATUS
WLANTL_CacheSTAFrame
(
  WLANTL_CbType*    pTLCb,
  v_U8_t            ucSTAId,
  vos_pkt_t*        vosTempBuff,
  v_U32_t           uDPUSig,
  v_U8_t            bBcast,
  v_U8_t            ucFrmType
)
{
  v_U8_t    ucUcastSig;
  v_U8_t    ucBcastSig;
  v_BOOL_t  bOldSTAPkt;
  WLANTL_STAClientType* pClientSTA = NULL;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*-------------------------------------------------------------------------
    Sanity check
   -------------------------------------------------------------------------*/
  if (( NULL == pTLCb ) || ( NULL == vosTempBuff ) )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
               "WLAN TL: Invalid input pointer on WLANTL_CacheSTAFrame TL %p"
               " Packet %p", pTLCb, vosTempBuff ));
    return VOS_STATUS_E_FAULT;
  }

  if ( WLANTL_STA_ID_INVALID( ucSTAId ) )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
       "WLAN TL:Invalid station id requested on WLANTL_CacheSTAFrame"));
    return VOS_STATUS_E_FAULT;
  }

  pClientSTA = pTLCb->atlSTAClients[ucSTAId];

  if ( NULL == pClientSTA )
  {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
          "WLAN TL:Client Memory was not allocated on %s", __func__));
      return VOS_STATUS_E_FAILURE;
  }

  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
             "WLAN TL:Attempting to cache pkt for STA %d, BD DPU Sig: %d with sig UC: %d, BC: %d",
             ucSTAId, uDPUSig,
             pClientSTA->wSTADesc.ucUcastSig,
             pClientSTA->wSTADesc.ucBcastSig));

  /* Control frames carry no user payload worth caching: drop them. */
  if(WLANTL_IS_CTRL_FRAME(ucFrmType))
  {
      TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
             "WLAN TL: No need to cache CTRL frame. Dropping"));
      vos_pkt_return_packet(vosTempBuff);
      return VOS_STATUS_SUCCESS;
  }

  /*-------------------------------------------------------------------------
    Check if the packet that we are trying to cache belongs to the old
    registered station (if any) or the new (potentially) upcoming station

    - If the STA with this Id was never registered with TL - the signature
    will be invalid;
    - If the STA was previously registered TL will have cached the former
    set of DPU signatures
   -------------------------------------------------------------------------*/
  if ( bBcast )
  {
    ucBcastSig = (v_U8_t)uDPUSig;
    bOldSTAPkt = (( WLAN_TL_INVALID_B_SIG !=
                    pClientSTA->wSTADesc.ucBcastSig ) &&
      ( ucBcastSig == pClientSTA->wSTADesc.ucBcastSig ));
  }
  else
  {
    ucUcastSig = (v_U8_t)uDPUSig;
    bOldSTAPkt = (( WLAN_TL_INVALID_U_SIG !=
                    pClientSTA->wSTADesc.ucUcastSig ) &&
      ( ucUcastSig == pClientSTA->wSTADesc.ucUcastSig ));
  }

  /*------------------------------------------------------------------------
    If the value of the DPU SIG matches the old one, this packet belonged
    to the former association and is not cached.
    If the SIG does not match - this is a packet for a potentially new
    associated station, so append it to the cache chain.
   -------------------------------------------------------------------------*/
  if ( bOldSTAPkt || bBcast )
  {
    TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
               "WLAN TL:Data packet matches old sig for sig DPU: %d UC: %d, "
               "BC: %d - dropping", uDPUSig,
               pClientSTA->wSTADesc.ucUcastSig,
               pClientSTA->wSTADesc.ucBcastSig));
    vos_pkt_return_packet(vosTempBuff);
  }
  else
  {
    if ( NULL == pClientSTA->vosBegCachedFrame )
    {
      /*this is the first frame that we are caching */
      pClientSTA->vosBegCachedFrame = vosTempBuff;

      pClientSTA->tlCacheInfo.cacheInitTime = vos_timer_get_system_time();
      pClientSTA->tlCacheInfo.cacheDoneTime =
              pClientSTA->tlCacheInfo.cacheInitTime;
      pClientSTA->tlCacheInfo.cacheSize = 1;

      MTRACE(vos_trace(VOS_MODULE_ID_TL, TRACE_CODE_TL_CACHE_FRAME,
                       ucSTAId, pClientSTA->tlCacheInfo.cacheSize));
    }
    else
    {
      /*this is a subsequent frame that we are caching: chain to the end */
      vos_pkt_chain_packet(pClientSTA->vosEndCachedFrame,
                           vosTempBuff, VOS_TRUE);

      pClientSTA->tlCacheInfo.cacheDoneTime = vos_timer_get_system_time();
      pClientSTA->tlCacheInfo.cacheSize ++;

      /* Periodic high-watermark warning so a stuck cache is visible in logs */
      if (pClientSTA->tlCacheInfo.cacheSize % WLANTL_CACHE_TRACE_WATERMARK == 0)
      {
        VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
                  "%s: Cache High watermark for staid:%d (%d)",
                  __func__,ucSTAId, pClientSTA->tlCacheInfo.cacheSize);
        MTRACE(vos_trace(VOS_MODULE_ID_TL, TRACE_CODE_TL_CACHE_FRAME,
                         ucSTAId, pClientSTA->tlCacheInfo.cacheSize));
      }
    }
    /* The newly appended packet is always the new tail of the cache chain */
    pClientSTA->vosEndCachedFrame = vosTempBuff;
  }/*else new packet*/

  return VOS_STATUS_SUCCESS;
}/*WLANTL_CacheSTAFrame*/

/*==========================================================================

  FUNCTION    WLANTL_FlushCachedFrames

  DESCRIPTION
    Internal utility function used by TL to flush the station cache

  DEPENDENCIES
    TL must be initialized before this function gets called.

  PARAMETERS

   IN
   vosDataBuff:   it will contain a pointer to the first cached buffer
                  received,

  RETURN VALUE
  The result code associated with performing the operation

    VOS_STATUS_SUCCESS:   Everything is good :)

  SIDE EFFECTS

  NOTE
    This function doesn't re-initialize vosDataBuff to NULL. It's caller's
    responsibility to do so, if required, after this function call.
    Because of this restriction, we decided to make this function static
    so that upper layers don't need to be aware of this restriction.
============================================================================*/
static VOS_STATUS
WLANTL_FlushCachedFrames
(
  vos_pkt_t*      vosDataBuff
)
{
  /* Nothing cached: nothing to release. */
  if ( vosDataBuff == NULL )
  {
    return VOS_STATUS_SUCCESS;
  }

  /*----------------------------------------------------------------------
    Hand the entire chained packet list back to VOSS in a single call.
  ----------------------------------------------------------------------*/
  vos_pkt_return_packet(vosDataBuff);

  return VOS_STATUS_SUCCESS;
}/*WLANTL_FlushCachedFrames*/

/*==========================================================================

  FUNCTION    WLANTL_ForwardSTAFrames

  DESCRIPTION
    Internal utility function for either forwarding cached data to the
    station after the station has been registered, or flushing cached
    data if the station has not been registered.

  DEPENDENCIES
    TL must be initialized before this function gets called.

  PARAMETERS

   IN
   pTLCb:        TL control block
   ucSTAId:      station id

  RETURN VALUE
  The result code associated with performing the operation

    VOS_STATUS_E_FAULT:   pointer to TL cb is NULL; access would cause a
                          page fault
    VOS_STATUS_SUCCESS:   Everything is good :)

  SIDE EFFECTS
    This function doesn't re-initialize vosDataBuff to NULL. It's caller's
    responsibility to do so, if required, after this function call.
    Because of this restriction, we decided to make this function static
    so that upper layers don't need to be aware of this restriction.
============================================================================*/
static VOS_STATUS
WLANTL_ForwardSTAFrames
(
  void*         pvosGCtx,
  v_U8_t        ucSTAId,
  v_U8_t        ucUcastSig,
  v_U8_t        ucBcastSig
)
{
  WLANTL_CbType*  pTLCb = NULL;
  WLANTL_STAClientType* pClientSTA = NULL;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*-------------------------------------------------------------------------
    Sanity check
   -------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
     TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL: Invalid input pointer on WLANTL_ForwardSTAFrames TL %p",
         pTLCb ));
     return VOS_STATUS_E_FAULT;
  }

  if ( WLANTL_STA_ID_INVALID( ucSTAId ) )
  {
     TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL:Invalid station id requested on WLANTL_ForwardSTAFrames"));
     return VOS_STATUS_E_FAULT;
  }

  //WLAN_TL_LOCK_STA_CACHE(pTLCb->atlSTAClients[ucSTAId]);

  /*------------------------------------------------------------------------
     Check if station has not been registered in the mean while
     if not registered, flush cached frames.
   ------------------------------------------------------------------------*/
  pClientSTA = pTLCb->atlSTAClients[ucSTAId];

  if ( NULL == pClientSTA )
  {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
          "WLAN TL:Client Memory was not allocated on %s", __func__));
      return VOS_STATUS_E_FAILURE;
  }

  if ( 0 == pClientSTA->ucExists )
  {
    TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
      "WLAN TL:Station has been deleted for STA %d - flushing cache", ucSTAId));
    MTRACE(vos_trace(VOS_MODULE_ID_TL, TRACE_CODE_TL_FLUSH_CACHED_FRAMES,
                     ucSTAId, pClientSTA->tlCacheInfo.cacheSize));
    WLANTL_FlushCachedFrames(pClientSTA->vosBegCachedFrame);
    goto done;   /* cache pointers are reset in the common exit path */
  }

  /*------------------------------------------------------------------------
     Forwarding cache frames received while the station was in the process
     of being registered with the rest of the SW components

     Access to the cache must be locked; similarly updating the signature
     and the existence flag must be synchronized because these values are
     checked during caching
   ------------------------------------------------------------------------*/
  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
             "WLAN TL:Preparing to fwd packets for STA %d", ucSTAId));

  /*-----------------------------------------------------------------------
    Save the new signature values
  ------------------------------------------------------------------------*/
  pClientSTA->wSTADesc.ucUcastSig  = ucUcastSig;
  pClientSTA->wSTADesc.ucBcastSig  = ucBcastSig;

  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
             "WLAN TL:Fwd-ing packets for STA %d UC %d BC %d",
             ucSTAId, ucUcastSig, ucBcastSig));

  /*-------------------------------------------------------------------------
     Check to see if we have any cached data to forward
   -------------------------------------------------------------------------*/
  if ( NULL != pClientSTA->vosBegCachedFrame )
  {
    TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
               "WLAN TL: Fwd-ing Cached packets for station %d",
               ucSTAId ));

    WLANTL_RxCachedFrames( pTLCb, ucSTAId,
                           pClientSTA->vosBegCachedFrame);
  }
  else
  {
    TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
               "WLAN TL: NO cached packets for station %d", ucSTAId ));
  }

done:
  /*-------------------------------------------------------------------------
     Clear the station cache
   -------------------------------------------------------------------------*/
  pClientSTA->vosBegCachedFrame = NULL;
  pClientSTA->vosEndCachedFrame = NULL;
  pClientSTA->tlCacheInfo.cacheSize = 0;
  pClientSTA->tlCacheInfo.cacheClearTime = vos_timer_get_system_time();

  /*-----------------------------------------------------------------------
    After all the init is complete we can mark the existence flag
  ----------------------------------------------------------------------*/
  pClientSTA->enableCaching = 0;

  //WLAN_TL_UNLOCK_STA_CACHE(pTLCb->atlSTAClients[ucSTAId]);
  return VOS_STATUS_SUCCESS;

}/*WLANTL_ForwardSTAFrames*/

#if defined(FEATURE_WLAN_ESE) || defined(FEATURE_WLAN_ESE_UPLOAD)
/*==========================================================================

  FUNCTION    WLANTL_IsIAPPFrame

  DESCRIPTION
    Internal utility function for detecting incoming ESE IAPP frames

  DEPENDENCIES

  PARAMETERS

   IN
   pvBDHeader:             pointer to the BD header
   vosTempBuff:            the data packet

   IN/OUT
   pFirstDataPktArrived:   static from caller function; used for rssi
                           computation

  RETURN VALUE
  The result code associated with performing the operation

    VOS_TRUE:   It is a IAPP frame
    VOS_FALSE:  It is NOT IAPP frame

  SIDE EFFECTS
    May consume (drop) vosTempBuff when the LLC header is absent.

============================================================================*/
v_BOOL_t
WLANTL_IsIAPPFrame
(
  v_PVOID_t         pvBDHeader,
  vos_pkt_t*        vosTempBuff
)
{
  v_U16_t             usMPDUDOffset;
  v_U8_t              ucOffset;
  v_U8_t              ucSnapHdr[WLANTL_LLC_SNAP_SIZE];
  v_SIZE_t            usSnapHdrSize = WLANTL_LLC_SNAP_SIZE;
  VOS_STATUS          vosStatus;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Check if OUI field is present.
   -------------------------------------------------------------------------*/
  if ( VOS_FALSE == WDA_IS_RX_LLC_PRESENT(pvBDHeader) )
  {
    TLLOGW(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
              "WLAN TL:LLC header removed, cannot determine BT-AMP type -"
              "dropping pkt"));
    /* Drop packet */
    vos_pkt_return_packet(vosTempBuff);
    return VOS_TRUE;
  }
  usMPDUDOffset = (v_U8_t)WDA_GET_RX_MPDU_DATA_OFFSET(pvBDHeader);
  ucOffset      = (v_U8_t)usMPDUDOffset + WLANTL_LLC_SNAP_OFFSET;

  vosStatus = vos_pkt_extract_data( vosTempBuff, ucOffset,
                                (v_PVOID_t)ucSnapHdr, &usSnapHdrSize);

  if (( VOS_STATUS_SUCCESS != vosStatus))
  {
    TLLOGW(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
                "Unable to extract Snap Hdr of data  packet -"
                "dropping pkt"));
    return VOS_FALSE;
  }

  /*------------------------------------------------------------------------
    Check if this is IAPP frame by matching Aironet Snap hdr.
   -------------------------------------------------------------------------*/
  // Compare returns 1 if values are same and 0
  // if not the same.
  if (( WLANTL_LLC_SNAP_SIZE != usSnapHdrSize ) ||
     ( 0 == vos_mem_compare(ucSnapHdr, (v_PVOID_t)WLANTL_AIRONET_SNAP_HEADER,
                            WLANTL_LLC_SNAP_SIZE ) ))
  {
    return VOS_FALSE;
  }

  return VOS_TRUE;
}
#endif //FEATURE_WLAN_ESE

/*==========================================================================

  FUNCTION    WLANTL_ProcessBAPFrame

  DESCRIPTION
    Internal utility function for processing incoming BT-AMP frames

  DEPENDENCIES
    TL must be initialized before this function gets called.
    Both the BT-AMP station and the BAP Ctrl path must have been previously
    registered with TL.
  PARAMETERS

   IN
   pvBDHeader:             pointer to the BD header
   vosTempBuff:            the data packet
   pTLCb:                  TL control block
   ucSTAId:                station id

   IN/OUT
   pFirstDataPktArrived:   static from caller function; used for rssi
                           computation

  RETURN VALUE
  The result code associated with performing the operation

    VOS_STATUS_E_INVAL:   Input parameters are invalid
    VOS_STATUS_E_FAULT:   pointer to TL cb is NULL; access would cause a
                          page fault
    VOS_STATUS_SUCCESS:   Everything is good :)

  SIDE EFFECTS
    Non-data BT-AMP frames are consumed here (forwarded to the BAP client
    or dropped); data frames are left to the caller (returns VOS_FALSE).

============================================================================*/
v_BOOL_t
WLANTL_ProcessBAPFrame
(
  v_PVOID_t         pvBDHeader,
  vos_pkt_t*        vosTempBuff,
  WLANTL_CbType*    pTLCb,
  v_U8_t*           pFirstDataPktArrived,
  v_U8_t            ucSTAId
)
{
  v_U16_t             usMPDUDOffset;
  v_U8_t              ucOffset;
  v_U8_t              ucOUI[WLANTL_LLC_OUI_SIZE];
  v_SIZE_t            usOUISize = WLANTL_LLC_OUI_SIZE;
  VOS_STATUS          vosStatus;
  v_U16_t             usType;
  v_SIZE_t            usTypeLen = sizeof(usType);
  v_U8_t              ucMPDUHOffset;
  v_U8_t              ucMPDUHLen = 0;
  v_U16_t             usActualHLen = 0;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Extract OUI and type from LLC and validate; if non-data send to BAP
   -------------------------------------------------------------------------*/
  if ( VOS_FALSE == WDA_IS_RX_LLC_PRESENT(pvBDHeader) )
  {
    TLLOGW(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
              "WLAN TL:LLC header removed, cannot determine BT-AMP type -"
              "dropping pkt"));
    /* Drop packet */
    vos_pkt_return_packet(vosTempBuff);
    return VOS_TRUE;
  }

  usMPDUDOffset = (v_U8_t)WDA_GET_RX_MPDU_DATA_OFFSET(pvBDHeader);
  ucMPDUHOffset = (v_U8_t)WDA_GET_RX_MPDU_HEADER_OFFSET(pvBDHeader);
  ucMPDUHLen    = (v_U8_t)WDA_GET_RX_MPDU_HEADER_LEN(pvBDHeader);
  ucOffset      = (v_U8_t)usMPDUDOffset + WLANTL_LLC_OUI_OFFSET;

  vosStatus = vos_pkt_extract_data( vosTempBuff, ucOffset,
                                (v_PVOID_t)ucOUI, &usOUISize);

#if 0
  // Compare returns 1 if values are same and 0
  // if not the same.
  if (( WLANTL_LLC_OUI_SIZE != usOUISize ) ||
     ( 0 == vos_mem_compare(ucOUI, (v_PVOID_t)WLANTL_BT_AMP_OUI,
                            WLANTL_LLC_OUI_SIZE ) ))
  {
    TLLOGW(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
              "LLC header points to diff OUI in BT-AMP station -"
              "dropping pkt"));
    /* Drop packet */
    vos_pkt_return_packet(vosTempBuff);
    return VOS_TRUE;
  }
#endif
  /*------------------------------------------------------------------------
     Extract LLC OUI and ensure that this is indeed a BT-AMP frame
   ------------------------------------------------------------------------*/
  vosStatus = vos_pkt_extract_data( vosTempBuff,
                                    ucOffset + WLANTL_LLC_OUI_SIZE,
                                   (v_PVOID_t)&usType, &usTypeLen);

  if (( VOS_STATUS_SUCCESS != vosStatus) ||
      ( sizeof(usType) != usTypeLen ))
  {
    TLLOGW(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
              "Unable to extract type on incoming BAP packet -"
              "dropping pkt"));
    /* Drop packet */
    vos_pkt_return_packet(vosTempBuff);
    return VOS_TRUE;
  }

  /*------------------------------------------------------------------------
    Check if this is BT-AMP data or ctrl packet(RSN, LinkSvision, ActivityR)
   ------------------------------------------------------------------------*/
  usType = vos_be16_to_cpu(usType);

  if (WLANTL_BAP_IS_NON_DATA_PKT_TYPE(usType))
  {
    TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
        "WLAN TL:Non-data packet received over BT-AMP link: %d, => BAP",
        usType));

    /*Flatten packet as BAP expects to be able to peek*/
    if ( VOS_STATUS_SUCCESS != vos_pkt_flatten_rx_pkt(&vosTempBuff))
    {
      TLLOGW(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
                "WLAN TL:Cannot flatten BT-AMP packet - dropping"));
      /* Drop packet */
      vos_pkt_return_packet(vosTempBuff);
      return VOS_TRUE;
    }

    /* Send packet to BAP client*/
    if ( VOS_STATUS_SUCCESS != WDA_DS_TrimRxPacketInfo( vosTempBuff ) )
    {
      TLLOGW(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
                "WLAN TL:BD header corrupted - dropping packet"));
      /* Drop packet */
      vos_pkt_return_packet(vosTempBuff);
      return VOS_TRUE;
    }

    /* If firmware did not already translate the frame, do the 802.11 ->
       802.3 header translation in software before handing it to BAP. */
    if ( 0 == WDA_GET_RX_FT_DONE(pvBDHeader) )
    {
      TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
          "Non-data packet received over BT-AMP link: Sending it for "
          "frame Translation"));

      if (usMPDUDOffset > ucMPDUHOffset)
      {
        usActualHLen = usMPDUDOffset - ucMPDUHOffset;
      }

      /* software frame translation for BTAMP WDS.*/
      WLANTL_Translate80211To8023Header( vosTempBuff, &vosStatus, usActualHLen,
                                         ucMPDUHLen, pTLCb,ucSTAId, VOS_FALSE);
    }

    if (pTLCb->tlBAPClient.pfnTlBAPRx)
        pTLCb->tlBAPClient.pfnTlBAPRx(
                            vos_get_global_context(VOS_MODULE_ID_TL,pTLCb),
                            vosTempBuff,
                            (WLANTL_BAPFrameEnumType)usType );
    else
    {
        VOS_ASSERT(0);
    }

    return VOS_TRUE;
  }
  else
  {
    TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
        "WLAN TL: BAP DATA packet received over BT-AMP link: %d, => BAP",
        usType));
    /*!!!FIX ME!!*/
#if 0
    /*--------------------------------------------------------------------
      For data packet collect phy stats RSSI and Link Quality
      Calculate the RSSI average and save it. Continuous average is done.
      --------------------------------------------------------------------*/
    if ( *pFirstDataPktArrived == 0)
    {
      pTLCb->atlSTAClients[ucSTAId].rssiAvg =
         WLANHAL_GET_RSSI_AVERAGE( pvBDHeader );
      pTLCb->atlSTAClients[ucSTAId].uLinkQualityAvg =
        WLANHAL_RX_BD_GET_SNR( pvBDHeader );

      // Rcvd 1st pkt, start average from next time
      *pFirstDataPktArrived = 1;
    }
    else
    {
      pTLCb->atlSTAClients[ucSTAId].rssiAvg =
          (WLANHAL_GET_RSSI_AVERAGE( pvBDHeader ) +
           pTLCb->atlSTAClients[ucSTAId].rssiAvg)/2;
      pTLCb->atlSTAClients[ucSTAId].uLinkQualityAvg =
          (WLANHAL_RX_BD_GET_SNR( pvBDHeader ) +
           pTLCb->atlSTAClients[ucSTAId].uLinkQualityAvg)/2;
    }/*Else, first data packet*/
#endif
  }/*BT-AMP data packet*/

  return VOS_FALSE;
}/*WLANTL_ProcessBAPFrame*/


/*==========================================================================

  FUNCTION    WLANTL_ProcessFCFrame

  DESCRIPTION
    Internal utility function for processing incoming Flow Control frames.
    Enable or disable LWM mode based on the information.
  DEPENDENCIES
    TL must be initialized before this function gets called.
    FW sends up special flow control frame.

  PARAMETERS

   IN
   pvosGCtx:               pointer to vos global context
   pvosDataBuff:           the flow-control packet
   pvBDHeader:             pointer to the BD header

  RETURN VALUE
  The result code associated with performing the operation

    VOS_STATUS_E_INVAL:   Input frame are invalid
    VOS_STATUS_E_FAULT:   pointer to TL cb is NULL; access would cause a
                          page fault
    VOS_STATUS_SUCCESS:   Everything is good :)

  NOTE(review): the function is declared v_BOOL_t but returns VOS_STATUS
  codes on every path - callers appear to treat the value as a status;
  confirm before changing either side.

  SIDE EFFECTS
    The ingress and egress of each station will be updated. If needed,
    LWM mode will be enabled or disabled based on the flow control
    algorithm.

============================================================================*/
v_BOOL_t
WLANTL_ProcessFCFrame
(
  v_PVOID_t         pvosGCtx,
  vos_pkt_t*        pvosDataBuff,
  v_PVOID_t         pvBDHeader
)
{
#if 1 //enable processing of only fcStaTxDisabled bitmap for now. the else part is old better qos code.
      // need to revisit the old code for full implementation.
  v_U8_t               ucSTAId;
  v_U16_t              ucStaValidBitmap;
  v_U16_t              ucStaTxDisabledBitmap;
  WLANTL_CbType*       pTLCb = NULL;
#ifdef TL_DEBUG_FC
  v_U32_t              rxTimeStamp;
  v_U32_t              curTick;
#endif
  /*------------------------------------------------------------------------
    Extract TL control block
  ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
      "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_SuspendDataTx"));
    return VOS_STATUS_E_FAULT;
  }
  ucStaValidBitmap = WDA_GET_RX_FC_VALID_STA_MASK(pvBDHeader);
  ucStaTxDisabledBitmap = WDA_GET_RX_FC_STA_TX_DISABLED_BITMAP(pvBDHeader);
#ifdef TL_DEBUG_FC
  rxTimeStamp = WDA_GET_RX_TIMESTAMP(pvBDHeader);
  /* hard code of MTU_GLOBAL_TIMER_ADDR to calculate the time between generated and processed */
  wpalReadRegister(0x03081400+0x1D4, &curTick);

  /* NOTE(review): the wrap-around branch below looks suspect - on timer
     wrap the elapsed time should be curTick + (0xFFFFFFFF - rxTimeStamp);
     debug-only code, verify against the MTU timer semantics. */
  TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
    "%ld (%ld-%ld): Disabled %x Valid %x",
    curTick > rxTimeStamp ? curTick - rxTimeStamp : rxTimeStamp - (0xFFFFFFFF - curTick),
    curTick, rxTimeStamp, ucStaTxDisabledBitmap, ucStaValidBitmap));
#endif
  /* Walk both bitmaps LSB-first; each bit position is a station id.
     The loop terminates when no valid-station bits remain. */
  for(ucSTAId = 0; ucStaValidBitmap != 0; ucStaValidBitmap >>=1, ucStaTxDisabledBitmap >>= 1, ucSTAId ++)
  {
    if ( (0 == (ucStaValidBitmap & 0x1)) ||
         (pTLCb->atlSTAClients[ucSTAId] && (0 == pTLCb->atlSTAClients[ucSTAId]->ucExists)) )
      continue;

    if (ucStaTxDisabledBitmap & 0x1)
    {
      WLANTL_SuspendDataTx(pvosGCtx, &ucSTAId, NULL);
    }
    else
    {
      WLANTL_ResumeDataTx(pvosGCtx, &ucSTAId);
    }
  }

#else
  VOS_STATUS          vosStatus;
  tpHalFcRxBd         pvFcRxBd = NULL;
  v_U8_t              ucBitCheck = 0x1;
  v_U8_t              ucStaValid = 0;
  v_U8_t              ucSTAId = 0;

  VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO,
      "Received FC Response");
  if ( (NULL == pTLCb) || (NULL == pvosDataBuff))
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
      "WLAN TL:Invalid pointer in %s", __func__));
    return VOS_STATUS_E_FAULT;
  }
  vosStatus = vos_pkt_peek_data( pvosDataBuff, 0, (v_PVOID_t)&pvFcRxBd,
                                 sizeof(tHalFcRxBd));

  if ( (VOS_STATUS_SUCCESS != vosStatus) || (NULL == pvFcRxBd) )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
      "WLAN TL:wrong FC Rx packet"));
    return VOS_STATUS_E_INVAL;
  }

  // need to swap bytes in the FC contents.
  WLANHAL_SwapFcRxBd(&pvFcRxBd->fcSTATxQLen[0]);

  //logic to enable/disable LWM mode for each station
  for( ucStaValid = (v_U8_t)pvFcRxBd->fcSTAValidMask; ucStaValid; ucStaValid >>= 1, ucBitCheck <<= 1, ucSTAId ++)
  {
    if ( (0 == (ucStaValid & 0x1)) || (0 == pTLCb->atlSTAClients[ucSTAId].ucExists) )
    {
      continue;
    }

    if ( pvFcRxBd->fcSTAThreshIndMask & ucBitCheck )
    {
      //LWM event is reported by FW. Able to fetch more packet
      if( pTLCb->atlSTAClients[ucSTAId].ucLwmModeEnabled )
      {
        //Now memory usage is below LWM. Station can send more packets.
        pTLCb->atlSTAClients[ucSTAId].ucLwmEventReported = TRUE;
      }
      else
      {
        TLLOG4(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_LOW,
          "WLAN TL: FW report LWM event but the station %d is not in LWM mode", ucSTAId));
      }
    }

    //calculate uEgress_length/uIngress_length only after receiving enough packets
    if (WLANTL_LWM_INGRESS_SAMPLE_THRESHOLD <= pTLCb->atlSTAClients[ucSTAId].uIngress_length)
    {
      //check memory usage info to see whether LWM mode should be enabled for the station
      v_U32_t uEgress_length = pTLCb->atlSTAClients[ucSTAId].uIngress_length +
        pTLCb->atlSTAClients[ucSTAId].bmuMemConsumed - pvFcRxBd->fcSTATxQLen[ucSTAId];

      //if ((float)uEgress_length/(float)pTLCb->atlSTAClients[ucSTAId].uIngress_length
      //   <= WLANTL_LWM_EGRESS_INGRESS_THRESHOLD)
      if ( (pTLCb->atlSTAClients[ucSTAId].uIngress_length > uEgress_length) &&
           ((pTLCb->atlSTAClients[ucSTAId].uIngress_length - uEgress_length ) >=
            (pTLCb->atlSTAClients[ucSTAId].uIngress_length >> 2)) )
      {
        TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
          "WLAN TL:Enable LWM mode for station %d", ucSTAId));
        pTLCb->atlSTAClients[ucSTAId].ucLwmModeEnabled = TRUE;
      }
      else
      {
        if( pTLCb->atlSTAClients[ucSTAId].ucLwmModeEnabled )
        {
          TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
            "WLAN TL:Disable LWM mode for station %d", ucSTAId));
          pTLCb->atlSTAClients[ucSTAId].ucLwmModeEnabled = FALSE;
        }
      }

      //remember memory usage in FW starting from this round
      pTLCb->atlSTAClients[ucSTAId].bmuMemConsumed = pvFcRxBd->fcSTATxQLen[ucSTAId];
      pTLCb->atlSTAClients[ucSTAId].uIngress_length = 0;
    } //(WLANTL_LWM_INGRESS_SAMPLE_THRESHOLD <= pTLCb->atlSTAClients[ucSTAId].uIngress_length)

    if( pTLCb->atlSTAClients[ucSTAId].ucLwmModeEnabled )
    {
      //always update current maximum allowed memory usage
      pTLCb->atlSTAClients[ucSTAId].uBuffThresholdMax = WLANTL_STA_BMU_THRESHOLD_MAX -
        pvFcRxBd->fcSTATxQLen[ucSTAId];
    }
  }
#endif

  return VOS_STATUS_SUCCESS;
}

/*==========================================================================

  FUNCTION    WLANTL_RxFrames
  DESCRIPTION
    Callback registered by TL and called by BAL when a packet is received
    over the bus. Upon the call of this function TL will make the necessary
    decision with regards to the forwarding or queuing of this packet and
    the layer it needs to be delivered to.

  DEPENDENCIES
    TL must be initialized before this function gets called.
    If the frame carried is a data frame then the station for which it is
    destined to must have been previously registered with TL.

  PARAMETERS

   IN
   pvosGCtx:      pointer to the global vos context; a handle to TL's
                  or BAL's control block can be extracted from its context

   vosDataBuff:   it will contain a pointer to the first buffer received,
                  if there is more then one packet they will be chained
                  using vOSS buffers.

  RETURN VALUE
    The result code associated with performing the operation

    VOS_STATUS_E_INVAL:   Input parameters are invalid
    VOS_STATUS_E_FAULT:   pointer to TL cb is NULL ; access would cause a
                          page fault
    VOS_STATUS_SUCCESS:   Everything is good :)

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_RxFrames
(
  v_PVOID_t      pvosGCtx,
  vos_pkt_t      *pFrameDataBuff
)
{
  vos_pkt_t*          vosDataBuff = (vos_pkt_t*)pFrameDataBuff;
  WLANTL_CbType*      pTLCb = NULL;
  WLANTL_STAClientType* pClientSTA = NULL;
  WLANTL_STAFuncType  pfnSTAFsm;
  vos_pkt_t*          vosTempBuff;
  v_U8_t              ucSTAId;
  VOS_STATUS          vosStatus;
  v_U8_t              ucFrmType;
  v_PVOID_t           pvBDHeader = NULL;
  WLANTL_STAEventType wSTAEvent  = WLANTL_RX_EVENT;
  v_U8_t              ucTid      = 0;
  v_BOOL_t            broadcast  = VOS_FALSE;
  v_BOOL_t            selfBcastLoopback = VOS_FALSE;
  static v_U8_t       first_data_pkt_arrived; /* shared across calls by the BAP filter */
  v_U32_t             uDPUSig;
  v_U16_t             usPktLen;
  v_BOOL_t            bForwardIAPPwithLLC = VOS_FALSE;
#ifdef WLAN_FEATURE_LINK_LAYER_STATS
  v_S7_t              currentAvgRSSI = 0;
  v_U8_t              ac;
#endif
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
             "WLAN TL:TL Receive Frames called"));

  /*------------------------------------------------------------------------
    Sanity check
   ------------------------------------------------------------------------*/
  if ( NULL == vosDataBuff )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
               "WLAN TL:Invalid parameter sent on WLANTL_RxFrames"));
    return VOS_STATUS_E_INVAL;
  }

  /*------------------------------------------------------------------------
    Populate timestamp as the time when the packet arrives
   ------------------------------------------------------------------------*/
  vosDataBuff->timestamp = vos_timer_get_system_ticks();

  /*------------------------------------------------------------------------
    Extract TL control block
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_ChangeSTAState"));
    return VOS_STATUS_E_FAULT;
  }

  /*---------------------------------------------------------------------
    Save the initial buffer - this is the first received buffer
   ---------------------------------------------------------------------*/
  vosTempBuff = vosDataBuff;

  /* Process each packet of the chain in turn; vosTempBuff is the current
     packet and vosDataBuff always points at the remainder of the chain */
  while ( NULL != vosTempBuff )
  {
    broadcast = VOS_FALSE;
    selfBcastLoopback = VOS_FALSE;

    vos_pkt_walk_packet_chain( vosDataBuff, &vosDataBuff, 1/*true*/ );

    /* Monitor mode: hand every frame straight to the registered monitor
       callback (optionally translated to 802.3) and skip normal processing */
    if( vos_get_conparam() == VOS_MONITOR_MODE )
    {
       if( pTLCb->isConversionReq )
           WLANTL_MonTranslate80211To8023Header(vosTempBuff, pTLCb);

       pTLCb->pfnMonRx(pvosGCtx, vosTempBuff, pTLCb->isConversionReq);
       vosTempBuff = vosDataBuff;
       continue;
    }

    /*---------------------------------------------------------------------
      Peek at BD header - do not remove !!!
      Optimize me: only part of header is needed; not entire one
     ---------------------------------------------------------------------*/
    vosStatus = WDA_DS_PeekRxPacketInfo( vosTempBuff, (v_PVOID_t)&pvBDHeader,
                                         1/*Swap BD*/ );

    if ( NULL == pvBDHeader )
    {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
                  "WLAN TL:Cannot extract BD header"));
      /* Drop packet */
      vos_pkt_return_packet(vosTempBuff);
      vosTempBuff = vosDataBuff;
      continue;
    }

    /*---------------------------------------------------------------------
      Check if FC frame reported from FW
     ---------------------------------------------------------------------*/
    if(WDA_IS_RX_FC(pvBDHeader))
    {
      TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
                  "WLAN TL:receive one FC frame"));

      WLANTL_ProcessFCFrame(pvosGCtx, vosTempBuff, pvBDHeader);
      /* Drop packet */
      vos_pkt_return_packet(vosTempBuff);
      vosTempBuff = vosDataBuff;
      continue;
    }

    /* AMSDU HW bug fix
     * After 2nd AMSDU subframe HW could not handle BD correctly
     * HAL workaround is needed */
    if(WDA_GET_RX_ASF(pvBDHeader))
    {
      WDA_DS_RxAmsduBdFix(pvosGCtx, pvBDHeader);
    }

    /*---------------------------------------------------------------------
      Extract frame control field from 802.11 header if present
      (frame translation not done)
    ---------------------------------------------------------------------*/
    vosStatus = WDA_DS_GetFrameTypeSubType( pvosGCtx, vosTempBuff,
                         pvBDHeader, &ucFrmType );
    if ( VOS_STATUS_SUCCESS != vosStatus )
    {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
                "WLAN TL:Cannot extract Frame Control Field"));
      /* Drop packet */
      vos_pkt_return_packet(vosTempBuff);
      vosTempBuff = vosDataBuff;
      continue;
    }

    vos_pkt_get_packet_length(vosTempBuff, &usPktLen);

    /*---------------------------------------------------------------------
      Check if management and send to PE
    ---------------------------------------------------------------------*/
    if ( WLANTL_IS_MGMT_FRAME(ucFrmType))
    {
      TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
                 "WLAN TL:Sending packet to management client"));
      if ( VOS_STATUS_SUCCESS != vos_pkt_flatten_rx_pkt(&vosTempBuff))
      {
        TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
                   "WLAN TL:Cannot flatten packet - dropping"));
        /* Drop packet */
        vos_pkt_return_packet(vosTempBuff);
        vosTempBuff = vosDataBuff;
        continue;
      }

      ucSTAId = (v_U8_t)WDA_GET_RX_STAID( pvBDHeader );
      /* Read RSSI and update */
      if(!WLANTL_STA_ID_INVALID(ucSTAId))
      {
#if defined WLAN_FEATURE_NEIGHBOR_ROAMING
        /* Read RSSI and update */
        vosStatus = WLANTL_HSHandleRXFrame(pvosGCtx,
                                           WLANTL_MGMT_FRAME_TYPE,
                                           pvBDHeader,
                                           ucSTAId,
                                           VOS_FALSE,
                                           NULL);
#else
        vosStatus = WLANTL_ReadRSSI(pvosGCtx, pvBDHeader, ucSTAId);
#endif
        if (!VOS_IS_STATUS_SUCCESS(vosStatus))
        {
          TLLOGW(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
                           "Handle RX Management Frame fail within Handoff "
                           "support module"));
          /* Do Not Drop packet at here
           * Revisit why HO module return fail
           * vos_pkt_return_packet(vosTempBuff);
           * vosTempBuff = vosDataBuff;
           * continue;
           */
        }
        vosStatus = WLANTL_ReadSNR(pvosGCtx, pvBDHeader, ucSTAId);
        if (!VOS_IS_STATUS_SUCCESS(vosStatus))
        {
          TLLOGW(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
                 FL("Failed to Read SNR")));
        }
#ifdef WLAN_FEATURE_LINK_LAYER_STATS
        pClientSTA = pTLCb->atlSTAClients[ucSTAId];
        if ( NULL != pClientSTA)
        {
            pClientSTA->interfaceStats.mgmtRx++;
        }
#endif
      }

      pTLCb->tlMgmtFrmClient.pfnTlMgmtFrmRx( pvosGCtx, vosTempBuff);
    }
    else /* Data Frame */
    {
      ucSTAId = (v_U8_t)WDA_GET_RX_STAID( pvBDHeader );
      ucTid   = (v_U8_t)WDA_GET_RX_TID( pvBDHeader );

      TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
                 "WLAN TL:Data packet received for STA %d", ucSTAId));

      /*------------------------------------------------------------------
        This should be corrected when multipe sta support is added !!!
        for now bcast frames will be sent to the last registered STA
       ------------------------------------------------------------------*/
      if ( WDA_IS_RX_BCAST(pvBDHeader))
      {
        TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
         "WLAN TL:TL rx Bcast frame - sending to last registered station"));
        broadcast = VOS_TRUE;

        /*-------------------------------------------------------------------
          If Addr1 is b/mcast, but Addr3 is our own self MAC, it is a b/mcast
          pkt we sent looping back to us. To be dropped if we are non BTAMP
         -------------------------------------------------------------------*/
        if( WLANHAL_RX_BD_ADDR3_SELF_IDX ==
            (v_U8_t)WDA_GET_RX_ADDR3_IDX( pvBDHeader ))
        {
          selfBcastLoopback = VOS_TRUE;
        }
      }/*if bcast*/

      if ((WLANTL_STA_ID_INVALID(ucSTAId)) || (WLANTL_TID_INVALID(ucTid)))
      {
        TLLOGW(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
          "WLAN TL:STAId %d, Tid %d. Invalid STA ID/TID- dropping pkt",
          ucSTAId, ucTid));
        /* Drop packet */
        vos_pkt_return_packet(vosTempBuff);
        vosTempBuff = vosDataBuff;
        continue;
      }

#ifdef WLAN_FEATURE_LINK_LAYER_STATS
      ac = WLANTL_TID_2_AC[ucTid];
#endif

      /*----------------------------------------------------------------------
        No need to lock cache access because cache manipulation only happens
        in the transport thread/task context
        - These frames are to be forwarded to the station upon registration
          which happens in the main thread context
          The caching here can happen in either Tx or Rx thread depending
          on the current SSC scheduling
        - also we need to make sure that the frames in the cache are fwd-ed to
          the station before the new incoming ones
      -----------------------------------------------------------------------*/
      pClientSTA = pTLCb->atlSTAClients[ucSTAId];
      if (NULL == pClientSTA)
      {
          TLLOGW(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
              "WLAN TL:STA not allocated memory. Dropping packet"));
          vos_pkt_return_packet(vosTempBuff);
          vosTempBuff = vosDataBuff;
          continue;
      }

#ifdef FEATURE_WLAN_TDLS
      if (( pClientSTA->ucExists ) &&
          (WLAN_STA_TDLS == pClientSTA->wSTADesc.wSTAType) &&
          (pClientSTA->ucTxSuspended))
          vos_atomic_set_U8( &pClientSTA->ucTxSuspended, 0 );
      else if ( !broadcast && (pClientSTA->ucExists == 0 ) )
      {
          tpSirMacMgmtHdr pMacHeader = WDA_GET_RX_MAC_HEADER( pvBDHeader );

          /* from the direct peer while it is not registered to TL yet */
          if ( (pMacHeader->fc.fromDS == 0) &&
               (pMacHeader->fc.toDS == 0) )
          {
              v_U8_t ucAddr3STAId;

              ucAddr3STAId = WDA_GET_RX_ADDR3_IDX(pvBDHeader);

              if ( WLANTL_STA_ID_INVALID(ucAddr3STAId) )
              {
                TLLOGW(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
                    "WLAN TL:STA ID %d invalid - dropping pkt", ucAddr3STAId));
                /* Drop packet */
                vos_pkt_return_packet(vosTempBuff);
                vosTempBuff = vosDataBuff;
                continue;
              }

              /* Frames from an unregistered direct peer are remapped onto the
                 associated AP's STA entry (Addr3) when that entry is a fully
                 authenticated infra station */
              if (!(pTLCb->atlSTAClients[ucAddr3STAId] &&
                    pTLCb->atlSTAClients[ucAddr3STAId]->ucExists &&
                    (WLAN_STA_INFRA ==
                     pTLCb->atlSTAClients[ucAddr3STAId]->wSTADesc.wSTAType) &&
                    (WLANTL_STA_AUTHENTICATED ==
                     pTLCb->atlSTAClients[ucAddr3STAId]->tlState)))
              {
                  TLLOGW(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
                      "%s: staId %d addr3Id %d tlState %d. Unkown Receiver/Transmitter Dropping packet", __func__,
                      ucSTAId, ucAddr3STAId,
                      pTLCb->atlSTAClients[ucAddr3STAId]->tlState));
                  vos_pkt_return_packet(vosTempBuff);
                  vosTempBuff = vosDataBuff;
                  continue;
              }
              else
              {
                  TLLOGW(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
                      "%s: staId %d doesn't exist, but mapped to AP staId %d", __func__,
                      ucSTAId, ucAddr3STAId));
                  ucSTAId = ucAddr3STAId;
                  pClientSTA = pTLCb->atlSTAClients[ucAddr3STAId];
              }
          }
      }
#endif

      if (( pClientSTA->enableCaching == 1 ) &&
          /*Dont buffer Broadcast/Multicast frames. If AP transmits bursts of
           * Broadcast/Multicast data frames, libra buffers all
           * Broadcast/Multicast packets after authentication with AP,
           * So it will lead to low resource condition in Rx Data Path.*/
          ( WDA_IS_RX_BCAST(pvBDHeader) == 0 ))
      {
        if( WDA_IsSelfSTA(pvosGCtx,ucSTAId))
        {
           //drop packet for Self STA index
           TLLOGW(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
             "%s: Packet dropped for Self STA with staId %d ", __func__, ucSTAId ));

           vos_pkt_return_packet(vosTempBuff);
           vosTempBuff = vosDataBuff;
           continue;
        }
        uDPUSig = WDA_GET_RX_DPUSIG( pvBDHeader );
        //Station has not yet been registered with TL - cache the frame
        TLLOGW(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
                 "%s: staId %d exist %d tlState %d cache rx frame", __func__,
                 ucSTAId, pClientSTA->ucExists, pClientSTA->tlState));
        WLANTL_CacheSTAFrame( pTLCb, ucSTAId, vosTempBuff, uDPUSig,
                              broadcast, ucFrmType);
        vosTempBuff = vosDataBuff;
        continue;
      }

#ifdef FEATURE_WLAN_ESE_UPLOAD
      if ((pClientSTA->wSTADesc.ucIsEseSta)|| broadcast)
      {
        /*--------------------------------------------------------------------
          Filter the IAPP frames for ESE connection;
          if data it will return false and it will be routed through the
          regular data path
        --------------------------------------------------------------------*/
        if ( WLANTL_IsIAPPFrame(pvBDHeader, vosTempBuff))
        {
            bForwardIAPPwithLLC = VOS_TRUE;
        }
      }
#endif

#if defined(FEATURE_WLAN_ESE) && !defined(FEATURE_WLAN_ESE_UPLOAD)
      if ((pClientSTA->wSTADesc.ucIsEseSta)|| broadcast)
      {
        /*--------------------------------------------------------------------
          Filter the IAPP frames for ESE connection;
          if data it will return false and it will be routed through the
          regular data path
        --------------------------------------------------------------------*/
        if ( WLANTL_IsIAPPFrame(pvBDHeader, vosTempBuff))
        {
            if ( VOS_STATUS_SUCCESS != vos_pkt_flatten_rx_pkt(&vosTempBuff))
            {
               TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
                         "WLAN TL:Cannot flatten packet - dropping"));
               /* Drop packet */
               vos_pkt_return_packet(vosTempBuff);
            }
            else
            {
               TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO,
                         "WLAN TL: Received ESE IAPP Frame"));
               pTLCb->tlMgmtFrmClient.pfnTlMgmtFrmRx( pvosGCtx, vosTempBuff);
            }
            vosTempBuff = vosDataBuff;
            continue;
        }
      }
#endif /* defined(FEATURE_WLAN_ESE) && !defined(FEATURE_WLAN_ESE_UPLOAD) */

      if ( WLAN_STA_BT_AMP == pClientSTA->wSTADesc.wSTAType )
      {
        /*--------------------------------------------------------------------
          Process the ctrl BAP frame; if data it will return false and it
          will be routed through the regular data path
        --------------------------------------------------------------------*/
        if ( WLANTL_ProcessBAPFrame( pvBDHeader,
                                     vosTempBuff,
                                     pTLCb,
                                    &first_data_pkt_arrived,
                                     ucSTAId))
        {
          vosTempBuff = vosDataBuff;
          continue;
        }
      }/*if BT-AMP station*/
      else if(selfBcastLoopback == VOS_TRUE)
      {
        /* Drop packet */
        vos_pkt_return_packet(vosTempBuff);
        vosTempBuff = vosDataBuff;
        continue;
      }

      /*---------------------------------------------------------------------
        Data packet received, send to state machine
      ---------------------------------------------------------------------*/
      wSTAEvent = WLANTL_RX_EVENT;

      pfnSTAFsm = tlSTAFsm[pClientSTA->tlState].pfnSTATbl[wSTAEvent];

      if ( NULL != pfnSTAFsm )
      {
#if defined WLAN_FEATURE_NEIGHBOR_ROAMING
        /* Read RSSI and update */
        vosStatus = WLANTL_HSHandleRXFrame(pvosGCtx,
                                           WLANTL_DATA_FRAME_TYPE,
                                           pvBDHeader,
                                           ucSTAId,
                                           broadcast,
                                           vosTempBuff);
        broadcast = VOS_FALSE;
#else
        vosStatus = WLANTL_ReadRSSI(pvosGCtx, pvBDHeader, ucSTAId);
#endif
        if (!VOS_IS_STATUS_SUCCESS(vosStatus))
        {
          TLLOGW(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
            "Handle RX Data Frame fail within Handoff support module"));
          /* Do Not Drop packet at here
           * Revisit why HO module return fail
           * vos_pkt_return_packet(vosTempBuff);
           * vosTempBuff = vosDataBuff;
           * continue;
           */
        }
#ifdef WLAN_FEATURE_LINK_LAYER_STATS
        /* Re-fetch the client: the TDLS remap above may have changed ucSTAId */
        pClientSTA = pTLCb->atlSTAClients[ucSTAId];
        if ( NULL != pClientSTA)
        {
            tpSirMacMgmtHdr pMacHeader = WDA_GET_RX_MAC_HEADER( pvBDHeader );

            if (!IS_BROADCAST_ADD(pMacHeader->da) &&
                IS_MULTICAST_ADD(pMacHeader->da))
            {
                pClientSTA->interfaceStats.accessCategoryStats[ac].rxMcast++;
            }
            WLANTL_HSGetDataRSSI(pvosGCtx, pvBDHeader, ucSTAId,
                                 &currentAvgRSSI);
            pClientSTA->interfaceStats.rssiData = currentAvgRSSI;

            pClientSTA->interfaceStats.accessCategoryStats[ac].rxMpdu++;
            if (WDA_IS_RX_AN_AMPDU (pvBDHeader))
            {
                pClientSTA->interfaceStats.accessCategoryStats[ac].rxAmpdu++;
            }
        }
#endif
        vosStatus = WLANTL_ReadSNR(pvosGCtx, pvBDHeader, ucSTAId);
        if (!VOS_IS_STATUS_SUCCESS(vosStatus))
        {
           TLLOGW(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
                  FL("Failed to Read SNR")));
        }

        pfnSTAFsm( pvosGCtx, ucSTAId, &vosTempBuff, bForwardIAPPwithLLC);
      }
      else
      {
        TLLOGW(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
          "WLAN TL:NULL state function, STA:%d, State: %d- dropping packet",
          ucSTAId, pClientSTA->tlState));
        /* Drop packet */
        vos_pkt_return_packet(vosTempBuff);
        vosTempBuff = vosDataBuff;
        continue;
      }

    }/* else data frame*/

    vosTempBuff = vosDataBuff;
  }/*while chain*/

  return VOS_STATUS_SUCCESS;
}/* WLANTL_RxFrames */

#ifdef WLAN_FEATURE_LINK_LAYER_STATS
/*==========================================================================
FUNCTION WLANTL_CollectInterfaceStats DESCRIPTION Utility function used by TL to send the statitics DEPENDENCIES PARAMETERS IN ucSTAId: station for which the statistics need to collected vosDataBuff: it will contain the pointer to the corresponding structure RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_INVAL: Input parameters are invalid VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANTL_CollectInterfaceStats ( v_PVOID_t pvosGCtx, v_U8_t ucSTAId, WLANTL_InterfaceStatsType *vosDataBuff ) { WLANTL_CbType* pTLCb = NULL; /*------------------------------------------------------------------------ Sanity check ------------------------------------------------------------------------*/ if ( WLANTL_STA_ID_INVALID( ucSTAId ) ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid station id requested on WLANTL_CollectStats")); return VOS_STATUS_E_FAULT; } /*------------------------------------------------------------------------ Extract TL control block ------------------------------------------------------------------------*/ pTLCb = VOS_GET_TL_CB(pvosGCtx); if ( NULL == pTLCb ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_CollectStats")); return VOS_STATUS_E_FAULT; } if ( NULL == pTLCb->atlSTAClients[ucSTAId] ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Client Memory was not allocated on %s", __func__)); return VOS_STATUS_E_FAILURE; } TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL: collect WIFI_STATS_IFACE results")); vos_mem_copy(vosDataBuff, &pTLCb->atlSTAClients[ucSTAId]->interfaceStats, sizeof(WLANTL_InterfaceStatsType)); return VOS_STATUS_SUCCESS; } /*========================================================================== FUNCTION WLANTL_ClearInterfaceStats DESCRIPTION Utility function 
used by TL to clear the statitics DEPENDENCIES PARAMETERS IN ucSTAId: station for which the statistics need to collected RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_INVAL: Input parameters are invalid VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANTL_ClearInterfaceStats ( v_PVOID_t pvosGCtx, v_U8_t ucSTAId, v_U8_t statsClearReqMask ) { WLANTL_CbType* pTLCb = NULL; WLANTL_STAClientType* pClientSTA = NULL; /*------------------------------------------------------------------------ Sanity check ------------------------------------------------------------------------*/ if ( WLANTL_STA_ID_INVALID( ucSTAId ) ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid station id requested on WLANTL_CollectStats")); return VOS_STATUS_E_FAULT; } /*------------------------------------------------------------------------ Extract TL control block ------------------------------------------------------------------------*/ pTLCb = VOS_GET_TL_CB(pvosGCtx); if ( NULL == pTLCb ) { TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_CollectStats")); return VOS_STATUS_E_FAULT; } pClientSTA = pTLCb->atlSTAClients[ucSTAId]; if ( NULL == pClientSTA ) { TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL:Client Memory was not allocated on %s", __func__)); return VOS_STATUS_E_FAILURE; } if ((statsClearReqMask & WIFI_STATS_IFACE_AC) || (statsClearReqMask & WIFI_STATS_IFACE)) { TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL:cleared WIFI_STATS_IFACE_AC results")); pClientSTA->interfaceStats.accessCategoryStats[0].rxMcast = 0; pClientSTA->interfaceStats.accessCategoryStats[1].rxMcast = 0; pClientSTA->interfaceStats.accessCategoryStats[2].rxMcast = 0; pClientSTA->interfaceStats.accessCategoryStats[3].rxMcast = 0; 
pClientSTA->interfaceStats.accessCategoryStats[0].rxMpdu = 0; pClientSTA->interfaceStats.accessCategoryStats[1].rxMpdu = 0; pClientSTA->interfaceStats.accessCategoryStats[2].rxMpdu = 0; pClientSTA->interfaceStats.accessCategoryStats[3].rxMpdu = 0; pClientSTA->interfaceStats.accessCategoryStats[0].rxAmpdu = 0; pClientSTA->interfaceStats.accessCategoryStats[1].rxAmpdu = 0; pClientSTA->interfaceStats.accessCategoryStats[2].rxAmpdu = 0; pClientSTA->interfaceStats.accessCategoryStats[3].rxAmpdu = 0; } if (statsClearReqMask & WIFI_STATS_IFACE) { TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL:cleared WIFI_STATS_IFACE results")); pClientSTA->interfaceStats.mgmtRx = 0; pClientSTA->interfaceStats.rssiData = 0; return VOS_STATUS_SUCCESS; } return VOS_STATUS_SUCCESS; } #endif /* WLAN_FEATURE_LINK_LAYER_STATS */ /*========================================================================== FUNCTION WLANTL_RxCachedFrames DESCRIPTION Utility function used by TL to forward the cached frames to a particular station; DEPENDENCIES TL must be initiailized before this function gets called. If the frame carried is a data frame then the station for which it is destined to must have been previously registered with TL. PARAMETERS IN pTLCb: pointer to TL handle ucSTAId: station for which we need to forward the packets vosDataBuff: it will contain a pointer to the first cached buffer received, if there is more then one packet they will be chained using vOSS buffers. 
  RETURN VALUE
    The result code associated with performing the operation

    VOS_STATUS_E_INVAL:   Input parameters are invalid
    VOS_STATUS_E_FAULT:   pointer to TL cb is NULL ; access would cause a
                          page fault
    VOS_STATUS_SUCCESS:   Everything is good :)

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_RxCachedFrames
(
  WLANTL_CbType*  pTLCb,
  v_U8_t          ucSTAId,
  vos_pkt_t*      vosDataBuff
)
{
   WLANTL_STAClientType* pClientSTA = NULL;
   WLANTL_STAFuncType  pfnSTAFsm;
   vos_pkt_t*          vosTempBuff;
   VOS_STATUS          vosStatus;
   v_PVOID_t           pvBDHeader = NULL;
   WLANTL_STAEventType wSTAEvent  = WLANTL_RX_EVENT;
   v_U8_t              ucTid      = 0;
   v_BOOL_t            broadcast  = VOS_FALSE;
   v_BOOL_t            bSigMatch  = VOS_FALSE;
   v_BOOL_t            selfBcastLoopback = VOS_FALSE;
   static v_U8_t       first_data_pkt_arrived; /* shared across calls by the BAP filter */
   v_U32_t             uDPUSig;
   v_U8_t              ucUcastSig;
   v_U8_t              ucBcastSig;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
             "WLAN TL:TL Receive Cached Frames called"));

  /*------------------------------------------------------------------------
    Sanity check
   ------------------------------------------------------------------------*/
  if ( NULL == vosDataBuff )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
               "WLAN TL:Invalid parameter sent on WLANTL_RxFrames"));
    return VOS_STATUS_E_INVAL;
  }

  pClientSTA = pTLCb->atlSTAClients[ucSTAId];

  if ( NULL == pClientSTA )
  {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
          "WLAN TL:Client Memory was not allocated on %s", __func__));
      return VOS_STATUS_E_FAILURE;
  }

  MTRACE(vos_trace(VOS_MODULE_ID_TL, TRACE_CODE_TL_FORWARD_CACHED_FRAMES,
         ucSTAId, 1<<16 | pClientSTA->tlCacheInfo.cacheSize));

  /*---------------------------------------------------------------------
    Save the initial buffer - this is the first received buffer
   ---------------------------------------------------------------------*/
  vosTempBuff = vosDataBuff;

  /* Walk the cached chain: vosTempBuff is the current packet and
     vosDataBuff always points at the remainder of the chain */
  while ( NULL != vosTempBuff )
  {
    broadcast = VOS_FALSE;
    selfBcastLoopback = VOS_FALSE;

    vos_pkt_walk_packet_chain( vosDataBuff, &vosDataBuff, 1/*true*/ );
    TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
              "WLAN TL:Sending new cached packet to station %d", ucSTAId));
    /*---------------------------------------------------------------------
      Peek at BD header - do not remove !!!
      Optimize me: only part of header is needed; not entire one
     ---------------------------------------------------------------------*/
    /* No BD swap here (0) — the frame was already swapped when cached */
    vosStatus = WDA_DS_PeekRxPacketInfo( vosTempBuff, (v_PVOID_t)&pvBDHeader,
                                         0 );

    if ( NULL == pvBDHeader )
    {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
                  "WLAN TL:Cannot extract BD header"));
      /* Drop packet */
      vos_pkt_return_packet(vosTempBuff);
      vosTempBuff = vosDataBuff;
      continue;
    }

    uDPUSig = WDA_GET_RX_DPUSIG( pvBDHeader );

    /* AMSDU HW bug fix
     * After 2nd AMSDU subframe HW could not handle BD correctly
     * HAL workaround is needed */
    if(WDA_GET_RX_ASF(pvBDHeader))
    {
      WDA_DS_RxAmsduBdFix(vos_get_global_context(VOS_MODULE_ID_TL,pTLCb),
                          pvBDHeader);
    }

    ucTid   = (v_U8_t)WDA_GET_RX_TID( pvBDHeader );

    VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
               "WLAN TL:Data packet cached for STA %d", ucSTAId);

    /*------------------------------------------------------------------
      This should be corrected when multipe sta support is added !!!
      for now bcast frames will be sent to the last registered STA
     ------------------------------------------------------------------*/
    if ( WDA_IS_RX_BCAST(pvBDHeader))
    {
      TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
                "WLAN TL:TL rx Bcast frame "));
      broadcast = VOS_TRUE;

      /* If Addr1 is b/mcast, but Addr3 is our own self MAC, it is a b/mcast
       * pkt we sent looping back to us. To be dropped if we are non BTAMP
       */
      if( WLANHAL_RX_BD_ADDR3_SELF_IDX ==
          (v_U8_t)WDA_GET_RX_ADDR3_IDX( pvBDHeader ))
      {
        selfBcastLoopback = VOS_TRUE;
      }
    }/*if bcast*/

    /*-------------------------------------------------------------------------
      Check if the packet that we cached matches the DPU signature of the
      newly added station
    -------------------------------------------------------------------------*/
    pClientSTA = pTLCb->atlSTAClients[ucSTAId];

    if ( NULL == pClientSTA )
    {
        TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
            "WLAN TL:Client Memory was not allocated on %s", __func__));
        /* NOTE(review): this mid-loop return abandons the remaining packets
           in the chain without returning them — potential buffer leak;
           confirm whether the caller reclaims the chain on failure. */
        return VOS_STATUS_E_FAILURE;
    }

    if ( broadcast )
    {
      ucBcastSig = (v_U8_t)uDPUSig;
      bSigMatch = (( WLAN_TL_INVALID_B_SIG !=
                    pClientSTA->wSTADesc.ucBcastSig ) &&
        ( ucBcastSig == pClientSTA->wSTADesc.ucBcastSig ));
    }
    else
    {
      ucUcastSig = (v_U8_t)uDPUSig;
      bSigMatch = (( WLAN_TL_INVALID_U_SIG !=
                      pClientSTA->wSTADesc.ucUcastSig ) &&
          ( ucUcastSig == pClientSTA->wSTADesc.ucUcastSig ));
    }

    /*-------------------------------------------------------------------------
      If the packet doesn't match - drop it
     -------------------------------------------------------------------------*/
    if ( !bSigMatch )
    {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_MED,
        "WLAN TL: Cached packet does not match DPU Sig of the new STA - drop "
        " DPU Sig %d UC %d BC %d B %d",
        uDPUSig,
        pClientSTA->wSTADesc.ucUcastSig,
        pClientSTA->wSTADesc.ucBcastSig,
        broadcast));

      /* Drop packet */
      vos_pkt_return_packet(vosTempBuff);
      vosTempBuff = vosDataBuff;
      continue;
    }/*if signature mismatch*/

    /*------------------------------------------------------------------------
      Check if BT-AMP frame:
      - additional processing needed in this case to separate BT-AMP date
        from BT-AMP Ctrl path
    ------------------------------------------------------------------------*/
    if ( WLAN_STA_BT_AMP == pClientSTA->wSTADesc.wSTAType )
    {
      /*--------------------------------------------------------------------
        Process the ctrl BAP frame; if data it will return false and it
        will be routed through the regular data path
      --------------------------------------------------------------------*/
      if ( WLANTL_ProcessBAPFrame( pvBDHeader,
                                   vosTempBuff,
                                   pTLCb,
                                  &first_data_pkt_arrived,
                                   ucSTAId))
      {
        vosTempBuff = vosDataBuff;
        continue;
      }
    }/*if BT-AMP station*/
    else if(selfBcastLoopback == VOS_TRUE)
    {
      /* Drop packet */
      vos_pkt_return_packet(vosTempBuff);
      vosTempBuff = vosDataBuff;
      continue;
    }

    /*---------------------------------------------------------------------
      Data packet received, send to state machine
    ---------------------------------------------------------------------*/
    wSTAEvent = WLANTL_RX_EVENT;

    pfnSTAFsm = tlSTAFsm[pClientSTA->tlState].pfnSTATbl[wSTAEvent];

    if ( NULL != pfnSTAFsm )
    {
#if defined WLAN_FEATURE_NEIGHBOR_ROAMING
      /* Read RSSI and update */
      vosStatus = WLANTL_HSHandleRXFrame(vos_get_global_context(
                                         VOS_MODULE_ID_TL,pTLCb),
                                         WLANTL_DATA_FRAME_TYPE,
                                         pvBDHeader,
                                         ucSTAId,
                                         broadcast,
                                         vosTempBuff);
      broadcast = VOS_FALSE;
#else
      vosStatus = WLANTL_ReadRSSI(vos_get_global_context(VOS_MODULE_ID_TL,
                                  pTLCb), pvBDHeader, ucSTAId);
#endif
      if(!VOS_IS_STATUS_SUCCESS(vosStatus))
      {
        TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
          "Handle RX Data Frame fail within Handoff support module"));
        /* Do Not Drop packet at here
         * Revisit why HO module return fail
        vos_pkt_return_packet(vosTempBuff);
        vosTempBuff = vosDataBuff;
        continue;
        */
      }
      pfnSTAFsm( vos_get_global_context(VOS_MODULE_ID_TL,pTLCb), ucSTAId,
               &vosTempBuff, VOS_FALSE);
    }
    else
    {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL:NULL state function, STA:%d, State: %d- dropping packet",
        ucSTAId, pClientSTA->tlState));
      /* Drop packet */
      vos_pkt_return_packet(vosTempBuff);
      vosTempBuff = vosDataBuff;
      continue;
    }

    vosTempBuff = vosDataBuff;
  }/*while chain*/

  return VOS_STATUS_SUCCESS;
}/* WLANTL_RxCachedFrames */

/*==========================================================================

  FUNCTION    WLANTL_RxProcessMsg

  DESCRIPTION
    Called by VOSS when a message was
 serialized for TL through the rx thread/task.

  DEPENDENCIES

  The TL must be initialized before this function can be called.

  PARAMETERS

   IN
   pvosGCtx:       pointer to the global vos context; a handle to TL's
                   control block can be extracted from its context
   message:        type and content of the message

  RETURN VALUE

   The result code associated with performing the operation

   VOS_STATUS_E_INVAL:   invalid input parameters
   VOS_STATUS_E_FAULT:   pointer to TL cb is NULL ; access would cause a
                         page fault
   VOS_STATUS_SUCCESS:   Everything is good :)

   Other values can be returned as a result of a function call, please check
   corresponding API for more info.

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_RxProcessMsg
(
  v_PVOID_t        pvosGCtx,
  vos_msg_t*       message
)
{
   VOS_STATUS      vosStatus = VOS_STATUS_SUCCESS;
   v_U32_t         uData;
   v_U8_t          ucSTAId;
   v_U8_t          ucUcastSig;
   v_U8_t          ucBcastSig;

  /*------------------------------------------------------------------------
    Sanity check
   ------------------------------------------------------------------------*/
  if ( NULL == message )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
               "WLAN TL:Invalid parameter sent on WLANTL_RxProcessMessage"));
    return VOS_STATUS_E_INVAL;
  }

  /*------------------------------------------------------------------------
    Process message
   ------------------------------------------------------------------------*/
  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
             "WLAN TL:Received message: %d through rx flow", message->type));

  switch( message->type )
  {
  case WLANTL_RX_FWD_CACHED:
    /*---------------------------------------------------------------------
     The data sent with the message has the following structure:
       | 00 | ucBcastSignature | ucUcastSignature | ucSTAID |
       each field above is one byte
     Unpack the packed bodyval and forward the frames that were cached for
     this STA while it was being registered.
    ---------------------------------------------------------------------*/
    uData       = message->bodyval;
    ucSTAId     = ( uData & 0x000000FF);
    ucUcastSig  = ( uData & 0x0000FF00)>>8;
    ucBcastSig  = (v_U8_t)(( uData & 0x00FF0000)>>16);
    /* NOTE(review): vosStatus captures the forward result but the function
       unconditionally returns VOS_STATUS_SUCCESS below — failures from
       WLANTL_ForwardSTAFrames are effectively ignored here. */
    vosStatus   = WLANTL_ForwardSTAFrames( pvosGCtx, ucSTAId,
                                           ucUcastSig, ucBcastSig);
    break;
  default:
    /*no processing for now*/
    break;
  }

  return VOS_STATUS_SUCCESS;
}

/*==========================================================================
  FUNCTION    WLANTL_ResourceCB

  DESCRIPTION
    Called by the TL when it has packets available for transmission.
    Stores the updated BD/PDU resource count and, when enough resources are
    available and transmission is not suspended, asks WDA to resume
    transmission.

  DEPENDENCIES
    The TL must be registered with BAL before this function can be called.

  PARAMETERS

   IN
   pvosGCtx:       pointer to the global vos context; a handle to TL's
                   or BAL's control block can be extracted from its context

  RETURN VALUE
    The result code associated with performing the operation

    VOS_STATUS_E_FAULT:   pointer to TL cb is NULL ; access would cause a
                          page fault
    VOS_STATUS_SUCCESS:   Everything is good :)

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_ResourceCB
(
  v_PVOID_t        pvosGCtx,
  v_U32_t          uCount
)
{
   WLANTL_CbType*  pTLCb = NULL;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Sanity check
    Extract TL control block
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
    /* NOTE(review): the message below mentions WLANTL_ChangeSTAState — a
       stale copy/paste; left untouched because it is a runtime string. */
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
      "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_ChangeSTAState"));
    return VOS_STATUS_E_FAULT;
  }

  pTLCb->uResCount = uCount;

  /*-----------------------------------------------------------------------
    Resume Tx if enough res and not suspended
  -----------------------------------------------------------------------*/
  if (( pTLCb->uResCount >=  WDA_TLI_MIN_RES_MF ) &&
      ( 0 == pTLCb->ucTxSuspended ))
  {
    TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
        "WLAN TL:Issuing Xmit start request to BAL for avail res ASYNC"));
    return WDA_DS_StartXmit(pvosGCtx);
  }
  return VOS_STATUS_SUCCESS;
}/* WLANTL_ResourceCB */
/*==========================================================================
  FUNCTION   WLANTL_IsTxXmitPending

  DESCRIPTION
    Called by the WDA when it wants to know whether WDA_DS_TX_START_XMIT msg
    is pending in TL msg queue

  DEPENDENCIES
    The TL must be registered with WDA before this function can be called.

  PARAMETERS

   IN
   pvosGCtx:       pointer to the global vos context; a handle to TL's
                   or WDA's control block can be extracted from its context

  RETURN VALUE
    The result code associated with performing the operation

    0:   No WDA_DS_TX_START_XMIT msg pending
    1:   Msg WDA_DS_TX_START_XMIT already pending in TL msg queue

  SIDE EFFECTS

============================================================================*/
v_BOOL_t
WLANTL_IsTxXmitPending
(
  v_PVOID_t       pvosGCtx
)
{
   WLANTL_CbType*  pTLCb = NULL;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Sanity check
    Extract TL control block
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_FATAL,
      "WLAN TL:Invalid TL pointer from pvosGCtx in WLANTL_IsTxXmitPending "));
    return FALSE;
  }

  /* NOTE(review): the field name carries a historical typo ("Tranmit");
     renaming it would touch every user of WLANTL_CbType, so it is kept. */
  return pTLCb->isTxTranmitMsgPending;

}/*WLANTL_IsTxXmitPending */

/*==========================================================================
  FUNCTION   WLANTL_SetTxXmitPending

  DESCRIPTION
    Called by the WDA when it wants to indicate that WDA_DS_TX_START_XMIT msg
    is pending in TL msg queue

  DEPENDENCIES
    The TL must be registered with WDA before this function can be called.

  PARAMETERS

   IN
   pvosGCtx:       pointer to the global vos context; a handle to TL's
                   or WDA's control block can be extracted from its context

  RETURN VALUE
    None

  SIDE EFFECTS

============================================================================*/
v_VOID_t
WLANTL_SetTxXmitPending
(
  v_PVOID_t       pvosGCtx
)
{
   WLANTL_CbType*  pTLCb = NULL;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Sanity check
    Extract TL control block
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_FATAL,
      "WLAN TL:Invalid TL pointer from pvosGCtx in WLANTL_SetTxXmitPending"));
    return;
  }

  pTLCb->isTxTranmitMsgPending = 1;
  return;

}/*WLANTL_SetTxXmitPending */

/*==========================================================================
  FUNCTION   WLANTL_ClearTxXmitPending

  DESCRIPTION
    Called by the WDA when it wants to indicate that no WDA_DS_TX_START_XMIT
    msg is pending in TL msg queue

  DEPENDENCIES
    The TL must be registered with WDA before this function can be called.

  PARAMETERS

   IN
   pvosGCtx:       pointer to the global vos context; a handle to TL's
                   or WDA's control block can be extracted from its context

  RETURN VALUE
    None

  SIDE EFFECTS

============================================================================*/
v_VOID_t
WLANTL_ClearTxXmitPending
(
  v_PVOID_t       pvosGCtx
)
{
   WLANTL_CbType*  pTLCb = NULL;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Sanity check
    Extract TL control block
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_FATAL,
      "WLAN TL:Invalid TL pointer from pvosGCtx in WLANTL_ClearTxXmitPending "));
    return;
  }

  pTLCb->isTxTranmitMsgPending = 0;
  return;

}/*WLANTL_ClearTxXmitPending */

/*==========================================================================
  FUNCTION   WLANTL_TxThreadDebugHandler

  DESCRIPTION
    Printing TL Snapshot dump, processed under TxThread context, currently
    information regarding the global TlCb structure. Dumps information
    related to per active STA connection currently in use by TL.

  DEPENDENCIES
    The TL must be initialized before this gets called.
PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to TL's or WDA's control block can be extracted from its context RETURN VALUE None SIDE EFFECTS ============================================================================*/ v_VOID_t WLANTL_TxThreadDebugHandler ( v_PVOID_t *pVosContext ) { WLANTL_CbType* pTLCb = NULL; WLANTL_STAClientType* pClientSTA = NULL; int i = 0; v_U8_t uFlowMask; // TX FlowMask from WDA TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_FATAL, "WLAN TL: %s Enter ", __func__)); pTLCb = VOS_GET_TL_CB(pVosContext); if ( NULL == pVosContext || NULL == pTLCb ) { TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "Global VoS Context or TL Context are NULL")); return; } if (VOS_STATUS_SUCCESS == WDA_DS_GetTxFlowMask(pVosContext, &uFlowMask)) { TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WDA uTxFlowMask: 0x%x", uFlowMask)); } TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "************************TL DUMP INFORMATION**************")); TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "uDelayedTriggerFrmInt:%d\tuMinFramesProcThres:%d", pTLCb->tlConfigInfo.uDelayedTriggerFrmInt, pTLCb->tlConfigInfo.uMinFramesProcThres)); TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "Management Frame Client exists: %d", pTLCb->tlMgmtFrmClient.ucExists)); TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "usPendingTxCompleteCount: %d\tucTxSuspended: %d", pTLCb->usPendingTxCompleteCount, pTLCb->ucTxSuspended)); TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "uResCount: %d", pTLCb->uResCount)); TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "ucRegisteredStaId: %d\tucCurrentSTA: %d", pTLCb->ucRegisteredStaId, pTLCb->ucCurrentSTA)); TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "UrgentFrameProcessing: %s\tuFramesProcThres: %d", (pTLCb->bUrgent?"True":"False"), pTLCb->uFramesProcThres)); TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, 
"isTxTranmitMsgPending: %d\t isBMPS: %s", pTLCb->isTxTranmitMsgPending, pTLCb->isBMPS?"True":"False")); #ifdef FEATURE_WLAN_TDLS TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "TDLS Peer Count: %d", pTLCb->ucTdlsPeerCount)); #endif TLLOG1(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, "++++++++++++++++++++Registerd Client Information++++++++++")); for ( i =0; i<WLAN_MAX_STA_COUNT; i++ ) { pClientSTA = pTLCb->atlSTAClients[i]; if( NULL == pClientSTA || 0 == pClientSTA->ucExists) { continue; } TLLOG1(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, "######################STA Index: %d ############################",i)); TLLOG1(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, "WLAN_STADescType:")); TLLOG1(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, "STAId: %d\t STA MAC Address: %pM", pClientSTA->wSTADesc.ucSTAId, pClientSTA->wSTADesc.vSTAMACAddress.bytes)); TLLOG1(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, "STA Type: %d\tProtectedFrame: %d", pClientSTA->wSTADesc.wSTAType, pClientSTA->wSTADesc.ucProtectedFrame)); TLLOG1(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, "QoS: %d\tRxFrameTrans: %d\tTxFrameTrans: %d", pClientSTA->wSTADesc.ucQosEnabled, pClientSTA->wSTADesc.ucSwFrameRXXlation, pClientSTA->wSTADesc.ucSwFrameTXXlation)); TLLOG1(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, "ucUcastSig: %d\tucBcastSig: %d", pClientSTA->wSTADesc.ucUcastSig, pClientSTA->wSTADesc.ucBcastSig)); TLLOG1(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, "ClientIndex: %d\t Exists: %d", i, pClientSTA->ucExists)); TLLOG1(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, "TL State: %d\t TL Priority: %d", pClientSTA->tlState, pClientSTA->tlPri)); TLLOG1(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, "ucTxSuspended: %d\tucPktPending: %d", pClientSTA->ucTxSuspended, pClientSTA->ucPktPending)); TLLOG1(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, "ucEAPOLPktPending: %d\tucNoMoreData: %d", pClientSTA->ucEapolPktPending, pClientSTA->ucNoMoreData)); 
TLLOG1(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, "enableCaching: %d\t fcStaTxDisabled: %d", pClientSTA->enableCaching, pClientSTA->fcStaTxDisabled)); TLLOG1(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, "ucCurrentAC: %d\tucServicedAC: %d", pClientSTA->ucCurrentAC, pClientSTA->ucServicedAC)); TLLOG1(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, "TID: %d\tautTxCount[0]: %d\tauRxCount[0]: %d",0, pClientSTA->auTxCount[0], pClientSTA->auRxCount[0])); TLLOG1(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, "aucAcMask[0]: %d\taucAcMask[1]: %d\taucAcMask[2]: %d\taucAcMask[3]: %d\t", pClientSTA->aucACMask[0], pClientSTA->aucACMask[1], pClientSTA->aucACMask[2], pClientSTA->aucACMask[3])); TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "ucCurrentWeight: %d", pClientSTA->ucCurrentWeight)); if( WLAN_STA_SOFTAP == pClientSTA->wSTADesc.wSTAType) { TLLOG1(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, "TrafficStatistics for SOFTAP Station:")); TLLOG1(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, "RUF=%d\tRMF=%d\tRBF=%d", pClientSTA->trafficStatistics.rxUCFcnt, pClientSTA->trafficStatistics.rxMCFcnt, pClientSTA->trafficStatistics.rxBCFcnt)); TLLOG1(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, "RUB=%d\tRMB=%d\tRBB=%d", pClientSTA->trafficStatistics.rxUCBcnt, pClientSTA->trafficStatistics.rxMCBcnt, pClientSTA->trafficStatistics.rxBCBcnt)); TLLOG1(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, "TUF=%d\tTMF=%d\tTBF=%d", pClientSTA->trafficStatistics.txUCFcnt, pClientSTA->trafficStatistics.txMCFcnt, pClientSTA->trafficStatistics.txBCFcnt)); TLLOG1(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, "TUB=%d\tTMB=%d\tTBB=%d", pClientSTA->trafficStatistics.txUCBcnt, pClientSTA->trafficStatistics.txMCBcnt, pClientSTA->trafficStatistics.txBCBcnt)); } } return; } /*========================================================================== FUNCTION WLANTL_FatalErrorHandler DESCRIPTION Handle Fatal errors detected on the TX path. 
Currently issues SSR to recover from the error. DEPENDENCIES The TL must be initialized before this gets called. PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to TL's or WDA's control block can be extracted from its context RETURN VALUE None SIDE EFFECTS ============================================================================*/ v_VOID_t WLANTL_FatalErrorHandler ( v_PVOID_t *pVosContext ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_FATAL, "WLAN TL: %s Enter ", __func__)); if ( NULL == pVosContext ) { TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "%s: Global VoS Context or TL Context are NULL", __func__)); return; } /* * Issue SSR. vos_wlanRestart has tight checks to make sure that * we do not send an FIQ if previous FIQ is not processed */ vos_wlanRestart(); } /*========================================================================== FUNCTION WLANTL_TLDebugMessage DESCRIPTION Post a TL Snapshot request, posts message in TxThread. DEPENDENCIES The TL must be initialized before this gets called. PARAMETERS IN displaySnapshot Boolean showing whether to dump the snapshot or not. 
RETURN VALUE None SIDE EFFECTS ============================================================================*/ v_VOID_t WLANTL_TLDebugMessage ( v_U32_t debugFlags ) { vos_msg_t vosMsg; VOS_STATUS status; if(debugFlags & WLANTL_DEBUG_TX_SNAPSHOT) { vosMsg.reserved = 0; vosMsg.bodyptr = NULL; vosMsg.type = WLANTL_TX_SNAPSHOT; status = vos_tx_mq_serialize( VOS_MODULE_ID_TL, &vosMsg); if(status != VOS_STATUS_SUCCESS) { TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "TX Msg Posting Failed with status: %d",status)); return; } } if (debugFlags & WLANTL_DEBUG_FW_CLEANUP) { vosMsg.reserved = 0; vosMsg.bodyptr = NULL; vosMsg.type = WLANTL_TX_FW_DEBUG; status = vos_tx_mq_serialize( VOS_MODULE_ID_TL, &vosMsg); if(status != VOS_STATUS_SUCCESS) { TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "TX Msg Posting Failed with status: %d",status)); return; } } if(debugFlags & WLANTL_DEBUG_KICKDXE) { vosMsg.reserved = 0; vosMsg.bodyptr = NULL; vosMsg.type = WLANTL_TX_KICKDXE; status = vos_tx_mq_serialize( VOS_MODULE_ID_TL, &vosMsg); if(status != VOS_STATUS_SUCCESS) { TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "TX Msg Posting Failed with status: %d",status)); return; } } return; } /*========================================================================== FUNCTION WLANTL_FatalError DESCRIPTION Fatal error reported in TX path, post an event to TX Thread for further handling DEPENDENCIES The TL must be initialized before this gets called. 
  PARAMETERS

    VOID

  RETURN VALUE
    None

  SIDE EFFECTS

============================================================================*/
v_VOID_t
WLANTL_FatalError
(
 v_VOID_t
)
{
  vos_msg_t  vosMsg;
  VOS_STATUS status;

  /* Post an empty-bodied WLANTL_TX_FATAL_ERROR message to the TX thread;
     the actual recovery is performed by the handler in TX thread context. */
  vosMsg.reserved = 0;
  vosMsg.bodyptr  = NULL;
  vosMsg.type     = WLANTL_TX_FATAL_ERROR;

  status = vos_tx_mq_serialize( VOS_MODULE_ID_TL, &vosMsg);
  if(status != VOS_STATUS_SUCCESS)
  {
     /* Best effort only: nothing more can be done if the queue post fails. */
     TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
         "%s: TX Msg Posting Failed with status: %d", __func__,status));
  }
  return;
}

/*============================================================================
                           TL STATE MACHINE
============================================================================*/

/*==========================================================================
  FUNCTION    WLANTL_STATxConn

  DESCRIPTION
    Transmit in connected state - only EAPOL and WAI packets allowed

  DEPENDENCIES
    The STA must be registered with TL before this function can be called.

  PARAMETERS

   IN
   pvosGCtx:       pointer to the global vos context; a handle to TL's
                   control block can be extracted from its context
   ucSTAId:        identifier of the station being processed
   vosDataBuff:    pointer to the tx vos buffer

  RETURN VALUE
    The result code associated with performing the operation

    VOS_STATUS_E_FAULT:   pointer to TL cb is NULL ; access would cause a
                          page fault
    VOS_STATUS_SUCCESS:   Everything is good :)

    Other return values are possible coming from the called functions.
    Please check API for additional info.
  SIDE EFFECTS
    Updates the per-STA AC mask / pending-packet bookkeeping and marks the
    TL as having urgent (security) traffic.

============================================================================*/
VOS_STATUS
WLANTL_STATxConn
(
  v_PVOID_t     pvosGCtx,
  v_U8_t        ucSTAId,
  vos_pkt_t**   pvosDataBuff,
  v_BOOL_t      bForwardIAPPwithLLC  /* NOTE(review): unused in this function */
)
{
   v_U16_t               usPktLen;
   VOS_STATUS            vosStatus;
   v_MACADDR_t           vDestMacAddr;
   vos_pkt_t*            vosDataBuff = NULL;
   WLANTL_CbType*        pTLCb       = NULL;
   WLANTL_STAClientType* pClientSTA  = NULL;
   WLANTL_MetaInfoType   tlMetaInfo;
   v_U8_t                ucTypeSubtype = 0;
   v_U8_t                ucTid;
   v_U8_t                extraHeadSpace = 0;
   v_U8_t                ucWDSEnabled = 0;
   v_U8_t                ucAC, ucACMask, i;
   v_U32_t               txFlag = HAL_TX_NO_ENCRYPTION_MASK;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Sanity check
    Extract TL control block
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
     VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
         "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_STATxConn");
    *pvosDataBuff = NULL;
     return VOS_STATUS_E_FAULT;
  }

  pClientSTA = pTLCb->atlSTAClients[ucSTAId];

  if ( NULL == pClientSTA )
  {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
          "WLAN TL:Client Memory was not allocated on %s", __func__));
      return VOS_STATUS_E_FAILURE;
  }

 /*-------------------------------------------------------------------
      Disable AC temporary - if successfull retrieve re-enable
      The order is justified because of the possible scenario
       - TL tryes to fetch packet for AC and it returns NULL
       - TL analyzes the data it has received to see if there are
       any more pkts available for AC -> if not TL will disable AC
       - however it is possible that while analyzing results TL got
       preempted by a pending indication where the mask was again set
       TL will not check again and as a result when it resumes
       execution it will disable AC
      To prevent this the AC will be disabled here and if retrieve
      is successfull it will be re-enabled
  -------------------------------------------------------------------*/
  //LTI:pTLCb->atlSTAClients[ucSTAId].
  //LTI:   aucACMask[pTLCb->atlSTAClients[ucSTAId].ucCurrentAC] = 0;

  /*------------------------------------------------------------------------
    Fetch packet from HDD
   ------------------------------------------------------------------------*/
#ifdef FEATURE_WLAN_TDLS
  if ((WLAN_STA_SOFTAP != pClientSTA->wSTADesc.wSTAType) &&
      !(vos_concurrent_open_sessions_running()) && !pTLCb->ucTdlsPeerCount)
  {
#else
  if ((WLAN_STA_SOFTAP != pClientSTA->wSTADesc.wSTAType) &&
      !(vos_concurrent_open_sessions_running()))
  {
#endif
    ucAC = pClientSTA->ucCurrentAC;

  /*-------------------------------------------------------------------
      Disable AC temporary - if successfull retrieve re-enable
      The order is justified because of the possible scenario
       - TL tryes to fetch packet for AC and it returns NULL
       - TL analyzes the data it has received to see if there are
       any more pkts available for AC -> if not TL will disable AC
       - however it is possible that while analyzing results TL got
       preempted by a pending indication where the mask was again set
       TL will not check again and as a result when it resumes
       execution it will disable AC
      To prevent this the AC will be disabled here and if retrieve
      is successfull it will be re-enabled
  -------------------------------------------------------------------*/
    pClientSTA->aucACMask[ucAC] = 0;
  }
  else
  {
    //softap case
    ucAC = pTLCb->uCurServedAC;
    pClientSTA->aucACMask[ucAC] = 0;
  }

  /*You make an initial assumption that HDD has no more data and if the
      assumption was wrong you reset the flags to their original state
     This will prevent from exposing a race condition between checking
     with HDD for packets and setting the flags to false*/
  //LTI: vos_atomic_set_U8( &pTLCb->atlSTAClients[ucSTAId].ucPktPending, 0);
  //LTI: pTLCb->atlSTAClients[ucSTAId].ucNoMoreData = 1;

  vos_atomic_set_U8( &pClientSTA->ucPktPending, 0);

  WLAN_TL_AC_ARRAY_2_MASK( pClientSTA, ucACMask, i);

  /*You make an initial assumption that HDD has no more data and if the
     assumption was wrong you reset the flags to their original state
     This will prevent from exposing a race condition between checking
     with HDD for packets and setting the flags to false*/
  if ( 0 == ucACMask )
  {
     pClientSTA->ucNoMoreData = 1;
  }
  else
  {
     vos_atomic_set_U8( &pClientSTA->ucPktPending, 1);
  }

  VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO,
        "WLAN TL: WLANTL_STATxConn fetching packet from HDD for AC: %d AC Mask: %d Pkt Pending: %d",
             ucAC, ucACMask, pClientSTA->ucPktPending);

  /*------------------------------------------------------------------------
    Fetch tx packet from HDD
   ------------------------------------------------------------------------*/
  vosStatus = pClientSTA->pfnSTAFetchPkt( pvosGCtx,
                               &ucSTAId,
                               ucAC,
                               &vosDataBuff, &tlMetaInfo );

  if (( VOS_STATUS_SUCCESS != vosStatus ) ||
      ( NULL == vosDataBuff ))
  {
    TLLOG1(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO,
               "WLAN TL:No more data at HDD status %d", vosStatus));
    *pvosDataBuff = NULL;

    /*--------------------------------------------------------------------
      Reset AC for the serviced station to the highest priority AC
      -> due to no more data at the station
      Even if this AC is not supported by the station, correction will be
      made in the main TL loop
    --------------------------------------------------------------------*/
    pClientSTA->ucCurrentAC     = WLANTL_AC_HIGH_PRIO;
    pClientSTA->ucCurrentWeight = 0;

    VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO,
          "WLAN TL: WLANTL_STATxConn no more packets in HDD for AC: %d AC Mask: %d",
               ucAC, ucACMask);

    return vosStatus;
  }

  /*There are still packets in HDD - set back the pending packets and
    the no more data assumption*/
  vos_atomic_set_U8( &pClientSTA->ucPktPending, 1);
  pClientSTA->ucNoMoreData = 0;
  pClientSTA->aucACMask[ucAC] = 1;

#ifdef WLAN_PERF
  vos_pkt_set_user_data_ptr( vosDataBuff, VOS_PKT_USER_DATA_ID_BAL,
                 (v_PVOID_t)0);
#endif /*WLAN_PERF*/

#ifdef FEATURE_WLAN_WAPI
  /*------------------------------------------------------------------------
    If the packet is neither an Eapol packet nor a WAI packet then drop it
   ------------------------------------------------------------------------*/
  if ( 0 == tlMetaInfo.ucIsEapol && 0 == tlMetaInfo.ucIsWai )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO,
               "WLAN TL:Only EAPOL or WAI packets allowed before authentication"));

    /* Fail tx for packet */
    pClientSTA->pfnSTATxComp( pvosGCtx, vosDataBuff,
                              VOS_STATUS_E_BADMSG);
    vosDataBuff = NULL;
    *pvosDataBuff = NULL;
    return VOS_STATUS_SUCCESS;
  }
#else
  if ( 0 == tlMetaInfo.ucIsEapol )
  {
    TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
               "WLAN TL:Received non EAPOL packet before authentication"));

    /* Fail tx for packet */
    pClientSTA->pfnSTATxComp( pvosGCtx, vosDataBuff,
                              VOS_STATUS_E_BADMSG);
    vosDataBuff = NULL;
    *pvosDataBuff = NULL;
    return VOS_STATUS_SUCCESS;
  }
#endif /* FEATURE_WLAN_WAPI */

  /*-------------------------------------------------------------------------
    Check TID
   -------------------------------------------------------------------------*/
  ucTid     = tlMetaInfo.ucTID;

  /*Make sure TID is valid*/
  if ( WLANTL_TID_INVALID(ucTid))
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
           "WLAN TL:Invalid TID sent in meta info %d - defaulting to 0 (BE)",
             ucTid));
     ucTid = 0;
  }

  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
             "WLAN TL:Attaching BD header to pkt on WLANTL_STATxConn"));

#ifdef FEATURE_WLAN_WAPI
  /*------------------------------------------------------------------------
    Translate 802.3 frame to 802.11 if Frame translation is enabled or if
    frame is a WAI frame.
   ------------------------------------------------------------------------*/
  if ( ( 1 == tlMetaInfo.ucIsWai ) ||
       ( 0 == tlMetaInfo.ucDisableFrmXtl ) )
#else
  /*------------------------------------------------------------------------
    Translate 802.3 frame to 802.11 if Frame translation is enabled
   ------------------------------------------------------------------------*/
  if ( ( 0 == tlMetaInfo.ucDisableFrmXtl ) &&
      ( 0 != pClientSTA->wSTADesc.ucSwFrameTXXlation) )
#endif //#ifdef FEATURE_WLAN_WAPI
  {
    /* NOTE(review): &vosStatus is passed as the second argument and the
       return value is then assigned over it; looks odd but left untouched —
       confirm against WLANTL_Translate8023To80211Header's signature. */
    vosStatus =  WLANTL_Translate8023To80211Header( vosDataBuff, &vosStatus,
                                                    pTLCb, &ucSTAId,
                                                    &tlMetaInfo, &ucWDSEnabled,
                                                    &extraHeadSpace);
    if ( VOS_STATUS_SUCCESS != vosStatus )
    {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
                 "WLAN TL:Error when translating header WLANTL_STATxConn"));
      return vosStatus;
    }

    tlMetaInfo.ucDisableFrmXtl = 1;
  }

  /*-------------------------------------------------------------------------
    Call HAL to fill BD header
   -------------------------------------------------------------------------*/
  ucTypeSubtype |= (WLANTL_80211_DATA_TYPE << 4);

  if ( pClientSTA->wSTADesc.ucQosEnabled )
  {
    ucTypeSubtype |= (WLANTL_80211_DATA_QOS_SUBTYPE);
  }

#ifdef FEATURE_WLAN_WAPI
  /* TL State does not transition to AUTHENTICATED till GTK is installed, So in
   * case of WPA where GTK handshake is done after the 4 way handshake, the
   * unicast 2/2 EAPOL packet from the STA->AP has to be encrypted even before
   * the TL is in authenticated state. Since the PTK has been installed
   * already (after the 4 way handshake) we make sure that all traffic
   * is encrypted henceforth.(Note: TL is still not in AUTHENTICATED state so
   * we will only allow EAPOL data or WAI in case of WAPI)
   */
  if (tlMetaInfo.ucIsEapol && pClientSTA->ptkInstalled)
  {
    txFlag = 0;
  }
#else
  if (pClientSTA->ptkInstalled)
  {
    txFlag = 0;
  }
#endif

  vosStatus = (VOS_STATUS)WDA_DS_BuildTxPacketInfo( pvosGCtx,
                     vosDataBuff , &vDestMacAddr,
                     tlMetaInfo.ucDisableFrmXtl, &usPktLen,
                     pClientSTA->wSTADesc.ucQosEnabled, ucWDSEnabled,
                     extraHeadSpace,
                     ucTypeSubtype, &pClientSTA->wSTADesc.vSelfMACAddress,
                     ucTid, txFlag, tlMetaInfo.usTimeStamp,
                     tlMetaInfo.ucIsEapol || tlMetaInfo.ucIsWai, tlMetaInfo.ucUP,
                     tlMetaInfo.ucTxBdToken);

  if ( VOS_STATUS_SUCCESS != vosStatus )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
               "WLAN TL:Failed while attempting to fill BD %d", vosStatus));
    *pvosDataBuff = NULL;
    return vosStatus;
  }

  /*-----------------------------------------------------------------------
    Update tx counter for BA session query for tx side
    !1 - should this be done for EAPOL frames?
    -----------------------------------------------------------------------*/
  pClientSTA->auTxCount[ucTid]++;

  /* Stash the TX completion callback on the packet so the core can invoke
     it once transmission finishes. */
  vos_pkt_set_user_data_ptr( vosDataBuff, VOS_PKT_USER_DATA_ID_TL,
               (v_PVOID_t)pClientSTA->pfnSTATxComp );

  /*------------------------------------------------------------------------
    Save data to input pointer for TL core
  ------------------------------------------------------------------------*/
  *pvosDataBuff = vosDataBuff;
  /*security frames cannot be delayed*/
  pTLCb->bUrgent      = TRUE;

  /* TX Statistics */
  if (!(tlMetaInfo.ucBcast || tlMetaInfo.ucMcast))
  {
    /* This is TX UC frame */
    pClientSTA->trafficStatistics.txUCFcnt++;
    pClientSTA->trafficStatistics.txUCBcnt += usPktLen;
  }

  return VOS_STATUS_SUCCESS;
}/* WLANTL_STATxConn */

/*==========================================================================
  FUNCTION    WLANTL_STATxAuth

  DESCRIPTION
    Transmit in authenticated state - all data allowed

  DEPENDENCIES
    The STA must be registered with TL before this function can be called.

  PARAMETERS

   IN
   pvosGCtx:       pointer to the global vos context; a handle to TL's
                   control block can be extracted from its context
   ucSTAId:        identifier of the station being processed
   vosDataBuff:    pointer to the tx vos buffer

  RETURN VALUE
    The result code associated with performing the operation

    VOS_STATUS_E_FAULT:   pointer to TL cb is NULL ; access would cause a
                          page fault
    VOS_STATUS_SUCCESS:   Everything is good :)

    Other return values are possible coming from the called functions.
    Please check API for additional info.
  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_STATxAuth
(
  v_PVOID_t     pvosGCtx,
  v_U8_t        ucSTAId,
  vos_pkt_t**   pvosDataBuff,
  v_BOOL_t      bForwardIAPPwithLLC
)
{
   v_U16_t              usPktLen;
   VOS_STATUS           vosStatus;
   v_MACADDR_t          vDestMacAddr;
   vos_pkt_t*           vosDataBuff = NULL;
   WLANTL_CbType*       pTLCb       = NULL;
   WLANTL_MetaInfoType  tlMetaInfo;
   v_U8_t               ucTypeSubtype = 0;
   WLANTL_ACEnumType    ucAC;
   WLANTL_ACEnumType    ucNextAC;
   v_U8_t               ucTid;
   v_U8_t               ucSwFrmXtl = 0;
   v_U8_t               extraHeadSpace = 0;
   WLANTL_STAClientType *pStaClient = NULL;
   v_U8_t               ucWDSEnabled = 0;
   v_U32_t              ucTxFlag   = 0;
   v_U8_t               ucACMask, i;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Sanity check
    Extract TL control block
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if (( NULL == pTLCb ) || ( NULL == pvosDataBuff ))
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
               "WLAN TL:Invalid input params on WLANTL_STATxAuth TL %p DB %p",
               pTLCb, pvosDataBuff));
    if (NULL != pvosDataBuff)
    {
      *pvosDataBuff = NULL;
    }
    if(NULL != pTLCb)
    {
        if ( NULL == pTLCb->atlSTAClients[ucSTAId] )
        {
            TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
                "WLAN TL:Client Memory was not allocated on %s", __func__));
            return VOS_STATUS_E_FAILURE;
        }
        /* Mark the station as drained so the scheduler skips it */
        pTLCb->atlSTAClients[ucSTAId]->ucNoMoreData = 1;
    }
    return VOS_STATUS_E_FAULT;
  }
  pStaClient = pTLCb->atlSTAClients[ucSTAId];
  if ( NULL == pStaClient )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL:Client Memory was not allocated on %s", __func__));
    return VOS_STATUS_E_FAILURE;
  }

  vos_mem_zero(&tlMetaInfo, sizeof(tlMetaInfo));

  /*------------------------------------------------------------------------
    Fetch packet from HDD
   ------------------------------------------------------------------------*/
#ifdef FEATURE_WLAN_TDLS
  if ((WLAN_STA_SOFTAP != pStaClient->wSTADesc.wSTAType) &&
      (!vos_concurrent_open_sessions_running()) &&
      !pTLCb->ucTdlsPeerCount)
  {
#else
  if ((WLAN_STA_SOFTAP != pStaClient->wSTADesc.wSTAType) &&
      (!vos_concurrent_open_sessions_running()))
  {
#endif
      ucAC = pStaClient->ucCurrentAC;

  /*-------------------------------------------------------------------
      Disable AC temporarily - if retrieval is successful, re-enable.
      The order is justified because of the possible scenario:
       - TL tries to fetch packet for AC and it returns NULL
       - TL analyzes the data it has received to see if there are
         any more pkts available for AC -> if not TL will disable AC
       - however it is possible that while analyzing results TL got
         preempted by a pending indication where the mask was again set;
         TL will not check again and as a result when it resumes
         execution it will disable AC
       To prevent this the AC will be disabled here and if retrieval
       is successful it will be re-enabled
  -------------------------------------------------------------------*/
      pStaClient->aucACMask[pStaClient->ucCurrentAC] = 0;

      // don't reset it, as other AC queues in HDD may have packets
      //vos_atomic_set_U8( &pStaClient->ucPktPending, 0);
  }
  else
  {
      //softap case: serve the AC chosen by the global softap scheduler
      ucAC = pTLCb->uCurServedAC;
      pStaClient->aucACMask[ucAC] = 0;

      //vos_atomic_set_U8( &pStaClient->ucPktPending, 0);
  }

  WLAN_TL_AC_ARRAY_2_MASK( pStaClient, ucACMask, i);

  /*You make an initial assumption that HDD has no more data and if the
    assumption was wrong you reset the flags to their original state.
    This will prevent exposing a race condition between checking with HDD
    for packets and setting the flags to false*/
  if ( 0 == ucACMask )
  {
     vos_atomic_set_U8( &pStaClient->ucPktPending, 0);
     pStaClient->ucNoMoreData = 1;
  }

  /* NOTE(review): pfnSTAFetchPkt may change ucSTAId (passed by address) —
     e.g. rerouting to a different peer; confirm against HDD callback. */
  vosStatus = pStaClient->pfnSTAFetchPkt( pvosGCtx,
                               &ucSTAId,
                               ucAC,
                               &vosDataBuff, &tlMetaInfo );

  if (( VOS_STATUS_SUCCESS != vosStatus ) || ( NULL == vosDataBuff ))
  {
    VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO,
               "WLAN TL:Failed while attempting to fetch pkt from HDD QId:%d status:%d",
               ucAC, vosStatus);
    *pvosDataBuff = NULL;
    /*--------------------------------------------------------------------
      Reset AC for the serviced station to the highest priority AC
      -> due to no more data at the station.
      Even if this AC is not supported by the station, correction will be
      made in the main TL loop.
    --------------------------------------------------------------------*/
    pStaClient->ucCurrentAC     = WLANTL_AC_HIGH_PRIO;
    pStaClient->ucCurrentWeight = 0;

    return vosStatus;
  }

  WLANTL_StatHandleTXFrame(pvosGCtx, ucSTAId, vosDataBuff, NULL, &tlMetaInfo);

  /*There are still packets in HDD - set back the pending packets and
    the no more data assumption*/
  vos_atomic_set_U8( &pStaClient->ucPktPending, 1);
  pStaClient->ucNoMoreData = 0;

  if (WLAN_STA_SOFTAP != pStaClient->wSTADesc.wSTAType)
  {
  // don't need to set it, as we don't reset it in this function.
  //vos_atomic_set_U8( &pTLCb->atlSTAClients[ucSTAId].ucPktPending, 1);
  }

#ifdef WLAN_PERF
   vos_pkt_set_user_data_ptr( vosDataBuff, VOS_PKT_USER_DATA_ID_BAL,
                       (v_PVOID_t)0);
#endif /*WLAN_PERF*/

   /*-------------------------------------------------------------------------
    Check TID
   -------------------------------------------------------------------------*/
   ucTid     = tlMetaInfo.ucTID;

  /*Make sure TID is valid*/
  if ( WLANTL_TID_INVALID(ucTid))
  {
     TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
         "WLAN TL:Invalid TID sent in meta info %d - defaulting to 0 (BE)",
          ucTid));
     ucTid = 0;
  }

  /*Save for UAPSD timer consideration*/
  pStaClient->ucServicedAC = ucAC;

  /* Weighted round-robin bookkeeping: same AC decrements remaining weight,
     a new AC reloads its configured weight (minus the packet just taken). */
  if ( ucAC == pStaClient->ucCurrentAC )
  {
    pStaClient->aucACMask[pStaClient->ucCurrentAC] = 1;
    pStaClient->ucCurrentWeight--;
  }
  else
  {
    pStaClient->ucCurrentAC     = ucAC;
    pStaClient->ucCurrentWeight = pTLCb->tlConfigInfo.ucAcWeights[ucAC] - 1;

    pStaClient->aucACMask[pStaClient->ucCurrentAC] = 1;
  }

  if (WLAN_STA_SOFTAP != pStaClient->wSTADesc.wSTAType)
  {
  if ( 0 == pStaClient->ucCurrentWeight )
  {
    WLANTL_ACEnumType tempAC = ucAC;
    /*-----------------------------------------------------------------------
       Choose next AC - !!! optimize me
       NOTE(review): rotates downward from the current AC, wrapping from
       WLANTL_AC_BK back to WLANTL_AC_HIGH_PRIO — confirm enum ordering.
    -----------------------------------------------------------------------*/
    while ( 0 != ucACMask )
    {
      if(tempAC == WLANTL_AC_BK)
         ucNextAC = WLANTL_AC_HIGH_PRIO;
      else
         ucNextAC = (tempAC - 1);

      if ( 0 != pStaClient->aucACMask[ucNextAC] )
      {
         pStaClient->ucCurrentAC     = ucNextAC;
         pStaClient->ucCurrentWeight = pTLCb->tlConfigInfo.ucAcWeights[ucNextAC];

         TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
                    "WLAN TL: Changing serviced AC to: %d with Weight: %d",
                    pStaClient->ucCurrentAC , pStaClient->ucCurrentWeight));
         break;
      }

      tempAC = ucNextAC;
    }
  }
  }

  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
            "WLAN TL:Attaching BD header to pkt on WLANTL_STATxAuth"));

  /*------------------------------------------------------------------------
    Translate 802.3 frame to 802.11
   ------------------------------------------------------------------------*/
  if ( 0 == tlMetaInfo.ucDisableFrmXtl )
  {
     /* Needs frame translation */
    // if the client has not enabled SW-only frame translation
    // and if the frame is a unicast frame
    //   (HW frame translation does not support multiple broadcast domains
    //   so we use SW frame translation for broadcast/multicast frames)
#ifdef FEATURE_WLAN_WAPI
    // and if the frame is not a WAPI frame
#endif
    // then use HW-based frame translation

    if ( ( 0 == pStaClient->wSTADesc.ucSwFrameTXXlation ) &&
         ( 0 == tlMetaInfo.ucBcast ) &&
         ( 0 == tlMetaInfo.ucMcast )
#ifdef FEATURE_WLAN_WAPI
         && ( tlMetaInfo.ucIsWai != 1 )
#endif
       )
    {
#ifdef WLAN_PERF
        v_U32_t uFastFwdOK = 0;

        /* HW based translation. See if the frame could be fast forwarded */
        WDA_TLI_FastHwFwdDataFrame( pvosGCtx, vosDataBuff , &vosStatus,
                                   &uFastFwdOK, &tlMetaInfo,  &pStaClient->wSTADesc);

        if( VOS_STATUS_SUCCESS == vosStatus )
        {
            if(uFastFwdOK)
            {
                /* Packet could be fast forwarded now */
                vos_pkt_set_user_data_ptr( vosDataBuff, VOS_PKT_USER_DATA_ID_TL,
                               (v_PVOID_t)pStaClient->pfnSTATxComp );

                *pvosDataBuff = vosDataBuff;

                /* TODO: Do we really need to update WLANTL_HSHandleTXFrame()
                   stats for every pkt? */
                pStaClient->auTxCount[tlMetaInfo.ucTID]++;
                return vosStatus;
             }
             /* can't be fast forwarded, fall through normal (slow) path. */
        }
        else
        {
            TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
                       "WLAN TL:Failed while attempting to fastFwd BD %d", vosStatus));
            *pvosDataBuff = NULL;
            return vosStatus;
        }
#endif /*WLAN_PERF*/
    }
    else
    {
      /* SW based translation */
      vosStatus = WLANTL_Translate8023To80211Header( vosDataBuff, &vosStatus,
                                                     pTLCb, &ucSTAId,
                                                     &tlMetaInfo, &ucWDSEnabled,
                                                     &extraHeadSpace);
      if ( VOS_STATUS_SUCCESS != vosStatus )
      {
        TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
                  "WLAN TL:Error when translating header WLANTL_STATxAuth"));
        return vosStatus;
      }

      TLLOG4(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_LOW,
                "WLAN TL software translation success"));
      ucSwFrmXtl = 1;
      /* Header already converted in SW; tell the BD builder not to
         translate again */
      tlMetaInfo.ucDisableFrmXtl = 1;
    }
  }

#ifdef FEATURE_WLAN_TDLS
  /*In case of TDLS, if the packet is destined to a TDLS STA, ucSTAId may
    change (ucSTAId is passed by address to the translate call above),
    so update the pStaClient accordingly */
  pStaClient = pTLCb->atlSTAClients[ucSTAId];

  if ( NULL == pStaClient )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
           "pStaClient is NULL %s", __func__));
    return VOS_STATUS_E_FAILURE;
  }
#endif

  /*-------------------------------------------------------------------------
    Call HAL to fill BD header
   -------------------------------------------------------------------------*/
  ucTypeSubtype |= (WLANTL_80211_DATA_TYPE << 4);

  if ( pStaClient->wSTADesc.ucQosEnabled )
  {
    ucTypeSubtype |= (WLANTL_80211_DATA_QOS_SUBTYPE);
  }

  /* ucAC now points to TL Q ID with a new queue added in TL,
   * hence look for the uapsd info for the correct AC that
   * this packet belongs to.
   */
  ucTxFlag  = (0 != pStaClient->wUAPSDInfo[tlMetaInfo.ac].ucSet)?
              HAL_TRIGGER_ENABLED_AC_MASK:0;

#ifdef FEATURE_WLAN_WAPI
  if ( pStaClient->wSTADesc.ucIsWapiSta == 1 )
  {
#ifdef LIBRA_WAPI_SUPPORT
    ucTxFlag = ucTxFlag | HAL_WAPI_STA_MASK;
#endif //LIBRA_WAPI_SUPPORT
    /* WAI handshake frames must go out unencrypted */
    if ( tlMetaInfo.ucIsWai == 1 )
    {
      ucTxFlag = ucTxFlag | HAL_TX_NO_ENCRYPTION_MASK;
    }
  }
#endif /* FEATURE_WLAN_WAPI */
#ifdef FEATURE_WLAN_TDLS
  if ( pStaClient->wSTADesc.wSTAType == WLAN_STA_TDLS )
  {
    ucTxFlag = ucTxFlag | HAL_TDLS_PEER_STA_MASK;
  }
#endif /* FEATURE_WLAN_TDLS */
  if( tlMetaInfo.ucIsArp )
  {
    if (pStaClient->arpOnWQ5)
    {
      ucTxFlag |= HAL_USE_FW_IN_TX_PATH;
    }
    /* NOTE(review): arpRate toggles between 1 and 3 via the XOR below,
       alternating the BD rate used for ARP frames — confirm intended
       rate-index mapping with the HAL definitions. */
    if (pStaClient->arpRate == 0)
    {
      ucTxFlag |= HAL_USE_BD_RATE_1_MASK;
    }
    else if (pStaClient->arpRate == 1 || pStaClient->arpRate == 3)
    {
      pStaClient->arpRate ^= 0x2;
      ucTxFlag |= HAL_USE_BD_RATE_1_MASK<<(pStaClient->arpRate-1);
    }
    TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO,
        "arp pkt sending on BD rate: %hhu", pStaClient->arpRate));
  }

  vosStatus = (VOS_STATUS)WDA_DS_BuildTxPacketInfo( pvosGCtx,
                     vosDataBuff , &vDestMacAddr,
                     tlMetaInfo.ucDisableFrmXtl, &usPktLen,
                     pStaClient->wSTADesc.ucQosEnabled, ucWDSEnabled,
                     extraHeadSpace,
                     ucTypeSubtype, &pStaClient->wSTADesc.vSelfMACAddress,
                     ucTid, ucTxFlag, tlMetaInfo.usTimeStamp,
                     tlMetaInfo.ucIsEapol, tlMetaInfo.ucUP,
                     tlMetaInfo.ucTxBdToken);

  if(!VOS_IS_STATUS_SUCCESS(vosStatus))
  {
    TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
              "Fill TX BD Error status %d", vosStatus));

    return vosStatus;
  }

  /* TX Statistics */
  if (!(tlMetaInfo.ucBcast || tlMetaInfo.ucMcast))
  {
    /* This is TX UC frame */
    pStaClient->trafficStatistics.txUCFcnt++;
    pStaClient->trafficStatistics.txUCBcnt += usPktLen;
  }

#ifndef FEATURE_WLAN_TDLS
  /*-----------------------------------------------------------------------
    Update tx counter for BA session query for tx side
    -----------------------------------------------------------------------*/
  pStaClient->auTxCount[ucTid]++;
#else
  /* TDLS: ucSTAId may have been rerouted above, so index through the
     client table rather than the cached pointer */
  pTLCb->atlSTAClients[ucSTAId]->auTxCount[ucTid]++;
#endif

  /* This code is to send traffic with lower priority AC when we do not
     get admitted to send it. Today HAL does not downgrade AC so this code
     does not get executed. (In other words, HAL doesn't change tid. The if
     statement is always false.)
     NOTE: In the case of LA, downgrade occurs in HDD (that was the change
     Phani made during WMM-AC plugfest). If WM & BMP also took this approach,
     then there will be no need for any AC downgrade logic in TL/WDI. */
#if 0
  if (( ucTid != tlMetaInfo.ucTID ) &&
      ( 0 != pStaClient->wSTADesc.ucQosEnabled ) &&
      ( 0 != ucSwFrmXtl ))
  {
    /*---------------------------------------------------------------------
      !! FIX me: Once downgrading is clear put in the proper change
    ---------------------------------------------------------------------*/
    ucQCOffset = WLANHAL_TX_BD_HEADER_SIZE + WLANTL_802_11_HEADER_LEN;

    //!!!Fix this replace peek with extract
    vos_pkt_peek_data( vosDataBuff, ucQCOffset,(v_PVOID_t)&pucQosCtrl,
                       sizeof(*pucQosCtrl));
    *pucQosCtrl = ucTid; //? proper byte order
  }
#endif

  if ( VOS_STATUS_SUCCESS != vosStatus )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
              "WLAN TL:Failed while attempting to fill BD %d", vosStatus));
    *pvosDataBuff = NULL;
    return vosStatus;
  }

  /* Stash the TX-complete callback on the packet so the lower layer can
     invoke it when transmission finishes */
  vos_pkt_set_user_data_ptr( vosDataBuff, VOS_PKT_USER_DATA_ID_TL,
                   (v_PVOID_t)pStaClient->pfnSTATxComp );

  *pvosDataBuff = vosDataBuff;

  /*BE & BK can be delayed; VO and VI frames cannot be delayed*/
  if ( pStaClient->ucServicedAC > WLANTL_AC_BE )
  {
    pTLCb->bUrgent= TRUE;
  }

  return VOS_STATUS_SUCCESS;
}/* WLANTL_STATxAuth */

/*==========================================================================
  FUNCTION    WLANTL_STATxDisc

  DESCRIPTION
    Transmit in disconnected state - no data allowed

  DEPENDENCIES
    The STA must be registered with TL before this function can be called.

  PARAMETERS

   IN
   pvosGCtx:       pointer to the global vos context; a handle to TL's
                   control block can be extracted from its context
   ucSTAId:        identifier of the station being processed
   vosDataBuff:    pointer to the tx vos buffer

  RETURN VALUE
    The result code associated with performing the operation

    VOS_STATUS_SUCCESS:   Everything is good :)

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_STATxDisc
(
  v_PVOID_t     pvosGCtx,
  v_U8_t        ucSTAId,
  vos_pkt_t**   pvosDataBuff,
  v_BOOL_t      bForwardIAPPwithLLC
)
{
   WLANTL_CbType*        pTLCb      = NULL;
   WLANTL_STAClientType* pClientSTA = NULL;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Sanity check
    Extract TL control block
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
     TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
           "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_STATxAuth"));
    *pvosDataBuff = NULL;
    return VOS_STATUS_E_FAULT;
  }

  pClientSTA = pTLCb->atlSTAClients[ucSTAId];

  if ( NULL == pClientSTA )
  {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
          "WLAN TL:Client Memory was not allocated on %s", __func__));
      return VOS_STATUS_E_FAILURE;
  }

  /*------------------------------------------------------------------------
    Error: no transmission is permitted in the disconnected state; return
    no buffer and quiesce this station's TX bookkeeping.
   ------------------------------------------------------------------------*/
  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
        "WLAN TL:Packet should not be transmitted in state disconnected ignoring"
        " request"));

  *pvosDataBuff = NULL;

  pClientSTA->ucNoMoreData = 1;

  //Should not be anything pending in disconnect state
  vos_atomic_set_U8( &pClientSTA->ucPktPending, 0);

  return VOS_STATUS_SUCCESS;
}/* WLANTL_STATxDisc */

/*==========================================================================
  FUNCTION    WLANTL_STARxConn

  DESCRIPTION
    Receive in connected state - only EAPOL

  DEPENDENCIES
    The STA must be registered with TL before this function can be called.

  PARAMETERS

   IN
   pvosGCtx:       pointer to the global vos context; a handle to TL's
                   control block can be extracted from its context
   ucSTAId:        identifier of the station being processed
   vosDataBuff:    pointer to the tx/rx vos buffer

  RETURN VALUE
    The result code associated with performing the operation

    VOS_STATUS_E_INVAL:   invalid input parameters
    VOS_STATUS_E_FAULT:   pointer to TL cb is NULL ; access would cause a
                          page fault
    VOS_STATUS_SUCCESS:   Everything is good :)

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_STARxConn
(
  v_PVOID_t     pvosGCtx,
  v_U8_t        ucSTAId,
  vos_pkt_t**   pvosDataBuff,
  v_BOOL_t      bForwardIAPPwithLLC
)
{
   WLANTL_CbType*           pTLCb = NULL;
   WLANTL_STAClientType*    pClientSTA = NULL;
   v_U16_t                  usEtherType = 0;
   v_U16_t                  usPktLen;
   v_U8_t                   ucMPDUHOffset;
   v_U16_t                  usMPDUDOffset;
   v_U16_t                  usMPDULen;
   v_U8_t                   ucMPDUHLen;
   v_U16_t                  usActualHLen = 0;
   VOS_STATUS               vosStatus  = VOS_STATUS_SUCCESS;
   vos_pkt_t*               vosDataBuff;
   v_PVOID_t                aucBDHeader;
   v_U8_t                   ucTid;
   WLANTL_RxMetaInfoType    wRxMetaInfo;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Sanity check
   ------------------------------------------------------------------------*/
  if (( NULL == pvosDataBuff ) || ( NULL == ( vosDataBuff = *pvosDataBuff )))
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
               "WLAN TL:Invalid parameter sent on WLANTL_STARxConn"));
    return VOS_STATUS_E_INVAL;
  }

  /*------------------------------------------------------------------------
    Extract TL control block
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_ChangeSTAState"));
    return VOS_STATUS_E_FAULT;
  }

  pClientSTA = pTLCb->atlSTAClients[ucSTAId];

  if ( NULL == pClientSTA )
  {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
          "WLAN TL:Client Memory was not allocated on %s", __func__));
      return VOS_STATUS_E_FAILURE;
  }

  /*------------------------------------------------------------------------
    Extract BD header and check if valid
    NOTE(review): vosStatus from the peek is not checked here; validity is
    judged solely by the aucBDHeader NULL test below.
   ------------------------------------------------------------------------*/
  vosStatus = WDA_DS_PeekRxPacketInfo( vosDataBuff, (v_PVOID_t)&aucBDHeader, 0/*Swap BD*/ );

  if ( NULL == aucBDHeader )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
               "WLAN TL:Cannot extract BD header"));
    VOS_ASSERT( 0 );
    return VOS_STATUS_E_FAULT;
  }

  ucMPDUHOffset = (v_U8_t)WDA_GET_RX_MPDU_HEADER_OFFSET(aucBDHeader);
  usMPDUDOffset = (v_U16_t)WDA_GET_RX_MPDU_DATA_OFFSET(aucBDHeader);
  usMPDULen     = (v_U16_t)WDA_GET_RX_MPDU_LEN(aucBDHeader);
  ucMPDUHLen    = (v_U8_t)WDA_GET_RX_MPDU_HEADER_LEN(aucBDHeader);
  ucTid         = (v_U8_t)WDA_GET_RX_TID(aucBDHeader);

  vos_pkt_get_packet_length( vosDataBuff, &usPktLen);

  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
             "WLAN TL:BD header processing data: HO %d DO %d Len %d HLen %d",
             ucMPDUHOffset, usMPDUDOffset, usMPDULen, ucMPDUHLen));

  /*It will cut out the 802.11 header if not used*/
  if ( VOS_STATUS_SUCCESS != WDA_DS_TrimRxPacketInfo( vosDataBuff ) )
  {
    TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
        "WLAN TL:BD header corrupted - dropping packet"));
    /* Drop packet */
    vos_pkt_return_packet(vosDataBuff);
    return VOS_STATUS_SUCCESS;
  }

  vosStatus = WLANTL_GetEtherType(aucBDHeader,vosDataBuff,ucMPDUHLen,&usEtherType);

  if( VOS_IS_STATUS_SUCCESS(vosStatus) )
  {
    /* NOTE(review): the preprocessor splices one of the two `if` blocks
       below onto the shared `else` that follows the #endif — keep the
       structure intact when editing. */
#ifdef FEATURE_WLAN_WAPI
    /* If frame is neither an EAPOL frame nor a WAI frame then we drop the frame*/
    /* TODO: Do we need a check to see if we are in WAPI mode? If not is it possible */
    /* that we get an EAPOL packet in WAPI mode or vice versa? */
    if ( WLANTL_LLC_8021X_TYPE != usEtherType && WLANTL_LLC_WAI_TYPE  != usEtherType )
    {
      VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
                 "WLAN TL:RX Frame not EAPOL or WAI EtherType %d - dropping", usEtherType );
      /* Drop packet */
      vos_pkt_return_packet(vosDataBuff);
    }
#else
    if ( WLANTL_LLC_8021X_TYPE != usEtherType )
    {
      VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
                 "WLAN TL:RX Frame not EAPOL EtherType %d - dropping", usEtherType);
      /* Drop packet */
      vos_pkt_return_packet(vosDataBuff);
    }
#endif /* FEATURE_WLAN_WAPI */
    else /* Frame is an EAPOL frame or a WAI frame*/
    {
      MTRACE(vos_trace(VOS_MODULE_ID_TL,
                  TRACE_CODE_TL_RX_CONN_EAPOL, ucSTAId, usEtherType ));
      VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO,
                 "WLAN TL:RX Frame EAPOL EtherType %d - processing", usEtherType);

      if (( 0 == WDA_GET_RX_FT_DONE(aucBDHeader) ) &&
         ( 0 != pClientSTA->wSTADesc.ucSwFrameRXXlation))
      {
        if (usMPDUDOffset > ucMPDUHOffset)
        {
          usActualHLen = usMPDUDOffset - ucMPDUHOffset;
        }

        vosStatus = WLANTL_Translate80211To8023Header( vosDataBuff, &vosStatus, usActualHLen,
                        ucMPDUHLen, pTLCb, ucSTAId, bForwardIAPPwithLLC);

        if ( VOS_STATUS_SUCCESS != vosStatus )
        {
          TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
              "WLAN TL:Failed to translate from 802.11 to 802.3 - dropping"));
          /* Drop packet */
          vos_pkt_return_packet(vosDataBuff);
          return vosStatus;
        }
      }
      /*-------------------------------------------------------------------
        Increment receive counter
        -------------------------------------------------------------------*/
      if ( !WLANTL_TID_INVALID( ucTid) )
      {
        pClientSTA->auRxCount[ucTid]++;
      }
      else
      {
        TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
               "WLAN TL:Invalid tid %d (Station ID %d) on %s",
               ucTid, ucSTAId, __func__));
      }

      TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
                "WLAN TL:Sending EAPoL frame to station %d AC %d", ucSTAId, ucTid));

      /*-------------------------------------------------------------------
        !!!Assuming TID = UP mapping
        -------------------------------------------------------------------*/
      wRxMetaInfo.ucUP = ucTid;

      TLLOG4(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_LOW,
               "WLAN TL %s:Sending data chain to station", __func__));
      if ( WLAN_STA_SOFTAP == pClientSTA->wSTADesc.wSTAType )
      {
        wRxMetaInfo.ucDesSTAId = WLAN_RX_SAP_SELF_STA_ID;
        pClientSTA->pfnSTARx( pvosGCtx, vosDataBuff, ucSTAId,
                                            &wRxMetaInfo );
      }
      else
        pClientSTA->pfnSTARx( pvosGCtx, vosDataBuff, ucSTAId,
                                            &wRxMetaInfo );
    }/*EAPOL frame or WAI frame*/
  }/*vos status success*/

  return VOS_STATUS_SUCCESS;
}/* WLANTL_STARxConn */

/*==========================================================================
  FUNCTION    WLANTL_FwdPktToHDD

  DESCRIPTION
    Determine the Destation Station ID and route the Frame to Upper Layer

  DEPENDENCIES

  PARAMETERS

   IN
   pvosGCtx:       pointer to the global vos context; a handle to TL's
                   control block can be extracted from its context
   ucSTAId:        identifier of the station being processed
   vosDataBuff:    pointer to the rx vos buffer

  RETURN VALUE
    The result code associated with performing the operation

    VOS_STATUS_E_INVAL:   invalid input parameters
    VOS_STATUS_E_FAULT:   pointer to TL cb is NULL ; access would cause a
                          page fault
    VOS_STATUS_SUCCESS:   Everything is good :)

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_FwdPktToHDD
(
  v_PVOID_t       pvosGCtx,
  vos_pkt_t*      pvosDataBuff,
  v_U8_t          ucSTAId
)
{
   v_MACADDR_t              DestMacAddress;
   v_MACADDR_t              *pDestMacAddress = &DestMacAddress;
   v_SIZE_t                 usMacAddSize = VOS_MAC_ADDR_SIZE;
   WLANTL_CbType*           pTLCb = NULL;
   WLANTL_STAClientType*    pClientSTA = NULL;
   vos_pkt_t*               vosDataBuff ;
   VOS_STATUS               vosStatus = VOS_STATUS_SUCCESS;
   v_U32_t*                 STAMetaInfoPtr;
   vos_pkt_t*               vosNextDataBuff ;
   v_U8_t                   ucDesSTAId;
   WLANTL_RxMetaInfoType    wRxMetaInfo;

  /*------------------------------------------------------------------------
    Sanity check
   ------------------------------------------------------------------------*/
  if (( NULL == pvosDataBuff ) || ( NULL == ( vosDataBuff = pvosDataBuff )))
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
               "WLAN TL:Invalid parameter sent on WLANTL_FwdPktToHdd"));
    return VOS_STATUS_E_INVAL;
  }

  /*------------------------------------------------------------------------
    Extract TL control block
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_FwdPktToHdd"));
    return VOS_STATUS_E_FAULT;
  }

  if(WLANTL_STA_ID_INVALID(ucSTAId))
  {
    TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,"ucSTAId %d is not valid",
           ucSTAId));
    return VOS_STATUS_E_INVAL;
  }

  pClientSTA = pTLCb->atlSTAClients[ucSTAId];

  if ( NULL == pClientSTA )
  {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
          "WLAN TL:Client Memory was not allocated on %s", __func__));
      return VOS_STATUS_E_FAILURE;
  }

  /* This is the change required for SoftAp to handle a Reordered Buffer.
     Since a STA may have packets destined to multiple destinations we have
     to process each packet at a time and determine its Destination. So the
     vos chain provided by the Reorder code is unchained and forwarded to the
     Upper Layer after determining the Destination */

  vosDataBuff = pvosDataBuff;
  while (vosDataBuff != NULL)
  {
    vos_pkt_walk_packet_chain( vosDataBuff, &vosNextDataBuff, 1/*true*/ );
    /* The per-packet user data is a pointer-sized integer packing the UP in
       the low bits (WLANTL_AC_MASK) and the destination STA id above
       WLANTL_STAID_OFFSET — see the encoder in WLANTL_STARxAuth. */
    vos_pkt_get_user_data_ptr( vosDataBuff, VOS_PKT_USER_DATA_ID_TL,
                               (v_PVOID_t *)&STAMetaInfoPtr );
    wRxMetaInfo.ucUP = (v_U8_t)((uintptr_t)STAMetaInfoPtr & WLANTL_AC_MASK);
    ucDesSTAId = (v_U8_t)(((uintptr_t)STAMetaInfoPtr) >> WLANTL_STAID_OFFSET);

    vosStatus = vos_pkt_extract_data( vosDataBuff, 0, (v_VOID_t *)pDestMacAddress, &usMacAddSize);
    if ( VOS_STATUS_SUCCESS != vosStatus )
    {
       TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
                  "WLAN TL: recv corrupted data packet"));
       vos_pkt_return_packet(vosDataBuff);
       return vosStatus;
    }

    TLLOG4(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_LOW,
               "station mac "MAC_ADDRESS_STR,
               MAC_ADDR_ARRAY(pDestMacAddress->bytes)));

    if (vos_is_macaddr_broadcast( pDestMacAddress ) ||
        vos_is_macaddr_group(pDestMacAddress))
    {
        // destination is mc/bc station
        ucDesSTAId = WLAN_RX_BCMC_STA_ID;
        TLLOG4(VOS_TRACE( VOS_MODULE_ID_HDD_SOFTAP, VOS_TRACE_LEVEL_INFO_LOW,
                   "%s: BC/MC packet, id %d", __func__, WLAN_RX_BCMC_STA_ID));
    }
    else
    {
      if (vos_is_macaddr_equal(pDestMacAddress, &pClientSTA->wSTADesc.vSelfMACAddress))
      {
        // destination is AP itself
        ucDesSTAId = WLAN_RX_SAP_SELF_STA_ID;
        TLLOG4(VOS_TRACE( VOS_MODULE_ID_HDD_SOFTAP, VOS_TRACE_LEVEL_INFO_LOW,
                   "%s: packet to AP itself, id %d", __func__, WLAN_RX_SAP_SELF_STA_ID));
      }
      else if (( WLAN_MAX_STA_COUNT <= ucDesSTAId ) ||
               (NULL != pTLCb->atlSTAClients[ucDesSTAId] &&
                pTLCb->atlSTAClients[ucDesSTAId]->ucExists == 0))
      {
        // destination station is something else: out-of-range or not
        // registered — fall back to delivering to the AP itself
        TLLOG4(VOS_TRACE( VOS_MODULE_ID_HDD_SOFTAP, VOS_TRACE_LEVEL_INFO_LOW,
            "%s: get an station index larger than WLAN_MAX_STA_COUNT %d", __func__, ucDesSTAId));
        ucDesSTAId = WLAN_RX_SAP_SELF_STA_ID;
      }

      //loopback unicast station comes here
    }

    wRxMetaInfo.ucUP = (v_U8_t)((uintptr_t)STAMetaInfoPtr & WLANTL_AC_MASK);
    wRxMetaInfo.ucDesSTAId = ucDesSTAId;

    vosStatus = pClientSTA->pfnSTARx( pvosGCtx, vosDataBuff, ucDesSTAId,
                                    &wRxMetaInfo );
    if ( VOS_STATUS_SUCCESS != vosStatus )
    {
       TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
                  "WLAN TL: failed to send pkt to HDD"));
       vos_pkt_return_packet(vosDataBuff);
       return vosStatus;
    }
    vosDataBuff = vosNextDataBuff;
  }
  return VOS_STATUS_SUCCESS;
}

/*==========================================================================
  FUNCTION    WLANTL_STARxAuth

  DESCRIPTION
    Receive in authenticated state - all data allowed

  DEPENDENCIES
    The STA must be registered with TL before this function can be called.

  PARAMETERS

   IN
   pvosGCtx:       pointer to the global vos context; a handle to TL's
                   control block can be extracted from its context
   ucSTAId:        identifier of the station being processed
   vosDataBuff:    pointer to the rx vos buffer

  RETURN VALUE
    The result code associated with performing the operation

    VOS_STATUS_E_INVAL:   invalid input parameters
    VOS_STATUS_E_FAULT:   pointer to TL cb is NULL ; access would cause a
                          page fault
    VOS_STATUS_SUCCESS:   Everything is good :)

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_STARxAuth
(
  v_PVOID_t     pvosGCtx,
  v_U8_t        ucSTAId,
  vos_pkt_t**   pvosDataBuff,
  v_BOOL_t      bForwardIAPPwithLLC
)
{
   WLANTL_CbType*           pTLCb = NULL;
   WLANTL_STAClientType*    pClientSTA = NULL;
   v_U8_t                   ucAsf; /* AMSDU sub frame */
   v_U16_t                  usMPDUDOffset;
   v_U8_t                   ucMPDUHOffset;
   v_U16_t                  usMPDULen;
   v_U8_t                   ucMPDUHLen;
   v_U16_t                  usActualHLen = 0;
   v_U8_t                   ucTid;
#ifdef FEATURE_WLAN_WAPI
   v_U16_t                  usEtherType = 0;
   tSirMacMgmtHdr           *hdr;
#endif
   v_U16_t                  usPktLen;
   vos_pkt_t*               vosDataBuff ;
   v_PVOID_t                aucBDHeader;
   VOS_STATUS               vosStatus;
   WLANTL_RxMetaInfoType    wRxMetaInfo;
   /* NOTE(review): static — caches the last non-zero MPDU header length
      across calls (used when the BD reports a zero header length); assumes
      RX processing is serialized in a single context — confirm. */
   static v_U8_t            ucPMPDUHLen;
   v_U32_t*                 STAMetaInfoPtr;
   v_U8_t                   ucEsf=0; /* first subframe of AMSDU flag */
   v_U64_t                  ullcurrentReplayCounter=0; /*current replay counter*/
   v_U64_t                  ullpreviousReplayCounter=0; /*previous replay counter*/
   v_U16_t                  ucUnicastBroadcastType=0; /*It denotes whether received frame is UC or BC*/
   struct _BARFrmStruct     *pBarFrame = NULL;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Sanity check
   ------------------------------------------------------------------------*/
  if (( NULL == pvosDataBuff ) || ( NULL == ( vosDataBuff = *pvosDataBuff )))
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
               "WLAN TL:Invalid parameter sent on WLANTL_STARxAuth"));
    return VOS_STATUS_E_INVAL;
  }

  /*------------------------------------------------------------------------
    Extract TL control block
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_STARxAuth"));
    return VOS_STATUS_E_FAULT;
  }

  pClientSTA = pTLCb->atlSTAClients[ucSTAId];

  if ( NULL == pClientSTA )
  {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
          "WLAN TL:Client Memory was not allocated on %s", __func__));
      return VOS_STATUS_E_FAILURE;
  }

  /*------------------------------------------------------------------------
    Extract BD header and check if valid
   ------------------------------------------------------------------------*/
  WDA_DS_PeekRxPacketInfo( vosDataBuff, (v_PVOID_t)&aucBDHeader, 0 );

  ucMPDUHOffset = (v_U8_t)WDA_GET_RX_MPDU_HEADER_OFFSET(aucBDHeader);
  usMPDUDOffset = (v_U16_t)WDA_GET_RX_MPDU_DATA_OFFSET(aucBDHeader);
  usMPDULen     = (v_U16_t)WDA_GET_RX_MPDU_LEN(aucBDHeader);
  ucMPDUHLen    = (v_U8_t)WDA_GET_RX_MPDU_HEADER_LEN(aucBDHeader);
  ucTid         = (v_U8_t)WDA_GET_RX_TID(aucBDHeader);

  /* Fix for a hardware bug.
   * H/W does not update the tid field in BD header for BAR frames.
   * Fix is to read the tid field from MAC header of BAR frame */
  if( (WDA_GET_RX_TYPE(aucBDHeader) == SIR_MAC_CTRL_FRAME) &&
      (WDA_GET_RX_SUBTYPE(aucBDHeader) == SIR_MAC_CTRL_BAR))
  {
    pBarFrame = (struct _BARFrmStruct *)(WDA_GET_RX_MAC_HEADER(aucBDHeader));
    ucTid = pBarFrame->barControl.numTID;
  }

  /*Host based replay check is needed for unicast data frames*/
  ucUnicastBroadcastType = (v_U16_t)WDA_IS_RX_BCAST(aucBDHeader);
  if(0 != ucMPDUHLen)
  {
    /* Remember the last valid header length for frames that report 0 */
    ucPMPDUHLen = ucMPDUHLen;
  }

  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
             "WLAN TL:BD header processing data: HO %d DO %d Len %d HLen %d"
             " Tid %d BD %d",
             ucMPDUHOffset, usMPDUDOffset, usMPDULen, ucMPDUHLen, ucTid,
             WLANHAL_RX_BD_HEADER_SIZE));

  vos_pkt_get_packet_length( vosDataBuff, &usPktLen);

  if ( VOS_STATUS_SUCCESS != WDA_DS_TrimRxPacketInfo( vosDataBuff ) )
  {
    if((WDA_GET_RX_ASF(aucBDHeader) && !WDA_GET_RX_ESF(aucBDHeader)))
    {
      /* AMSDU case, ucMPDUHOffset = 0
       * it should be handled separately */
      if(( usMPDUDOffset > ucMPDUHOffset ) &&
         ( usMPDULen    >= ucMPDUHLen ) &&
         ( usPktLen     >= usMPDULen ) &&
         ( !WLANTL_TID_INVALID(ucTid) ))
      {
        ucMPDUHOffset = usMPDUDOffset - WLANTL_MPDU_HEADER_LEN;
      }
      else
      {
        TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
            "WLAN TL:BD header corrupted - dropping packet"));
        /* Drop packet */
        vos_pkt_return_packet(vosDataBuff);
        return VOS_STATUS_SUCCESS;
      }
    }
    else
    {
      TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
          "WLAN TL:BD header corrupted - dropping packet"));
      /* Drop packet */
      vos_pkt_return_packet(vosDataBuff);
      return VOS_STATUS_SUCCESS;
    }
  }

#ifdef FEATURE_WLAN_WAPI
  if ( pClientSTA->wSTADesc.ucIsWapiSta )
  {
    vosStatus = WLANTL_GetEtherType(aucBDHeader, vosDataBuff, ucMPDUHLen, &usEtherType);
    if( VOS_IS_STATUS_SUCCESS(vosStatus) )
    {
      if ( WLANTL_LLC_WAI_TYPE == usEtherType )
      {
        hdr = WDA_GET_RX_MAC_HEADER(aucBDHeader);
        /* WAI handshake frames must arrive unencrypted */
        if ( hdr->fc.wep )
        {
          TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
                     "WLAN TL:WAI frame was received encrypted - dropping"));
          /* Drop packet */
          /*Temporary fix added to fix wapi rekey issue*/
          vos_pkt_return_packet(vosDataBuff);
          return vosStatus; //returning success
        }
      }
      else
      {
        /* Conversely, all non-WAI frames must be protected for a WAPI STA */
        if ( WLANHAL_RX_IS_UNPROTECTED_WPI_FRAME(aucBDHeader) )
        {
          TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
                     "WLAN TL:Non-WAI frame was received unencrypted - dropping"));
          /* Drop packet */
          vos_pkt_return_packet(vosDataBuff);
          return vosStatus; //returning success
        }
      }
    }
    else //could not extract EtherType - this should not happen
    {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
                 "WLAN TL:Could not extract EtherType"));
      //Packet is already freed
      return vosStatus; //returning failure
    }
  }
#endif /* FEATURE_WLAN_WAPI */

  /*----------------------------------------------------------------------
    Increment receive counter
    !! not sure this is the best place to increase this - pkt might be
    dropped below or delayed in TL's queues
    - will leave it here for now
   ------------------------------------------------------------------------*/
  if ( !WLANTL_TID_INVALID( ucTid) )
  {
    pClientSTA->auRxCount[ucTid]++;
  }
  else
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
           "WLAN TL:Invalid tid %d (Station ID %d) on %s",
           ucTid, ucSTAId, __func__));
  }

  /*------------------------------------------------------------------------
    Check if AMSDU and send for processing if so
   ------------------------------------------------------------------------*/
  ucAsf = (v_U8_t)WDA_GET_RX_ASF(aucBDHeader);

  if ( 0 != ucAsf )
  {
    TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
               "WLAN TL:Packet is AMSDU sub frame - sending for completion"));
    vosStatus = WLANTL_AMSDUProcess( pvosGCtx, &vosDataBuff, aucBDHeader, ucSTAId,
                         ucMPDUHLen, usMPDULen );
    if(NULL == vosDataBuff)
    {
        //Packet is already freed
        return VOS_STATUS_SUCCESS;
    }
  }
  /* After AMSDU header handled
   * AMSDU frame just same with normal frames */

  /*-------------------------------------------------------------------
    Translating header if necessary
    !! Fix me: rmv comments below
  ----------------------------------------------------------------------*/
  if (( 0 == WDA_GET_RX_FT_DONE(aucBDHeader) ) &&
      ( 0 != pClientSTA->wSTADesc.ucSwFrameRXXlation) &&
      ( WLANTL_IS_DATA_FRAME(WDA_GET_RX_TYPE_SUBTYPE(aucBDHeader)) ))
  {
    if(0 == ucMPDUHLen)
    {
      /* Fall back to the cached header length from a previous frame */
      ucMPDUHLen = ucPMPDUHLen;
    }
    if (usMPDUDOffset > ucMPDUHOffset)
    {
      usActualHLen = usMPDUDOffset - ucMPDUHOffset;
    }

    vosStatus = WLANTL_Translate80211To8023Header( vosDataBuff, &vosStatus, usActualHLen,
                    ucMPDUHLen, pTLCb, ucSTAId, bForwardIAPPwithLLC);

    if ( VOS_STATUS_SUCCESS != vosStatus )
    {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
          "WLAN TL:Failed to translate from 802.11 to 802.3 - dropping"));
      /* Drop packet */
      vos_pkt_return_packet(vosDataBuff);
      return vosStatus;
    }
  }

  /* Softap requires additional Info such as Destination STAID and Access
     Category. The vos chain or Buffer returned by BA would be unchained and
     this Meta Data would help in routing the packets to the appropriate
     Destination. The UP and destination STA index are packed into a
     pointer-sized integer; WLANTL_FwdPktToHDD decodes it. */
  if( WLAN_STA_SOFTAP == pClientSTA->wSTADesc.wSTAType)
  {
     STAMetaInfoPtr = (v_U32_t *)(uintptr_t)(ucTid | (WDA_GET_RX_ADDR3_IDX(aucBDHeader) << WLANTL_STAID_OFFSET));
     vos_pkt_set_user_data_ptr( vosDataBuff, VOS_PKT_USER_DATA_ID_TL,
                                (v_PVOID_t)STAMetaInfoPtr);
  }

  /*------------------------------------------------------------------------
    Check to see if re-ordering session is in place
   ------------------------------------------------------------------------*/
  if ( 0 != pClientSTA->atlBAReorderInfo[ucTid].ucExists )
  {
    WLANTL_MSDUReorder( pTLCb, &vosDataBuff, aucBDHeader, ucSTAId, ucTid );
  }

  if(0 == ucUnicastBroadcastType
#ifdef FEATURE_ON_CHIP_REORDERING
     && (WLANHAL_IsOnChipReorderingEnabledForTID(pvosGCtx, ucSTAId, ucTid) != TRUE)
#endif
    )
  {
    /* replay check code : check whether replay check is needed or not */
    if(VOS_TRUE == pClientSTA->ucIsReplayCheckValid)
    {
      /* replay check is needed for the station */

      /* check whether frame is AMSDU frame */
      if ( 0 != ucAsf )
      {
        /* Since virgo can't send AMSDU frames this leg of the code
           was not tested properly; it needs to be tested properly */
        /* Frame is AMSDU frame. As per 802.11n only first
           subframe will have replay counter */
        ucEsf =  WDA_GET_RX_ESF( aucBDHeader );
        if( 0 != ucEsf )
        {
          v_BOOL_t status;
          /* Getting 48-bit replay counter from the RX BD */
          ullcurrentReplayCounter = WDA_DS_GetReplayCounter(aucBDHeader);

          VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO,
            "WLAN TL: AMSDU currentReplayCounter [0x%llX]",ullcurrentReplayCounter);

          /* Getting 48-bit previous replay counter from TL control block */
          ullpreviousReplayCounter = pClientSTA->ullReplayCounter[ucTid];

          VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO,
            "WLAN TL: AMSDU previousReplayCounter [0x%llX]",ullpreviousReplayCounter);

          /* It is the first subframe of the AMSDU, thus it contains the
             replay counter; perform the replay check for this first subframe*/
          status =  WLANTL_IsReplayPacket( ullcurrentReplayCounter, ullpreviousReplayCounter);
          if(VOS_FALSE == status)
          {
            /* Not a replay packet, update previous replay counter in TL CB */
            pClientSTA->ullReplayCounter[ucTid] = ullcurrentReplayCounter;
          }
          else
          {
            VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
              "WLAN TL: AMSDU Drop the replay packet with PN : [0x%llX]",ullcurrentReplayCounter);

            pClientSTA->ulTotalReplayPacketsDetected++;
            VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
              "WLAN TL: AMSDU total dropped replay packets on STA ID %X is [0x%X]",
              ucSTAId, pClientSTA->ulTotalReplayPacketsDetected);

            /* Drop the packet */
            vos_pkt_return_packet(vosDataBuff);
            return VOS_STATUS_SUCCESS;
          }
        }
      }
      else
      {
        v_BOOL_t status;

        /* Getting 48-bit replay counter from the RX BD */
        ullcurrentReplayCounter = WDA_DS_GetReplayCounter(aucBDHeader);

        VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO,
          "WLAN TL: Non-AMSDU currentReplayCounter [0x%llX]",ullcurrentReplayCounter);

        /* Getting 48-bit previous replay counter from TL control block */
        ullpreviousReplayCounter = pClientSTA->ullReplayCounter[ucTid];

        VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO,
          "WLAN TL: Non-AMSDU previousReplayCounter [0x%llX]",ullpreviousReplayCounter);

        /* It is not an AMSDU frame so perform the replay check for each
           packet, as each packet contains a valid replay counter*/
        status =  WLANTL_IsReplayPacket( ullcurrentReplayCounter, ullpreviousReplayCounter);
        if(VOS_FALSE == status)
        {
           /* Not a replay packet, update previous replay counter in TL CB */
           pClientSTA->ullReplayCounter[ucTid] = ullcurrentReplayCounter;
        }
        else
        {
          VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
            "WLAN TL: Non-AMSDU Drop the replay packet with PN : [0x%llX]",ullcurrentReplayCounter);

          pClientSTA->ulTotalReplayPacketsDetected++;
          VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
            "WLAN TL: Non-AMSDU total dropped replay packets on STA ID %X is [0x%X]",
            ucSTAId, pClientSTA->ulTotalReplayPacketsDetected);

          /* Replay packet, drop the packet */
          vos_pkt_return_packet(vosDataBuff);
          return VOS_STATUS_SUCCESS;
        }
      }
    }
  }
  /*It is a broadcast packet; DPU has already done the replay check for
    broadcast packets, no need to do a replay check of these packets*/

  if ( NULL != vosDataBuff )
  {
    if( WLAN_STA_SOFTAP == pClientSTA->wSTADesc.wSTAType)
    {
      WLANTL_FwdPktToHDD( pvosGCtx, vosDataBuff, ucSTAId );
    }
    else
    {
      wRxMetaInfo.ucUP = ucTid;
      wRxMetaInfo.rssiAvg = pClientSTA->rssiAvg;
#ifdef FEATURE_WLAN_TDLS
      if (WLAN_STA_TDLS == pClientSTA->wSTADesc.wSTAType)
      {
          wRxMetaInfo.isStaTdls = TRUE;
      }
      else
      {
          wRxMetaInfo.isStaTdls = FALSE;
      }
#endif
      pClientSTA->pfnSTARx( pvosGCtx, vosDataBuff, ucSTAId,
                          &wRxMetaInfo );
    }
  }/* if not NULL */

  return VOS_STATUS_SUCCESS;
}/* WLANTL_STARxAuth */

/*==========================================================================
  FUNCTION    WLANTL_STARxDisc

  DESCRIPTION
    Receive in disconnected state - no data allowed

  DEPENDENCIES
    The STA must be registered with TL before this function can be called.
PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to TL's control block can be extracted from its context ucSTAId: identifier of the station being processed vosDataBuff: pointer to the rx vos buffer RETURN VALUE The result code associated with performing the operation VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANTL_STARxDisc ( v_PVOID_t pvosGCtx, v_U8_t ucSTAId, vos_pkt_t** pvosDataBuff, v_BOOL_t bForwardIAPPwithLLC ) { /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Sanity check ------------------------------------------------------------------------*/ if (( NULL == pvosDataBuff ) || ( NULL == *pvosDataBuff )) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid parameter sent on WLANTL_STARxDisc")); return VOS_STATUS_E_INVAL; } /*------------------------------------------------------------------------ Error - drop packet ------------------------------------------------------------------------*/ TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Packet should not be received in state disconnected" " - dropping")); vos_pkt_return_packet(*pvosDataBuff); *pvosDataBuff = NULL; return VOS_STATUS_SUCCESS; }/* WLANTL_STARxDisc */ /*========================================================================== Processing main loops for MAIN and TX threads ==========================================================================*/ /*========================================================================== FUNCTION WLANTL_McProcessMsg DESCRIPTION Called by VOSS when a message was serialized for TL through the main thread/task. DEPENDENCIES The TL must be initialized before this function can be called. 
PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to TL's control block can be extracted from its context message: type and content of the message RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_INVAL: invalid input parameters VOS_STATUS_E_FAULT: pointer to TL cb is NULL ; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANTL_McProcessMsg ( v_PVOID_t pvosGCtx, vos_msg_t* message ) { WLANTL_CbType* pTLCb = NULL; tAddBAInd* ptAddBaInd = NULL; tDelBAInd* ptDelBaInd = NULL; tAddBARsp* ptAddBaRsp = NULL; vos_msg_t vosMessage; VOS_STATUS vosStatus; tpFlushACRsp FlushACRspPtr; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Sanity check ------------------------------------------------------------------------*/ if ( NULL == message ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid parameter sent on WLANTL_ProcessMainMessage")); return VOS_STATUS_E_INVAL; } /*------------------------------------------------------------------------ Extract TL control block ------------------------------------------------------------------------*/ pTLCb = VOS_GET_TL_CB(pvosGCtx); if ( NULL == pTLCb ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_ProcessMainMessage")); return VOS_STATUS_E_FAULT; } TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL:Received message: %d through main flow", message->type)); switch( message->type ) { case WDA_TL_FLUSH_AC_RSP: // Extract the message from the message body FlushACRspPtr = (tpFlushACRsp)(message->bodyptr); // Make sure the call back function is not null. 
if ( NULL == pTLCb->tlBAPClient.pfnFlushOpCompleteCb ) { VOS_ASSERT(0); TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL pointer pfnFlushOpCompleteCb")); return VOS_STATUS_E_FAULT; } TLLOG2(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "Received message: Flush complete received by TL")); // Since we have the response back from HAL, just call the BAP client // registered call back from TL. There is only 1 possible // BAP client. So directly reference tlBAPClient pTLCb->tlBAPClient.pfnFlushOpCompleteCb( pvosGCtx, FlushACRspPtr->ucSTAId, FlushACRspPtr->ucTid, FlushACRspPtr->status ); // Free the PAL memory, we are done with it. TLLOG2(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "Flush complete received by TL: Freeing %p", FlushACRspPtr)); vos_mem_free((v_VOID_t *)FlushACRspPtr); break; case WDA_HDD_ADDBA_REQ: ptAddBaInd = (tAddBAInd*)(message->bodyptr); vosStatus = WLANTL_BaSessionAdd( pvosGCtx, ptAddBaInd->baSession.baSessionID, ptAddBaInd->baSession.STAID, ptAddBaInd->baSession.baTID, (v_U32_t)ptAddBaInd->baSession.baBufferSize, ptAddBaInd->baSession.winSize, ptAddBaInd->baSession.SSN); ptAddBaRsp = vos_mem_malloc(sizeof(*ptAddBaRsp)); if ( NULL == ptAddBaRsp ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL: fatal failure, cannot allocate BA Rsp structure")); VOS_ASSERT(0); return VOS_STATUS_E_NOMEM; } if ( VOS_STATUS_SUCCESS == vosStatus ) { TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL: Sending success indication to HAL for ADD BA")); /*Send success*/ ptAddBaRsp->mesgType = WDA_HDD_ADDBA_RSP; vosMessage.type = WDA_HDD_ADDBA_RSP; } else { TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL: Sending failure indication to HAL for ADD BA")); /*Send failure*/ ptAddBaRsp->mesgType = WDA_BA_FAIL_IND; vosMessage.type = WDA_BA_FAIL_IND; } ptAddBaRsp->mesgLen = sizeof(tAddBARsp); ptAddBaRsp->baSessionID = ptAddBaInd->baSession.baSessionID; /* This is 
default, reply win size has to be handled BA module, FIX THIS */ ptAddBaRsp->replyWinSize = WLANTL_MAX_WINSIZE; vosMessage.bodyptr = ptAddBaRsp; vos_mq_post_message( VOS_MQ_ID_WDA, &vosMessage ); WLANTL_McFreeMsg (pvosGCtx, message); break; case WDA_DELETEBA_IND: ptDelBaInd = (tDelBAInd*)(message->bodyptr); vosStatus = WLANTL_BaSessionDel(pvosGCtx, ptDelBaInd->staIdx, ptDelBaInd->baTID); if ( VOS_STATUS_SUCCESS != vosStatus ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL: Failed to del BA session STA:%d TID:%d Status :%d", ptDelBaInd->staIdx, ptDelBaInd->baTID, vosStatus)); } WLANTL_McFreeMsg (pvosGCtx, message); break; default: /*no processing for now*/ break; } return VOS_STATUS_SUCCESS; }/* WLANTL_ProcessMainMessage */ /*========================================================================== FUNCTION WLANTL_McFreeMsg DESCRIPTION Called by VOSS to free a given TL message on the Main thread when there are messages pending in the queue when the whole system is been reset. For now, TL does not allocate any body so this function shout translate into a NOOP DEPENDENCIES The TL must be initialized before this function can be called. 
PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to TL's control block can be extracted from its context message: type and content of the message RETURN VALUE The result code associated with performing the operation VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANTL_McFreeMsg ( v_PVOID_t pvosGCtx, vos_msg_t* message ) { WLANTL_CbType* pTLCb = NULL; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Sanity check ------------------------------------------------------------------------*/ if ( NULL == message ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid parameter sent on WLANTL_McFreeMsg")); return VOS_STATUS_E_INVAL; } /*------------------------------------------------------------------------ Extract TL control block ------------------------------------------------------------------------*/ pTLCb = VOS_GET_TL_CB(pvosGCtx); if ( NULL == pTLCb ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_McFreeMsg")); return VOS_STATUS_E_FAULT; } TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL:Received message: %d through main free", message->type)); switch( message->type ) { case WDA_HDD_ADDBA_REQ: case WDA_DELETEBA_IND: /*vos free body pointer*/ vos_mem_free(message->bodyptr); message->bodyptr = NULL; break; default: /*no processing for now*/ break; } return VOS_STATUS_SUCCESS; }/*WLANTL_McFreeMsg*/ /*========================================================================== FUNCTION WLANTL_TxProcessMsg DESCRIPTION Called by VOSS when a message was serialized for TL through the tx thread/task. DEPENDENCIES The TL must be initialized before this function can be called. 
  PARAMETERS

   IN
   pvosGCtx:       pointer to the global vos context; a handle to TL's
                   control block can be extracted from its context
   message:        type and content of the message

  RETURN VALUE
    The result code associated with performing the operation

    VOS_STATUS_E_INVAL:   invalid input parameters
    VOS_STATUS_E_FAULT:   pointer to TL cb is NULL ; access would cause a
                          page fault
    VOS_STATUS_SUCCESS:   Everything is good :)

    Other values can be returned as a result of a function call, please check
    corresponding API for more info.

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_TxProcessMsg
(
  v_PVOID_t        pvosGCtx,
  vos_msg_t*       message
)
{
   VOS_STATUS      vosStatus = VOS_STATUS_SUCCESS;
   /* Completion callback carried in the message for WDA_DS_FINISH_ULA */
   void (*callbackRoutine) (void *callbackContext);
   void *callbackContext;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Sanity check
   ------------------------------------------------------------------------*/
  if ( NULL == message )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
               "WLAN TL:Invalid parameter sent on WLANTL_ProcessTxMessage"));
    return VOS_STATUS_E_INVAL;
  }

  /*------------------------------------------------------------------------
    Process message
   ------------------------------------------------------------------------*/
  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
            "WLAN TL:Received message: %d through tx flow", message->type));

  switch( message->type )
  {
  case WLANTL_TX_SIG_SUSPEND:
    /* Suspend-tx request: the callback travels in bodyptr, the STA id in
       the reserved field */
    vosStatus = WLANTL_SuspendCB( pvosGCtx,
                                 (WLANTL_SuspendCBType)message->bodyptr,
                                 message->reserved);
    break;
  case WLANTL_TX_RES_NEEDED:
    /* BAL signalled that more tx resources may be available */
    vosStatus = WLANTL_GetTxResourcesCB( pvosGCtx );
    break;

  case WDA_DS_TX_START_XMIT:
    WLANTL_ClearTxXmitPending(pvosGCtx);
    vosStatus = WDA_DS_TxFrames( pvosGCtx );
    break;

  case WDA_DS_FINISH_ULA:
    callbackContext = message->bodyptr;
    callbackRoutine = message->callback;
    if ( NULL != callbackRoutine )
    {
      callbackRoutine(callbackContext);
    }
    break;

  case WLANTL_TX_SNAPSHOT:
    /*Dumping TL State and then continuing to print the DXE Dump*/
    WLANTL_TxThreadDebugHandler(pvosGCtx);
    WDA_TransportChannelDebug(NULL, VOS_TRUE, VOS_FALSE);
    break;

  case WLANTL_TX_FATAL_ERROR:
    WLANTL_FatalErrorHandler(pvosGCtx);
    break;

  case WLANTL_TX_FW_DEBUG:
    vos_fwDumpReq(274, 0, 0, 0, 0, 1); //Async event
    break;

  case WLANTL_TX_KICKDXE:
    WDA_TransportKickDxe();
    break;

  default:
    /*no processing for now*/
    break;
  }

  return vosStatus;
}/* WLANTL_TxProcessMsg */

/*==========================================================================
  FUNCTION    WLANTL_TxFreeMsg

  NOTE(review): the original header was a copy-paste of WLANTL_McFreeMsg's
  (wrong function name, "Main thread"); corrected to match the function it
  documents.

  DESCRIPTION
    Called by VOSS to free a given TL message on the Tx thread when there
    are messages pending in the queue when the whole system is been reset.
    For now, TL does not allocate any body so this function should translate
    into a NOOP

  DEPENDENCIES
    The TL must be initialized before this function can be called.

  PARAMETERS

   IN
   pvosGCtx:       pointer to the global vos context; a handle to TL's
                   control block can be extracted from its context
   message:        type and content of the message

  RETURN VALUE
    The result code associated with performing the operation

    VOS_STATUS_SUCCESS:   Everything is good :)

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_TxFreeMsg
(
  v_PVOID_t        pvosGCtx,
  vos_msg_t*       message
)
{
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*Nothing to do for now!!!*/
  return VOS_STATUS_SUCCESS;
}/*WLANTL_TxFreeMsg*/

/*==========================================================================
  FUNCTION    WLANTL_TxFCFrame

  DESCRIPTION
    Internal utility function to send FC frame. Enable
    or disable LWM mode based on the information.

  DEPENDENCIES
    TL must be initialized before this function gets called.
    FW sends up special flow control frame.
  PARAMETERS

   IN
   pvosGCtx:       pointer to the global vos context; a handle to TL's
                   control block can be extracted from its context

  RETURN VALUE
    The result code associated with performing the operation

    VOS_STATUS_E_INVAL:  Input pointers are NULL.
    VOS_STATUS_E_FAULT:  Something is wrong.
    VOS_STATUS_SUCCESS:  Everything is good.

  SIDE EFFECTS
    Newly formed FC frame is generated and waits to be transmitted.
    Previously unsent frame will be released.

============================================================================*/
VOS_STATUS
WLANTL_TxFCFrame
(
  v_PVOID_t       pvosGCtx
)
{
/* NOTE(review): the whole body below is compiled out (#if 0); as built,
   this function always returns VOS_STATUS_SUCCESS without transmitting
   anything. */
#if 0
  WLANTL_CbType*      pTLCb = NULL;
  VOS_STATUS          vosStatus;
  tpHalFcTxBd         pvFcTxBd = NULL;
  vos_pkt_t *         pPacket = NULL;
  v_U8_t              ucSTAId = 0;
  v_U8_t              ucBitCheck = 1;

  VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO,
      "WLAN TL: Send FC frame %s", __func__);

  /*------------------------------------------------------------------------
    Sanity check
   ------------------------------------------------------------------------*/
  if ( NULL == pvosGCtx )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL:Invalid parameter %s", __func__));
    return VOS_STATUS_E_INVAL;
  }
  /*------------------------------------------------------------------------
    Extract TL control block
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);

  if (NULL == pTLCb)
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL:Invalid pointer in %s \n", __func__));
    return VOS_STATUS_E_INVAL;
  }

  //Get one voss packet
  vosStatus = vos_pkt_get_packet( &pPacket, VOS_PKT_TYPE_TX_802_11_MGMT,
                                  sizeof(tHalFcTxBd), 1, VOS_FALSE,
                                  NULL, NULL );

  if ( VOS_STATUS_SUCCESS != vosStatus )
  {
    return VOS_STATUS_E_INVAL;
  }

  /* Reserve headroom for the flow-control TX BD in front of the payload */
  vosStatus = vos_pkt_reserve_head( pPacket, (void *)&pvFcTxBd, sizeof(tHalFcTxBd));

  if( VOS_STATUS_SUCCESS != vosStatus )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "%s: failed to reserve FC TX BD %d\n",__func__, sizeof(tHalFcTxBd)));
    vos_pkt_return_packet( pPacket );
    return VOS_STATUS_E_FAULT;
  }

  //Generate most recent tlFCInfo. Most fields are correct.
  pTLCb->tlFCInfo.fcSTAThreshEnabledMask = 0;
  pTLCb->tlFCInfo.fcSTATxMoreDataMask = 0;

  /* One bit per registered STA: mark stations with pending tx and stations
     that crossed their low-watermark memory threshold */
  for( ucSTAId = 0, ucBitCheck = 1 ; ucSTAId < WLAN_MAX_STA_COUNT; ucBitCheck <<= 1, ucSTAId ++)
  {
    if (0 == pTLCb->atlSTAClients[ucSTAId].ucExists)
    {
      continue;
    }

    if (pTLCb->atlSTAClients[ucSTAId].ucPktPending)
    {
      pTLCb->tlFCInfo.fcSTATxMoreDataMask |= ucBitCheck;
    }

    if ( (pTLCb->atlSTAClients[ucSTAId].ucLwmModeEnabled) &&
         (pTLCb->atlSTAClients[ucSTAId].bmuMemConsumed > pTLCb->atlSTAClients[ucSTAId].uLwmThreshold))
    {
      pTLCb->tlFCInfo.fcSTAThreshEnabledMask |= ucBitCheck;
      pTLCb->tlFCInfo.fcSTAThresh[ucSTAId] = (tANI_U8)pTLCb->atlSTAClients[ucSTAId].uLwmThreshold;
      pTLCb->atlSTAClients[ucSTAId].ucLwmEventReported = FALSE;
    }
  }

  //request immediate feedback
  pTLCb->tlFCInfo.fcConfig |= 0x4;

  //fill in BD to sent
  vosStatus = WLANHAL_FillFcTxBd(pvosGCtx, &pTLCb->tlFCInfo, (void *)pvFcTxBd);

  if( VOS_STATUS_SUCCESS != vosStatus )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "%s: Fill FC TX BD unsuccessful\n", __func__));
    vos_pkt_return_packet( pPacket );
    return VOS_STATUS_E_FAULT;
  }

  /* Drop any previously queued-but-unsent FC frame before replacing it */
  if (NULL != pTLCb->vosTxFCBuf)
  {
    TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
        "%s: Previous FC TX BD not sent\n", __func__));
    vos_pkt_return_packet(pTLCb->vosTxFCBuf);
  }

  pTLCb->vosTxFCBuf = pPacket;

  vos_pkt_set_user_data_ptr( pPacket, VOS_PKT_USER_DATA_ID_TL,
                             (v_PVOID_t)WLANTL_TxCompDefaultCb);
  vosStatus = WDA_DS_StartXmit(pvosGCtx);

  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
      "WLAN TL: send FC frame leave %s", __func__));
#endif

  return VOS_STATUS_SUCCESS;
}

/*==========================================================================
  FUNCTION    WLANTL_GetTxResourcesCB

  DESCRIPTION
    Processing function for Resource needed signal. A request will be issued
    to BAL to get more tx resources.

  DEPENDENCIES
    The TL must be initialized before this function can be called.
  PARAMETERS

   IN
   pvosGCtx:       pointer to the global vos context; a handle to TL's
                   control block can be extracted from its context

  RETURN VALUE
    The result code associated with performing the operation

    VOS_STATUS_E_FAULT:   pointer to TL cb is NULL ; access would cause a
                          page fault
    VOS_STATUS_SUCCESS:   Everything is good :)

    Other values can be returned as a result of a function call, please check
    corresponding API for more info.

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_GetTxResourcesCB
(
  v_PVOID_t        pvosGCtx
)
{
  WLANTL_CbType*  pTLCb = NULL;
  v_U32_t         uResCount = WDA_TLI_MIN_RES_DATA;
  VOS_STATUS      vosStatus = VOS_STATUS_SUCCESS;
  v_U8_t          ucMgmt = 0;
  v_U8_t          ucBAP = 0;
  v_U8_t          ucData = 0;
#ifdef WLAN_SOFTAP_FLOWCTRL_EN
  tBssSystemRole systemRole;
  tpAniSirGlobal pMac;
#endif
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Extract TL control block
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if ( NULL == pTLCb )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
      "WLAN TL:Invalid TL pointer from pvosGCtx on"
      " WLANTL_ProcessTxMessage"));
    return VOS_STATUS_E_FAULT;
  }

  /*------------------------------------------------------------------------
    Get tx resources from BAL
   ------------------------------------------------------------------------*/
  vosStatus = WDA_DS_GetTxResources( pvosGCtx, &uResCount );

  if ( (VOS_STATUS_SUCCESS != vosStatus) && (VOS_STATUS_E_RESOURCES != vosStatus))
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
      "WLAN TL:TL failed to get resources from BAL, Err: %d",
        vosStatus));
    return vosStatus;
  }

  /* Currently only Linux BAL returns the E_RESOURCES error code when it is
     running out of BD/PDUs. To make use of this interrupt for throughput
     enhancement, similar changes should be done in BAL code of AMSS and WM */
  if (VOS_STATUS_E_RESOURCES == vosStatus)
  {
#ifdef VOLANS_PERF
     /* Arm the idle BD/PDU interrupt so TL is notified once resources
        free up again */
     WLANHAL_EnableIdleBdPduInterrupt(pvosGCtx, (tANI_U8)bdPduInterruptGetThreshold);
     VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
      "WLAN TL: Enabling Idle BD/PDU interrupt, Current resources = %d", uResCount);
#else
    return VOS_STATUS_E_FAILURE;
#endif
  }

  pTLCb->uResCount = uResCount;

#ifdef WLAN_SOFTAP_FLOWCTRL_EN
  /* FIXME: disabled since creating issues in power-save, needs to be
  addressed */
  /* In AP role, emit a flow-control frame on every 16th resource event */
  pTLCb->sendFCFrame ++;
  pMac = vos_get_context(VOS_MODULE_ID_WDA, pvosGCtx);
  systemRole = wdaGetGlobalSystemRole(pMac);
  if (eSYSTEM_AP_ROLE == systemRole)
  {
    if (pTLCb->sendFCFrame % 16 == 0)
    {
      TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
                    "Transmit FC"));
      WLANTL_TxFCFrame (pvosGCtx);
    }
  }
#endif //WLAN_SOFTAP_FLOWCTRL_EN

  /* Decide which pending traffic classes now have enough tx resources:
     plain data needs only the resource floor; BAP and management frames
     additionally need a frame actually pending */
  ucData = ( pTLCb->uResCount >=  WDA_TLI_MIN_RES_DATA );
  ucBAP  = ( pTLCb->uResCount >=  WDA_TLI_MIN_RES_BAP ) &&
           ( NULL != pTLCb->tlBAPClient.vosPendingDataBuff );
  ucMgmt = ( pTLCb->uResCount >=  WDA_TLI_MIN_RES_MF ) &&
           ( NULL != pTLCb->tlMgmtFrmClient.vosPendingDataBuff );

  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
            "WLAN TL: Eval Resume tx Res: %d DATA: %d BAP: %d MGMT: %d",
            pTLCb->uResCount, ucData, ucBAP, ucMgmt));

  /* Kick transmission only when tx is not suspended and at least one
     class can make progress */
  if (( 0 == pTLCb->ucTxSuspended ) &&
      (( 0 != ucData ) || ( 0 != ucMgmt ) || ( 0 != ucBAP ) ) )
  {
    TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
              "Issuing Xmit start request to BAL for avail res SYNC"));
    vosStatus =WDA_DS_StartXmit(pvosGCtx);
  }
  return vosStatus;
}/*WLANTL_GetTxResourcesCB*/

/*==========================================================================
      Utility functions
 ==========================================================================*/

/*==========================================================================
  FUNCTION    WLANTL_Translate8023To80211Header

  DESCRIPTION
    Inline function for
translating and 802.11 header into an 802.3 header. DEPENDENCIES PARAMETERS IN pTLCb: TL control block IN/OUT ucStaId: station ID. Incase of TDLS, this returns actual TDLS station ID used IN/OUT vosDataBuff: vos data buffer, will contain the new header on output OUT pvosStatus: status of the operation RETURN VALUE VOS_STATUS_SUCCESS: Everything is good :) Other error codes might be returned from the vos api used in the function please check those return values. SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANTL_Translate8023To80211Header ( vos_pkt_t* vosDataBuff, VOS_STATUS* pvosStatus, WLANTL_CbType* pTLCb, v_U8_t *pucStaId, WLANTL_MetaInfoType *tlMetaInfo, v_U8_t *ucWDSEnabled, v_U8_t *extraHeadSpace ) { WLANTL_8023HeaderType w8023Header; WLANTL_80211HeaderType *pw80211Header; // Allocate an aligned BD and then fill it. VOS_STATUS vosStatus; v_U8_t MandatoryucHeaderSize = WLAN80211_MANDATORY_HEADER_SIZE; v_U8_t ucHeaderSize = 0; v_VOID_t *ppvBDHeader = NULL; WLANTL_STAClientType* pClientSTA = NULL; v_U8_t ucQoSOffset = WLAN80211_MANDATORY_HEADER_SIZE; v_U8_t ucStaId; #ifdef FEATURE_WLAN_ESE_UPLOAD v_BOOL_t bIAPPTxwithLLC = VOS_FALSE; v_SIZE_t wIAPPSnapSize = WLANTL_LLC_HEADER_LEN; v_U8_t wIAPPSnap[WLANTL_LLC_HEADER_LEN] = {0}; #endif *ucWDSEnabled = 0; // default WDS off. 
vosStatus = vos_pkt_pop_head( vosDataBuff, &w8023Header, sizeof(w8023Header)); if ( VOS_STATUS_SUCCESS != vosStatus ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL: Packet pop header fails on WLANTL_Translate8023To80211Header")); return vosStatus; } if( NULL == pucStaId ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL: Invalid pointer for StaId")); return VOS_STATUS_E_INVAL; } ucStaId = *pucStaId; pClientSTA = pTLCb->atlSTAClients[ucStaId]; if ( NULL == pClientSTA ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Client Memory was not allocated on %s", __func__)); return VOS_STATUS_E_FAILURE; } #ifdef FEATURE_WLAN_TDLS if ( WLAN_STA_INFRA == pTLCb->atlSTAClients[ucStaId]->wSTADesc.wSTAType && pTLCb->ucTdlsPeerCount ) { v_U8_t ucIndex = 0; for ( ucIndex = 0; ucIndex < WLAN_MAX_STA_COUNT ; ucIndex++) { if ( ucIndex != ucStaId && pTLCb->atlSTAClients[ucIndex] && pTLCb->atlSTAClients[ucIndex]->ucExists && (pTLCb->atlSTAClients[ucIndex]->tlState == WLANTL_STA_AUTHENTICATED) && (!pTLCb->atlSTAClients[ucIndex]->ucTxSuspended) && vos_mem_compare( (void*)pTLCb->atlSTAClients[ucIndex]->wSTADesc.vSTAMACAddress.bytes, (void*)w8023Header.vDA, 6) ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_LOW, "WLAN TL: Got a TDLS station. Using that index")); ucStaId = ucIndex; *pucStaId = ucStaId; pClientSTA = pTLCb->atlSTAClients[ucStaId]; if ( NULL == pClientSTA ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Client Memory was not allocated on %s", __func__)); return VOS_STATUS_E_FAILURE; } break; } } } #endif #ifdef FEATURE_WLAN_ESE_UPLOAD if ((0 == w8023Header.usLenType) && (pClientSTA->wSTADesc.ucIsEseSta)) { vos_pkt_extract_data(vosDataBuff,0,&wIAPPSnap[0],&wIAPPSnapSize); if (vos_mem_compare(wIAPPSnap,WLANTL_AIRONET_SNAP_HEADER,WLANTL_LLC_HEADER_LEN)) { /*The SNAP and the protocol type are already in the data buffer. They are filled by the application (wpa_supplicant). 
So, Skip Adding LLC below.*/ bIAPPTxwithLLC = VOS_TRUE; } else { bIAPPTxwithLLC = VOS_FALSE; } } #endif /* FEATURE_WLAN_ESE_UPLOAD */ if ((0 != pClientSTA->wSTADesc.ucAddRmvLLC) #ifdef FEATURE_WLAN_ESE_UPLOAD && (!bIAPPTxwithLLC) #endif /* FEATURE_WLAN_ESE_UPLOAD */ ) { /* Push the length */ vosStatus = vos_pkt_push_head(vosDataBuff, &w8023Header.usLenType, sizeof(w8023Header.usLenType)); if ( VOS_STATUS_SUCCESS != vosStatus ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL: Packet push ether type fails on" " WLANTL_Translate8023To80211Header")); return vosStatus; } #ifdef BTAMP_TEST // The STA side will execute this, a hack to test BTAMP by using the // infra setup. On real BTAMP this will come from BAP itself. { static v_U8_t WLANTL_BT_AMP_LLC_HEADER[] = {0xAA, 0xAA, 0x03, 0x00, 0x19, 0x58 }; vosStatus = vos_pkt_push_head(vosDataBuff, WLANTL_BT_AMP_LLC_HEADER, sizeof(WLANTL_BT_AMP_LLC_HEADER)); if ( VOS_STATUS_SUCCESS != vosStatus ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL: Packet push LLC header fails on" " WLANTL_Translate8023To80211Header")); return vosStatus; } } #else vosStatus = vos_pkt_push_head(vosDataBuff, WLANTL_LLC_HEADER, sizeof(WLANTL_LLC_HEADER)); if ( VOS_STATUS_SUCCESS != vosStatus ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL: Packet push LLC header fails on" " WLANTL_Translate8023To80211Header")); return vosStatus; } #endif }/*If add LLC is enabled*/ else { #ifdef FEATURE_WLAN_ESE_UPLOAD bIAPPTxwithLLC = VOS_FALSE; /*Reset the Flag here to start afresh with the next TX pkt*/ #endif /* FEATURE_WLAN_ESE_UPLOAD */ TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL: STA Client registered to not remove LLC" " WLANTL_Translate8023To80211Header")); } #ifdef BTAMP_TEST pClientSTA->wSTADesc.wSTAType = WLAN_STA_BT_AMP; #endif // Find the space required for the 802.11 header format // based on the frame control fields. 
ucHeaderSize = MandatoryucHeaderSize; if (pClientSTA->wSTADesc.ucQosEnabled) { ucHeaderSize += sizeof(pw80211Header->usQosCtrl); } if (pClientSTA->wSTADesc.wSTAType == WLAN_STA_BT_AMP) { ucHeaderSize += sizeof(pw80211Header->optvA4); ucQoSOffset += sizeof(pw80211Header->optvA4); } TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, " WLANTL_Translate8023To80211Header : Header size = %d ", ucHeaderSize)); vos_pkt_reserve_head( vosDataBuff, &ppvBDHeader, ucHeaderSize ); if ( NULL == ppvBDHeader ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:VOSS packet corrupted ")); *pvosStatus = VOS_STATUS_E_INVAL; return *pvosStatus; } // OK now we have the space. Fill the 80211 header /* Fill A2 */ pw80211Header = (WLANTL_80211HeaderType *)(ppvBDHeader); // only clear the required space. vos_mem_set( pw80211Header, ucHeaderSize, 0 ); vos_mem_copy( pw80211Header->vA2, w8023Header.vSA, VOS_MAC_ADDR_SIZE); #ifdef FEATURE_WLAN_WAPI if (( WLANTL_STA_AUTHENTICATED == pClientSTA->tlState || pClientSTA->ptkInstalled ) && (tlMetaInfo->ucIsWai != 1)) #else if ( WLANTL_STA_AUTHENTICATED == pClientSTA->tlState || pClientSTA->ptkInstalled ) #endif { pw80211Header->wFrmCtrl.wep = pClientSTA->wSTADesc.ucProtectedFrame; } pw80211Header->usDurationId = 0; pw80211Header->usSeqCtrl = 0; pw80211Header->wFrmCtrl.type = WLANTL_80211_DATA_TYPE; if(pClientSTA->wSTADesc.ucQosEnabled) { pw80211Header->wFrmCtrl.subType = WLANTL_80211_DATA_QOS_SUBTYPE; *((v_U16_t *)((v_U8_t *)ppvBDHeader + ucQoSOffset)) = tlMetaInfo->ucUP; } else { pw80211Header->wFrmCtrl.subType = 0; tlMetaInfo->ucUP = 0; tlMetaInfo->ucTID = 0; // NO NO NO - there is not enough memory allocated to write the QOS ctrl // field, it will overwrite the first 2 bytes of the data packet(LLC header) // pw80211Header->usQosCtrl = 0; } switch( pClientSTA->wSTADesc.wSTAType ) { case WLAN_STA_IBSS: pw80211Header->wFrmCtrl.toDS = 0; pw80211Header->wFrmCtrl.fromDS = 0; vos_copy_macaddr( 
(v_MACADDR_t*)&pw80211Header->vA1, (v_MACADDR_t*)&w8023Header.vDA); vos_mem_copy( pw80211Header->vA3, &pClientSTA->wSTADesc.vBSSIDforIBSS , VOS_MAC_ADDR_SIZE); break; case WLAN_STA_BT_AMP: *ucWDSEnabled = 1; // WDS on. pw80211Header->wFrmCtrl.toDS = 1; pw80211Header->wFrmCtrl.fromDS = 1; vos_copy_macaddr( (v_MACADDR_t*)&pw80211Header->vA1, &pClientSTA->wSTADesc.vSTAMACAddress); vos_mem_copy( pw80211Header->vA2, w8023Header.vSA, VOS_MAC_ADDR_SIZE); vos_copy_macaddr( (v_MACADDR_t*)&pw80211Header->vA3, &pClientSTA->wSTADesc.vSTAMACAddress); /* fill the optional A4 header */ vos_mem_copy( pw80211Header->optvA4, w8023Header.vSA, VOS_MAC_ADDR_SIZE); TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "BTAMP CASE NOW ---------staid=%d", ucStaId)); break; case WLAN_STA_SOFTAP: *ucWDSEnabled = 0; // WDS off. pw80211Header->wFrmCtrl.toDS = 0; pw80211Header->wFrmCtrl.fromDS = 1; /*Copy the DA to A1*/ vos_mem_copy( pw80211Header->vA1, w8023Header.vDA , VOS_MAC_ADDR_SIZE); vos_copy_macaddr( (v_MACADDR_t*)&pw80211Header->vA2, &pClientSTA->wSTADesc.vSelfMACAddress); vos_mem_copy( pw80211Header->vA3, w8023Header.vSA, VOS_MAC_ADDR_SIZE); TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "sw 802 to 80211 softap case ---------staid=%d", ucStaId)); break; #ifdef FEATURE_WLAN_TDLS case WLAN_STA_TDLS: pw80211Header->wFrmCtrl.toDS = 0; pw80211Header->wFrmCtrl.fromDS = 0; /*Fix me*/ vos_copy_macaddr( (v_MACADDR_t*)&pw80211Header->vA1, &pClientSTA->wSTADesc.vSTAMACAddress); vos_mem_copy( pw80211Header->vA3, &pClientSTA->wSTADesc.vBSSIDforIBSS , VOS_MAC_ADDR_SIZE); VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, ("TL:TDLS CASE NOW ---------staid=%d"), ucStaId); break; #endif case WLAN_STA_INFRA: default: pw80211Header->wFrmCtrl.toDS = 1; pw80211Header->wFrmCtrl.fromDS = 0; vos_copy_macaddr( (v_MACADDR_t*)&pw80211Header->vA1, &pClientSTA->wSTADesc.vSTAMACAddress); vos_mem_copy( pw80211Header->vA3, w8023Header.vDA , VOS_MAC_ADDR_SIZE); TLLOG2(VOS_TRACE( 
VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "REGULAR INFRA LINK CASE---------staid=%d", ucStaId)); break; } // OK now we have the space. Fill the 80211 header /* Fill A2 */ pw80211Header = (WLANTL_80211HeaderType *)(ppvBDHeader); return VOS_STATUS_SUCCESS; }/*WLANTL_Translate8023To80211Header*/ /*============================================================================= BEGIN LOG FUNCTION !!! Remove me or clean me =============================================================================*/ #if 0 //def WLANTL_DEBUG #define WLANTL_DEBUG_FRAME_BYTE_PER_LINE 16 #define WLANTL_DEBUG_FRAME_BYTE_PER_BYTE 4 static v_VOID_t WLANTL_DebugFrame ( v_PVOID_t dataPointer, v_U32_t dataSize ) { v_U8_t lineBuffer[WLANTL_DEBUG_FRAME_BYTE_PER_LINE]; v_U32_t numLines; v_U32_t numBytes; v_U32_t idx; v_U8_t *linePointer; numLines = dataSize / WLANTL_DEBUG_FRAME_BYTE_PER_LINE; numBytes = dataSize % WLANTL_DEBUG_FRAME_BYTE_PER_LINE; linePointer = (v_U8_t *)dataPointer; TLLOGE(VOS_TRACE(VOS_MODULE_ID_SAL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Frame Debug Frame Size %d, Pointer 0x%p", dataSize, dataPointer)); for(idx = 0; idx < numLines; idx++) { memset(lineBuffer, 0, WLANTL_DEBUG_FRAME_BYTE_PER_LINE); memcpy(lineBuffer, linePointer, WLANTL_DEBUG_FRAME_BYTE_PER_LINE); TLLOGE(VOS_TRACE(VOS_MODULE_ID_SAL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:0x%2x 0x%2x 0x%2x 0x%2x 0x%2x 0x%2x 0x%2x 0x%2x 0x%2x 0x%2x 0x%2x 0x%2x 0x%2x 0x%2x 0x%2x 0x%2x", lineBuffer[0], lineBuffer[1], lineBuffer[2], lineBuffer[3], lineBuffer[4], lineBuffer[5], lineBuffer[6], lineBuffer[7], lineBuffer[8], lineBuffer[9], lineBuffer[10], lineBuffer[11], lineBuffer[12], lineBuffer[13], lineBuffer[14], lineBuffer[15])); linePointer += WLANTL_DEBUG_FRAME_BYTE_PER_LINE; } if(0 == numBytes) return; memset(lineBuffer, 0, WLANTL_DEBUG_FRAME_BYTE_PER_LINE); memcpy(lineBuffer, linePointer, numBytes); for(idx = 0; idx < WLANTL_DEBUG_FRAME_BYTE_PER_LINE / WLANTL_DEBUG_FRAME_BYTE_PER_BYTE; idx++) { TLLOGE(VOS_TRACE(VOS_MODULE_ID_SAL, 
VOS_TRACE_LEVEL_ERROR,
                     "WLAN TL:0x%2x 0x%2x 0x%2x 0x%2x",
                     lineBuffer[idx * WLANTL_DEBUG_FRAME_BYTE_PER_BYTE],
                     lineBuffer[1 + idx * WLANTL_DEBUG_FRAME_BYTE_PER_BYTE],
                     lineBuffer[2 + idx * WLANTL_DEBUG_FRAME_BYTE_PER_BYTE],
                     lineBuffer[3 + idx * WLANTL_DEBUG_FRAME_BYTE_PER_BYTE]));
    /* Stop once the trailing partial line has been fully dumped */
    if(((idx + 1) * WLANTL_DEBUG_FRAME_BYTE_PER_BYTE) >= numBytes)
      break;
  }
  return;
}
#endif

/*=============================================================================
   END LOG FUNCTION
=============================================================================*/

/*==========================================================================
   FUNCTION    WLANTL_Translate80211To8023Header

   DESCRIPTION
     Inline function for translating an 802.11 header into an 802.3 header.

   DEPENDENCIES

   PARAMETERS

    IN
     pTLCb:        TL control block
     ucSTAId:      station ID
     ucHeaderLen:  length of the header from BD
     usActualHLen: length of header including padding or any other trailers

    IN/OUT
     vosDataBuff:  vos data buffer, will contain the new header on output

    OUT
     pvosStatus:   status of the operation

   RETURN VALUE
     The result code associated with performing the operation
     VOS_STATUS_SUCCESS: Everything is good :)

   SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_Translate80211To8023Header
(
  vos_pkt_t*      vosDataBuff,
  VOS_STATUS*     pvosStatus,
  v_U16_t         usActualHLen,
  v_U8_t          ucHeaderLen,
  WLANTL_CbType*  pTLCb,
  v_U8_t          ucSTAId,
  v_BOOL_t        bForwardIAPPwithLLC
)
{
  WLANTL_8023HeaderType  w8023Header;
  WLANTL_80211HeaderType w80211Header;
  v_U8_t                 aucLLCHeader[WLANTL_LLC_HEADER_LEN];
  VOS_STATUS             vosStatus;
  v_U16_t                usDataStartOffset = 0;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /* Clamp the header length so the pop below cannot overflow w80211Header */
  if ( sizeof(w80211Header) < ucHeaderLen )
  {
     TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
       "Warning !: Check the header size for the Rx frame structure=%d received=%dn",
       sizeof(w80211Header), ucHeaderLen));
     ucHeaderLen = sizeof(w80211Header);
  }

  // This will take care of headers of all sizes, 3 address, 3 addr QOS,
  // WDS non-QOS and WDS QoS etc. We have space for all in the 802.11 header
  // structure.
  vosStatus = vos_pkt_pop_head( vosDataBuff, &w80211Header, ucHeaderLen);

  if ( VOS_STATUS_SUCCESS != vosStatus )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
      "WLAN TL: Failed to pop 80211 header from packet %d", vosStatus));
    return vosStatus;
  }

  /* Derive the 802.3 DA/SA from the 802.11 addresses based on the
     ToDS/FromDS bits (1-bit fields, so the two cases are exhaustive) */
  switch ( w80211Header.wFrmCtrl.fromDS )
  {
  case 0:
    if ( w80211Header.wFrmCtrl.toDS )
    {
      //SoftAP AP mode
      vos_mem_copy( w8023Header.vDA, w80211Header.vA3, VOS_MAC_ADDR_SIZE);
      vos_mem_copy( w8023Header.vSA, w80211Header.vA2, VOS_MAC_ADDR_SIZE);
      /* NOTE(review): %08x with a MAC byte array logs the array address,
         not the MAC itself — log-only issue, confirm before changing */
      TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
                 "WLAN TL SoftAP: 802 3 DA %08x SA %08x",
                 w8023Header.vDA, w8023Header.vSA));
    }
    else
    {
      /* IBSS */
      vos_mem_copy( w8023Header.vDA, w80211Header.vA1, VOS_MAC_ADDR_SIZE);
      vos_mem_copy( w8023Header.vSA, w80211Header.vA2, VOS_MAC_ADDR_SIZE);
    }
    break;
  case 1:
    if ( w80211Header.wFrmCtrl.toDS )
    {
      /* BT-AMP case */
      vos_mem_copy( w8023Header.vDA, w80211Header.vA1, VOS_MAC_ADDR_SIZE);
      vos_mem_copy( w8023Header.vSA, w80211Header.vA2, VOS_MAC_ADDR_SIZE);
    }
    else
    {
      /* Infra */
      vos_mem_copy( w8023Header.vDA, w80211Header.vA1, VOS_MAC_ADDR_SIZE);
      vos_mem_copy( w8023Header.vSA, w80211Header.vA3, VOS_MAC_ADDR_SIZE);
    }
    break;
  }

  /* Trim any padding between the 802.11 header and the payload */
  if( usActualHLen > ucHeaderLen )
  {
     usDataStartOffset = usActualHLen - ucHeaderLen;
  }

  if ( 0 < usDataStartOffset )
  {
    vosStatus = vos_pkt_trim_head( vosDataBuff, usDataStartOffset );

    if ( VOS_STATUS_SUCCESS != vosStatus )
    {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL: Failed to trim header from packet %d", vosStatus));
      return vosStatus;
    }
  }

  if ( NULL == pTLCb->atlSTAClients[ucSTAId] )
  {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
          "WLAN TL:Client Memory was not allocated on %s", __func__));
      return VOS_STATUS_E_FAILURE;
  }

  /* When TL strips/adds LLC for this STA (and, for ESE, the frame is not
     an IAPP frame forwarded with LLC intact), pop the LLC/SNAP header and
     recover the EtherType from its last two bytes */
  if ( 0 != pTLCb->atlSTAClients[ucSTAId]->wSTADesc.ucAddRmvLLC
#ifdef FEATURE_WLAN_ESE_UPLOAD
      && (!bForwardIAPPwithLLC)
#endif /* FEATURE_WLAN_ESE_UPLOAD */
     )
  {
    // Extract the LLC header
    vosStatus = vos_pkt_pop_head( vosDataBuff, aucLLCHeader,
                                  WLANTL_LLC_HEADER_LEN);

    if ( VOS_STATUS_SUCCESS != vosStatus )
    {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
        "WLAN TL: Failed to pop LLC header from packet %d", vosStatus));
      return vosStatus;
    }

    //Extract the length
    vos_mem_copy(&w8023Header.usLenType,
      &aucLLCHeader[WLANTL_LLC_HEADER_LEN - sizeof(w8023Header.usLenType)],
      sizeof(w8023Header.usLenType) );
  }
  else
  {
    /* No LLC to strip: use the payload length (converted to big-endian)
       as the 802.3 length/type field */
    vosStatus = vos_pkt_get_packet_length(vosDataBuff,
                                          &w8023Header.usLenType);

    if ( VOS_STATUS_SUCCESS != vosStatus )
    {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL: Failed to get packet length %d", vosStatus));
      return vosStatus;
    }

    TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
      "WLAN TL: BTAMP len (ethertype) fld = %d", w8023Header.usLenType));
    w8023Header.usLenType = vos_cpu_to_be16(w8023Header.usLenType);
  }

  /* Prepend the freshly built 802.3 header */
  vos_pkt_push_head(vosDataBuff, &w8023Header, sizeof(w8023Header));

#ifdef BTAMP_TEST
  {
  // AP side will execute this.
  v_U8_t *temp_w8023Header = NULL;
  vosStatus = vos_pkt_peek_data( vosDataBuff, 0,
    &temp_w8023Header, sizeof(w8023Header) );
  }
#endif
#if 0 /*TL_DEBUG*/
  vos_pkt_get_packet_length(vosDataBuff, &usLen);
  vos_pkt_pop_head( vosDataBuff, aucData, usLen);

  WLANTL_DebugFrame(aucData, usLen);

  vos_pkt_push_head(vosDataBuff, aucData, usLen);

#endif

  *pvosStatus = VOS_STATUS_SUCCESS;

  return VOS_STATUS_SUCCESS;
}/*WLANTL_Translate80211To8023Header*/

/*==========================================================================
   FUNCTION    WLANTL_MonTranslate80211To8023Header

   DESCRIPTION
     Translates the 802.11 header of a monitor-mode RX frame into an 802.3
     header in place, sizing the 802.11 header from the RX BD.

   PARAMETERS
    IN/OUT
     vosDataBuff: vos data buffer, will contain the new header on output
    IN
     pTLCb:       TL control block

   RETURN VALUE
     VOS_STATUS_SUCCESS or the failing vos_pkt status code
============================================================================*/
VOS_STATUS
WLANTL_MonTranslate80211To8023Header
(
  vos_pkt_t*      vosDataBuff,
  WLANTL_CbType*  pTLCb
)
{
   v_U16_t                  usMPDUDOffset;
   v_U8_t                   ucMPDUHOffset;
   v_U8_t                   ucMPDUHLen;
   v_U16_t                  usActualHLen = 0;
   v_U16_t                  usDataStartOffset = 0;
   v_PVOID_t                aucBDHeader;
   WLANTL_8023HeaderType    w8023Header;
   WLANTL_80211HeaderType   w80211Header;
   VOS_STATUS               vosStatus;
   v_U8_t                   aucLLCHeader[WLANTL_LLC_HEADER_LEN];

   /* Read header/data offsets and header length from the RX BD */
   WDA_DS_PeekRxPacketInfo( vosDataBuff, (v_PVOID_t)&aucBDHeader, 0 );
   ucMPDUHOffset = (v_U8_t)WDA_GET_RX_MPDU_HEADER_OFFSET(aucBDHeader);
   usMPDUDOffset = (v_U16_t)WDA_GET_RX_MPDU_DATA_OFFSET(aucBDHeader);
   ucMPDUHLen = (v_U8_t)WDA_GET_RX_MPDU_HEADER_LEN(aucBDHeader);

   if (usMPDUDOffset > ucMPDUHOffset)
   {
      usActualHLen = usMPDUDOffset - ucMPDUHOffset;
   }

   /* Clamp so the pop below cannot overflow w80211Header */
   if ( sizeof(w80211Header) < ucMPDUHLen )
   {
      TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
        "Warning !: Check the header size for the Rx frame structure=%d received=%dn",
        sizeof(w80211Header), ucMPDUHLen));
      ucMPDUHLen = sizeof(w80211Header);
   }

   vosStatus = vos_pkt_pop_head( vosDataBuff, &w80211Header, ucMPDUHLen);
   if ( VOS_STATUS_SUCCESS != vosStatus )
   {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL: Failed to pop 80211 header from packet %d", vosStatus));
      return vosStatus;
   }

   /* DA/SA selection by ToDS/FromDS, mirroring
      WLANTL_Translate80211To8023Header above */
   switch ( w80211Header.wFrmCtrl.fromDS )
   {
   case 0:
     if ( w80211Header.wFrmCtrl.toDS )
     {
       vos_mem_copy( w8023Header.vDA, w80211Header.vA3, VOS_MAC_ADDR_SIZE);
       vos_mem_copy( w8023Header.vSA, w80211Header.vA2, VOS_MAC_ADDR_SIZE);
       /* NOTE(review): %08x with a MAC byte array logs the array address,
          not the MAC — log-only issue, confirm before changing */
       TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
                  "WLAN TL SoftAP: 802 3 DA %08x SA %08x",
                  w8023Header.vDA, w8023Header.vSA));
     }
     else
     {
       vos_mem_copy( w8023Header.vDA, w80211Header.vA1, VOS_MAC_ADDR_SIZE);
       vos_mem_copy( w8023Header.vSA, w80211Header.vA2, VOS_MAC_ADDR_SIZE);
     }
     break;
   case 1:
     if ( w80211Header.wFrmCtrl.toDS )
     {
       vos_mem_copy( w8023Header.vDA, w80211Header.vA1, VOS_MAC_ADDR_SIZE);
       vos_mem_copy( w8023Header.vSA, w80211Header.vA2, VOS_MAC_ADDR_SIZE);
     }
     else
     {
       vos_mem_copy( w8023Header.vDA, w80211Header.vA1, VOS_MAC_ADDR_SIZE);
       vos_mem_copy( w8023Header.vSA, w80211Header.vA3, VOS_MAC_ADDR_SIZE);
     }
     break;
   }

   if( usActualHLen > ucMPDUHLen )
   {
      usDataStartOffset = usActualHLen - ucMPDUHLen;
   }

   if ( 0 < usDataStartOffset )
   {
     vosStatus = vos_pkt_trim_head( vosDataBuff, usDataStartOffset );
     if ( VOS_STATUS_SUCCESS != vosStatus )
     {
       TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
         "WLAN TL: Failed to trim header from packet %d", vosStatus));
       return vosStatus;
     }
   }

   // Extract the LLC header
   vosStatus = vos_pkt_pop_head( vosDataBuff, aucLLCHeader,
                                 WLANTL_LLC_HEADER_LEN);
   if ( VOS_STATUS_SUCCESS != vosStatus )
   {
     TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN,
       "WLAN TL: Failed to pop LLC header from packet %d", vosStatus));
     return vosStatus;
   }

   //Extract the length (EtherType sits in the last 2 bytes of LLC/SNAP)
   vos_mem_copy(&w8023Header.usLenType,
     &aucLLCHeader[WLANTL_LLC_HEADER_LEN - sizeof(w8023Header.usLenType)],
     sizeof(w8023Header.usLenType) );

   vos_pkt_push_head(vosDataBuff, &w8023Header, sizeof(w8023Header));

   return VOS_STATUS_SUCCESS;
}

/*==========================================================================
   FUNCTION    WLANTL_FindFrameTypeBcMcUc

   DESCRIPTION
     Utility function to find whether received frame is broadcast, multicast
     or unicast.

   DEPENDENCIES
     The STA must be registered with TL before this function can be called.
PARAMETERS

    IN
    pTLCb:          pointer to the TL's control block
    ucSTAId:        identifier of the station being processed
    vosDataBuff:    pointer to the vos buffer

    IN/OUT
    pucBcMcUc:      pointer to buffer, will contain frame type on return

   RETURN VALUE
     The result code associated with performing the operation
     VOS_STATUS_E_INVAL:   invalid input parameters
     VOS_STATUS_E_BADMSG:  failed to extract info from data buffer
     VOS_STATUS_SUCCESS:   success

   SIDE EFFECTS
     None.
============================================================================*/
VOS_STATUS
WLANTL_FindFrameTypeBcMcUc
(
  WLANTL_CbType *pTLCb,
  v_U8_t        ucSTAId,
  vos_pkt_t     *vosDataBuff,
  v_U8_t        *pucBcMcUc
)
{
   VOS_STATUS    vosStatus = VOS_STATUS_SUCCESS;
   v_PVOID_t     aucBDHeader;
   v_PVOID_t     pvPeekData;
   /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

   /*------------------------------------------------------------------------
     Sanity check
   ------------------------------------------------------------------------*/
   if ((NULL == pTLCb) ||
       (NULL == vosDataBuff) ||
       (NULL == pucBcMcUc))
   {
      TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
             "WLAN TL:Invalid parameter in WLANTL_FindFrameTypeBcMcUc"));
      return VOS_STATUS_E_INVAL;
   }

   if ( NULL == pTLCb->atlSTAClients[ucSTAId] )
   {
       TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
           "WLAN TL:Client Memory was not allocated on %s", __func__));
       return VOS_STATUS_E_FAILURE;
   }

   /*------------------------------------------------------------------------
     Extract BD header and check if valid
   ------------------------------------------------------------------------*/
   vosStatus = WDA_DS_PeekRxPacketInfo(vosDataBuff,
                                       (v_PVOID_t)&aucBDHeader,
                                       0/*Swap BD*/ );

   if (NULL == aucBDHeader)
   {
      TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
             "WLAN TL:WLANTL_FindFrameTypeBcMcUc - Cannot extract BD header"));
      VOS_ASSERT(0);
      return VOS_STATUS_E_BADMSG;
   }

   /* If HW/FW frame translation did not run and SW translation is enabled
      for this STA, the frame is still 802.11; otherwise it is 802.3 */
   if ((0 == WDA_GET_RX_FT_DONE(aucBDHeader)) &&
       (0 != pTLCb->atlSTAClients[ucSTAId]->wSTADesc.ucSwFrameRXXlation))
   {
      /* Its an 802.11 frame, extract MAC address 1 */
      TLLOG2(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
             "WLAN TL:WLANTL_FindFrameTypeBcMcUc - 802.11 frame, peeking Addr1"));
      vosStatus = vos_pkt_peek_data(vosDataBuff, WLANTL_MAC_ADDR_ALIGN(1),
                                    (v_PVOID_t)&pvPeekData, VOS_MAC_ADDR_SIZE);
   }
   else
   {
      /* Its an 802.3 frame, extract Destination MAC address */
      TLLOG2(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
             "WLAN TL:WLANTL_FindFrameTypeBcMcUc - 802.3 frame, peeking DA"));
      vosStatus = vos_pkt_peek_data(vosDataBuff, WLANTL_MAC_ADDR_ALIGN(0),
                                    (v_PVOID_t)&pvPeekData, VOS_MAC_ADDR_SIZE);
   }

   if (VOS_STATUS_SUCCESS != vosStatus)
   {
      TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
             "WLAN TL:WLANTL_FindFrameTypeBcMcUc - Failed to peek MAC address"));
      return vosStatus;
   }

   /* Classify on the first address byte: 0xff => broadcast, group bit
      (LSB) set => multicast, else unicast.
      NOTE(review): a strict broadcast test would compare all 6 bytes
      against ff:ff:ff:ff:ff:ff; this checks only the first byte —
      confirm intent before changing */
   if (((tANI_U8 *)pvPeekData)[0] == 0xff)
   {
      *pucBcMcUc = WLANTL_FRAME_TYPE_BCAST;
   }
   else
   {
      if ((((tANI_U8 *)pvPeekData)[0] & 0x01) == 0x01)
         *pucBcMcUc = WLANTL_FRAME_TYPE_MCAST;
      else
         *pucBcMcUc = WLANTL_FRAME_TYPE_UCAST;
   }

   TLLOG2(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
          "WLAN TL:WLANTL_FindFrameTypeBcMcUc - Addr1Byte1 is: %x",
          ((tANI_U8 *)pvPeekData)[0]));

   return VOS_STATUS_SUCCESS;
}

#if 0
#ifdef WLAN_PERF
/*==========================================================================
   FUNCTION    WLANTL_FastHwFwdDataFrame

   DESCRIPTION
     (Currently compiled out via "#if 0".)
     Fast path function to quickly forward a data frame if HAL determines BD
     signature computed here matches the signature inside current VOSS
     packet. If there is a match, HAL and TL fill in the swapped packet
     length into BD header and DxE header, respectively. Otherwise, packet
     goes back to normal (slow) path and a new BD signature would be tagged
     into BD in this VOSS packet later by the WLANHAL_FillTxBd() function.

   DEPENDENCIES

   PARAMETERS

    IN
        pvosGCtx    VOS context
        vosDataBuff Ptr to VOSS packet
        pMetaInfo   For getting frame's TID
        pStaInfo    For checking STA type

    OUT
        pvosStatus  returned status
        puFastFwdOK Flag to indicate whether frame could be fast forwarded

   RETURN VALUE
     No return.
SIDE EFFECTS

============================================================================*/
static void
WLANTL_FastHwFwdDataFrame
(
  v_PVOID_t     pvosGCtx,
  vos_pkt_t*    vosDataBuff,
  VOS_STATUS*   pvosStatus,
  v_U32_t*      puFastFwdOK,
  WLANTL_MetaInfoType*  pMetaInfo,
  WLAN_STADescType*     pStaInfo
)
{
   v_PVOID_t   pvPeekData;
   /* Head room needed: BD header plus the DxE header in front of it */
   v_U8_t      ucDxEBDWLANHeaderLen = WLANTL_BD_HEADER_LEN(0) +
                                      sizeof(WLANBAL_sDXEHeaderType);
   v_U8_t      ucIsUnicast;
   WLANBAL_sDXEHeaderType  *pDxEHeader;
   v_PVOID_t   pvBDHeader;
   v_PVOID_t   pucBuffPtr;
   v_U16_t     usPktLen;

   /*-----------------------------------------------------------------------
     Extract packet length
   -----------------------------------------------------------------------*/
   vos_pkt_get_packet_length( vosDataBuff, &usPktLen);

   /*-----------------------------------------------------------------------
     Extract MAC address
   -----------------------------------------------------------------------*/
   *pvosStatus = vos_pkt_peek_data( vosDataBuff,
                                    WLANTL_MAC_ADDR_ALIGN(0),
                                    (v_PVOID_t)&pvPeekData,
                                    VOS_MAC_ADDR_SIZE );
   if ( VOS_STATUS_SUCCESS != *pvosStatus )
   {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
                "WLAN TL:Failed while attempting to extract MAC Addr %d",
                *pvosStatus));
      *pvosStatus = VOS_STATUS_E_INVAL;
      return;
   }

   /*-----------------------------------------------------------------------
     Reserve head room for DxE header, BD, and WLAN header
   -----------------------------------------------------------------------*/
   vos_pkt_reserve_head( vosDataBuff, &pucBuffPtr,
                         ucDxEBDWLANHeaderLen );
   if ( NULL == pucBuffPtr )
   {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
                "WLAN TL:No enough space in VOSS packet %p for DxE/BD/WLAN header",
                vosDataBuff));
      *pvosStatus = VOS_STATUS_E_INVAL;
      return;
   }
   pDxEHeader = (WLANBAL_sDXEHeaderType *)pucBuffPtr;
   /* BD header sits immediately after the DxE header */
   pvBDHeader = (v_PVOID_t) &pDxEHeader[1];

   /* UMA Tx acceleration is enabled.
    * UMA would help convert frames to 802.11, fill partial BD fields and
    * construct LLC header. To further accelerate this kind of frames,
    * HAL would attempt to reuse the BD descriptor if the BD signature
    * matches to the saved BD descriptor.
    */
   if(pStaInfo->wSTAType == WLAN_STA_IBSS)
      ucIsUnicast = !(((tANI_U8 *)pvPeekData)[0] & 0x01);
   else
      ucIsUnicast = 1;

   *puFastFwdOK = (v_U32_t) WLANHAL_TxBdFastFwd(pvosGCtx, pvPeekData,
                     pMetaInfo->ucTID, ucIsUnicast, pvBDHeader, usPktLen );

   /* Can't be fast forwarded. Trim the VOS head back to original location. */
   if(! *puFastFwdOK){
      vos_pkt_trim_head(vosDataBuff, ucDxEBDWLANHeaderLen);
   }else{
      /* could be fast forwarded. Now notify BAL DxE header filling could be
         completely skipped */
      v_U32_t uPacketSize = WLANTL_BD_HEADER_LEN(0) + usPktLen;
      vos_pkt_set_user_data_ptr( vosDataBuff, VOS_PKT_USER_DATA_ID_BAL,
                                 (v_PVOID_t)uPacketSize);
      pDxEHeader->size = SWAP_ENDIAN_UINT32(uPacketSize);
   }
   *pvosStatus = VOS_STATUS_SUCCESS;
   return;
}
#endif /*WLAN_PERF*/
#endif

#if 0
/*==========================================================================
   FUNCTION    WLANTL_PrepareBDHeader

   DESCRIPTION
     (Currently compiled out via "#if 0".)
     Inline function for preparing BD header before HAL processing.

   DEPENDENCIES
     Just notify HAL that suspend in TL is complete.

   PARAMETERS

    IN
    vosDataBuff:      vos data buffer
    ucDisableFrmXtl:  is frame xtl disabled

    OUT
    ppvBDHeader:      it will contain the BD header
    pvDestMacAdddr:   it will contain the destination MAC address
    pvosStatus:       status of the combined processing
    pusPktLen:        packet len.

   RETURN VALUE
     No return.
SIDE EFFECTS

============================================================================*/
void
WLANTL_PrepareBDHeader
(
  vos_pkt_t*      vosDataBuff,
  v_PVOID_t*      ppvBDHeader,
  v_MACADDR_t*    pvDestMacAdddr,
  v_U8_t          ucDisableFrmXtl,
  VOS_STATUS*     pvosStatus,
  v_U16_t*        pusPktLen,
  v_U8_t          ucQosEnabled,
  v_U8_t          ucWDSEnabled,
  v_U8_t          extraHeadSpace
)
{
  v_U8_t      ucHeaderOffset;
  v_U8_t      ucHeaderLen;
  v_U8_t      ucBDHeaderLen = WLANTL_BD_HEADER_LEN(ucDisableFrmXtl);
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*-------------------------------------------------------------------------
    Get header pointer from VOSS
    !!! make sure reserve head zeros out the memory
  -------------------------------------------------------------------------*/
  vos_pkt_get_packet_length( vosDataBuff, pusPktLen);

  if ( WLANTL_MAC_HEADER_LEN(ucDisableFrmXtl) > *pusPktLen )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
              "WLAN TL: Length of the packet smaller than expected network"
              " header %d", *pusPktLen ));
    *pvosStatus = VOS_STATUS_E_INVAL;
    return;
  }

  vos_pkt_reserve_head( vosDataBuff, ppvBDHeader,
                        ucBDHeaderLen );
  if ( NULL == *ppvBDHeader )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
              "WLAN TL:VOSS packet corrupted on Attach BD header"));
    *pvosStatus = VOS_STATUS_E_INVAL;
    return;
  }

  /*-----------------------------------------------------------------------
    Extract MAC address
  -----------------------------------------------------------------------*/
  {
   v_SIZE_t usMacAddrSize = VOS_MAC_ADDR_SIZE;
   *pvosStatus = vos_pkt_extract_data( vosDataBuff,
                                       ucBDHeaderLen +
                                       WLANTL_MAC_ADDR_ALIGN(ucDisableFrmXtl),
                                       (v_PVOID_t)pvDestMacAdddr,
                                       &usMacAddrSize );
  }
  if ( VOS_STATUS_SUCCESS != *pvosStatus )
  {
     TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
                "WLAN TL:Failed while attempting to extract MAC Addr %d",
                *pvosStatus));
  }
  else
  {
    /*---------------------------------------------------------------------
        Fill MPDU info fields:
          - MPDU data start offset
          - MPDU header start offset
          - MPDU header length
          - MPDU length - this is a 16b field - needs swapping
    --------------------------------------------------------------------*/
    ucHeaderOffset = ucBDHeaderLen;
    ucHeaderLen    = WLANTL_MAC_HEADER_LEN(ucDisableFrmXtl);

    if ( 0 != ucDisableFrmXtl )
    {
      if ( 0 != ucQosEnabled )
      {
        ucHeaderLen += WLANTL_802_11_HEADER_QOS_CTL;
      }
      // Similar to Qos we need something for WDS format !
      if ( ucWDSEnabled != 0 )
      {
        // If we have frame translation enabled
        ucHeaderLen += WLANTL_802_11_HEADER_ADDR4_LEN;
      }
      if ( extraHeadSpace != 0 )
      {
        // Decrease the packet length with the extra padding after the header
        *pusPktLen = *pusPktLen - extraHeadSpace;
      }
    }

    WLANHAL_TX_BD_SET_MPDU_HEADER_LEN( *ppvBDHeader, ucHeaderLen);
    WLANHAL_TX_BD_SET_MPDU_HEADER_OFFSET( *ppvBDHeader, ucHeaderOffset);
    WLANHAL_TX_BD_SET_MPDU_DATA_OFFSET( *ppvBDHeader,
                                          ucHeaderOffset + ucHeaderLen +
                                          extraHeadSpace);
    WLANHAL_TX_BD_SET_MPDU_LEN( *ppvBDHeader, *pusPktLen);

    TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
                "WLAN TL: VALUES ARE HLen=%x Hoff=%x doff=%x len=%x ex=%d",
                ucHeaderLen, ucHeaderOffset,
                (ucHeaderOffset + ucHeaderLen + extraHeadSpace),
                *pusPktLen, extraHeadSpace));
  }/* if peek MAC success*/

}/* WLANTL_PrepareBDHeader */
#endif

//THIS IS A HACK AND NEEDS TO BE FIXED FOR CONCURRENCY
/*==========================================================================
   FUNCTION    WLAN_TLAPGetNextTxIds

   DESCRIPTION
     Gets the next station and next AC in the list that should be served
     by the TL.

     Multiple Station Scheduling and TL queue management.

     4 HDD BC/MC data packet queue status is specified as Station 0's
     status.

     Weights used in WFQ algorithm are initialized in WLANTL_OPEN and
     contained in tlConfigInfo field.

     Each station has fields of ucPktPending and AC mask to tell whether
     a AC has traffic or not.

     Stations are served in a round-robin fashion from highest priority
     to lowest priority.
The number of round-robin times of each priority equals to the WFQ
     weights and differentiates the traffic of different priority. As such,
     stations can not provide low priority packets if high priority packets
     are all served.

   DEPENDENCIES

   PARAMETERS

    IN
    pvosGCtx:       pointer to the global vos context; a handle to TL's
                    control block can be extracted from its context

    OUT
    pucSTAId:       Station ID

   RETURN VALUE
     The result code associated with performing the operation
     VOS_STATUS_SUCCESS:  Everything is good

   SIDE EFFECTS
     TL context contains currently served station ID in ucCurrentSTA field,
     currently served AC in uCurServedAC field, and unserved weights of
     current AC in ucCurLeftWeight. When exiting from the function, these
     three fields are changed accordingly.
============================================================================*/
VOS_STATUS
WLAN_TLAPGetNextTxIds
(
  v_PVOID_t    pvosGCtx,
  v_U8_t*      pucSTAId
)
{
   WLANTL_CbType*  pTLCb;
   v_U8_t          ucACFilter = 1;
   v_U8_t          ucNextSTA ;
   v_BOOL_t        isServed = TRUE;  //current round has find a packet or not
   v_U8_t          ucACLoopNum = WLANTL_AC_HIGH_PRIO + 1; //number of loop to go
   v_U8_t          uFlowMask; // TX FlowMask from WDA
   uint8           ucACMask;
   uint8           i = 0;

   /*------------------------------------------------------------------------
     Extract TL control block
   ------------------------------------------------------------------------*/
   //ENTER();
   pTLCb = VOS_GET_TL_CB(pvosGCtx);
   if ( NULL == pTLCb )
   {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL:Invalid TL pointer from pvosGCtx on WLAN_TLAPGetNextTxIds"));
      return VOS_STATUS_E_FAULT;
   }

   if ( VOS_STATUS_SUCCESS != WDA_DS_GetTxFlowMask( pvosGCtx, &uFlowMask ) )
   {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL:Failed to retrieve Flow control mask from WDA"));
      return VOS_STATUS_E_FAULT;
   }

   /* The flow mask does not differentiate between different ACs/Qs
    * since we use a single dxe channel for all ACs/Qs, hence it is
    * enough to check that there are dxe resources on data channel
    */
   uFlowMask &= WLANTL_DATA_FLOW_MASK;

   if (0 == uFlowMask)
   {
      TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL: No resources to send packets"));

      // Setting STA Id to invalid if mask is 0
      *pucSTAId = WLAN_MAX_STA_COUNT;
      return VOS_STATUS_E_FAULT;
   }

   /* Resume the round-robin at the station after the one served last */
   ucNextSTA = pTLCb->ucCurrentSTA;
   ++ucNextSTA;

   if ( WLAN_MAX_STA_COUNT <= ucNextSTA )
   {
      //one round is done.
      ucNextSTA = 0;
      pTLCb->ucCurLeftWeight--;
      isServed = FALSE;
      if ( 0 == pTLCb->ucCurLeftWeight )
      {
         //current prioirty is done
         if ( WLANTL_AC_BK == (WLANTL_ACEnumType)pTLCb->uCurServedAC )
         {
            //end of current VO, VI, BE, BK loop. Reset priority.
            pTLCb->uCurServedAC = WLANTL_AC_HIGH_PRIO;
         }
         else
         {
            pTLCb->uCurServedAC --;
         }

         pTLCb->ucCurLeftWeight = pTLCb->tlConfigInfo.ucAcWeights[pTLCb->uCurServedAC];

      } // (0 == pTLCb->ucCurLeftWeight)
   } //( WLAN_MAX_STA_COUNT == ucNextSTA )

   //decide how many loops to go. if current loop is partial, do one extra to make sure
   //we cover every station
   if ((1 == pTLCb->ucCurLeftWeight) && (ucNextSTA != 0))
   {
      ucACLoopNum ++; // now is 5 loops
   }

   /* Start with highest priority. ucNextSTA, pTLCb->uCurServedAC,
      pTLCb->ucCurLeftWeight all have previous values.*/
   for (; ucACLoopNum > 0; ucACLoopNum--)
   {
      ucACFilter = 1 << pTLCb->uCurServedAC;

      // pTLCb->ucCurLeftWeight keeps previous results.
      for (; (pTLCb->ucCurLeftWeight > 0) ; pTLCb->ucCurLeftWeight-- )
      {
         for ( ; ucNextSTA < WLAN_MAX_STA_COUNT; ucNextSTA ++ )
         {
            if(NULL == pTLCb->atlSTAClients[ucNextSTA])
            {
                continue;
            }

            WLAN_TL_AC_ARRAY_2_MASK (pTLCb->atlSTAClients[ucNextSTA], ucACMask, i);

            if ( (0 == pTLCb->atlSTAClients[ucNextSTA]->ucExists) ||
                 ((0 == pTLCb->atlSTAClients[ucNextSTA]->ucPktPending) && !(ucACMask)) ||
                 (0 == (ucACMask & ucACFilter)) )
            {
               //current station does not exist or have any packet to serve.
               continue;
            }

            if (WLANTL_STA_AUTHENTICATED != pTLCb->atlSTAClients[ucNextSTA]->tlState)
            {
               TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
                      "%s Sta %d not in auth state so skipping it.",
                      __func__, ucNextSTA));
               continue;
            }

            //go to next station if current station can't send due to flow control
            //Station is allowed to send when it is not in LWM mode. When station is in LWM mode,
            //station is allowed to send only after FW reports FW memory is below threshold and on-fly
            //packets are less then allowed value
            if ( (TRUE == pTLCb->atlSTAClients[ucNextSTA]->ucLwmModeEnabled) &&
                 ((FALSE == pTLCb->atlSTAClients[ucNextSTA]->ucLwmEventReported) ||
                     (0 < pTLCb->atlSTAClients[ucNextSTA]->uBuffThresholdMax))
               )
            {
               continue;
            }

            // Find a station. Weight is updated already.
            *pucSTAId = ucNextSTA;
            pTLCb->ucCurrentSTA = ucNextSTA;
            pTLCb->atlSTAClients[*pucSTAId]->ucCurrentAC = pTLCb->uCurServedAC;

            TLLOG4(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_LOW,
                   " TL serve one station AC: %d W: %d StaId: %d",
                   pTLCb->uCurServedAC, pTLCb->ucCurLeftWeight, pTLCb->ucCurrentSTA ));

            return VOS_STATUS_SUCCESS;
         } //STA loop

         ucNextSTA = 0;
         if ( FALSE == isServed )
         {
            //current loop finds no packet.no need to repeat for the same priority
            break;
         }
         //current loop is partial loop. go for one more loop.
         isServed = FALSE;

      } //Weight loop

      if (WLANTL_AC_BK == pTLCb->uCurServedAC)
      {
         pTLCb->uCurServedAC = WLANTL_AC_HIGH_PRIO;
      }
      else
      {
         pTLCb->uCurServedAC--;
      }
      pTLCb->ucCurLeftWeight = pTLCb->tlConfigInfo.ucAcWeights[pTLCb->uCurServedAC];

   }// AC loop

   TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
            " TL can't find one station to serve" ));

   pTLCb->uCurServedAC = WLANTL_AC_BK;
   pTLCb->ucCurLeftWeight = 1;
   //invalid number will be captured by caller
   pTLCb->ucCurrentSTA = WLAN_MAX_STA_COUNT;

   *pucSTAId = pTLCb->ucCurrentSTA;
   return VOS_STATUS_E_FAULT;
}

/*==========================================================================
   FUNCTION    WLAN_TLGetNextTxIds

   DESCRIPTION
     Gets the next station and next AC in the list

   DEPENDENCIES

   PARAMETERS

    IN
    pvosGCtx:       pointer to the global vos context; a handle to TL's
                    control block can be extracted from its context

    OUT
    pucSTAId:       Station ID

   RETURN VALUE
     The result code associated with performing the operation
     VOS_STATUS_SUCCESS:  Everything is good :)

   SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLAN_TLGetNextTxIds
(
  v_PVOID_t    pvosGCtx,
  v_U8_t*      pucSTAId
)
{
   WLANTL_CbType*  pTLCb;
   v_U8_t          ucNextAC;
   v_U8_t          ucNextSTA;
   v_U8_t          ucCount;
   v_U8_t          uFlowMask; // TX FlowMask from WDA
   v_U8_t          ucACMask = 0;
   v_U8_t          i = 0;
   tBssSystemRole systemRole; //RG HACK to be removed
   tpAniSirGlobal pMac;

   pMac = vos_get_context(VOS_MODULE_ID_PE, pvosGCtx);
   if ( NULL == pMac )
   {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
          "%s: Invalid pMac", __func__));
      return VOS_STATUS_E_FAULT;
   }

   systemRole = wdaGetGlobalSystemRole(pMac);

   /*------------------------------------------------------------------------
     Extract TL control block
   ------------------------------------------------------------------------*/
   pTLCb = VOS_GET_TL_CB(pvosGCtx);
   if ( NULL == pTLCb )
   {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL:Invalid TL pointer from pvosGCtx on WLAN_TLGetNextTxIds"));
      return
VOS_STATUS_E_FAULT;
   }

   /* AP role, IBSS, concurrency (and TDLS peers when compiled in) are
      scheduled by the WFQ scheduler above */
#ifdef FEATURE_WLAN_TDLS
   if ((eSYSTEM_AP_ROLE == systemRole) || (eSYSTEM_STA_IN_IBSS_ROLE == systemRole) ||
       (vos_concurrent_open_sessions_running()) || pTLCb->ucTdlsPeerCount)
#else
   if ((eSYSTEM_AP_ROLE == systemRole) || (eSYSTEM_STA_IN_IBSS_ROLE == systemRole) ||
       (vos_concurrent_open_sessions_running()))
#endif
   {
     return WLAN_TLAPGetNextTxIds(pvosGCtx,pucSTAId);
   }

   if ( VOS_STATUS_SUCCESS != WDA_DS_GetTxFlowMask( pvosGCtx, &uFlowMask ) )
   {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL:Failed to retrieve Flow control mask from WDA"));
      return VOS_STATUS_E_FAULT;
   }

   /* The flow mask does not differentiate between different ACs/Qs
    * since we use a single dxe channel for all ACs/Qs, hence it is
    * enough to check that there are dxe resources on data channel
    */
   uFlowMask &= WLANTL_DATA_FLOW_MASK;

   if (0 == uFlowMask)
   {
      TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
        "WLAN TL: No resources to send packets"));

      // Setting STA id to invalid if mask is 0
      *pucSTAId = WLAN_MAX_STA_COUNT;
      return VOS_STATUS_E_FAULT;
   }

   /*STA id - no priority yet implemented */
   /*-----------------------------------------------------------------------
      Choose the next STA for tx - for now go in a round robin fashion
      through all the stations that have pending packets
   -------------------------------------------------------------------------*/
   ucNextSTA = pTLCb->ucCurrentSTA;

   pTLCb->ucCurrentSTA = WLAN_MAX_STA_COUNT;
   for ( ucCount = 0; ucCount < WLAN_MAX_STA_COUNT; ucCount++ )
   {
     ucNextSTA = ( (ucNextSTA+1) >= WLAN_MAX_STA_COUNT )?0:(ucNextSTA+1);
     if(NULL == pTLCb->atlSTAClients[ucNextSTA])
     {
         continue;
     }
     if (( pTLCb->atlSTAClients[ucNextSTA]->ucExists ) &&
         ( pTLCb->atlSTAClients[ucNextSTA]->ucPktPending ))
     {
       if (WLANTL_STA_AUTHENTICATED == pTLCb->atlSTAClients[ucNextSTA]->tlState)
       {
         /* NOTE(review): *pucSTAId is logged before this function assigns
            it — at this point it still holds the caller's value */
         TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
                    "STA ID: %d on WLAN_TLGetNextTxIds", *pucSTAId));
         pTLCb->ucCurrentSTA = ucNextSTA;
         break;
       }
       else
       {
         TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
                "%s Sta %d is not in auth state, skipping this sta.",
                __func__, ucNextSTA));
       }
     }
   }

   *pucSTAId = pTLCb->ucCurrentSTA;

   if ( WLANTL_STA_ID_INVALID( *pucSTAId ) )
   {
     TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
       "WLAN TL:No station registered with TL at this point"));

     return VOS_STATUS_E_FAULT;
   }

   /*Convert the array to a mask for easier operation*/
   WLAN_TL_AC_ARRAY_2_MASK( pTLCb->atlSTAClients[*pucSTAId], ucACMask, i);

   if ( 0 == ucACMask )
   {
     TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
       "WLAN TL: Mask 0 "
       "STA ID: %d on WLAN_TLGetNextTxIds", *pucSTAId));

     /*setting STA id to invalid if mask is 0*/
     *pucSTAId = WLAN_MAX_STA_COUNT;

     return VOS_STATUS_E_FAULT;
   }

   /*-----------------------------------------------------------------------
     AC is updated whenever a packet is fetched from HDD -> the current
     weight of such an AC cannot be 0 -> in this case TL is expected to
     exit this function at this point during the main Tx loop
   -----------------------------------------------------------------------*/
   if ( 0 < pTLCb->atlSTAClients[*pucSTAId]->ucCurrentWeight )
   {
     TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
                "WLAN TL: Maintaining serviced AC to: %d for Weight: %d",
                pTLCb->atlSTAClients[*pucSTAId]->ucCurrentAC ,
                pTLCb->atlSTAClients[*pucSTAId]->ucCurrentWeight));
     return VOS_STATUS_SUCCESS;
   }

   /*-----------------------------------------------------------------------
     Choose highest priority AC - !!! optimize me
   -----------------------------------------------------------------------*/
   ucNextAC = pTLCb->atlSTAClients[*pucSTAId]->ucCurrentAC;
   TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
        "Next AC: %d", ucNextAC));

   /* Walk ACs from the current one downwards (wrapping BK -> high prio)
      until one with pending traffic is found */
   while ( 0 != ucACMask )
   {
     TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
          " AC Mask: %d Next: %d Res : %d",
            ucACMask, ( 1 << ucNextAC ), ( ucACMask & ( 1 << ucNextAC ))));

     if ( 0 != ( ucACMask & ( 1 << ucNextAC )))
     {
        pTLCb->atlSTAClients[*pucSTAId]->ucCurrentAC =
                                   (WLANTL_ACEnumType)ucNextAC;
        pTLCb->atlSTAClients[*pucSTAId]->ucCurrentWeight =
                       pTLCb->tlConfigInfo.ucAcWeights[ucNextAC];

        TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
                  "WLAN TL: Switching serviced AC to: %d with Weight: %d",
                  pTLCb->atlSTAClients[*pucSTAId]->ucCurrentAC ,
                  pTLCb->atlSTAClients[*pucSTAId]->ucCurrentWeight));
        break;
     }

     if (ucNextAC == WLANTL_AC_BK)
         ucNextAC = WLANTL_AC_HIGH_PRIO;
     else
         ucNextAC--;

     TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
          "Next AC %d", ucNextAC));
   }

   TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
             " C AC: %d C W: %d",
             pTLCb->atlSTAClients[*pucSTAId]->ucCurrentAC,
             pTLCb->atlSTAClients[*pucSTAId]->ucCurrentWeight));

   return VOS_STATUS_SUCCESS;
}/* WLAN_TLGetNextTxIds */

/*==========================================================================
      DEFAULT HANDLERS: Registered at initialization with TL
 ==========================================================================*/

/*==========================================================================
   FUNCTION    WLANTL_MgmtFrmRxDefaultCb

   DESCRIPTION
     Default Mgmt Frm rx callback: asserts all the time. If this function
     gets called it means there is no registered rx cb pointer for Mgmt Frm.

   DEPENDENCIES

   PARAMETERS
     Not used.

   RETURN VALUE
     VOS_STATUS_E_FAILURE: Always FAILURE.
============================================================================*/ VOS_STATUS WLANTL_MgmtFrmRxDefaultCb ( v_PVOID_t pvosGCtx, v_PVOID_t vosBuff ) { if ( NULL != vosBuff ) { TLLOGP(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_FATAL, "WLAN TL:Fatal failure: No registered Mgmt Frm client on pkt RX")); /* Drop packet */ vos_pkt_return_packet((vos_pkt_t *)vosBuff); } TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL: No registered Mgmt Frm client on pkt RX. Load/Unload in progress, Ignore")); return VOS_STATUS_E_FAILURE; }/*WLANTL_MgmtFrmRxDefaultCb*/ /*========================================================================== FUNCTION WLANTL_BAPRxDefaultCb DESCRIPTION Default BAP rx callback: asserts all the time. If this function gets called it means there is no registered rx cb pointer for BAP. DEPENDENCIES PARAMETERS Not used. RETURN VALUE VOS_STATUS_E_FAILURE: Always FAILURE. ============================================================================*/ VOS_STATUS WLANTL_BAPRxDefaultCb ( v_PVOID_t pvosGCtx, vos_pkt_t* vosDataBuff, WLANTL_BAPFrameEnumType frameType ) { TLLOGP(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_FATAL, "WLAN TL:Fatal failure: No registered BAP client on BAP pkt RX")); #ifndef BTAMP_TEST VOS_ASSERT(0); #endif return VOS_STATUS_E_FAILURE; }/*WLANTL_MgmtFrmRxDefaultCb*/ /*========================================================================== FUNCTION WLANTL_STARxDefaultCb DESCRIPTION Default STA rx callback: asserts all the time. If this function gets called it means there is no registered rx cb pointer for station. (Mem corruption most likely, it should never happen) DEPENDENCIES PARAMETERS Not used. RETURN VALUE VOS_STATUS_E_FAILURE: Always FAILURE. 
============================================================================*/ VOS_STATUS WLANTL_STARxDefaultCb ( v_PVOID_t pvosGCtx, vos_pkt_t* vosDataBuff, v_U8_t ucSTAId, WLANTL_RxMetaInfoType* pRxMetaInfo ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL: No registered STA client rx cb for STAID: %d dropping pkt", ucSTAId)); vos_pkt_return_packet(vosDataBuff); return VOS_STATUS_SUCCESS; }/*WLANTL_MgmtFrmRxDefaultCb*/ /*========================================================================== FUNCTION WLANTL_STAFetchPktDefaultCb DESCRIPTION Default fetch callback: asserts all the time. If this function gets called it means there is no registered fetch cb pointer for station. (Mem corruption most likely, it should never happen) DEPENDENCIES PARAMETERS Not used. RETURN VALUE VOS_STATUS_E_FAILURE: Always FAILURE. ============================================================================*/ VOS_STATUS WLANTL_STAFetchPktDefaultCb ( v_PVOID_t pvosGCtx, v_U8_t* pucSTAId, WLANTL_ACEnumType ucAC, vos_pkt_t** vosDataBuff, WLANTL_MetaInfoType* tlMetaInfo ) { TLLOGP(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_FATAL, "WLAN TL:Fatal failure: No registered STA client on data pkt RX")); VOS_ASSERT(0); return VOS_STATUS_E_FAILURE; }/*WLANTL_MgmtFrmRxDefaultCb*/ /*========================================================================== FUNCTION WLANTL_TxCompDefaultCb DESCRIPTION Default tx complete handler. It will release the completed pkt to prevent memory leaks. PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to TL/HAL/PE/BAP/HDD control block can be extracted from its context vosDataBuff: pointer to the VOSS data buffer that was transmitted wTxSTAtus: status of the transmission RETURN VALUE The result code associated with performing the operation; please check vos_pkt_return_packet for possible error codes. Please check vos_pkt_return_packet API for possible return values. 
============================================================================*/
VOS_STATUS
WLANTL_TxCompDefaultCb
(
 v_PVOID_t      pvosGCtx,
 vos_pkt_t*     vosDataBuff,
 VOS_STATUS     wTxSTAtus
)
{
  /* No tx-complete handler registered: release the packet here so the
     transmit path does not leak the vos packet. */
  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
    "WLAN TL:TXComp not registered, releasing pkt to prevent mem leak"));
  return vos_pkt_return_packet(vosDataBuff);
}/*WLANTL_TxCompDefaultCb*/

/*==========================================================================
      Cleanup functions
  ==========================================================================*/

/*==========================================================================

  FUNCTION    WLANTL_CleanCB

  DESCRIPTION
    Cleans TL control block: resets global counters/flags, cleans every
    registered station client, and restores the management-frame and
    BT-AMP client slots to their default (unregistered) state.

  DEPENDENCIES

  PARAMETERS

    IN
    pTLCb:   pointer to TL's control block
    ucEmpty: set if TL has to clean up the queues and release pending pkts
             (non-zero => any cached/pending vos packets are returned to
             the packet pool)

  RETURN VALUE
    The result code associated with performing the operation

    VOS_STATUS_E_INVAL:  invalid input parameters
    VOS_STATUS_SUCCESS:  Everything is good :)

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_CleanCB
(
  WLANTL_CbType*  pTLCb,
  v_U8_t          ucEmpty
)
{
  v_U8_t ucIndex;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*-------------------------------------------------------------------------
    Sanity check
   -------------------------------------------------------------------------*/
  if ( NULL == pTLCb )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
               "WLAN TL:Invalid parameter sent on WLANTL_CleanCB"));
    return VOS_STATUS_E_INVAL;
  }

  /* number of packets sent to BAL waiting for tx complete confirmation */
  pTLCb->usPendingTxCompleteCount = 0;

  /* global suspend flag - TL tx is left suspended after a clean */
  vos_atomic_set_U8( &pTLCb->ucTxSuspended, 1);

  /* resource flag */
  pTLCb->uResCount = 0;


  /*-------------------------------------------------------------------------
    Client stations - clean every allocated STA client slot
  -------------------------------------------------------------------------*/
  for ( ucIndex = 0; ucIndex < WLAN_MAX_STA_COUNT ; ucIndex++)
  {
    if(NULL != pTLCb->atlSTAClients[ucIndex])
    {
        WLANTL_CleanSTA( pTLCb->atlSTAClients[ucIndex], ucEmpty);
    }
  }

  /*-------------------------------------------------------------------------
    Management Frame client - release any cached frame when asked to empty
  -------------------------------------------------------------------------*/
  pTLCb->tlMgmtFrmClient.ucExists = 0;

  if ( ( 0 != ucEmpty) &&
       ( NULL != pTLCb->tlMgmtFrmClient.vosPendingDataBuff ))
  {
    vos_pkt_return_packet(pTLCb->tlMgmtFrmClient.vosPendingDataBuff);
  }

  pTLCb->tlMgmtFrmClient.vosPendingDataBuff  = NULL;

  /* set to a default cb in order to prevent constant checking for NULL */
  pTLCb->tlMgmtFrmClient.pfnTlMgmtFrmRx = WLANTL_MgmtFrmRxDefaultCb;

  /*-------------------------------------------------------------------------
    BT AMP client - release pending/cached buffers when asked to empty
  -------------------------------------------------------------------------*/
  pTLCb->tlBAPClient.ucExists = 0;

  if (( 0 != ucEmpty) &&
      ( NULL != pTLCb->tlBAPClient.vosPendingDataBuff ))
  {
    vos_pkt_return_packet(pTLCb->tlBAPClient.vosPendingDataBuff);
  }
  if (( 0 != ucEmpty) &&
      ( NULL != pTLCb->vosDummyBuf ))
  {
    vos_pkt_return_packet(pTLCb->vosDummyBuf);
  }

  pTLCb->tlBAPClient.vosPendingDataBuff  = NULL;

  pTLCb->vosDummyBuf = NULL;
  pTLCb->vosTempBuf  = NULL;
  pTLCb->ucCachedSTAId = WLAN_MAX_STA_COUNT;

  /* set to a default cb in order to prevent constant checking for NULL */
  pTLCb->tlBAPClient.pfnTlBAPRx = WLANTL_BAPRxDefaultCb;

  pTLCb->ucRegisteredStaId = WLAN_MAX_STA_COUNT;

  return VOS_STATUS_SUCCESS;

}/* WLANTL_CleanCB*/

/*==========================================================================

  FUNCTION    WLANTL_CleanSTA

  DESCRIPTION
    Cleans a station control block.
DEPENDENCIES

  PARAMETERS

    IN
    pvosGCtx:  pointer to the global vos context; a handle to TL's
               control block can be extracted from its context
    ucEmpty:   if set the queues and pending pkts will be emptyed

  RETURN VALUE
    The result code associated with performing the operation

    VOS_STATUS_E_INVAL:  invalid input parameters
    VOS_STATUS_SUCCESS:  Everything is good :)

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_CleanSTA
(
  WLANTL_STAClientType*  ptlSTAClient,
  v_U8_t                 ucEmpty
)
{
  v_U8_t  ucIndex;
  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*-------------------------------------------------------------------------
    Sanity check
   -------------------------------------------------------------------------*/
  if ( NULL == ptlSTAClient )
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
               "WLAN TL:Invalid parameter sent on WLANTL_CleanSTA"));
    return VOS_STATUS_E_INVAL;
  }

  /*------------------------------------------------------------------------
    Clear station from TL - restore default callbacks, reset state to
    INIT and wipe the station descriptor
   ------------------------------------------------------------------------*/
  TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
             "WLAN TL: Clearing STA Client ID: %d, Empty flag: %d",
             ptlSTAClient->wSTADesc.ucSTAId, ucEmpty ));

  ptlSTAClient->pfnSTARx          = WLANTL_STARxDefaultCb;
  ptlSTAClient->pfnSTATxComp      = WLANTL_TxCompDefaultCb;
  ptlSTAClient->pfnSTAFetchPkt    = WLANTL_STAFetchPktDefaultCb;

  ptlSTAClient->tlState   = WLANTL_STA_INIT;
  ptlSTAClient->tlPri     = WLANTL_STA_PRI_NORMAL;

  vos_zero_macaddr( &ptlSTAClient->wSTADesc.vSTAMACAddress );
  vos_zero_macaddr( &ptlSTAClient->wSTADesc.vBSSIDforIBSS );
  vos_zero_macaddr( &ptlSTAClient->wSTADesc.vSelfMACAddress );

  ptlSTAClient->wSTADesc.ucSTAId  = 0;
  ptlSTAClient->wSTADesc.wSTAType = WLAN_STA_MAX;

  ptlSTAClient->wSTADesc.ucQosEnabled     = 0;
  ptlSTAClient->wSTADesc.ucAddRmvLLC      = 0;
  ptlSTAClient->wSTADesc.ucSwFrameTXXlation = 0;
  ptlSTAClient->wSTADesc.ucSwFrameRXXlation = 0;
  ptlSTAClient->wSTADesc.ucProtectedFrame = 0;

  /*-------------------------------------------------------------------------
    AMSDU information for the STA
    NOTE(review): a non-NULL chain root at cleanup time is only logged,
    not freed - presumably ownership is elsewhere; confirm before changing.
   -------------------------------------------------------------------------*/
  if ( ( 0 != ucEmpty ) &&
       ( NULL != ptlSTAClient->vosAMSDUChainRoot ))
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_FATAL,
               "WLAN TL:Non NULL vosAMSDUChainRoot on WLANTL_CleanSTA, "
               "suspecting a memory corruption"));

  }

  ptlSTAClient->vosAMSDUChain     = NULL;
  ptlSTAClient->vosAMSDUChainRoot = NULL;

  vos_mem_zero( (v_PVOID_t)ptlSTAClient->aucMPDUHeader,
                 WLANTL_MPDU_HEADER_LEN);
  ptlSTAClient->ucMPDUHeaderLen    = 0;

  /*-------------------------------------------------------------------------
    Reordering information for the STA - for each active BA session:
    hand the shared reorder buffer back (mark available, wipe contents),
    destroy the aging timer, then zero the whole session record
   -------------------------------------------------------------------------*/
  for ( ucIndex = 0; ucIndex < WLAN_MAX_TID ; ucIndex++)
  {
    if(0 == ptlSTAClient->atlBAReorderInfo[ucIndex].ucExists)
    {
      continue;
    }
    if(NULL != ptlSTAClient->atlBAReorderInfo[ucIndex].reorderBuffer)
    {
      ptlSTAClient->atlBAReorderInfo[ucIndex].reorderBuffer->isAvailable =
        VOS_TRUE;
      memset(&ptlSTAClient->atlBAReorderInfo[ucIndex].reorderBuffer->arrayBuffer[0],
             0,
             WLANTL_MAX_WINSIZE * sizeof(v_PVOID_t));
    }
    vos_timer_destroy(&ptlSTAClient->atlBAReorderInfo[ucIndex].agingTimer);
    memset(&ptlSTAClient->atlBAReorderInfo[ucIndex],
           0,
           sizeof(WLANTL_BAReorderType));
  }

  /*-------------------------------------------------------------------------
    QOS information for the STA - reset AC round-robin bookkeeping
   -------------------------------------------------------------------------*/
  ptlSTAClient->ucCurrentAC     = WLANTL_AC_HIGH_PRIO;
  ptlSTAClient->ucCurrentWeight = 0;
  ptlSTAClient->ucServicedAC    = WLANTL_AC_BK;

  vos_mem_zero( ptlSTAClient->aucACMask, sizeof(ptlSTAClient->aucACMask));
  vos_mem_zero( &ptlSTAClient->wUAPSDInfo, sizeof(ptlSTAClient->wUAPSDInfo));

  /*--------------------------------------------------------------------
    Stats info - per-TID rx/tx counters and averaged RSSI
   --------------------------------------------------------------------*/
  vos_mem_zero( ptlSTAClient->auRxCount,
                sizeof(ptlSTAClient->auRxCount[0])* WLAN_MAX_TID);
  vos_mem_zero( ptlSTAClient->auTxCount,
                sizeof(ptlSTAClient->auTxCount[0])* WLAN_MAX_TID);
  ptlSTAClient->rssiAvg = 0;

  /*Tx not suspended and station fully registered*/
  vos_atomic_set_U8( &ptlSTAClient->ucTxSuspended, 0);
  vos_atomic_set_U8( &ptlSTAClient->ucNoMoreData, 1);

  /* signatures are only invalidated on a non-empty clean */
  if ( 0 == ucEmpty )
  {
    ptlSTAClient->wSTADesc.ucUcastSig  = WLAN_TL_INVALID_U_SIG;
    ptlSTAClient->wSTADesc.ucBcastSig  = WLAN_TL_INVALID_B_SIG;
  }

  ptlSTAClient->ucExists       = 0;

  /*--------------------------------------------------------------------
    Statistics info
   --------------------------------------------------------------------*/
  memset(&ptlSTAClient->trafficStatistics,
         0, sizeof(WLANTL_TRANSFER_STA_TYPE));

  /*fix me!!: add new values from the TL Cb for cleanup */
  return VOS_STATUS_SUCCESS;
}/* WLANTL_CleanSTA */

/*==========================================================================

  FUNCTION    WLANTL_EnableUAPSDForAC

  DESCRIPTION
    Called by HDD to enable UAPSD. TL in turn calls WDA API to enable the
    logic in FW/SLM to start sending trigger frames. Previously TL had the
    trigger frame logic which later moved down to FW. Hence
    HDD -> TL -> WDA -> FW call flow.

  DEPENDENCIES
    The TL must be initialized before this function can be called.
PARAMETERS

    IN
    pvosGCtx:       pointer to the global vos context; a handle to TL's
                    control block can be extracted from its context
    ucSTAId:        station Id
    ucAC:           AC for which U-APSD is being enabled
    ucTid:          TID for which U-APSD is setup
    ucUP:           used to place in the trigger frame generation
    ucServiceInt:   service interval used by TL to send trigger frames
    ucSuspendInt:   suspend interval used by TL to determine that an
                    app is idle and should start sending trigg frms less
                    often
    wTSDir:         direction of TSpec

  RETURN VALUE
    The result code associated with performing the operation

    VOS_STATUS_SUCCESS: Everything is good :)

  SIDE EFFECTS

============================================================================*/
VOS_STATUS
WLANTL_EnableUAPSDForAC
(
  v_PVOID_t          pvosGCtx,
  v_U8_t             ucSTAId,
  WLANTL_ACEnumType  ucAC,
  v_U8_t             ucTid,
  v_U8_t             ucUP,
  v_U32_t            uServiceInt,
  v_U32_t            uSuspendInt,
  WLANTL_TSDirType   wTSDir
)
{

  WLANTL_CbType*  pTLCb      = NULL;
  VOS_STATUS      vosStatus  = VOS_STATUS_SUCCESS;
  tUapsdInfo      halUAPSDInfo;
 /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /*------------------------------------------------------------------------
    Sanity check
    Extract TL control block
   ------------------------------------------------------------------------*/
  pTLCb = VOS_GET_TL_CB(pvosGCtx);
  if (( NULL == pTLCb ) || WLANTL_STA_ID_INVALID( ucSTAId )
      ||   WLANTL_AC_INVALID(ucAC))
  {
    TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
               "WLAN TL:Invalid input params on WLANTL_EnableUAPSDForAC"
               " TL: %p  STA: %d  AC: %d",
               pTLCb, ucSTAId, ucAC));
    return VOS_STATUS_E_FAULT;
  }

  if ( NULL == pTLCb->atlSTAClients[ucSTAId] )
  {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
          "WLAN TL:Client Memory was not allocated on %s", __func__));
      return VOS_STATUS_E_FAILURE;
  }

  /*Set this flag in order to remember that this is a trigger enabled AC*/
  pTLCb->atlSTAClients[ucSTAId]->wUAPSDInfo[ucAC].ucSet = 1;

#ifdef FEATURE_WLAN_TDLS
  /* for TDLS peers the FW notification below is skipped entirely */
  if(pTLCb->atlSTAClients[ucSTAId]->wSTADesc.wSTAType != WLAN_STA_TDLS)
#endif
  {
    /* a zero service interval is logged but deliberately not rejected */
    if( 0 == uServiceInt )
    {
      TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR,
                 "WLAN TL:Input params on WLANTL_EnableUAPSDForAC"
                 " SI: %d", uServiceInt ));
    }

    TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH,
               "WLAN TL:Enabling U-APSD in FW for STA: %d AC: %d SI: %d SPI: %d "
               "DI: %d",
               ucSTAId, ucAC, uServiceInt, uSuspendInt,
               pTLCb->tlConfigInfo.uDelayedTriggerFrmInt));

    /*Save all info for HAL*/
    halUAPSDInfo.staidx         = ucSTAId;
    halUAPSDInfo.ac             = ucAC;
    halUAPSDInfo.up             = ucUP;
    halUAPSDInfo.srvInterval    = uServiceInt;
    halUAPSDInfo.susInterval    = uSuspendInt;
    halUAPSDInfo.delayInterval  = pTLCb->tlConfigInfo.uDelayedTriggerFrmInt;

    /*Notify HAL*/
    vosStatus = WDA_EnableUapsdAcParams(pvosGCtx, ucSTAId, &halUAPSDInfo);
  }
  return vosStatus;

}/*WLANTL_EnableUAPSDForAC*/

/*==========================================================================

  FUNCTION    WLANTL_DisableUAPSDForAC

  DESCRIPTION
    Called by HDD to disable UAPSD. TL in turn calls WDA API to disable the
    logic in FW/SLM to stop sending trigger frames. Previously TL had the
    trigger frame logic which later moved down to FW. Hence
    HDD -> TL -> WDA -> FW call flow.

  DEPENDENCIES
    The TL must be initialized before this function can be called.
PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to TL's control block can be extracted from its context ucSTAId: station Id ucAC: AC for which U-APSD is being enabled RETURN VALUE The result code associated with performing the operation VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANTL_DisableUAPSDForAC ( v_PVOID_t pvosGCtx, v_U8_t ucSTAId, WLANTL_ACEnumType ucAC ) { WLANTL_CbType* pTLCb; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Sanity check Extract TL control block ------------------------------------------------------------------------*/ pTLCb = VOS_GET_TL_CB(pvosGCtx); if (( NULL == pTLCb ) || WLANTL_STA_ID_INVALID( ucSTAId ) || WLANTL_AC_INVALID(ucAC) ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid input params on WLANTL_DisableUAPSDForAC" " TL: %p STA: %d AC: %d", pTLCb, ucSTAId, ucAC )); return VOS_STATUS_E_FAULT; } if ( NULL == pTLCb->atlSTAClients[ucSTAId] ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Client Memory was not allocated on %s", __func__)); return VOS_STATUS_E_FAILURE; } /*Reset this flag as this is no longer a trigger enabled AC*/ pTLCb->atlSTAClients[ucSTAId]->wUAPSDInfo[ucAC].ucSet = 1; TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL:Disabling U-APSD in FW for STA: %d AC: %d ", ucSTAId, ucAC)); /*Notify HAL*/ WDA_DisableUapsdAcParams(pvosGCtx, ucSTAId, ucAC); return VOS_STATUS_SUCCESS; }/* WLANTL_DisableUAPSDForAC */ #if defined WLAN_FEATURE_NEIGHBOR_ROAMING /*========================================================================== FUNCTION WLANTL_RegRSSIIndicationCB DESCRIPTION Registration function to get notification if RSSI cross threshold. 
Client should register threshold, direction, and notification callback function pointer DEPENDENCIES NONE PARAMETERS in pAdapter - Global handle in rssiValue - RSSI threshold value in triggerEvent - Cross direction should be notified UP, DOWN, and CROSS in crossCBFunction - Notification CB Function in usrCtxt - user context RETURN VALUE VOS_STATUS SIDE EFFECTS NONE ============================================================================*/ VOS_STATUS WLANTL_RegRSSIIndicationCB ( v_PVOID_t pAdapter, v_S7_t rssiValue, v_U8_t triggerEvent, WLANTL_RSSICrossThresholdCBType crossCBFunction, VOS_MODULE_ID moduleID, v_PVOID_t usrCtxt ) { VOS_STATUS status = VOS_STATUS_SUCCESS; status = WLANTL_HSRegRSSIIndicationCB(pAdapter, rssiValue, triggerEvent, crossCBFunction, moduleID, usrCtxt); return status; } /*========================================================================== FUNCTION WLANTL_DeregRSSIIndicationCB DESCRIPTION Remove specific threshold from list DEPENDENCIES NONE PARAMETERS in pAdapter - Global handle in rssiValue - RSSI threshold value in triggerEvent - Cross direction should be notified UP, DOWN, and CROSS RETURN VALUE VOS_STATUS SIDE EFFECTS NONE ============================================================================*/ VOS_STATUS WLANTL_DeregRSSIIndicationCB ( v_PVOID_t pAdapter, v_S7_t rssiValue, v_U8_t triggerEvent, WLANTL_RSSICrossThresholdCBType crossCBFunction, VOS_MODULE_ID moduleID ) { VOS_STATUS status = VOS_STATUS_SUCCESS; status = WLANTL_HSDeregRSSIIndicationCB(pAdapter, rssiValue, triggerEvent, crossCBFunction, moduleID); return status; } /*========================================================================== FUNCTION WLANTL_SetAlpha DESCRIPTION ALPLA is weight value to calculate AVG RSSI avgRSSI = (ALPHA * historyRSSI) + ((10 - ALPHA) * newRSSI) avgRSSI has (ALPHA * 10)% of history RSSI weight and (10 - ALPHA)% of newRSSI weight This portion is dynamically configurable. Default is ? 
DEPENDENCIES NONE PARAMETERS in pAdapter - Global handle in valueAlpah - ALPHA RETURN VALUE VOS_STATUS SIDE EFFECTS NONE ============================================================================*/ VOS_STATUS WLANTL_SetAlpha ( v_PVOID_t pAdapter, v_U8_t valueAlpha ) { VOS_STATUS status = VOS_STATUS_SUCCESS; status = WLANTL_HSSetAlpha(pAdapter, valueAlpha); return status; } /*========================================================================== FUNCTION DESCRIPTION PARAMETERS RETURN VALUE ============================================================================*/ VOS_STATUS WLANTL_BMPSRSSIRegionChangedNotification ( v_PVOID_t pAdapter, tpSirRSSINotification pRSSINotification ) { VOS_STATUS status = VOS_STATUS_SUCCESS; status = WLANTL_HSBMPSRSSIRegionChangedNotification(pAdapter, pRSSINotification); return status; } /*========================================================================== FUNCTION WLANTL_RegGetTrafficStatus DESCRIPTION Registration function for traffic status monitoring During measure period count data frames. If frame count is larger then IDLE threshold set as traffic ON or OFF. 
And traffic status is changed send report to client with registered callback function DEPENDENCIES NONE PARAMETERS in pAdapter - Global handle in idleThreshold - Traffic on or off threshold in measurePeriod - Traffic state check period in trfficStatusCB - traffic status changed notification CB function in usrCtxt - user context RETURN VALUE VOS_STATUS SIDE EFFECTS NONE ============================================================================*/ VOS_STATUS WLANTL_RegGetTrafficStatus ( v_PVOID_t pAdapter, v_U32_t idleThreshold, v_U32_t measurePeriod, WLANTL_TrafficStatusChangedCBType trfficStatusCB, v_PVOID_t usrCtxt ) { VOS_STATUS status = VOS_STATUS_SUCCESS; status = WLANTL_HSRegGetTrafficStatus(pAdapter, idleThreshold, measurePeriod, trfficStatusCB, usrCtxt); return status; } #endif /*========================================================================== FUNCTION WLANTL_GetStatistics DESCRIPTION Get traffic statistics for identified station DEPENDENCIES NONE PARAMETERS in pAdapter - Global handle in statType - specific statistics field to reset out statBuffer - traffic statistics buffer RETURN VALUE VOS_STATUS SIDE EFFECTS NONE ============================================================================*/ VOS_STATUS WLANTL_GetStatistics ( v_PVOID_t pAdapter, WLANTL_TRANSFER_STA_TYPE *statBuffer, v_U8_t STAid ) { WLANTL_CbType *pTLCb = VOS_GET_TL_CB(pAdapter); WLANTL_STAClientType* pClientSTA = NULL; VOS_STATUS status = VOS_STATUS_SUCCESS; WLANTL_TRANSFER_STA_TYPE *statistics = NULL; /*------------------------------------------------------------------------ Sanity check Extract TL control block ------------------------------------------------------------------------*/ if ( NULL == pTLCb ) { TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL pointer on WLANTL_GetStatistics")); return VOS_STATUS_E_FAULT; } pClientSTA = pTLCb->atlSTAClients[STAid]; if ( NULL == pClientSTA ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, 
"WLAN TL:Client Memory was not allocated on %s", __func__)); return VOS_STATUS_E_FAILURE; } if(0 == pClientSTA->ucExists) { TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL: %d STA ID does not exist", STAid)); return VOS_STATUS_E_INVAL; } if(NULL == statBuffer) { TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL statistics buffer pointer on WLANTL_GetStatistics")); return VOS_STATUS_E_INVAL; } statistics = &pClientSTA->trafficStatistics; vos_mem_copy(statBuffer, statistics, sizeof(WLANTL_TRANSFER_STA_TYPE)); return status; } /*========================================================================== FUNCTION WLANTL_ResetStatistics DESCRIPTION Reset statistics structure for identified station ID Reset means set values as 0 DEPENDENCIES NONE PARAMETERS in pAdapter - Global handle in statType - specific statistics field to reset RETURN VALUE VOS_STATUS SIDE EFFECTS NONE ============================================================================*/ VOS_STATUS WLANTL_ResetStatistics ( v_PVOID_t pAdapter, v_U8_t STAid ) { WLANTL_CbType *pTLCb = VOS_GET_TL_CB(pAdapter); WLANTL_STAClientType* pClientSTA = NULL; VOS_STATUS status = VOS_STATUS_SUCCESS; WLANTL_TRANSFER_STA_TYPE *statistics = NULL; /*------------------------------------------------------------------------ Sanity check Extract TL control block ------------------------------------------------------------------------*/ if (NULL == pTLCb) { TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL pointer on WLANTL_GetStatistics")); return VOS_STATUS_E_FAULT; } pClientSTA = pTLCb->atlSTAClients[STAid]; if ( NULL == pClientSTA ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Client Memory was not allocated on %s", __func__)); return VOS_STATUS_E_FAILURE; } if(0 == pClientSTA->ucExists) { TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL: %d STA ID does not exist", STAid)); return VOS_STATUS_E_INVAL; } 
statistics = &pClientSTA->trafficStatistics; vos_mem_zero((v_VOID_t *)statistics, sizeof(WLANTL_TRANSFER_STA_TYPE)); return status; } /*========================================================================== FUNCTION WLANTL_GetSpecStatistic DESCRIPTION Get specific field within statistics structure for identified station ID DEPENDENCIES NONE PARAMETERS in pAdapter - Global handle in statType - specific statistics field to reset in STAid - Station ID out buffer - Statistic value RETURN VALUE VOS_STATUS SIDE EFFECTS NONE ============================================================================*/ VOS_STATUS WLANTL_GetSpecStatistic ( v_PVOID_t pAdapter, WLANTL_TRANSFER_STATIC_TYPE statType, v_U32_t *buffer, v_U8_t STAid ) { WLANTL_CbType *pTLCb = VOS_GET_TL_CB(pAdapter); WLANTL_STAClientType* pClientSTA = NULL; VOS_STATUS status = VOS_STATUS_SUCCESS; WLANTL_TRANSFER_STA_TYPE *statistics = NULL; /*------------------------------------------------------------------------ Sanity check Extract TL control block ------------------------------------------------------------------------*/ if (NULL == pTLCb) { TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL pointer on WLANTL_GetStatistics")); return VOS_STATUS_E_FAULT; } pClientSTA = pTLCb->atlSTAClients[STAid]; if ( NULL == pClientSTA ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Client Memory was not allocated on %s", __func__)); return VOS_STATUS_E_FAILURE; } if(0 == pClientSTA->ucExists) { TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL: %d STA ID does not exist", STAid)); return VOS_STATUS_E_INVAL; } if(NULL == buffer) { TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL statistic buffer pointer on WLANTL_GetStatistics")); return VOS_STATUS_E_INVAL; } statistics = &pClientSTA->trafficStatistics; switch(statType) { case WLANTL_STATIC_TX_UC_FCNT: *buffer = statistics->txUCFcnt; break; case WLANTL_STATIC_TX_MC_FCNT: 
*buffer = statistics->txMCFcnt; break; case WLANTL_STATIC_TX_BC_FCNT: *buffer = statistics->txBCFcnt; break; case WLANTL_STATIC_TX_UC_BCNT: *buffer = statistics->txUCBcnt; break; case WLANTL_STATIC_TX_MC_BCNT: *buffer = statistics->txMCBcnt; break; case WLANTL_STATIC_TX_BC_BCNT: *buffer = statistics->txBCBcnt; break; case WLANTL_STATIC_RX_UC_FCNT: *buffer = statistics->rxUCFcnt; break; case WLANTL_STATIC_RX_MC_FCNT: *buffer = statistics->rxMCFcnt; break; case WLANTL_STATIC_RX_BC_FCNT: *buffer = statistics->rxBCFcnt; break; case WLANTL_STATIC_RX_UC_BCNT: *buffer = statistics->rxUCBcnt; break; case WLANTL_STATIC_RX_MC_BCNT: *buffer = statistics->rxMCBcnt; break; case WLANTL_STATIC_RX_BC_BCNT: *buffer = statistics->rxBCBcnt; break; case WLANTL_STATIC_RX_BCNT: *buffer = statistics->rxBcnt; break; case WLANTL_STATIC_RX_BCNT_CRC_OK: *buffer = statistics->rxBcntCRCok; break; case WLANTL_STATIC_RX_RATE: *buffer = statistics->rxRate; break; default: *buffer = 0; status = VOS_STATUS_E_INVAL; break; } return status; } /*========================================================================== FUNCTION WLANTL_ResetSpecStatistic DESCRIPTION Reset specific field within statistics structure for identified station ID Reset means set as 0 DEPENDENCIES NONE PARAMETERS in pAdapter - Global handle in statType - specific statistics field to reset in STAid - Station ID RETURN VALUE VOS_STATUS SIDE EFFECTS NONE ============================================================================*/ VOS_STATUS WLANTL_ResetSpecStatistic ( v_PVOID_t pAdapter, WLANTL_TRANSFER_STATIC_TYPE statType, v_U8_t STAid ) { WLANTL_CbType *pTLCb = VOS_GET_TL_CB(pAdapter); WLANTL_STAClientType* pClientSTA = NULL; VOS_STATUS status = VOS_STATUS_SUCCESS; WLANTL_TRANSFER_STA_TYPE *statistics = NULL; /*------------------------------------------------------------------------ Sanity check Extract TL control block ------------------------------------------------------------------------*/ if (NULL == pTLCb) { 
TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL pointer on WLANTL_GetStatistics")); return VOS_STATUS_E_FAULT; } pClientSTA = pTLCb->atlSTAClients[STAid]; if ( NULL == pClientSTA ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Client Memory was not allocated on %s", __func__)); return VOS_STATUS_E_FAILURE; } if(0 == pClientSTA->ucExists) { TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL: %d STA ID does not exist", STAid)); return VOS_STATUS_E_INVAL; } statistics = &pClientSTA->trafficStatistics; switch(statType) { case WLANTL_STATIC_TX_UC_FCNT: statistics->txUCFcnt = 0; break; case WLANTL_STATIC_TX_MC_FCNT: statistics->txMCFcnt = 0; break; case WLANTL_STATIC_TX_BC_FCNT: statistics->txBCFcnt = 0; break; case WLANTL_STATIC_TX_UC_BCNT: statistics->txUCBcnt = 0; break; case WLANTL_STATIC_TX_MC_BCNT: statistics->txMCBcnt = 0; break; case WLANTL_STATIC_TX_BC_BCNT: statistics->txBCBcnt = 0; break; case WLANTL_STATIC_RX_UC_FCNT: statistics->rxUCFcnt = 0; break; case WLANTL_STATIC_RX_MC_FCNT: statistics->rxMCFcnt = 0; break; case WLANTL_STATIC_RX_BC_FCNT: statistics->rxBCFcnt = 0; break; case WLANTL_STATIC_RX_UC_BCNT: statistics->rxUCBcnt = 0; break; case WLANTL_STATIC_RX_MC_BCNT: statistics->rxMCBcnt = 0; break; case WLANTL_STATIC_RX_BC_BCNT: statistics->rxBCBcnt = 0; break; case WLANTL_STATIC_RX_BCNT: statistics->rxBcnt = 0; break; case WLANTL_STATIC_RX_BCNT_CRC_OK: statistics->rxBcntCRCok = 0; break; case WLANTL_STATIC_RX_RATE: statistics->rxRate = 0; break; default: status = VOS_STATUS_E_INVAL; break; } return status; } /*========================================================================== FUNCTION DESCRIPTION Read RSSI value out of a RX BD PARAMETERS: Caller must validate all parameters RETURN VALUE ============================================================================*/ VOS_STATUS WLANTL_ReadRSSI ( v_PVOID_t pAdapter, v_PVOID_t pBDHeader, v_U8_t STAid ) { WLANTL_CbType *tlCtxt = 
VOS_GET_TL_CB(pAdapter); v_S7_t currentRSSI, currentRSSI0, currentRSSI1; if(NULL == tlCtxt) { TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "%s Invalid TL handle", __func__)); return VOS_STATUS_E_INVAL; } if ( NULL == tlCtxt->atlSTAClients[STAid] ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Client Memory was not allocated on %s", __func__)); return VOS_STATUS_E_FAILURE; } currentRSSI0 = WLANTL_GETRSSI0(pBDHeader); currentRSSI1 = WLANTL_GETRSSI1(pBDHeader); currentRSSI = (currentRSSI0 > currentRSSI1) ? currentRSSI0 : currentRSSI1; tlCtxt->atlSTAClients[STAid]->rssiAvg = currentRSSI; return VOS_STATUS_SUCCESS; } /*========================================================================== FUNCTION DESCRIPTION Read SNR value out of a RX BD PARAMETERS: Caller must validate all parameters RETURN VALUE ============================================================================*/ VOS_STATUS WLANTL_ReadSNR ( v_PVOID_t pAdapter, v_PVOID_t pBDHeader, v_U8_t STAid ) { WLANTL_CbType *tlCtxt = VOS_GET_TL_CB(pAdapter); v_S7_t currentSNR; if (NULL == tlCtxt) { TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "%s Invalid TL handle", __func__)); return VOS_STATUS_E_INVAL; } if (NULL == tlCtxt->atlSTAClients[STAid]) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Client Memory was not allocated on %s", __func__)); return VOS_STATUS_E_FAILURE; } currentSNR = WLANTL_GETSNR(pBDHeader); /* SNR reported in the Buffer Descriptor is scaled up by 2(SNR*2), * Get the correct SNR value */ currentSNR = currentSNR >> 1; /* SNR reported by HW cannot be more than 35dB due to HW limitations */ currentSNR = (WLANTL_MAX_HW_SNR > currentSNR ? 
currentSNR : WLANTL_MAX_HW_SNR); TLLOG2(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "%s: snrsum: %d snridx: %d prevsnravg: %d", __func__, tlCtxt->atlSTAClients[STAid]->snrSum, tlCtxt->atlSTAClients[STAid]->snrIdx, tlCtxt->atlSTAClients[STAid]->prevSnrAvg)); /* The SNR returned for all purposes is the average SNR over * WLANTL_MAX_SNR_DATA_SMAPLES.When data samples * > WLANTL_MAX_SNR_DATA_SAMPLES are obtained, * store the average of the samples in prevSnrAvg * and start a new averaging window. The prevSnrAvg is used when * enough data samples are not available when applications * actually query for SNR. * * SEE: WLANTL_GetSnr() */ if (tlCtxt->atlSTAClients[STAid]->snrIdx >= WLANTL_MAX_SNR_DATA_SAMPLES) { tlCtxt->atlSTAClients[STAid]->prevSnrAvg = tlCtxt->atlSTAClients[STAid]->snrSum / tlCtxt->atlSTAClients[STAid]->snrIdx; tlCtxt->atlSTAClients[STAid]->snrSum = 0; tlCtxt->atlSTAClients[STAid]->snrIdx = 0; } tlCtxt->atlSTAClients[STAid]->snrSum += currentSNR; tlCtxt->atlSTAClients[STAid]->snrIdx += 1; return VOS_STATUS_SUCCESS; } /* DESCRIPTION TL returns the weight currently maintained in TL. 
IN pvosGCtx: pointer to the global vos context; a handle to TL's or SME's control block can be extracted from its context OUT pACWeights: Caller allocated memory for filling in weights RETURN VALUE VOS_STATUS */ VOS_STATUS WLANTL_GetACWeights ( v_PVOID_t pvosGCtx, v_U8_t* pACWeights ) { WLANTL_CbType* pTLCb = NULL; v_U8_t ucIndex; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Sanity check ------------------------------------------------------------------------*/ if ( NULL == pACWeights ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid parameter sent on WLANTL_GetACWeights")); return VOS_STATUS_E_INVAL; } /*------------------------------------------------------------------------ Extract TL control block and check existance ------------------------------------------------------------------------*/ pTLCb = VOS_GET_TL_CB(pvosGCtx); if ( NULL == pTLCb ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_GetACWeights")); return VOS_STATUS_E_FAULT; } for ( ucIndex = 0; ucIndex < WLANTL_MAX_AC ; ucIndex++) { pACWeights[ucIndex] = pTLCb->tlConfigInfo.ucAcWeights[ucIndex]; } return VOS_STATUS_SUCCESS; } /* DESCRIPTION Change the weight currently maintained by TL. 
IN pvosGCtx: pointer to the global vos context; a handle to TL's or SME's control block can be extracted from its context pACWeights: Caller allocated memory contain the weights to use RETURN VALUE VOS_STATUS */ VOS_STATUS WLANTL_SetACWeights ( v_PVOID_t pvosGCtx, v_U8_t* pACWeights ) { WLANTL_CbType* pTLCb = NULL; v_U8_t ucIndex; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Sanity check ------------------------------------------------------------------------*/ if ( NULL == pACWeights ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid parameter sent on WLANTL_GetACWeights")); return VOS_STATUS_E_INVAL; } /*------------------------------------------------------------------------ Extract TL control block and check existance ------------------------------------------------------------------------*/ pTLCb = VOS_GET_TL_CB(pvosGCtx); if ( NULL == pTLCb ) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Invalid TL pointer from pvosGCtx on WLANTL_GetACWeights")); return VOS_STATUS_E_FAULT; } for ( ucIndex = 0; ucIndex < WLANTL_MAX_AC ; ucIndex++) { pTLCb->tlConfigInfo.ucAcWeights[ucIndex] = pACWeights[ucIndex]; } pTLCb->tlConfigInfo.ucAcWeights[WLANTL_AC_HIGH_PRIO] = pACWeights[WLANTL_AC_VO]; return VOS_STATUS_SUCCESS; } /*========================================================================== FUNCTION DESCRIPTION PARAMETERS RETURN VALUE ============================================================================*/ void WLANTL_PowerStateChangedCB ( v_PVOID_t pAdapter, tPmcState newState ) { WLANTL_CbType *tlCtxt = VOS_GET_TL_CB(pAdapter); if (NULL == tlCtxt) { VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "%s: Invalid TL Control Block", __func__ ); return; } VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO, "Power state changed, new state is %d", newState ); switch(newState) { case FULL_POWER: tlCtxt->isBMPS = 
VOS_FALSE; break; case BMPS: #if defined WLAN_FEATURE_NEIGHBOR_ROAMING WLANTL_SetFWRSSIThresholds(pAdapter); #endif tlCtxt->isBMPS = VOS_TRUE; break; case IMPS: case LOW_POWER: case REQUEST_BMPS: case REQUEST_FULL_POWER: case REQUEST_IMPS: case STOPPED: case REQUEST_START_UAPSD: case REQUEST_STOP_UAPSD: case UAPSD: case REQUEST_STANDBY: case STANDBY: case REQUEST_ENTER_WOWL: case REQUEST_EXIT_WOWL: case WOWL: TLLOGW(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN, "Not handle this events %d", newState )); break; default: TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "Not a valid event %d", newState )); break; } return; } /*========================================================================== FUNCTION WLANTL_GetEtherType DESCRIPTION Extract Ether type information from the BD DEPENDENCIES NONE PARAMETERS in aucBDHeader - BD header in vosDataBuff - data buffer in ucMPDUHLen - MPDU header length out pUsEtherType - pointer to Ethertype RETURN VALUE VOS_STATUS_SUCCESS : if the EtherType is successfully extracted VOS_STATUS_FAILURE : if the EtherType extraction failed and the packet was dropped SIDE EFFECTS NONE ============================================================================*/ static VOS_STATUS WLANTL_GetEtherType ( v_U8_t * aucBDHeader, vos_pkt_t * vosDataBuff, v_U8_t ucMPDUHLen, v_U16_t * pUsEtherType ) { v_U8_t ucOffset; v_U16_t usEtherType = *pUsEtherType; v_SIZE_t usLLCSize = sizeof(usEtherType); VOS_STATUS vosStatus = VOS_STATUS_SUCCESS; /*------------------------------------------------------------------------ Check if LLC is present - if not, TL is unable to determine type ------------------------------------------------------------------------*/ if ( VOS_FALSE == WDA_IS_RX_LLC_PRESENT( aucBDHeader ) ) { ucOffset = WLANTL_802_3_HEADER_LEN - sizeof(usEtherType); } else { ucOffset = ucMPDUHLen + WLANTL_LLC_PROTO_TYPE_OFFSET; } /*------------------------------------------------------------------------ Extract LLC type 
------------------------------------------------------------------------*/ vosStatus = vos_pkt_extract_data( vosDataBuff, ucOffset, (v_PVOID_t)&usEtherType, &usLLCSize); if (( VOS_STATUS_SUCCESS != vosStatus ) || ( sizeof(usEtherType) != usLLCSize )) { TLLOGE(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL:Error extracting Ether type from data packet")); /* Drop packet */ vos_pkt_return_packet(vosDataBuff); vosStatus = VOS_STATUS_E_FAILURE; } else { TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL:Ether type retrieved before endianess conv: %d", usEtherType)); usEtherType = vos_be16_to_cpu(usEtherType); *pUsEtherType = usEtherType; TLLOG2(VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL:Ether type retrieved: %d", usEtherType)); } return vosStatus; } /*========================================================================== FUNCTION WLANTL_GetSoftAPStatistics DESCRIPTION Collect the cumulative statistics for all Softap stations DEPENDENCIES NONE PARAMETERS in pvosGCtx - Pointer to the global vos context bReset - If set TL statistics will be cleared after reading out statsSum - pointer to collected statistics RETURN VALUE VOS_STATUS_SUCCESS : if the Statistics are successfully extracted SIDE EFFECTS NONE ============================================================================*/ VOS_STATUS WLANTL_GetSoftAPStatistics(v_PVOID_t pAdapter, WLANTL_TRANSFER_STA_TYPE *statsSum, v_BOOL_t bReset) { v_U8_t i = 0; VOS_STATUS vosStatus = VOS_STATUS_SUCCESS; WLANTL_CbType *pTLCb = VOS_GET_TL_CB(pAdapter); WLANTL_TRANSFER_STA_TYPE statBufferTemp; vos_mem_zero((v_VOID_t *)&statBufferTemp, sizeof(WLANTL_TRANSFER_STA_TYPE)); vos_mem_zero((v_VOID_t *)statsSum, sizeof(WLANTL_TRANSFER_STA_TYPE)); if ( NULL == pTLCb ) { return VOS_STATUS_E_FAULT; } // Sum up all the statistics for stations of Soft AP from TL for (i = 0; i < WLAN_MAX_STA_COUNT; i++) { if ( NULL == pTLCb->atlSTAClients[i]) { continue; } if 
(pTLCb->atlSTAClients[i]->wSTADesc.wSTAType == WLAN_STA_SOFTAP) { vosStatus = WLANTL_GetStatistics(pAdapter, &statBufferTemp, i);// Can include staId 1 because statistics not collected for it if (!VOS_IS_STATUS_SUCCESS(vosStatus)) return VOS_STATUS_E_FAULT; // Add to the counters statsSum->txUCFcnt += statBufferTemp.txUCFcnt; statsSum->txMCFcnt += statBufferTemp.txMCFcnt; statsSum->txBCFcnt += statBufferTemp.txBCFcnt; statsSum->txUCBcnt += statBufferTemp.txUCBcnt; statsSum->txMCBcnt += statBufferTemp.txMCBcnt; statsSum->txBCBcnt += statBufferTemp.txBCBcnt; statsSum->rxUCFcnt += statBufferTemp.rxUCFcnt; statsSum->rxMCFcnt += statBufferTemp.rxMCFcnt; statsSum->rxBCFcnt += statBufferTemp.rxBCFcnt; statsSum->rxUCBcnt += statBufferTemp.rxUCBcnt; statsSum->rxMCBcnt += statBufferTemp.rxMCBcnt; statsSum->rxBCBcnt += statBufferTemp.rxBCBcnt; if (bReset) { vosStatus = WLANTL_ResetStatistics(pAdapter, i); if (!VOS_IS_STATUS_SUCCESS(vosStatus)) return VOS_STATUS_E_FAULT; } } } return vosStatus; } /*=============================================================================== FUNCTION WLANTL_IsReplayPacket DESCRIPTION This function does replay check for valid stations DEPENDENCIES Validity of replay check must be done before the function is called PARAMETERS currentReplayCounter current replay counter taken from RX BD previousReplayCounter previous replay counter taken from TL CB RETRUN VOS_TRUE packet is a replay packet VOS_FALSE packet is not a replay packet SIDE EFFECTS none ===============================================================================*/ v_BOOL_t WLANTL_IsReplayPacket ( v_U64_t ullcurrentReplayCounter, v_U64_t ullpreviousReplayCounter ) { /* Do the replay check by comparing previous received replay counter with current received replay counter*/ if(ullpreviousReplayCounter < ullcurrentReplayCounter) { /* Valid packet not replay */ return VOS_FALSE; } else { /* Current packet number is less than or equal to previuos received packet no, this means current 
packet is replay packet */ VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, "WLAN TL: Replay packet found with replay counter :[0x%llX]",ullcurrentReplayCounter); return VOS_TRUE; } } #if 0 /*=============================================================================== FUNCTION WLANTL_GetReplayCounterFromRxBD DESCRIPTION This function extracts 48-bit replay packet number from RX BD DEPENDENCIES Validity of replay check must be done before the function is called PARAMETERS pucRxHeader pointer to RX BD header RETRUN v_U64_t Packet number extarcted from RX BD SIDE EFFECTS none ===============================================================================*/ v_U64_t WLANTL_GetReplayCounterFromRxBD ( v_U8_t *pucRxBDHeader ) { /* 48-bit replay counter is created as follows from RX BD 6 byte PMI command: Addr : AES/TKIP 0x38 : pn3/tsc3 0x39 : pn2/tsc2 0x3a : pn1/tsc1 0x3b : pn0/tsc0 0x3c : pn5/tsc5 0x3d : pn4/tsc4 */ #ifdef ANI_BIG_BYTE_ENDIAN v_U64_t ullcurrentReplayCounter = 0; /* Getting 48-bit replay counter from the RX BD */ ullcurrentReplayCounter = WLANHAL_RX_BD_GET_PMICMD_20TO23(pucRxBDHeader); ullcurrentReplayCounter <<= 16; ullcurrentReplayCounter |= (( WLANHAL_RX_BD_GET_PMICMD_24TO25(pucRxBDHeader) & 0xFFFF0000) >> 16); return ullcurrentReplayCounter; #else v_U64_t ullcurrentReplayCounter = 0; /* Getting 48-bit replay counter from the RX BD */ ullcurrentReplayCounter = (WLANHAL_RX_BD_GET_PMICMD_24TO25(pucRxBDHeader) & 0x0000FFFF); ullcurrentReplayCounter <<= 32; ullcurrentReplayCounter |= WLANHAL_RX_BD_GET_PMICMD_20TO23(pucRxBDHeader); return ullcurrentReplayCounter; #endif } #endif /*=============================================================================== FUNCTION WLANTL_PostResNeeded DESCRIPTION This function posts message to TL to reserve BD/PDU memory DEPENDENCIES None PARAMETERS pvosGCtx RETURN None SIDE EFFECTS none ===============================================================================*/ void WLANTL_PostResNeeded(v_PVOID_t pvosGCtx) { 
vos_msg_t vosMsg; vosMsg.reserved = 0; vosMsg.bodyptr = NULL; vosMsg.type = WLANTL_TX_RES_NEEDED; VOS_TRACE( VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN TL: BD/PDU available interrupt received, Posting message to TL"); if(!VOS_IS_STATUS_SUCCESS(vos_tx_mq_serialize( VOS_MQ_ID_TL, &vosMsg))) { VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, " %s fails to post message", __func__); } } /*=============================================================================== FUNCTION WLANTL_UpdateRssiBmps DESCRIPTION This function updates the TL's RSSI (in BMPS mode) DEPENDENCIES None PARAMETERS pvosGCtx VOS context VOS Global context staId Station ID Station ID rssi RSSI (BMPS mode) RSSI in BMPS mode RETURN None SIDE EFFECTS none ===============================================================================*/ void WLANTL_UpdateRssiBmps(v_PVOID_t pvosGCtx, v_U8_t staId, v_S7_t rssi) { WLANTL_CbType* pTLCb = VOS_GET_TL_CB(pvosGCtx); if (NULL != pTLCb && NULL != pTLCb->atlSTAClients[staId]) { pTLCb->atlSTAClients[staId]->rssiAvgBmps = rssi; } } /*=============================================================================== FUNCTION WLANTL_UpdateSnrBmps DESCRIPTION This function updates the TL's SNR (in BMPS mode) DEPENDENCIES None PARAMETERS pvosGCtx VOS context VOS Global context staId Station ID Station ID snr SNR (BMPS mode) SNR in BMPS mode RETURN None SIDE EFFECTS none ===============================================================================*/ void WLANTL_UpdateSnrBmps(v_PVOID_t pvosGCtx, v_U8_t staId, v_S7_t snr) { WLANTL_CbType* pTLCb = VOS_GET_TL_CB(pvosGCtx); if (NULL != pTLCb && NULL != pTLCb->atlSTAClients[staId]) { pTLCb->atlSTAClients[staId]->snrAvgBmps = snr; } } /*=============================================================================== FUNCTION WLANTL_UpdateLinkCapacity DESCRIPTION This function updates the STA's Link Capacity in TL DEPENDENCIES None PARAMETERS pvosGCtx VOS context VOS Global context staId Station ID Station ID 
linkCapacity linkCapacity Link Capacity RETURN None SIDE EFFECTS none ===============================================================================*/ void WLANTL_UpdateLinkCapacity(v_PVOID_t pvosGCtx, v_U8_t staId, v_U32_t linkCapacity) { WLANTL_CbType* pTLCb = VOS_GET_TL_CB(pvosGCtx); if (NULL != pTLCb && NULL != pTLCb->atlSTAClients[staId]) { pTLCb->atlSTAClients[staId]->linkCapacity = linkCapacity; } } /*=========================================================================== FUNCTION WLANTL_GetSTALinkCapacity DESCRIPTION Returns Link Capacity of a particular STA. DEPENDENCIES A station must have been registered before its state can be retrieved. PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to TL's control block can be extracted from its context ucSTAId: identifier of the station OUT plinkCapacity: the current link capacity the connection to the given station RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_INVAL: Input parameters are invalid VOS_STATUS_E_FAULT: Station ID is outside array boundaries or pointer to TL cb is NULL ; access would cause a page fault VOS_STATUS_E_EXISTS: Station was not registered VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANTL_GetSTALinkCapacity ( v_PVOID_t pvosGCtx, v_U8_t ucSTAId, v_U32_t *plinkCapacity ) { WLANTL_CbType* pTLCb = NULL; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Sanity check ------------------------------------------------------------------------*/ if ( NULL == plinkCapacity ) { TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, FL("WLAN TL:Invalid parameter"))); return VOS_STATUS_E_INVAL; } if ( WLANTL_STA_ID_INVALID( ucSTAId ) ) { TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, FL("WLAN TL:Invalid station id"))); return 
VOS_STATUS_E_FAULT; } /*------------------------------------------------------------------------ Extract TL control block and check existance ------------------------------------------------------------------------*/ pTLCb = VOS_GET_TL_CB(pvosGCtx); if ( NULL == pTLCb ) { TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, FL("WLAN TL:Invalid TL pointer from pvosGCtx"))); return VOS_STATUS_E_FAULT; } if ( NULL == pTLCb->atlSTAClients[ucSTAId] ) { TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR, FL("WLAN TL:Client Memory was not allocated"))); return VOS_STATUS_E_FAILURE; } if ( 0 == pTLCb->atlSTAClients[ucSTAId]->ucExists ) { TLLOGE(VOS_TRACE(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_WARN, FL("WLAN TL:Station was not previously registered"))); return VOS_STATUS_E_EXISTS; } /*------------------------------------------------------------------------ Get STA state ------------------------------------------------------------------------*/ *plinkCapacity = pTLCb->atlSTAClients[ucSTAId]->linkCapacity; return VOS_STATUS_SUCCESS; }/* WLANTL_GetSTALinkCapacity */
idprophecy/android_kernel_oneplus_msm8974-3.10
drivers/staging/prima/CORE/TL/src/wlan_qct_tl.c
C
gpl-2.0
454,719
<?php /** * Main class * * @author Yithemes * @package YITH WooCommerce Ajax Search * @version 1.1.1 */ if ( !defined( 'YITH_WCAS' ) ) { exit; } // Exit if accessed directly if ( !class_exists( 'YITH_WCAS' ) ) { /** * YITH WooCommerce Ajax Search * * @since 1.0.0 */ class YITH_WCAS { /** * Plugin version * * @var string * @since 1.0.0 */ public $version = YITH_WCAS_VERSION; /** * Plugin object * * @var string * @since 1.0.0 */ public $obj = null; /** * Constructor * * @return mixed|YITH_WCAS_Admin|YITH_WCAS_Frontend * @since 1.0.0 */ public function __construct() { // Load Plugin Framework add_action( 'after_setup_theme', array( $this, 'plugin_fw_loader' ), 1 ); // actions add_action( 'widgets_init', array( $this, 'registerWidgets' ) ); add_action( 'wp_ajax_yith_ajax_search_products', array( $this, 'ajax_search_products' ) ); add_action( 'wp_ajax_nopriv_yith_ajax_search_products', array( $this, 'ajax_search_products' ) ); //register shortcode add_shortcode( 'yith_woocommerce_ajax_search', array( $this, 'add_woo_ajax_search_shortcode' ) ); if ( is_admin() ) { $this->obj = new YITH_WCAS_Admin( $this->version ); } else { $this->obj = new YITH_WCAS_Frontend( $this->version ); } return $this->obj; } /** * Load Plugin Framework * * @since 1.0 * @access public * @return void * @author Andrea Grillo <andrea.grillo@yithemes.com> */ public function plugin_fw_loader() { if ( !defined( 'YIT' ) || !defined( 'YIT_CORE_PLUGIN' ) ) { require_once( 'plugin-fw/yit-plugin.php' ); } } /** * Load template for [yith_woocommerce_ajax_search] shortcode * * @access public * * @param $args array * * @return void * @since 1.0.0 */ public function add_woo_ajax_search_shortcode( $args = array() ) { $args = shortcode_atts( array(), $args ); ob_start(); $wc_get_template = function_exists( 'wc_get_template' ) ? 'wc_get_template' : 'woocommerce_get_template'; $wc_get_template( 'yith-woocommerce-ajax-search.php', $args, '', YITH_WCAS_DIR . 
'templates/' ); return ob_get_clean(); } /** * Load and register widgets * * @access public * @since 1.0.0 */ public function registerWidgets() { register_widget( 'YITH_WCAS_Ajax_Search_Widget' ); } /** * Perform ajax search products */ public function ajax_search_products() { global $woocommerce; $search_keyword = $_REQUEST['query']; $ordering_args = $woocommerce->query->get_catalog_ordering_args( 'title', 'asc' ); $suggestions = array(); $args = array( 's' => apply_filters( 'yith_wcas_ajax_search_products_search_query', $search_keyword ), 'post_type' => 'product', 'post_status' => 'publish', 'ignore_sticky_posts' => 1, 'orderby' => $ordering_args['orderby'], 'order' => $ordering_args['order'], 'posts_per_page' => apply_filters( 'yith_wcas_ajax_search_products_posts_per_page', get_option( 'yith_wcas_posts_per_page' ) ), 'suppress_filters' => false, 'meta_query' => array( array( 'key' => '_visibility', 'value' => array( 'search', 'visible' ), 'compare' => 'IN' ) ) ); if ( isset( $_REQUEST['product_cat'] ) ) { $args['tax_query'] = array( 'relation' => 'AND', array( 'taxonomy' => 'product_cat', 'field' => 'slug', 'terms' => $_REQUEST['product_cat'] ) ); } $products = get_posts( $args ); if ( !empty( $products ) ) { foreach ( $products as $post ) { $product = wc_get_product( $post ); $suggestions[] = apply_filters( 'yith_wcas_suggestion', array( 'id' => $product->id, 'value' => strip_tags($product->get_title()), 'url' => $product->get_permalink() ), $product ); } } else { $suggestions[] = array( 'id' => - 1, 'value' => __( 'No results', 'yit' ), 'url' => '', ); } wp_reset_postdata(); $suggestions = array( 'suggestions' => $suggestions ); echo json_encode( $suggestions ); die(); } } }
booklein/wp
wp-content/plugins/yith-woocommerce-ajax-search/class.yith-wcas.php
PHP
gpl-2.0
5,757
<?php /** * Smarty Internal Plugin Compile Capture * Compiles the {capture} tag * * @package Smarty * @subpackage Compiler * @author Uwe Tews */ /** * Smarty Internal Plugin Compile Capture Class * * @package Smarty * @subpackage Compiler */ class Smarty_Internal_Compile_Capture extends Smarty_Internal_CompileBase { /** * Attribute definition: Overwrites base class. * * @var array * @see Smarty_Internal_CompileBase */ public $shorttag_order = array('name'); /** * Attribute definition: Overwrites base class. * * @var array * @see Smarty_Internal_CompileBase */ public $optional_attributes = array('name', 'assign', 'append'); /** * Compiles code for the {capture} tag * * @param array $args array with attributes from parser * @param object $compiler compiler object * * @return string compiled code */ public function compile($args, $compiler) { // check and get attributes $_attr = $this->getAttributes($compiler, $args); $buffer = isset($_attr['name']) ? $_attr['name'] : "'default'"; $assign = isset($_attr['assign']) ? $_attr['assign'] : 'null'; $append = isset($_attr['append']) ? $_attr['append'] : 'null'; $compiler->_capture_stack[0][] = array($buffer, $assign, $append, $compiler->nocache); // maybe nocache because of nocache variables $compiler->nocache = $compiler->nocache | $compiler->tag_nocache; $_output = "<?php \$_smarty_tpl->_capture_stack[0][] = array($buffer, $assign, $append); ob_start(); ?>"; return $_output; } } /** * Smarty Internal Plugin Compile Captureclose Class * * @package Smarty * @subpackage Compiler */ class Smarty_Internal_Compile_CaptureClose extends Smarty_Internal_CompileBase { /** * Compiles code for the {/capture} tag * * @param array $args array with attributes from parser * @param object $compiler compiler object * * @return string compiled code */ public function compile($args, $compiler) { // check and get attributes $_attr = $this->getAttributes($compiler, $args); // must endblock be nocache? 
if ($compiler->nocache) { $compiler->tag_nocache = true; } list($buffer, $assign, $append, $compiler->nocache) = array_pop($compiler->_capture_stack[0]); $_output = "<?php list(\$_capture_buffer, \$_capture_assign, \$_capture_append) = array_pop(\$_smarty_tpl->_capture_stack[0]);\n"; $_output .= "if (!empty(\$_capture_buffer)) {\n"; $_output .= " if (isset(\$_capture_assign)) \$_smarty_tpl->assign(\$_capture_assign, ob_get_contents());\n"; $_output .= " if (isset( \$_capture_append)) \$_smarty_tpl->append( \$_capture_append, ob_get_contents());\n"; $_output .= " Smarty::\$_smarty_vars['capture'][\$_capture_buffer]=ob_get_clean();\n"; $_output .= "} else \$_smarty_tpl->capture_error();?>"; return $_output; } }
khadim-raath/gambioTest
includes/classes/Smarty/sysplugins/smarty_internal_compile_capture.php
PHP
gpl-2.0
3,188
/* packet-gfp.c * Routines for Generic Framing Procedure dissection * Copyright 2015, John Thacker <johnthacker@gmail.com> * * Wireshark - Network traffic analyzer * By Gerald Combs <gerald@wireshark.org> * Copyright 1998 Gerald Combs * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ /* * Generic Framing Procedure (GFP) is used to map octet-aligned variable * length payloads (e.g. Ethernet, MPLS, octet-aligned PPP, IP) into * octet-synchronous signals such as SONET/SDH (ITU-T G.707) and OTN * (ITU-T G.709). GFP is a telecommunications industry standard defined in * ITU-T G.7041/Y.1303. 
* * Reference: * https://www.itu.int/rec/T-REC-G.7041/ */ #include <config.h> #include <epan/packet.h> /* Should be first Wireshark include (other than config.h) */ #include <epan/expert.h> #include <epan/prefs.h> #include <epan/crc16-tvb.h> #include <epan/crc32-tvb.h> #include <epan/decode_as.h> #include <epan/proto_data.h> #include <wiretap/wtap.h> /* Prototypes */ /* (Required to prevent [-Wmissing-prototypes] warnings */ void proto_reg_handoff_gfp(void); void proto_register_gfp(void); /* Initialize the protocol and registered fields */ static int proto_gfp = -1; static int hf_gfp_pli = -1; static int hf_gfp_chec = -1; static int hf_gfp_chec_good = -1; static int hf_gfp_chec_bad = -1; static int hf_gfp_type = -1; static int hf_gfp_pti = -1; static int hf_gfp_pfi = -1; static int hf_gfp_exi = -1; static int hf_gfp_upi_data = -1; static int hf_gfp_upi_management = -1; static int hf_gfp_thec = -1; static int hf_gfp_thec_good = -1; static int hf_gfp_thec_bad = -1; static int hf_gfp_cid = -1; static int hf_gfp_ehec = -1; static int hf_gfp_ehec_good = -1; static int hf_gfp_ehec_bad = -1; static int hf_gfp_fcs = -1; static int hf_gfp_fcs_good = -1; static int hf_gfp_fcs_bad = -1; static expert_field ei_gfp_pli_idle_nonempty = EI_INIT; static expert_field ei_gfp_pli_unknown = EI_INIT; static expert_field ei_gfp_pli_invalid = EI_INIT; static expert_field ei_gfp_chec_bad = EI_INIT; static expert_field ei_gfp_thec_bad = EI_INIT; static expert_field ei_gfp_ehec_bad = EI_INIT; static expert_field ei_gfp_exi_short = EI_INIT; static expert_field ei_gfp_pfi_short = EI_INIT; static expert_field ei_gfp_payload_undecoded = EI_INIT; static expert_field ei_gfp_fcs_bad = EI_INIT; #define GFP_USER_DATA 0 #define GFP_CLIENT_MANAGEMENT 4 #define GFP_MANAGEMENT_COMMUNICATIONS 5 #define GFP_EXT_NULL 0 #define GFP_EXT_LINEAR 1 #define GFP_EXT_RING 2 /* Initialize the subtree pointers */ static gint ett_gfp = -1; static gint ett_gfp_chec = -1; static gint ett_gfp_type = -1; static gint 
ett_gfp_thec = -1; static gint ett_gfp_ehec = -1; static gint ett_gfp_fcs = -1; static dissector_table_t gfp_dissector_table; /* ITU-T G.7041 6.1.1, 6.2 */ static const range_string gfp_pli_rvals[] = { {0, 0, "Idle Frame"}, {1, 3, "Control Frame (Reserved)"}, {4, G_MAXUINT16, "Client Frame"}, {0, 0, NULL} }; static const int *gfp_type_data_fields[] = { &hf_gfp_pti, &hf_gfp_pfi, &hf_gfp_exi, &hf_gfp_upi_data, NULL }; static const int *gfp_type_management_fields[] = { &hf_gfp_pti, &hf_gfp_pfi, &hf_gfp_exi, &hf_gfp_upi_management, NULL }; static const value_string gfp_pti_vals[] = { {GFP_USER_DATA, "User Data"}, {GFP_CLIENT_MANAGEMENT, "Client Management"}, {GFP_MANAGEMENT_COMMUNICATIONS, "Management Communications"}, {0, NULL} }; static const value_string gfp_exi_vals[] = { {GFP_EXT_NULL, "Null Extension Header"}, {GFP_EXT_LINEAR, "Linear Frame"}, {GFP_EXT_RING, "Ring Frame"}, {0, NULL} }; static const range_string gfp_upi_data_rvals[] = { {0, 0, "Reserved and not available"}, {1, 1, "Frame-Mapped Ethernet"}, {2, 2, "Frame-Mapped PPP"}, {3, 3, "Transparent Fibre Channel"}, {4, 4, "Transparent FICON"}, {5, 5, "Transparent ESCON"}, {6, 6, "Transparent Gbit Ethernet"}, {7, 7, "Reserved"}, {8, 8, "Frame-Mapped Multiple Access Protocol over SDH (MAPOS)"}, {9, 9, "Transparent DVB ASI"}, {10, 10, "Frame-Mapped IEEE 802.17 Resilient Packet Ring"}, {11, 11, "Frame-Mapped Fibre Channel FC-BBW"}, {12, 12, "Asycnchronous Transparent Fibre Channel"}, {13, 13, "Frame-Mapped MPLS"}, {14, 14, "Frame-Mapped MPLS (Multicast) [Deprecrated]"}, {15, 15, "Frame-Mapped OSI network layer protocols (IS-IS, ES-IS, CLNP)"}, {16, 16, "Frame-Mapped IPv4"}, {17, 17, "Frame-Mapped IPv6"}, {18, 18, "Frame-Mapped DVB-ASI"}, {19, 19, "Frame-Mapped 64B/66B encoded Ethernet, including frame preamble"}, {20, 20, "Frame-Mapped 64B/66B encoded Ethernet ordered set information"}, {21, 21, "Transparent transcoded FC-1200"}, /*UPI value 22 & 23 from Amendment 3 (01/2015)*/ {22, 22, "Precision Time Protocol 
message"}, {23, 23, "Synchronization status message"}, {24, 239, "Reserved for future standardization"}, {240, 252, "Reserved for proprietary use"}, {253, 253, "Reserved for proprietary use, formerly Frame-Mapped 64B/66B encoded Ethernet, including frame preamble"}, {254, 254, "Reserved for proprietary use, formerly Frame-Mapped 64B/66B encoded Ethernet ordered set information"}, {255, 255, "Reserved and not available"}, {0, 0, NULL } }; static const range_string gfp_upi_management_rvals[] = { {0, 0, "Reserved and not available"}, {1, 1, "Client Signal Fail (Loss of Client Signal)"}, {2, 2, "Client Signal Fail (Loss of Character Synchronisation)"}, {3, 3, "Defect Clear Indication (DCI)"}, {4, 4, "Forward Defect Indication (FDI)"}, {5, 5, "Reverse Defect Indication (RDI)"}, {6, 223, "Reserved for future use"}, {224, 254, "Reserved for proprietary use"}, {255, 255, "Reserved and not available"}, {0, 0, NULL} }; /* Even GFP idle frames must have 4 bytes for the core header. * If data is received with fewer than this it is rejected. */ #define GFP_MIN_LENGTH 4 static void gfp_prompt(packet_info *pinfo, gchar* result) { g_snprintf(result, MAX_DECODE_AS_PROMPT_LEN, "UPI %u as", GPOINTER_TO_UINT(p_get_proto_data(pinfo->pool, pinfo, proto_gfp, 0))); } static gpointer gfp_value(packet_info *pinfo) { return p_get_proto_data(pinfo->pool, pinfo, proto_gfp, 0); } /* GFP has several identical 16 bit CRCs in its header (HECs). Note that * this function increases the offset. 
*/
/* Verify and display one of the GFP 16-bit Header Error Control fields
 * (cHEC / tHEC / eHEC).
 *
 * Recomputes the CRC-16 over the 'len' bytes at *offset, compares it with
 * the HEC transmitted in the following two bytes, and adds the HEC item
 * with a subtree of generated good/bad booleans; on mismatch an expert
 * info item is attached.
 *
 *   tvb, pinfo, tree - usual dissection state
 *   offset    - in: start of the protected bytes;
 *               out: advanced past protected bytes and the HEC (len + 2)
 *   len       - number of bytes covered by the HEC
 *   field, field_good, field_bad - hf indices for the HEC value and the
 *               generated match/mismatch flags
 *   ett       - subtree index for the HEC subtree
 *   ei_bad    - expert info raised when the HEC does not match
 */
static void
gfp_add_hec_tree(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
                 guint *offset, const guint len, const int field,
                 const int field_good, const int field_bad, const gint ett,
                 expert_field *ei_bad)
{
    proto_item* ti = NULL;
    proto_tree* hec_tree = NULL;
    guint hec, hec_calc;

    /* The CRC covers the bytes immediately preceding the HEC itself. */
    hec_calc = crc16_r3_ccitt_tvb(tvb, *offset, len);
    *offset += len;
    hec = tvb_get_ntohs(tvb, *offset);
    if ( hec == hec_calc ) {
        ti = proto_tree_add_uint_format_value(tree, field, tvb, *offset, 2, hec, "0x%04x [correct]", hec);
        hec_tree = proto_item_add_subtree(ti, ett);
        ti = proto_tree_add_boolean(hec_tree, field_good, tvb, *offset, 2, TRUE);
        PROTO_ITEM_SET_GENERATED(ti);
        ti = proto_tree_add_boolean(hec_tree, field_bad, tvb, *offset, 2, FALSE);
        PROTO_ITEM_SET_GENERATED(ti);
    } else {
        ti = proto_tree_add_uint_format_value(tree, field, tvb, *offset, 2, hec, "0x%04x [incorrect, should be 0x%04x]", hec, hec_calc);
        hec_tree = proto_item_add_subtree(ti, ett);
        ti = proto_tree_add_boolean(hec_tree, field_good, tvb, *offset, 2, FALSE);
        PROTO_ITEM_SET_GENERATED(ti);
        ti = proto_tree_add_boolean(hec_tree, field_bad, tvb, *offset, 2, TRUE);
        PROTO_ITEM_SET_GENERATED(ti);
        expert_add_info(pinfo, ti, ei_bad);
    }
    *offset += 2;
}

/* G.7041 6.1.2 GFP payload area
 *
 * Dissects everything after the core header of a client frame: the payload
 * header (type field + tHEC + optional extension header), the client
 * payload itself, and the optional payload FCS.  On return *offset has
 * been advanced past all consumed bytes.
 *
 * 'tree' is where the client payload is handed off to subdissectors;
 * 'gfp_tree' is the GFP protocol subtree; 'payload_len' is the PLI from
 * the core header (>= 4; smaller values are handled by the caller as
 * idle/control frames). */
static void
dissect_gfp_payload(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
                    proto_tree *gfp_tree, guint *offset, guint payload_len)
{
    tvbuff_t *payload_tvb;
    proto_item *type_ti = NULL;
    proto_item *fcs_ti;
    proto_tree *fcs_tree = NULL;
    guint pti, pfi, exi, upi;
    guint fcs, fcs_calc;
    guint fcs_len = 0;

    /* G.7041 6.1.2.3 Payload area scrambling
     * Note that payload when sent on the wire is scrambled as per ATM
     * with a 1 + x^43 multiplicative scrambler. Likely already removed by
     * the time we get a capture file (as with ATM). Could have a pref,
     * but if it's present we have to save state over subsequent frames,
     * always would fail to decode the first 43 payload bytes of a capture.
     */

    /* G.7041 6.1.2.1 Payload Header - at least 4 bytes */
    tvb_ensure_bytes_exist(tvb, *offset, 4);
    payload_len -= 4;

    /* G.7041 6.1.2.1.1 GFP type field - mandatory 2 bytes */
    pti = tvb_get_bits8(tvb, 8*(*offset), 3);
    pfi = tvb_get_bits8(tvb, 8*(*offset)+3, 1);
    exi = tvb_get_bits8(tvb, 8*(*offset)+4, 4);
    upi = tvb_get_guint8(tvb, *offset+1);
    /* Stash the UPI so the "Decode As" callbacks can retrieve it. */
    p_add_proto_data(pinfo->pool, pinfo, proto_gfp, 0, GUINT_TO_POINTER(upi));

    col_add_str(pinfo->cinfo, COL_INFO, val_to_str(pti, gfp_pti_vals, "Reserved PTI (%d)"));

    if (pti == GFP_USER_DATA || pti == GFP_MANAGEMENT_COMMUNICATIONS) {
        /* G.7041 Table 6-3 - GFP_MANAGEMENT_COMMUNICATIONS
         * uses the same UPI table as USER_DATA, though
         * "not all of these UPI types are applicable" in that case. */
        type_ti = proto_tree_add_bitmask_with_flags(gfp_tree, tvb, *offset, hf_gfp_type, ett_gfp_type, gfp_type_data_fields, ENC_BIG_ENDIAN, BMT_NO_FLAGS);
        col_append_sep_str(pinfo->cinfo, COL_INFO, ": ", rval_to_str(upi, gfp_upi_data_rvals, "Unknown 0x%02x"));
    }
    else if (pti == GFP_CLIENT_MANAGEMENT) {
        /* G.7041 Table 6-4 */
        type_ti = proto_tree_add_bitmask_with_flags(gfp_tree, tvb, *offset, hf_gfp_type, ett_gfp_type, gfp_type_management_fields, ENC_BIG_ENDIAN, BMT_NO_FLAGS);
        col_append_sep_str(pinfo->cinfo, COL_INFO, ": ", rval_to_str(upi, gfp_upi_management_rvals, "Unknown 0x%02x"));
    }

    /* G.7041 6.1.2.1.2 Type HEC (tHEC) - mandatory 2 bytes */
    gfp_add_hec_tree(tvb, pinfo, gfp_tree, offset, 2, hf_gfp_thec, hf_gfp_thec_good, hf_gfp_thec_bad, ett_gfp_thec, &ei_gfp_thec_bad);

    switch (exi) {
        case GFP_EXT_NULL:
            /* G.7041 6.1.2.1.3.1 Null extension header */
            break;

        case GFP_EXT_LINEAR:
            /* G.7041 6.1.2.1.3.2 Extension header for a linear frame */
            if (payload_len < 4) {
                expert_add_info(pinfo, type_ti, &ei_gfp_exi_short);
                payload_len = 0;
            }
            else {
                payload_len -= 4;
            }
            proto_tree_add_item(gfp_tree, hf_gfp_cid, tvb, *offset, 1, ENC_BIG_ENDIAN);
            /* Next byte spare field, reserved */

            /* 6.1.2.1.4 Extension HEC field */
            gfp_add_hec_tree(tvb, pinfo, gfp_tree, offset, 2, hf_gfp_ehec, hf_gfp_ehec_good, hf_gfp_ehec_bad, ett_gfp_ehec, &ei_gfp_ehec_bad);
            break;

        case GFP_EXT_RING:
            /* 6.1.2.1.3.3 Extension header for a ring frame */
            /* "For further study." Undefined so fall through */
        default:
            /* Reserved */
            /* TODO: Mark as error / unhandled? */
            break;
    }

    proto_item_set_end(gfp_tree, tvb, *offset);

    if (pfi == 1) { /* 6.1.2.2.1 Payload FCS field present */
        if (payload_len < 4) {
            expert_add_info(pinfo, type_ti, &ei_gfp_pfi_short);
            fcs_len = payload_len;
            payload_len = 0;
        }
        else {
            fcs_len = 4;
            payload_len -= 4;
        }
        proto_tree_set_appendix(gfp_tree, tvb, *offset + payload_len, fcs_len);
        fcs = tvb_get_ntohl(tvb, *offset + payload_len);
        /* Same CRC32 as ATM */
        /* As with ATM, we can either compute the CRC as it would be
         * calculated and compare (last step involves taking the complement),
         * or we can include the passed CRC in the input and check to see
         * if the remainder is a known value. I like the first method
         * only because it lets us display what we should have received. */
        /* Method 1: */
        fcs_calc = crc32_mpeg2_tvb_offset(tvb, *offset, payload_len);
        if (fcs == ~fcs_calc) {
            fcs_ti = proto_tree_add_uint_format_value(gfp_tree, hf_gfp_fcs, tvb, *offset+payload_len, 4, fcs, "0x%08x [correct]", fcs);
            fcs_tree = proto_item_add_subtree(fcs_ti, ett_gfp_fcs);
            fcs_ti = proto_tree_add_boolean(fcs_tree, hf_gfp_fcs_good, tvb, *offset+payload_len, 4, TRUE);
            PROTO_ITEM_SET_GENERATED(fcs_ti);
            fcs_ti = proto_tree_add_boolean(fcs_tree, hf_gfp_fcs_bad, tvb, *offset+payload_len, 4, FALSE);
            PROTO_ITEM_SET_GENERATED(fcs_ti);
        } else {
            fcs_ti = proto_tree_add_uint_format_value(gfp_tree, hf_gfp_fcs, tvb, *offset+payload_len, 4, fcs, "0x%08x [incorrect, should be 0x%08x]", fcs, fcs_calc);
            fcs_tree = proto_item_add_subtree(fcs_ti, ett_gfp_fcs);
            fcs_ti = proto_tree_add_boolean(fcs_tree, hf_gfp_fcs_good, tvb, *offset+payload_len, 4, FALSE);
            PROTO_ITEM_SET_GENERATED(fcs_ti);
            fcs_ti = proto_tree_add_boolean(fcs_tree, hf_gfp_fcs_bad, tvb, *offset+payload_len, 4, TRUE);
            PROTO_ITEM_SET_GENERATED(fcs_ti);
            expert_add_info(pinfo, fcs_ti, &ei_gfp_fcs_bad);
        }
        /* Method 2: */
        /*
        fcs_calc = crc32_mpeg2_tvb_offset(tvb, *offset, payload_len+4);
        fcs_ti = proto_tree_add_uint(gfp_tree, hf_gfp_fcs, tvb, *offset+payload_len, 4, fcs);
        proto_item_append_text(fcs_ti, (fcs_calc == 0xC704DD7B) ? " [correct]" : " [incorrect]");
        */
    }

    /* Some client frames we can do. Others are not implemented yet.
     * Transparent mode types are much trickier than frame-mapped,
     * since they requires reassembling streams across multiple GFP packets. */
    payload_tvb = tvb_new_subset_length(tvb, *offset, payload_len);
    switch (pti) {
        case GFP_USER_DATA:
        case GFP_MANAGEMENT_COMMUNICATIONS:
            if (!dissector_try_uint(gfp_dissector_table, upi, payload_tvb, pinfo, tree)) {
                expert_add_info_format(pinfo, type_ti, &ei_gfp_payload_undecoded, "Payload type 0x%02x (%s) unsupported", upi, rval_to_str_const(upi, gfp_upi_data_rvals, "UNKNOWN"));
                call_data_dissector(payload_tvb, pinfo, tree);
            }
            break;

        case GFP_CLIENT_MANAGEMENT:
            call_data_dissector(payload_tvb, pinfo, tree);
            break;

        default:
            break;
    }
    *offset += payload_len;
    *offset += fcs_len;
}

/* Dissector entry point: one GFP frame per tvb.
 * Returns the number of bytes dissected, 0 if the tvb is too short. */
static int
dissect_gfp(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data _U_)
{
    proto_item *ti, *pli_ti;
    proto_tree *gfp_tree;
    guint offset = 0;
    int len = 0;
    guint pli;

    /*** HEURISTICS ***/

    /* Check that the packet is long enough for it to belong to us. */
    if (tvb_reported_length(tvb) < GFP_MIN_LENGTH)
        return 0;

    /*** COLUMN DATA ***/

    /* Set the Protocol column to the constant string of GFP */
    col_set_str(pinfo->cinfo, COL_PROTOCOL, "GFP");
    col_clear(pinfo->cinfo, COL_INFO);

    /* Avoid asserts for leaving these blank. */
    col_set_str(pinfo->cinfo, COL_RES_DL_SRC, "N/A");
    col_set_str(pinfo->cinfo, COL_RES_DL_DST, "N/A");

    /*** PROTOCOL TREE ***/

    /* create display subtree for the protocol */
    ti = proto_tree_add_item(tree, proto_gfp, tvb, 0, GFP_MIN_LENGTH, ENC_NA);
    gfp_tree = proto_item_add_subtree(ti, ett_gfp);

    /* ITU-T G.7041 6.1.1 GFP core header */
    /* The core header could be scrambled (see G.7041 6.1.1.3) but isn't on
     * the GFP level capture files I've seen as it's removed before then.
     * If using this as a subdissector to a SDH or OTN dissector, that could
     * be an issue. TODO: Maybe add a pref for scrambling? */
    len = 2;
    pli_ti = proto_tree_add_item_ret_uint(gfp_tree, hf_gfp_pli, tvb, offset, len, ENC_BIG_ENDIAN, &pli);
    if (pli < 4) {
        /* Don't interpret as payload length */
        proto_item_append_text(pli_ti, " (%s)", rval_to_str_const(pli, gfp_pli_rvals, "Unknown"));
    }
    col_set_str(pinfo->cinfo, COL_INFO, rval_to_str_const(pli, gfp_pli_rvals, "Unknown"));

    /* 6.1.1.2 Core HEC field */
    gfp_add_hec_tree(tvb, pinfo, gfp_tree, &offset, len, hf_gfp_chec, hf_gfp_chec_good, hf_gfp_chec_bad, ett_gfp_chec, &ei_gfp_chec_bad);

    if (pli == 0) {
        /* 6.2.1 GFP idle frames */
        if (tvb_reported_length_remaining(tvb, offset)) {
            expert_add_info(pinfo, pli_ti, &ei_gfp_pli_idle_nonempty);
        }
    }
    else if (pli < 4) {
        /* 6.2.2 Other control frames (reserved) */
        expert_add_info(pinfo, pli_ti, &ei_gfp_pli_unknown);
    }
    else {
        /* G.7041 6.1.2 GFP payload area */
        if (tvb_reported_length(tvb) < pli + offset) {
            /* avoid signed / unsigned comparison */
            /* NOTE(review): tvb_reported_length_remaining() returns gint but
             * is formatted with %u here — harmless for sane captures, but
             * worth confirming. */
            proto_item_append_text(pli_ti, " (invalid, reported length is %u)", tvb_reported_length_remaining(tvb, offset));
            expert_add_info(pinfo, pli_ti, &ei_gfp_pli_invalid);
        }
        dissect_gfp_payload(tvb, pinfo, tree, gfp_tree, &offset, pli);
    }

    /* Return the amount of data this dissector was able to dissect */
    return offset;
}

/* Register the protocol, its fields, expert items, subtrees, the UPI
 * subdissector table and the "Decode As" handling with the core. */
void
proto_register_gfp(void)
{
    /* Setup list of header fields. See Section 1.5 of README.dissector for
     * details.
*/ static hf_register_info hf[] = { { &hf_gfp_pli, { "Payload Length Indicator", "gfp.pli", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL } }, { &hf_gfp_chec, { "Core HEC", "gfp.chec", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL } }, { &hf_gfp_chec_good, { "Good cHEC", "gfp.chec_good", FT_BOOLEAN, BASE_NONE, NULL, 0x0, "True: cHEC matches core header; False: doesn't match", HFILL } }, { &hf_gfp_chec_bad, { "Bad cHEC", "gfp.chec_bad", FT_BOOLEAN, BASE_NONE, NULL, 0x0, "True: cHEC doesn't match core header; False: matches", HFILL } }, { &hf_gfp_type, { "Type Field", "gfp.type", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL } }, { &hf_gfp_pti, { "PTI", "gfp.pti", FT_UINT16, BASE_HEX, VALS(gfp_pti_vals), 0xE000, "Payload Type Identifier", HFILL } }, { &hf_gfp_pfi, { "PFI", "gfp.pfi", FT_BOOLEAN, 16, TFS(&tfs_present_absent), 0x1000, "Payload FCS Indicator", HFILL } }, { &hf_gfp_exi, { "EXI", "gfp.exi", FT_UINT16, BASE_HEX, VALS(gfp_exi_vals), 0x0F00, "Extension Header Identifier", HFILL } }, { &hf_gfp_upi_data, { "UPI", "gfp.upi", FT_UINT16, BASE_HEX|BASE_RANGE_STRING, RVALS(gfp_upi_data_rvals), 0xFF, "User Payload Identifier for Client Data Frame (or Management Communications Frame)", HFILL } }, { &hf_gfp_upi_management, { "UPI", "gfp.upi", FT_UINT16, BASE_HEX|BASE_RANGE_STRING, RVALS(gfp_upi_management_rvals), 0xFF, "User Payload Identifier for Client Management Frame", HFILL } }, { &hf_gfp_thec, { "Type HEC", "gfp.thec", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL } }, { &hf_gfp_thec_good, { "Good tHEC", "gfp.thec_good", FT_BOOLEAN, BASE_NONE, NULL, 0x0, "True: tHEC matches type header; False: doesn't match", HFILL } }, { &hf_gfp_thec_bad, { "Bad tHEC", "gfp.thec_bad", FT_BOOLEAN, BASE_NONE, NULL, 0x0, "True: tHEC doesn't match type header; False: matches", HFILL } }, { &hf_gfp_cid, { "Channel ID", "gfp.cid", FT_UINT8, BASE_HEX, NULL, 0x0, NULL, HFILL } }, { &hf_gfp_ehec, { "Extension HEC", "gfp.ehec", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL } }, { 
&hf_gfp_ehec_good, { "Good eHEC", "gfp.ehec_good", FT_BOOLEAN, BASE_NONE, NULL, 0x0, "True: eHEC matches extension header; False: doesn't match", HFILL } }, { &hf_gfp_ehec_bad, { "Bad eHEC", "gfp.ehec_bad", FT_BOOLEAN, BASE_NONE, NULL, 0x0, "True: eHEC doesn't match extension header; False: matches", HFILL } }, { &hf_gfp_fcs, { "Payload FCS", "gfp.fcs", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL } }, { &hf_gfp_fcs_good, { "Good FCS", "gfp.fcs_good", FT_BOOLEAN, BASE_NONE, NULL, 0x0, "True: FCS matches payload; False: doesn't match", HFILL } }, { &hf_gfp_fcs_bad, { "Bad eHEC", "gfp.fcs_bad", FT_BOOLEAN, BASE_NONE, NULL, 0x0, "True: FCS doesn't match payload; False: matches", HFILL } } }; /* Setup protocol subtree array */ static gint *ett[] = { &ett_gfp, &ett_gfp_chec, &ett_gfp_type, &ett_gfp_thec, &ett_gfp_ehec, &ett_gfp_fcs }; /* Setup protocol expert items */ static ei_register_info ei[] = { { &ei_gfp_pli_idle_nonempty, { "gfp.pli.idle.nonempty", PI_MALFORMED, PI_ERROR, "Payload present on idle frame", EXPFILL } }, { &ei_gfp_pli_unknown, { "gfp.pli.unknown", PI_UNDECODED, PI_WARN, "Unknown control frame type", EXPFILL } }, { &ei_gfp_pli_invalid, { "gfp.pli.invalid", PI_MALFORMED, PI_WARN, "Bogus PLI does not match reported length", EXPFILL } }, { &ei_gfp_chec_bad, { "gfp.chec.bad", PI_CHECKSUM, PI_WARN, "Bad cHEC", EXPFILL } }, { &ei_gfp_thec_bad, { "gfp.thec.bad", PI_CHECKSUM, PI_WARN, "Bad tHEC", EXPFILL } }, { &ei_gfp_ehec_bad, { "gfp.ehec.bad", PI_CHECKSUM, PI_WARN, "Bad eHEC", EXPFILL } }, { &ei_gfp_exi_short, { "gfp.exi.missing", PI_MALFORMED, PI_ERROR, "EXI bit set but PLI too short for extension header", EXPFILL} }, { &ei_gfp_pfi_short, { "gfp.pfi.missing", PI_MALFORMED, PI_ERROR, "PFI bit set but PLI too short for payload FCS", EXPFILL} }, { &ei_gfp_payload_undecoded, { "gfp.payload.undecoded", PI_UNDECODED, PI_WARN, "Payload type not supported yet by the dissector", EXPFILL} }, { &ei_gfp_fcs_bad, { "gfp.fcs.bad", PI_CHECKSUM, PI_WARN, "Bad FCS", 
EXPFILL } } }; /* Decode As handling */ static build_valid_func gfp_da_build_value[1] = {gfp_value}; static decode_as_value_t gfp_da_values = {gfp_prompt, 1, gfp_da_build_value}; static decode_as_t gfp_da = {"gfp", "GFP", "gfp.upi", 1, 0, &gfp_da_values, NULL, NULL, decode_as_default_populate_list, decode_as_default_reset, decode_as_default_change, NULL}; /* module_t *gfp_module; */ expert_module_t *expert_gfp; /* Register the protocol name and description */ proto_gfp = proto_register_protocol("Generic Framing Procedure", "GFP", "gfp"); /* Required function calls to register the header fields and subtrees */ proto_register_field_array(proto_gfp, hf, array_length(hf)); proto_register_subtree_array(ett, array_length(ett)); /* Required function calls to register expert items */ expert_gfp = expert_register_protocol(proto_gfp); expert_register_field_array(expert_gfp, ei, array_length(ei)); /* Subdissectors for payload */ gfp_dissector_table = register_dissector_table("gfp.upi", "GFP UPI (for Client Data frames)", proto_gfp, FT_UINT8, BASE_DEC, DISSECTOR_TABLE_NOT_ALLOW_DUPLICATE); /* Don't register a preferences module yet since there are no prefs in * order to avoid a warning. (See section 2.6 of README.dissector * for more details on preferences). */ /*gfp_module = prefs_register_protocol(proto_gfp, NULL);*/ register_decode_as(&gfp_da); } /* If this function is registered as a prefs callback (see * prefs_register_protocol above) this function is also called by Wireshark's * preferences manager whenever "Apply" or "OK" are pressed. In that case, it * should accommodate being called more than once by use of the static * 'initialized' variable included below. * * This form of the reg_handoff function is used if if you perform registration * functions which are dependent upon prefs. 
*/
void
proto_reg_handoff_gfp(void)
{
    /* Re-created on every call, so there is no need for this to be a
     * static local (vestige of the old 'initialized'-guard pattern). */
    dissector_handle_t gfp_handle;

    gfp_handle = create_dissector_handle(dissect_gfp, proto_gfp);
    dissector_add_uint("wtap_encap", WTAP_ENCAP_GFP_T, gfp_handle);
    dissector_add_uint("wtap_encap", WTAP_ENCAP_GFP_F, gfp_handle);

    /* Add a few of the easiest UPIs to decode. There's more that probably
     * would work, but are untested (frame mapped DVB, frame mapped Fibre
     * Channel). The transparent mode ones are trickier, since without a
     * one-to-one mapping of frames, we would have to reassemble payload
     * packets across multiple GFP packets.
     *
     * Section 7.1.1 "Ethernet MAC encapsulation" of G.7041 says
     * "The Ethernet MAC octets from destination address through
     * "frame check sequence, inclusive, are placed in the GFP payload
     * "information field.", so we want the dissector for Ethernet
     * frames including the FCS. */
    dissector_add_uint("gfp.upi", 1, find_dissector("eth_withfcs"));
    dissector_add_uint("gfp.upi", 2, find_dissector("ppp_hdlc"));
    dissector_add_uint("gfp.upi", 12, find_dissector("mpls"));
    dissector_add_uint("gfp.upi", 13, find_dissector("mpls"));
    dissector_add_uint("gfp.upi", 16, find_dissector("ip"));
    dissector_add_uint("gfp.upi", 17, find_dissector("ipv6"));
}

/*
 * Editor modelines - https://www.wireshark.org/tools/modelines.html
 *
 * Local variables:
 * c-basic-offset: 4
 * tab-width: 8
 * indent-tabs-mode: nil
 * End:
 *
 * vi: set shiftwidth=4 tabstop=8 expandtab:
 * :indentSize=4:tabSize=8:noTabs=true:
 */
kynesim/wireshark
epan/dissectors/packet-gfp.c
C
gpl-2.0
27,009
\section{Chopper\_simple: An ideal chopper}
\index{Optics!lens} % NOTE(review): index entry says 'lens' — likely a copy-paste; confirm taxonomy.
\mcdoccomp{optics/Chopper_simple.parms}

\texttt{Chopper\_simple} is an idealized version of a chopper with a
rectangular chopper opening which may open instantly and has no
side-scattering etc. It models the chopper with a blocking, infinitely thin
aperture which becomes transparent in the time interval
$t\in\left[\mathit{t0},\mathit{t0}+\tau\right]$. In this idealized model the
chopper opening is rectangular and may open instantly (if
\textit{t\_rise}$=0$, the default). For a nonzero rise time the aperture
simply becomes gradually less opaque for
$t\in\left[\mathit{t0}-\textit{t\_rise},\mathit{t0}\right]$. For correct
normalization of intensity a chopper period, \textit{T}, must also be set.

\textit{is\_first} is useful when using \textbf{Chopper\_simple} with
continuous sources, which inherently have no time-dependence. Thus the
emission time of the photon ray is arbitrary, and the chopper defines the
temporal signature of the beam, i.e.\ it simply sets the time-parameter of
the photon ray randomly in the opening window of the chopper. Naturally this
should only be used for the \emph{first} chopper element in a simulation.
mads-bertelsen/McCode
doc/manuals/mcxtrace/optics/Chopper_simple.tex
TeX
gpl-2.0
1,225
/** @file * IPRT - Memory Management and Manipulation. */ /* * Copyright (C) 2006-2012 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; * you can redistribute it and/or modify it under the terms of the GNU * General Public License (GPL) as published by the Free Software * Foundation, in version 2 as it comes in the "COPYING" file of the * VirtualBox OSE distribution. VirtualBox OSE is distributed in the * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind. * * The contents of this file may alternatively be used under the terms * of the Common Development and Distribution License Version 1.0 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the * VirtualBox OSE distribution, in which case the provisions of the * CDDL are applicable instead of those of the GPL. * * You may elect to license modified versions of this file under the * terms and conditions of either the GPL or the CDDL or both. */ #ifndef ___iprt_mem_h #define ___iprt_mem_h #include <iprt/cdefs.h> #include <iprt/types.h> #ifdef IN_RC # error "There are no RTMem APIs available Guest Context!" #endif /** @defgroup grp_rt_mem RTMem - Memory Management and Manipulation * @ingroup grp_rt * @{ */ RT_C_DECLS_BEGIN /** @def RTMEM_ALIGNMENT * The alignment of the memory blocks returned by RTMemAlloc(), RTMemAllocZ(), * RTMemRealloc(), RTMemTmpAlloc() and RTMemTmpAllocZ() for allocations greater * than RTMEM_ALIGNMENT. * * @note This alignment is not forced if the electric fence is active! */ #if defined(RT_OS_OS2) # define RTMEM_ALIGNMENT 4 #else # define RTMEM_ALIGNMENT 8 #endif /** @def RTMEM_TAG * The default allocation tag used by the RTMem allocation APIs. * * When not defined before the inclusion of iprt/mem.h or iprt/memobj.h, this * will default to the pointer to the current file name. The memory API will * make of use of this as pointer to a volatile but read-only string. 
* The alternative tag includes the line number for a more-detailed analysis. */ #ifndef RTMEM_TAG # if 0 # define RTMEM_TAG (__FILE__ ":" RT_XSTR(__LINE__)) # else # define RTMEM_TAG (__FILE__) # endif #endif /** @name Allocate temporary memory. * @{ */ /** * Allocates temporary memory with default tag. * * Temporary memory blocks are used for not too large memory blocks which * are believed not to stick around for too long. Using this API instead * of RTMemAlloc() not only gives the heap manager room for optimization * but makes the code easier to read. * * @returns Pointer to the allocated memory. * @returns NULL on failure, assertion raised in strict builds. * @param cb Size in bytes of the memory block to allocated. */ #define RTMemTmpAlloc(cb) RTMemTmpAllocTag((cb), RTMEM_TAG) /** * Allocates temporary memory with custom tag. * * Temporary memory blocks are used for not too large memory blocks which * are believed not to stick around for too long. Using this API instead * of RTMemAlloc() not only gives the heap manager room for optimization * but makes the code easier to read. * * @returns Pointer to the allocated memory. * @returns NULL on failure, assertion raised in strict builds. * @param cb Size in bytes of the memory block to allocated. * @param pszTag Allocation tag used for statistics and such. */ RTDECL(void *) RTMemTmpAllocTag(size_t cb, const char *pszTag) RT_NO_THROW; /** * Allocates zero'd temporary memory with default tag. * * Same as RTMemTmpAlloc() but the memory will be zero'd. * * @returns Pointer to the allocated memory. * @returns NULL on failure, assertion raised in strict builds. * @param cb Size in bytes of the memory block to allocated. */ #define RTMemTmpAllocZ(cb) RTMemTmpAllocZTag((cb), RTMEM_TAG) /** * Allocates zero'd temporary memory with custom tag. * * Same as RTMemTmpAlloc() but the memory will be zero'd. * * @returns Pointer to the allocated memory. * @returns NULL on failure, assertion raised in strict builds. 
* @param cb Size in bytes of the memory block to allocated. * @param pszTag Allocation tag used for statistics and such. */ RTDECL(void *) RTMemTmpAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW; /** * Free temporary memory. * * @param pv Pointer to memory block. */ RTDECL(void) RTMemTmpFree(void *pv) RT_NO_THROW; /** @} */ /** * Allocates memory with default tag. * * @returns Pointer to the allocated memory. * @returns NULL on failure, assertion raised in strict builds. * @param cb Size in bytes of the memory block to allocated. * @param pszTag Allocation tag used for statistics and such. */ #define RTMemAlloc(cb) RTMemAllocTag((cb), RTMEM_TAG) /** * Allocates memory with custom tag. * * @returns Pointer to the allocated memory. * @returns NULL on failure, assertion raised in strict builds. * @param cb Size in bytes of the memory block to allocated. * @param pszTag Allocation tag used for statistics and such. */ RTDECL(void *) RTMemAllocTag(size_t cb, const char *pszTag) RT_NO_THROW; /** * Allocates zero'd memory with default tag. * * Instead of memset(pv, 0, sizeof()) use this when you want zero'd * memory. This keeps the code smaller and the heap can skip the memset * in about 0.42% of calls :-). * * @returns Pointer to the allocated memory. * @returns NULL on failure. * @param cb Size in bytes of the memory block to allocated. */ #define RTMemAllocZ(cb) RTMemAllocZTag((cb), RTMEM_TAG) /** * Allocates zero'd memory with custom tag. * * Instead of memset(pv, 0, sizeof()) use this when you want zero'd * memory. This keeps the code smaller and the heap can skip the memset * in about 0.42% of calls :-). * * @returns Pointer to the allocated memory. * @returns NULL on failure. * @param cb Size in bytes of the memory block to allocated. * @param pszTag Allocation tag used for statistics and such. 
*/ RTDECL(void *) RTMemAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW; /** * Wrapper around RTMemAlloc for automatically aligning variable sized * allocations so that the various electric fence heaps works correctly. * * @returns See RTMemAlloc. * @param cbUnaligned The unaligned size. */ #define RTMemAllocVar(cbUnaligned) RTMemAllocVarTag((cbUnaligned), RTMEM_TAG) /** * Wrapper around RTMemAllocTag for automatically aligning variable sized * allocations so that the various electric fence heaps works correctly. * * @returns See RTMemAlloc. * @param cbUnaligned The unaligned size. * @param pszTag Allocation tag used for statistics and such. */ RTDECL(void *) RTMemAllocVarTag(size_t cbUnaligned, const char *pszTag) RT_NO_THROW; /** * Wrapper around RTMemAllocZ for automatically aligning variable sized * allocations so that the various electric fence heaps works correctly. * * @returns See RTMemAllocZ. * @param cbUnaligned The unaligned size. */ #define RTMemAllocZVar(cbUnaligned) RTMemAllocZVarTag((cbUnaligned), RTMEM_TAG) /** * Wrapper around RTMemAllocZTag for automatically aligning variable sized * allocations so that the various electric fence heaps works correctly. * * @returns See RTMemAllocZ. * @param cbUnaligned The unaligned size. * @param pszTag Allocation tag used for statistics and such. */ RTDECL(void *) RTMemAllocZVarTag(size_t cbUnaligned, const char *pszTag) RT_NO_THROW; /** * Duplicates a chunk of memory into a new heap block (default tag). * * @returns New heap block with the duplicate data. * @returns NULL if we're out of memory. * @param pvSrc The memory to duplicate. * @param cb The amount of memory to duplicate. */ #define RTMemDup(pvSrc, cb) RTMemDupTag((pvSrc), (cb), RTMEM_TAG) /** * Duplicates a chunk of memory into a new heap block (custom tag). * * @returns New heap block with the duplicate data. * @returns NULL if we're out of memory. * @param pvSrc The memory to duplicate. * @param cb The amount of memory to duplicate. 
* @param pszTag Allocation tag used for statistics and such. */ RTDECL(void *) RTMemDupTag(const void *pvSrc, size_t cb, const char *pszTag) RT_NO_THROW; /** * Duplicates a chunk of memory into a new heap block with some additional * zeroed memory (default tag). * * @returns New heap block with the duplicate data. * @returns NULL if we're out of memory. * @param pvSrc The memory to duplicate. * @param cbSrc The amount of memory to duplicate. * @param cbExtra The amount of extra memory to allocate and zero. */ #define RTMemDupEx(pvSrc, cbSrc, cbExtra) RTMemDupExTag((pvSrc), (cbSrc), (cbExtra), RTMEM_TAG) /** * Duplicates a chunk of memory into a new heap block with some additional * zeroed memory (default tag). * * @returns New heap block with the duplicate data. * @returns NULL if we're out of memory. * @param pvSrc The memory to duplicate. * @param cbSrc The amount of memory to duplicate. * @param cbExtra The amount of extra memory to allocate and zero. * @param pszTag Allocation tag used for statistics and such. */ RTDECL(void *) RTMemDupExTag(const void *pvSrc, size_t cbSrc, size_t cbExtra, const char *pszTag) RT_NO_THROW; /** * Reallocates memory with default tag. * * @returns Pointer to the allocated memory. * @returns NULL on failure. * @param pvOld The memory block to reallocate. * @param cbNew The new block size (in bytes). */ #define RTMemRealloc(pvOld, cbNew) RTMemReallocTag((pvOld), (cbNew), RTMEM_TAG) /** * Reallocates memory with custom tag. * * @returns Pointer to the allocated memory. * @returns NULL on failure. * @param pvOld The memory block to reallocate. * @param cbNew The new block size (in bytes). * @param pszTag Allocation tag used for statistics and such. */ RTDECL(void *) RTMemReallocTag(void *pvOld, size_t cbNew, const char *pszTag) RT_NO_THROW; /** * Frees memory. * * @param pv Pointer to memory block. */ RTDECL(void) RTMemFree(void *pv) RT_NO_THROW; /** @def RTR0MemAllocEx and RTR0MemAllocExTag flags. 
 * @{ */
/** The returned memory should be zeroed. */
#define RTMEMALLOCEX_FLAGS_ZEROED           RT_BIT(0)
/** It must be possible to load code into the returned memory block and
 *  execute it from there. */
#define RTMEMALLOCEX_FLAGS_EXEC             RT_BIT(1)
/** Allocation from any context.
 * Will return VERR_NOT_SUPPORTED if not supported.  */
#define RTMEMALLOCEX_FLAGS_ANY_CTX_ALLOC    RT_BIT(2)
/** Allocate the memory such that it can be freed from any context.
 * Will return VERR_NOT_SUPPORTED if not supported. */
#define RTMEMALLOCEX_FLAGS_ANY_CTX_FREE     RT_BIT(3)
/** Allocate and free from any context.
 * Will return VERR_NOT_SUPPORTED if not supported. */
#define RTMEMALLOCEX_FLAGS_ANY_CTX          (RTMEMALLOCEX_FLAGS_ANY_CTX_ALLOC | RTMEMALLOCEX_FLAGS_ANY_CTX_FREE)
/** Mask of valid flags. */
#define RTMEMALLOCEX_FLAGS_VALID_MASK       UINT32_C(0x0000000f)
/** @} */

/**
 * Extended heap allocation API, default tag.
 *
 * @returns IPRT status code.
 * @retval  VERR_NO_MEMORY if we're out of memory.
 * @retval  VERR_NO_EXEC_MEMORY if we're out of executable memory.
 * @retval  VERR_NOT_SUPPORTED if any of the specified flags are unsupported.
 *
 * @param   cb              The amount of memory to allocate.
 * @param   cbAlignment     The alignment requirements.  Use 0 to indicate
 *                          default alignment.
 * @param   fFlags          A combination of the RTMEMALLOCEX_FLAGS_XXX
 *                          defines.
 * @param   ppv             Where to return the memory.
 */
#define RTMemAllocEx(cb, cbAlignment, fFlags, ppv) RTMemAllocExTag((cb), (cbAlignment), (fFlags), RTMEM_TAG, (ppv))

/**
 * Extended heap allocation API, custom tag.
 *
 * @returns IPRT status code.
 * @retval  VERR_NO_MEMORY if we're out of memory.
 * @retval  VERR_NO_EXEC_MEMORY if we're out of executable memory.
 * @retval  VERR_NOT_SUPPORTED if any of the specified flags are unsupported.
 *
 * @param   cb              The amount of memory to allocate.
 * @param   cbAlignment     The alignment requirements.  Use 0 to indicate
 *                          default alignment.
 * @param   fFlags          A combination of the RTMEMALLOCEX_FLAGS_XXX
 *                          defines.
 * @param   pszTag          The tag.
 * @param   ppv             Where to return the memory.
*/ RTDECL(int) RTMemAllocExTag(size_t cb, size_t cbAlignment, uint32_t fFlags, const char *pszTag, void **ppv) RT_NO_THROW; /** * For freeing memory allocated by RTMemAllocEx or RTMemAllocExTag. * * @param pv What to free, NULL is fine. * @param cb The amount of allocated memory. */ RTDECL(void) RTMemFreeEx(void *pv, size_t cb) RT_NO_THROW; /** * Allocates memory which may contain code (default tag). * * @returns Pointer to the allocated memory. * @returns NULL on failure. * @param cb Size in bytes of the memory block to allocate. */ #define RTMemExecAlloc(cb) RTMemExecAllocTag((cb), RTMEM_TAG) /** * Allocates memory which may contain code (custom tag). * * @returns Pointer to the allocated memory. * @returns NULL on failure. * @param cb Size in bytes of the memory block to allocate. * @param pszTag Allocation tag used for statistics and such. */ RTDECL(void *) RTMemExecAllocTag(size_t cb, const char *pszTag) RT_NO_THROW; /** * Free executable/read/write memory allocated by RTMemExecAlloc(). * * @param pv Pointer to memory block. * @param cb The allocation size. */ RTDECL(void) RTMemExecFree(void *pv, size_t cb) RT_NO_THROW; #if defined(IN_RING0) && defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX) /** * Donate read+write+execute memory to the exec heap. * * This API is specific to AMD64 and Linux/GNU. A kernel module that desires to * use RTMemExecAlloc on AMD64 Linux/GNU will have to donate some statically * allocated memory in the module if it wishes for GCC generated code to work. * GCC can only generate modules that work in the address range ~2GB to ~0 * currently. * * The API only accept one single donation. * * @returns IPRT status code. * @param pvMemory Pointer to the memory block. * @param cb The size of the memory block. */ RTR0DECL(int) RTR0MemExecDonate(void *pvMemory, size_t cb) RT_NO_THROW; #endif /* R0+AMD64+LINUX */ /** * Allocate page aligned memory with default tag. * * @returns Pointer to the allocated memory. 
* @returns NULL if we're out of memory. * @param cb Size of the memory block. Will be rounded up to page size. */ #define RTMemPageAlloc(cb) RTMemPageAllocTag((cb), RTMEM_TAG) /** * Allocate page aligned memory with custom tag. * * @returns Pointer to the allocated memory. * @returns NULL if we're out of memory. * @param cb Size of the memory block. Will be rounded up to page size. * @param pszTag Allocation tag used for statistics and such. */ RTDECL(void *) RTMemPageAllocTag(size_t cb, const char *pszTag) RT_NO_THROW; /** * Allocate zero'd page aligned memory with default tag. * * @returns Pointer to the allocated memory. * @returns NULL if we're out of memory. * @param cb Size of the memory block. Will be rounded up to page size. */ #define RTMemPageAllocZ(cb) RTMemPageAllocZTag((cb), RTMEM_TAG) /** * Allocate zero'd page aligned memory with custom tag. * * @returns Pointer to the allocated memory. * @returns NULL if we're out of memory. * @param cb Size of the memory block. Will be rounded up to page size. * @param pszTag Allocation tag used for statistics and such. */ RTDECL(void *) RTMemPageAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW; /** * Free a memory block allocated with RTMemPageAlloc() or RTMemPageAllocZ(). * * @param pv Pointer to the block as it was returned by the allocation function. * NULL will be ignored. * @param cb The allocation size. Will be rounded up to page size. * Ignored if @a pv is NULL. */ RTDECL(void) RTMemPageFree(void *pv, size_t cb) RT_NO_THROW; /** Page level protection flags for RTMemProtect(). * @{ */ /** No access at all. */ #define RTMEM_PROT_NONE 0 /** Read access. */ #define RTMEM_PROT_READ 1 /** Write access. */ #define RTMEM_PROT_WRITE 2 /** Execute access. */ #define RTMEM_PROT_EXEC 4 /** @} */ /** * Change the page level protection of a memory region. * * @returns iprt status code. * @param pv Start of the region. Will be rounded down to nearest page boundary. * @param cb Size of the region. 
Will be rounded up to the nearest page boundary. * @param fProtect The new protection, a combination of the RTMEM_PROT_* defines. */ RTDECL(int) RTMemProtect(void *pv, size_t cb, unsigned fProtect) RT_NO_THROW; /** * Goes thru some pains to make sure the specified memory block is thoroughly * scrambled. * * @param pv The start of the memory block. * @param cb The size of the memory block. * @param cMinPasses The minimum number of passes to make. */ RTDECL(void) RTMemWipeThoroughly(void *pv, size_t cb, size_t cMinPasses) RT_NO_THROW; #ifdef IN_RING0 /** * Allocates physical contiguous memory (below 4GB). * The allocation is page aligned and the content is undefined. * * @returns Pointer to the memory block. This is page aligned. * @param pPhys Where to store the physical address. * @param cb The allocation size in bytes. This is always * rounded up to PAGE_SIZE. */ RTR0DECL(void *) RTMemContAlloc(PRTCCPHYS pPhys, size_t cb) RT_NO_THROW; /** * Frees memory allocated using RTMemContAlloc(). * * @param pv Pointer to return from RTMemContAlloc(). * @param cb The cb parameter passed to RTMemContAlloc(). */ RTR0DECL(void) RTMemContFree(void *pv, size_t cb) RT_NO_THROW; /** * Copy memory from a user mode buffer into a kernel buffer. * * @retval VINF_SUCCESS on success. * @retval VERR_ACCESS_DENIED on error. * * @param pvDst The kernel mode destination address. * @param R3PtrSrc The user mode source address. * @param cb The number of bytes to copy. */ RTR0DECL(int) RTR0MemUserCopyFrom(void *pvDst, RTR3PTR R3PtrSrc, size_t cb); /** * Copy memory from a kernel buffer into a user mode one. * * @retval VINF_SUCCESS on success. * @retval VERR_ACCESS_DENIED on error. * * @param R3PtrDst The user mode destination address. * @param pvSrc The kernel mode source address. * @param cb The number of bytes to copy. */ RTR0DECL(int) RTR0MemUserCopyTo(RTR3PTR R3PtrDst, void const *pvSrc, size_t cb); /** * Tests if the specified address is in the user addressable range. 
* * This function does not check whether the memory at that address is accessible * or anything of that sort, only if the address it self is in the user mode * range. * * @returns true if it's in the user addressable range. false if not. * @param R3Ptr The user mode pointer to test. * * @remarks Some systems may have overlapping kernel and user address ranges. * One prominent example of this is the x86 version of Mac OS X. Use * RTR0MemAreKrnlAndUsrDifferent() to check. */ RTR0DECL(bool) RTR0MemUserIsValidAddr(RTR3PTR R3Ptr); /** * Tests if the specified address is in the kernel mode range. * * This function does not check whether the memory at that address is accessible * or anything of that sort, only if the address it self is in the kernel mode * range. * * @returns true if it's in the kernel range. false if not. * @param pv The alleged kernel mode pointer. * * @remarks Some systems may have overlapping kernel and user address ranges. * One prominent example of this is the x86 version of Mac OS X. Use * RTR0MemAreKrnlAndUsrDifferent() to check. */ RTR0DECL(bool) RTR0MemKernelIsValidAddr(void *pv); /** * Are user mode and kernel mode address ranges distinctly different. * * This determines whether RTR0MemKernelIsValidAddr and RTR0MemUserIsValidAddr * can be used for deciding whether some arbitrary address is a user mode or a * kernel mode one. * * @returns true if they are, false if not. */ RTR0DECL(bool) RTR0MemAreKrnlAndUsrDifferent(void); /** * Copy memory from an potentially unsafe kernel mode location and into a safe * (kernel) buffer. * * @retval VINF_SUCCESS on success. * @retval VERR_ACCESS_DENIED on error. * @retval VERR_NOT_SUPPORTED if not (yet) supported. * * @param pvDst The destination address (safe). * @param pvSrc The source address (potentially unsafe). * @param cb The number of bytes to copy. 
*/ RTR0DECL(int) RTR0MemKernelCopyFrom(void *pvDst, void const *pvSrc, size_t cb); /** * Copy from a safe (kernel) buffer to a potentially unsafe kernel mode * location. * * @retval VINF_SUCCESS on success. * @retval VERR_ACCESS_DENIED on error. * @retval VERR_NOT_SUPPORTED if not (yet) supported. * * @param pvDst The destination address (potentially unsafe). * @param pvSrc The source address (safe). * @param cb The number of bytes to copy. */ RTR0DECL(int) RTR0MemKernelCopyTo(void *pvDst, void const *pvSrc, size_t cb); #endif /* IN_RING0 */ /** @name Electrical Fence Version of some APIs. * @{ */ /** * Same as RTMemTmpAllocTag() except that it's fenced. * * @returns Pointer to the allocated memory. * @returns NULL on failure. * @param cb Size in bytes of the memory block to allocate. * @param pszTag Allocation tag used for statistics and such. */ RTDECL(void *) RTMemEfTmpAlloc(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW; /** * Same as RTMemTmpAllocZTag() except that it's fenced. * * @returns Pointer to the allocated memory. * @returns NULL on failure. * @param cb Size in bytes of the memory block to allocate. * @param pszTag Allocation tag used for statistics and such. */ RTDECL(void *) RTMemEfTmpAllocZ(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW; /** * Same as RTMemTmpFree() except that it's for fenced memory. * * @param pv Pointer to memory block. */ RTDECL(void) RTMemEfTmpFree(void *pv, RT_SRC_POS_DECL) RT_NO_THROW; /** * Same as RTMemAllocTag() except that it's fenced. * * @returns Pointer to the allocated memory. Free with RTMemEfFree(). * @returns NULL on failure. * @param cb Size in bytes of the memory block to allocate. * @param pszTag Allocation tag used for statistics and such. */ RTDECL(void *) RTMemEfAlloc(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW; /** * Same as RTMemAllocZTag() except that it's fenced. * * @returns Pointer to the allocated memory. * @returns NULL on failure. 
* @param cb Size in bytes of the memory block to allocate. * @param pszTag Allocation tag used for statistics and such. */ RTDECL(void *) RTMemEfAllocZ(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW; /** * Same as RTMemAllocVarTag() except that it's fenced. * * @returns Pointer to the allocated memory. Free with RTMemEfFree(). * @returns NULL on failure. * @param cbUnaligned Size in bytes of the memory block to allocate. * @param pszTag Allocation tag used for statistics and such. */ RTDECL(void *) RTMemEfAllocVar(size_t cbUnaligned, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW; /** * Same as RTMemAllocZVarTag() except that it's fenced. * * @returns Pointer to the allocated memory. * @returns NULL on failure. * @param cbUnaligned Size in bytes of the memory block to allocate. * @param pszTag Allocation tag used for statistics and such. */ RTDECL(void *) RTMemEfAllocZVar(size_t cbUnaligned, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW; /** * Same as RTMemReallocTag() except that it's fenced. * * @returns Pointer to the allocated memory. * @returns NULL on failure. * @param pvOld The memory block to reallocate. * @param cbNew The new block size (in bytes). * @param pszTag Allocation tag used for statistics and such. */ RTDECL(void *) RTMemEfRealloc(void *pvOld, size_t cbNew, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW; /** * Free memory allocated by any of the RTMemEf* allocators. * * @param pv Pointer to memory block. */ RTDECL(void) RTMemEfFree(void *pv, RT_SRC_POS_DECL) RT_NO_THROW; /** * Same as RTMemDupTag() except that it's fenced. * * @returns New heap block with the duplicate data. * @returns NULL if we're out of memory. * @param pvSrc The memory to duplicate. * @param cb The amount of memory to duplicate. * @param pszTag Allocation tag used for statistics and such. */ RTDECL(void *) RTMemEfDup(const void *pvSrc, size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW; /** * Same as RTMemDupExTag() except that it's fenced. 
* * @returns New heap block with the duplicate data. * @returns NULL if we're out of memory. * @param pvSrc The memory to duplicate. * @param cbSrc The amount of memory to duplicate. * @param cbExtra The amount of extra memory to allocate and zero. * @param pszTag Allocation tag used for statistics and such. */ RTDECL(void *) RTMemEfDupEx(const void *pvSrc, size_t cbSrc, size_t cbExtra, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW; /** @def RTMEM_WRAP_SOME_NEW_AND_DELETE_TO_EF * Define RTMEM_WRAP_SOME_NEW_AND_DELETE_TO_EF to enable electric fence new and * delete operators for classes which uses the RTMEMEF_NEW_AND_DELETE_OPERATORS * macro. */ /** @def RTMEMEF_NEW_AND_DELETE_OPERATORS * Defines the electric fence new and delete operators for a class when * RTMEM_WRAP_SOME_NEW_AND_DELETE_TO_EF is define. */ #if defined(RTMEM_WRAP_SOME_NEW_AND_DELETE_TO_EF) && !defined(RTMEM_NO_WRAP_SOME_NEW_AND_DELETE_TO_EF) # if defined(RT_EXCEPTIONS_ENABLED) # define RTMEMEF_NEW_AND_DELETE_OPERATORS() \ void *operator new(size_t cb) RT_THROW(std::bad_alloc) \ { \ void *pv = RTMemEfAlloc(cb, RTMEM_TAG, RT_SRC_POS); \ if (RT_UNLIKELY(!pv)) \ throw std::bad_alloc(); \ return pv; \ } \ void *operator new(size_t cb, const std::nothrow_t &nothrow_constant) RT_NO_THROW \ { \ NOREF(nothrow_constant); \ return RTMemEfAlloc(cb, RTMEM_TAG, RT_SRC_POS); \ } \ void *operator new[](size_t cb) RT_THROW(std::bad_alloc) \ { \ void *pv = RTMemEfAlloc(cb, RTMEM_TAG, RT_SRC_POS); \ if (RT_UNLIKELY(!pv)) \ throw std::bad_alloc(); \ return pv; \ } \ void *operator new[](size_t cb, const std::nothrow_t &nothrow_constant) RT_NO_THROW \ { \ NOREF(nothrow_constant); \ return RTMemEfAlloc(cb, RTMEM_TAG, RT_SRC_POS); \ } \ \ void operator delete(void *pv) RT_NO_THROW \ { \ RTMemEfFree(pv, RT_SRC_POS); \ } \ void operator delete(void *pv, const std::nothrow_t &nothrow_constant) RT_NO_THROW \ { \ NOREF(nothrow_constant); \ RTMemEfFree(pv, RT_SRC_POS); \ } \ void operator delete[](void *pv) RT_NO_THROW \ { 
\ RTMemEfFree(pv, RT_SRC_POS); \ } \ void operator delete[](void *pv, const std::nothrow_t &nothrow_constant) RT_NO_THROW \ { \ NOREF(nothrow_constant); \ RTMemEfFree(pv, RT_SRC_POS); \ } \ \ typedef int UsingElectricNewAndDeleteOperators # else # define RTMEMEF_NEW_AND_DELETE_OPERATORS() \ void *operator new(size_t cb) \ { \ return RTMemEfAlloc(cb, RTMEM_TAG, RT_SRC_POS); \ } \ void *operator new(size_t cb, const std::nothrow_t &nothrow_constant) \ { \ NOREF(nothrow_constant); \ return RTMemEfAlloc(cb, RTMEM_TAG, RT_SRC_POS); \ } \ void *operator new[](size_t cb) \ { \ return RTMemEfAlloc(cb, RTMEM_TAG, RT_SRC_POS); \ } \ void *operator new[](size_t cb, const std::nothrow_t &nothrow_constant) \ { \ NOREF(nothrow_constant); \ return RTMemEfAlloc(cb, RTMEM_TAG, RT_SRC_POS); \ } \ \ void operator delete(void *pv) \ { \ RTMemEfFree(pv, RT_SRC_POS); \ } \ void operator delete(void *pv, const std::nothrow_t &nothrow_constant) \ { \ NOREF(nothrow_constant); \ RTMemEfFree(pv, RT_SRC_POS); \ } \ void operator delete[](void *pv) \ { \ RTMemEfFree(pv, RT_SRC_POS); \ } \ void operator delete[](void *pv, const std::nothrow_t &nothrow_constant) \ { \ NOREF(nothrow_constant); \ RTMemEfFree(pv, RT_SRC_POS); \ } \ \ typedef int UsingElectricNewAndDeleteOperators # endif #else # define RTMEMEF_NEW_AND_DELETE_OPERATORS() \ typedef int UsingDefaultNewAndDeleteOperators #endif #ifdef DOXYGEN_RUNNING # define RTMEM_WRAP_SOME_NEW_AND_DELETE_TO_EF #endif /** @def RTMEM_WRAP_TO_EF_APIS * Define RTMEM_WRAP_TO_EF_APIS to wrap RTMem APIs to RTMemEf APIs. 
*/ #if defined(RTMEM_WRAP_TO_EF_APIS) && defined(IN_RING3) && !defined(RTMEM_NO_WRAP_TO_EF_APIS) # define RTMemTmpAllocTag(cb, pszTag) RTMemEfTmpAlloc((cb), (pszTag), RT_SRC_POS) # define RTMemTmpAllocZTag(cb, pszTag) RTMemEfTmpAllocZ((cb), (pszTag), RT_SRC_POS) # define RTMemTmpFree(pv) RTMemEfTmpFree((pv), RT_SRC_POS) # define RTMemAllocTag(cb, pszTag) RTMemEfAlloc((cb), (pszTag), RT_SRC_POS) # define RTMemAllocZTag(cb, pszTag) RTMemEfAllocZ((cb), (pszTag), RT_SRC_POS) # define RTMemAllocVarTag(cbUnaligned, pszTag) RTMemEfAllocVar((cbUnaligned), (pszTag), RT_SRC_POS) # define RTMemAllocZVarTag(cbUnaligned, pszTag) RTMemEfAllocZVar((cbUnaligned), (pszTag), RT_SRC_POS) # define RTMemReallocTag(pvOld, cbNew, pszTag) RTMemEfRealloc((pvOld), (cbNew), (pszTag), RT_SRC_POS) # define RTMemFree(pv) RTMemEfFree((pv), RT_SRC_POS) # define RTMemDupTag(pvSrc, cb, pszTag) RTMemEfDup((pvSrc), (cb), (pszTag), RT_SRC_POS) # define RTMemDupExTag(pvSrc, cbSrc, cbExtra, pszTag) RTMemEfDupEx((pvSrc), (cbSrc), (cbExtra), (pszTag), RT_SRC_POS) #endif #ifdef DOXYGEN_RUNNING # define RTMEM_WRAP_TO_EF_APIS #endif /** * Fenced drop-in replacement for RTMemTmpAllocTag. * @copydoc RTMemTmpAllocTag */ RTDECL(void *) RTMemEfTmpAllocNP(size_t cb, const char *pszTag) RT_NO_THROW; /** * Fenced drop-in replacement for RTMemTmpAllocZTag. * @copydoc RTMemTmpAllocZTag */ RTDECL(void *) RTMemEfTmpAllocZNP(size_t cb, const char *pszTag) RT_NO_THROW; /** * Fenced drop-in replacement for RTMemTmpFreeTag. * @copydoc RTMemTmpFreeTag */ RTDECL(void) RTMemEfTmpFreeNP(void *pv) RT_NO_THROW; /** * Fenced drop-in replacement for RTMemAllocTag. * @copydoc RTMemAllocTag */ RTDECL(void *) RTMemEfAllocNP(size_t cb, const char *pszTag) RT_NO_THROW; /** * Fenced drop-in replacement for RTMemAllocZTag. 
* @copydoc RTMemAllocZTag */ RTDECL(void *) RTMemEfAllocZNP(size_t cb, const char *pszTag) RT_NO_THROW; /** * Fenced drop-in replacement for RTMemAllocVarTag. * @copydoc RTMemAllocVarTag */ RTDECL(void *) RTMemEfAllocVarNP(size_t cbUnaligned, const char *pszTag) RT_NO_THROW; /** * Fenced drop-in replacement for RTMemAllocZVarTag. * @copydoc RTMemAllocZVarTag */ RTDECL(void *) RTMemEfAllocZVarNP(size_t cbUnaligned, const char *pszTag) RT_NO_THROW; /** * Fenced drop-in replacement for RTMemReallocTag. * @copydoc RTMemReallocTag */ RTDECL(void *) RTMemEfReallocNP(void *pvOld, size_t cbNew, const char *pszTag) RT_NO_THROW; /** * Fenced drop-in replacement for RTMemFree. * @copydoc RTMemFree */ RTDECL(void) RTMemEfFreeNP(void *pv) RT_NO_THROW; /** * Fenced drop-in replacement for RTMemDupTag. * @copydoc RTMemDupTag */ RTDECL(void *) RTMemEfDupNP(const void *pvSrc, size_t cb, const char *pszTag) RT_NO_THROW; /** * Fenced drop-in replacement for RTMemDupExTag. * @copydoc RTMemDupExTag */ RTDECL(void *) RTMemEfDupExNP(const void *pvSrc, size_t cbSrc, size_t cbExtra, const char *pszTag) RT_NO_THROW; /** @} */ RT_C_DECLS_END /** @} */ #endif
yuyuyu101/VirtualBox-NetBSD
include/iprt/mem.h
C
gpl-2.0
32,825
/******************************************************************************
  Copyright(C)2008,Hisilicon Co. LTD.
 ******************************************************************************
  File Name   : NasEsmOmMsgProc.h
  Description : Header file for NasEsmOmMsgProc.c — NAS ESM <-> OM message
                handling (engineering-mode/FTM reporting and ERR LOG
                management for the LTE ESM layer).
  History     :
    1. sunbing 2008-12-30 Draft Enact
    2.
******************************************************************************/

#ifndef __NASESMOMMSGPROC_H__
#define __NASESMOMMSGPROC_H__

/*****************************************************************************
  1 Include Header Files
*****************************************************************************/
#include "vos.h"
#include "OmCommon.h"
#include "LnasFtmInterface.h"
#include "LnasErrlogInterface.h"

/*****************************************************************************
  1.1 C++ compatibility declarations
*****************************************************************************/
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif
#endif

/*****************************************************************************
  #pragma pack(*) : set the structure byte-alignment mode
*****************************************************************************/
#if (VOS_OS_VER != VOS_WIN32)
#pragma pack(4)
#else
#pragma pack(push, 4)
#endif

/*****************************************************************************
  2 Macros
*****************************************************************************/
/* Size in bytes of the VOS message header. */
#define NAS_ESM_LEN_VOS_MSG_HEADER 20

/* Accessors for the global engineering-mode (FTM) management block and the
   local ESM cause database. */
#define NAS_ESM_GetFtmInfoManageAddr() (&(g_astEsmFtmInfoManage))
#define NAS_ESM_GetEsmInfoAddr() (&(g_astEsmInfo))
#define NAS_ESM_GetFtmInfoActionFlag() (NAS_ESM_GetFtmInfoManageAddr()->ulFtmActionFlag)
#define NAS_ESM_GetFtmInfoMsgSN() (NAS_ESM_GetFtmInfoManageAddr()->ulMsgSN)
#define NAS_ESM_GetFtmInfoEsmCause() (NAS_ESM_GetFtmInfoManageAddr()->ucEsmCause)
#define NAS_ESM_SetFtmOmManageFtmActionFlag(Flag) (NAS_ESM_GetFtmInfoActionFlag() = Flag)

/* Accessors for the global ERR LOG management block. */
#define NAS_ESM_GetErrlogManageAddr() (&(g_astEsmErrlogInfoManage))
#define NAS_ESM_GetErrlogActionFlag() (NAS_ESM_GetErrlogManageAddr()->ulActionFlag)
#define NAS_ESM_SetErrlogActionFlag(Flag) (NAS_ESM_GetErrlogActionFlag() = Flag)
#define NAS_ESM_GetErrlogMsgSN() (NAS_ESM_GetErrlogManageAddr()->ulMsgSN)
#define NAS_ESM_GetErrlogAlmLevel() (NAS_ESM_GetErrlogManageAddr()->usALMLevel)
#define NAS_ESM_SetErrlogAlmLevel(usLevel) (NAS_ESM_GetErrlogAlmLevel() = usLevel)
#define NAS_ESM_GetErrlogAlmType() (NAS_ESM_GetErrlogManageAddr()->usALMType)
#define NAS_ESM_GetErrlogAlmLowSlice() (NAS_ESM_GetErrlogManageAddr()->ulAlmLowSlice)
#define NAS_ESM_GetErrlogAlmHighSlice() (NAS_ESM_GetErrlogManageAddr()->ulAlmHighSlice)
#define NAS_ESM_GetErrlogAmount() (NAS_ESM_GetErrlogManageAddr()->ulErrLogAmount)
#define NAS_ESM_GetErrlogNextNullPos() (NAS_ESM_GetErrlogManageAddr()->ulNextNullPos)
#define NAS_ESM_GetErrlogInfo(ulIndex) (NAS_ESM_GetErrlogManageAddr()->stEsmErrInfoDetail[ulIndex])

/* Capacity of the ERR LOG record array. */
#define NAS_ESM_ERRLOG_MAX_NUM (4)

/* Fill in the VOS header of a message sent from ESM to OM.
   MsgLenthNoHeader is the message length excluding the VOS header. */
#define NAS_ESM_COMP_OM_MSG_HEADER(pMsg,MsgLenthNoHeader)\
{\
    (pMsg)->ulSenderCpuId = VOS_LOCAL_CPUID;\
    (pMsg)->ulSenderPid = PS_PID_ESM;\
    (pMsg)->ulReceiverCpuId = VOS_LOCAL_CPUID;\
    (pMsg)->ulReceiverPid = ACPU_PID_OM;\
    (pMsg)->ulLength = (MsgLenthNoHeader);\
}

/*****************************************************************************
  3 Message Declarations
*****************************************************************************/

/*****************************************************************************
  4 Enums
*****************************************************************************/
/*****************************************************************************
 Enum name    : NAS_ESM_TP_CMD_TYPE_ENUM
 Description  : transparent-command type values
*****************************************************************************/
enum NAS_ESM_TP_CMD_TYPE_ENUM
{
    NAS_ESM_TP_CMD_TYPE_TEST = 0x00,
    NAS_ESM_TP_CMD_TYPE_BUTT
};
typedef VOS_UINT8 NAS_ESM_TP_CMD_TYPE_ENUM_UINT8 ;

/*****************************************************************************
 Enum name    : NAS_ESM_AIR_MSG_DIR_ENUM
 Description  : air-interface message direction values
*****************************************************************************/
enum NAS_ESM_AIR_MSG_DIR_ENUM
{
    NAS_ESM_AIR_MSG_DIR_ENUM_UP = 0x00,   /* uplink */
    NAS_ESM_AIR_MSG_DIR_ENUM_DOWN ,       /* downlink */
    NAS_ESM_AIR_MSG_DIR_BUTT
};
typedef VOS_UINT8 NAS_ESM_AIR_MSG_DIR_ENUM_UINT8 ;

/* Engineering-mode (FTM) reporting switch values. */
enum NAS_ESM_FTM_ACTION_FLAG_ENUM
{
    NAS_ESM_FTM_ACTION_FLAG_CLOSE = 0,
    NAS_ESM_FTM_ACTION_FLAG_OPEN = 1,
    NAS_ESM_FTM_ACTION_FLAG_BUTT
};
typedef VOS_UINT32 NAS_ESM_FTM_ACTION_FLAG_ENUM_UINT32;

/* ERR LOG alarm severity levels. */
enum NAS_ESM_ERRLOG_LEVEL_ENUM
{
    NAS_ESM_ERRLOG_LEVEL_CRITICAL = 1,/* critical */
    NAS_ESM_ERRLOG_LEVEL_MAJOR = 2,/* major */
    NAS_ESM_ERRLOG_LEVEL_MINOR = 3,/* minor */
    NAS_ESM_ERRLOG_LEVEL_WARING = 4,/* warning (hint) */
    NAS_ESM_ERRLOG_LEVEL_BUTT
};
typedef VOS_UINT16 NAS_ESM_ERRLOG_LEVEL_ENUM_UINT16;

/* ERR LOG fault & alarm categories. */
enum NAS_ESM_ERRLOG_TYPE_ENUM
{
    NAS_ESM_ERRLOG_TYPE_COMMUNICATION = 0x00, /* communication */
    NAS_ESM_ERRLOG_TYPE_SERVING_QUALITY = 0x01, /* service quality */
    NAS_ESM_ERRLOG_TYPE_PROCESS_ERROR = 0x02, /* processing error */
    NAS_ESM_ERRLOG_TYPE_EQUIPMENT_TROUBLE = 0x03, /* equipment fault */
    NAS_ESM_ERRLOG_TYPE_ENVIRONMENT_TROUBLE = 0x04, /* environment fault */
};
typedef VOS_UINT16 NAS_ESM_ERRLOG_TYPE_ENUM_UINT16;

/* ERR LOG reporting switch values. */
enum NAS_ESM_ERRLOG_ACTION_FLAG_ENUM
{
    NAS_ESM_ERRLOG_ACTION_FLAG_CLOSE = 0,
    NAS_ESM_ERRLOG_ACTION_FLAG_OPEN = 1,
    NAS_ESM_ERRLOG_ACTION_FLAG_BUTT
};
typedef VOS_UINT32 NAS_ESM_ERRLOG_ACTION_FLAG_ENUM_UINT32;

/*****************************************************************************
  5 Structs
*****************************************************************************/
/*****************************************************************************
 Struct name  : NAS_ESM_TRANSPARENT_CMD_REQ_STRU
 Description  : transparent command delivered from OMT to ESM
*****************************************************************************/
typedef struct
{
    VOS_MSG_HEADER
    VOS_UINT32 ulMsgId; /* primitive type */
    APP_MSG_HEADER
    NAS_ESM_TP_CMD_TYPE_ENUM_UINT8 enEsmTpCmdType;
    VOS_UINT8 aucRsv[3];
}NAS_ESM_TRANSPARENT_CMD_REQ_STRU;

/* xiongxianghui00253310 modify for ftmerrlog begin */
typedef struct
{
    /* Values and meaning: see 3GPP TS 24.301, 9.9.4.4 (ESM cause) */
    VOS_UINT8 ucEsmCause;
    VOS_UINT8 aucRsv[3];
}ESM_DATABASE_INFO_STRU;

typedef struct
{
    /* Enable/disable engineering-mode reporting: 0 close, 1 open */
    VOS_UINT32 ulFtmActionFlag;
    /* Sequence number carried in every engineering-mode message ESM sends
       to OM; 0 after power-on, then incremented by 1 per message. */
    VOS_UINT32 ulMsgSN;
    /* Temporary copy of the CN cause, compared against the local database. */
    VOS_UINT8 ucEsmCause;
    VOS_UINT8 aucRsv[3];
}ESM_FTM_INFO_MANAGE_STRU;

typedef struct
{
    NAS_ESM_CAUSE_ENUM_UINT8 ulCauseId; /* cause ID */
    LNAS_OM_ERRLOG_ID_ENUM_UINT16 ulErrorlogID; /* error id */
}NAS_ESM_CN_CAUSE_TRANS_STRU;

typedef struct
{
    VOS_UINT32 ulActionFlag;
    VOS_UINT32 ulMsgSN;
    /* ERR LOG report level; each level of each module maps to one ERR LOG
       cache array. Fault & alarm levels:
       Warning : 0x04 hint, Minor : 0x03 minor,
       Major : 0x02 major, Critical : 0x01 critical. */
    NAS_ESM_ERRLOG_LEVEL_ENUM_UINT16 usALMLevel;
    NAS_ESM_ERRLOG_TYPE_ENUM_UINT16 usALMType;
    /* Timestamp of the most recent ERR LOG occurrence (original comment
       says EMM — presumably copied from the EMM module; confirm). */
    VOS_UINT32 ulAlmLowSlice;/* timestamp */
    VOS_UINT32 ulAlmHighSlice;
    VOS_UINT32 ulErrLogAmount;
    VOS_UINT32 ulNextNullPos;
    ESM_ERR_INFO_DETAIL_STRU stEsmErrInfoDetail[NAS_ESM_ERRLOG_MAX_NUM];
}ESM_ERRLOG_INFO_MANAGE_STRU;
/* xiongxianghui00253310 modify for ftmerrlog end */

typedef struct
{
    NAS_OM_ACT_PDP_INFO_STRU stActPdpInfo;
}APP_ESM_DT_STRU;

/*****************************************************************************
  6 Unions
*****************************************************************************/
/*****************************************************************************
  7 Extern Global Variables
*****************************************************************************/
extern VOS_UINT32 g_NasEsmOmInfoIndFlag;
/* xiongxianghui00253310 modify for ftmerrlog begin */
extern ESM_FTM_INFO_MANAGE_STRU g_astEsmFtmInfoManage;
extern ESM_DATABASE_INFO_STRU g_astEsmInfo;
extern ESM_ERRLOG_INFO_MANAGE_STRU g_astEsmErrlogInfoManage;
/* xiongxianghui00253310 modify for ftmerrlog end */
extern VOS_UINT32 g_ulRptPdpStatus;
extern VOS_UINT32 g_ulNasEsmOmMsgHookFlag;

/*****************************************************************************
  8 Function Declarations
*****************************************************************************/
/* xiongxianghui00253310 modify for ftmerrlog begin */
extern VOS_VOID NAS_ESM_OmInfoIndProc(VOS_VOID);
extern VOS_VOID NAS_ESM_FtmInfoInit(VOS_VOID);
extern VOS_UINT32 NAS_ESM_RevOmFtmCtrlMsg(MsgBlock *pMsgStru);
extern VOS_UINT32 NAS_ESM_CompareEsmDatabaseInfo(VOS_VOID);
extern VOS_VOID NAS_ESM_UpdateEsmDatabaseInfo(VOS_VOID);
extern VOS_VOID NAS_ESM_SendOmFtmMsg(VOS_VOID);
extern VOS_VOID NAS_ESM_ErrlogInfoInit(VOS_VOID);
extern VOS_UINT32 NAS_ESM_RevOmErrlogCtrlMsg(MsgBlock *pMsgStru);
extern VOS_UINT32 NAS_ESM_RevOmReadErrlogReq(const MsgBlock *pMsgStru);
extern VOS_VOID NAS_ESM_SendOmErrlogCnf(VOS_VOID);
extern VOS_VOID NAS_ESM_ErrlogInfoProc(VOS_UINT8 ucCnCause);
extern LNAS_OM_ERRLOG_ID_ENUM_UINT16 NAS_ESM_CnCauseProc(VOS_UINT8 ucCnCause);
extern VOS_VOID NAS_ESM_OmMsgDistrForAcpuPidOm( VOS_VOID *pRcvMsg );
/* xiongxianghui00253310 modify for ftmerrlog end */

/*****************************************************************************
  9 Others
*****************************************************************************/
extern VOS_VOID NAS_ESM_SndAirMsgReportInd
(
    const VOS_UINT8 *pucData,
    VOS_UINT32 ulLength,
    NAS_ESM_AIR_MSG_DIR_ENUM_UINT8 enMsgDir,
    OM_PS_AIR_MSG_ENUM_UINT8 enMsgId
);
extern VOS_VOID NAS_ESM_SndEsmOmTpCmdCnfMsg(const VOS_UINT8 *pucData, VOS_UINT32 ulLength);
extern VOS_VOID NAS_ESM_SndKeyEventReportInd(OM_PS_KEY_EVENT_ENUM_UINT8 enKeyEvent);
extern VOS_VOID NAS_ESM_TransparentMsgProc( VOS_VOID* pRcvMsg );
extern VOS_VOID NAS_ESM_OmMsgDistr ( VOS_VOID *pRcvMsg );
extern VOS_VOID NAS_ESM_OmInfoIndMsgProc(VOS_VOID *pRcvMsg);
extern VOS_UINT32 LTE_MsgHook(VOS_VOID * pMsg);
/*niuxiufan DT end */
extern VOS_VOID NAS_ESM_ReportActPdpInfo( VOS_VOID);

#if (VOS_OS_VER != VOS_WIN32)
#pragma pack()
#else
#pragma pack(pop)
#endif

#ifdef __cplusplus
#if __cplusplus
}
#endif
#endif

#endif /* end of NasEsmOmMsgProc.h */
asyan4ik/android_kernel_huawei_h60
drivers/vendor/hisi/modem/ps/nas/tl/lte/ESM/Inc/NasEsmOmMsgProc.h
C
gpl-2.0
11,861
// Supersized fullscreen background slideshow configuration.
// This file is emitted as inline JavaScript by the WordPress theme; the
// embedded <?php ... ?> sections run server-side and inject the transition
// chosen in the theme options plus one slide object per gallery image of
// the page selected via the 'misfit_sliderpage' option.
$.supersized({

	// Functionality
	slideshow : 1,			// Slideshow on/off
	autoplay : 1,			// Slideshow starts playing automatically
	start_slide : 1,		// Start slide (0 is random)
	stop_loop : 0,			// Pauses slideshow on last slide
	random : 0,				// Randomize slide order (Ignores start slide)
	slide_interval : 12000,	// Length between transitions
	// The numeric transition index is resolved server-side from the
	// 'misfit_slidertransitions' option; unknown values fall back to 1 (Fade).
	transition : <?php if(get_option('misfit_slidertransitions') == 'Fade' ) { echo '1'; } elseif(get_option('misfit_slidertransitions') == 'Slide Top') { echo '2'; } elseif(get_option('misfit_slidertransitions') == 'Slide Right') { echo '3'; } elseif(get_option('misfit_slidertransitions') == 'Slide Bottom') { echo '4'; } elseif(get_option('misfit_slidertransitions') == 'Slide Left') { echo '5'; } elseif(get_option('misfit_slidertransitions') == 'Carousel Right') { echo '6'; } elseif(get_option('misfit_slidertransitions') == 'Carousel Left') { echo '7'; } else { echo '1'; } ?>, // 0-None, 1-Fade, 2-Slide Top, 3-Slide Right, 4-Slide Bottom, 5-Slide Left, 6-Carousel Right, 7-Carousel Left
	transition_speed : 1000,	// Speed of transition
	new_window : 1,			// Image links open in new window/tab
	pause_hover : 0,		// Pause slideshow on hover
	keyboard_nav : 1,		// Keyboard navigation on/off
	performance : 1,		// 0-Normal, 1-Hybrid speed/quality, 2-Optimizes image quality, 3-Optimizes transition speed
							// (Only works for Firefox/IE, not Webkit)
	image_protect : 1,		// Disables image dragging and right click with Javascript

	// Size & Position
	min_width : 0,			// Min width allowed (in pixels)
	min_height : 0,			// Min height allowed (in pixels)
	vertical_center : 1,	// Vertically center background
	horizontal_center : 1,	// Horizontally center background
	fit_always : 0,			// Image will never exceed browser width or height (Ignores min. dimensions)
	fit_portrait : 0,		// Portrait images will not exceed browser height
	fit_landscape : 0,		// Landscape images will not exceed browser width

	// Components
	slide_links : 'blank',	// Individual links for each slide (Options: false, 'num', 'name', 'blank')
	thumb_links : 1,		// Individual thumb links for each slide
	thumbnail_navigation : 0,	// Thumbnail navigation
	slides : [	// Slideshow Images
		// Query the slider page chosen in the theme options and emit one
		// slide object per image of its gallery.
		// NOTE(review): relies on a theme-defined get_post_gallery_imagess()
		// helper (note the double 's') returning size data and attachment
		// ids per image — confirm it exists in the theme.
		<?php $pagename = get_option('misfit_sliderpage'); $page = get_page_by_title($pagename); $featured_id = $page->ID; query_posts( array( 'post_type' => 'page', 'p' => $featured_id, 'posts_per_page' => -1 ) ); if(have_posts()) : while(have_posts()) : the_post(); ?>
		<?php $galleryImages = get_post_gallery_imagess(); $imagesCount = count($galleryImages); ?>
		<?php if ($imagesCount > 0) : ?>
		<?php for ($i = 0; $i < $imagesCount; $i++): ?>
		<?php if (!empty($galleryImages[$i])) :?>
		// Slide title markup combines the attachment's excerpt (caption)
		// and content (description) as heading and sub-heading.
		{ image : '<?php echo $galleryImages[$i]['full'][0];?>', title : '<h1 class="largetype"><?php $post = get_post($galleryImages[$i]['id']); echo $post->post_excerpt; ?></h1><h3 class="imagine"><?php $posts = get_post($galleryImages[$i]['id']); echo $posts->post_content; ?></h3>', thumb : '', url : ''},
		<?php endif; ?>
		<?php endfor; ?>
		<?php endif; ?>
		<?php endwhile; endif; wp_reset_query(); ?>
	],

	// Theme Options
	progress_bar : 0,		// Timer for each slide
	mouse_scrub : 0
});
misfit-inc/unicef-legacy
wp-content/themes/legacy/js/images.php
PHP
gpl-2.0
4,114
<!-- "New password" notification e-mail body; {$NEW_PASSWORD} is substituted by the shop system before sending. -->
<table width="100%" border="0" align="center" cellpadding="4" cellspacing="0">
  <tr>
    <td>
      <font size="2" face="Verdana, Arial, Helvetica, sans-serif">
        <p><b>You've received a new password!</b></p>
        <p>Log in with your new password <b>{$NEW_PASSWORD}</b> in order to change it. Should you have any difficulties, please contact us!</p>
      </font>
    </td>
  </tr>
</table>
ReichardtIT/modified-inkl-bootstrap-by-karl
templates/xtc5/mail/english/new_password_mail.html
HTML
gpl-2.0
366
/* as.c - GAS main program. Copyright (C) 1987-2017 Free Software Foundation, Inc. This file is part of GAS, the GNU Assembler. GAS is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GAS is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GAS; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ /* Main program for AS; a 32-bit assembler of GNU. Understands command arguments. Has a few routines that don't fit in other modules because they are shared. bugs : initialisers Since no-one else says they will support them in future: I don't support them now. */ #define COMMON #include "as.h" #include "subsegs.h" #include "output-file.h" #include "sb.h" #include "macro.h" #include "dwarf2dbg.h" #include "dw2gencfi.h" #include "bfdver.h" #ifdef HAVE_ITBL_CPU #include "itbl-ops.h" #else #define itbl_init() #endif #ifdef HAVE_SBRK #ifdef NEED_DECLARATION_SBRK extern void *sbrk (); #endif #endif #ifdef USING_CGEN /* Perform any cgen specific initialisation for gas. */ extern void gas_cgen_begin (void); #endif /* We build a list of defsyms as we read the options, and then define them after we have initialized everything. */ struct defsym_list { struct defsym_list *next; char *name; valueT value; }; /* True if a listing is wanted. */ int listing; /* Type of debugging to generate. 
*/ enum debug_info_type debug_type = DEBUG_UNSPECIFIED; int use_gnu_debug_info_extensions = 0; #ifndef MD_DEBUG_FORMAT_SELECTOR #define MD_DEBUG_FORMAT_SELECTOR NULL #endif static enum debug_info_type (*md_debug_format_selector) (int *) = MD_DEBUG_FORMAT_SELECTOR; /* Maximum level of macro nesting. */ int max_macro_nest = 100; /* argv[0] */ static char * myname; /* The default obstack chunk size. If we set this to zero, the obstack code will use whatever will fit in a 4096 byte block. */ int chunksize = 0; /* To monitor memory allocation more effectively, make this non-zero. Then the chunk sizes for gas and bfd will be reduced. */ int debug_memory = 0; /* Enable verbose mode. */ int verbose = 0; #if defined OBJ_ELF || defined OBJ_MAYBE_ELF int flag_use_elf_stt_common = DEFAULT_GENERATE_ELF_STT_COMMON; #endif /* Keep the output file. */ static int keep_it = 0; segT reg_section; segT expr_section; segT text_section; segT data_section; segT bss_section; /* Name of listing file. */ static char *listing_filename = NULL; static struct defsym_list *defsyms; #ifdef HAVE_ITBL_CPU /* Keep a record of the itbl files we read in. 
*/ struct itbl_file_list { struct itbl_file_list *next; char *name; }; static struct itbl_file_list *itbl_files; #endif static long start_time; #ifdef HAVE_SBRK char *start_sbrk; #endif static int flag_macro_alternate; #ifdef USE_EMULATIONS #define EMULATION_ENVIRON "AS_EMULATION" extern struct emulation mipsbelf, mipslelf, mipself; extern struct emulation i386coff, i386elf, i386aout; extern struct emulation crisaout, criself; static struct emulation *const emulations[] = { EMULATIONS }; static const int n_emulations = sizeof (emulations) / sizeof (emulations[0]); static void select_emulation_mode (int argc, char **argv) { int i; char *p; const char *em = NULL; for (i = 1; i < argc; i++) if (!strncmp ("--em", argv[i], 4)) break; if (i == argc) goto do_default; p = strchr (argv[i], '='); if (p) p++; else p = argv[i + 1]; if (!p || !*p) as_fatal (_("missing emulation mode name")); em = p; do_default: if (em == 0) em = getenv (EMULATION_ENVIRON); if (em == 0) em = DEFAULT_EMULATION; if (em) { for (i = 0; i < n_emulations; i++) if (!strcmp (emulations[i]->name, em)) break; if (i == n_emulations) as_fatal (_("unrecognized emulation name `%s'"), em); this_emulation = emulations[i]; } else this_emulation = emulations[0]; this_emulation->init (); } const char * default_emul_bfd_name (void) { abort (); return NULL; } void common_emul_init (void) { this_format = this_emulation->format; if (this_emulation->leading_underscore == 2) this_emulation->leading_underscore = this_format->dfl_leading_underscore; if (this_emulation->default_endian != 2) target_big_endian = this_emulation->default_endian; if (this_emulation->fake_label_name == 0) { if (this_emulation->leading_underscore) this_emulation->fake_label_name = "L0\001"; else /* What other parameters should we test? 
*/ this_emulation->fake_label_name = ".L0\001"; } } #endif void print_version_id (void) { static int printed; if (printed) return; printed = 1; fprintf (stderr, _("GNU assembler version %s (%s) using BFD version %s\n"), VERSION, TARGET_ALIAS, BFD_VERSION_STRING); } #ifdef DEFAULT_FLAG_COMPRESS_DEBUG enum compressed_debug_section_type flag_compress_debug = COMPRESS_DEBUG_GABI_ZLIB; #endif static void show_usage (FILE * stream) { fprintf (stream, _("Usage: %s [option...] [asmfile...]\n"), myname); fprintf (stream, _("\ Options:\n\ -a[sub-option...] turn on listings\n\ Sub-options [default hls]:\n\ c omit false conditionals\n\ d omit debugging directives\n\ g include general info\n\ h include high-level source\n\ l include assembly\n\ m include macro expansions\n\ n omit forms processing\n\ s include symbols\n\ =FILE list to FILE (must be last sub-option)\n")); fprintf (stream, _("\ --alternate initially turn on alternate macro syntax\n")); #ifdef DEFAULT_FLAG_COMPRESS_DEBUG fprintf (stream, _("\ --compress-debug-sections[={none|zlib|zlib-gnu|zlib-gabi}]\n\ compress DWARF debug sections using zlib [default]\n")); fprintf (stream, _("\ --nocompress-debug-sections\n\ don't compress DWARF debug sections\n")); #else fprintf (stream, _("\ --compress-debug-sections[={none|zlib|zlib-gnu|zlib-gabi}]\n\ compress DWARF debug sections using zlib\n")); fprintf (stream, _("\ --nocompress-debug-sections\n\ don't compress DWARF debug sections [default]\n")); #endif fprintf (stream, _("\ -D produce assembler debugging messages\n")); fprintf (stream, _("\ --debug-prefix-map OLD=NEW\n\ map OLD to NEW in debug information\n")); fprintf (stream, _("\ --defsym SYM=VAL define symbol SYM to given value\n")); #ifdef USE_EMULATIONS { int i; const char *def_em; fprintf (stream, "\ --em=["); for (i = 0; i < n_emulations - 1; i++) fprintf (stream, "%s | ", emulations[i]->name); fprintf (stream, "%s]\n", emulations[i]->name); def_em = getenv (EMULATION_ENVIRON); if (!def_em) def_em = 
DEFAULT_EMULATION; fprintf (stream, _("\ emulate output (default %s)\n"), def_em); } #endif #if defined OBJ_ELF || defined OBJ_MAYBE_ELF fprintf (stream, _("\ --execstack require executable stack for this object\n")); fprintf (stream, _("\ --noexecstack don't require executable stack for this object\n")); fprintf (stream, _("\ --size-check=[error|warning]\n\ ELF .size directive check (default --size-check=error)\n")); fprintf (stream, _("\ --elf-stt-common=[no|yes]\n\ generate ELF common symbols with STT_COMMON type\n")); fprintf (stream, _("\ --sectname-subst enable section name substitution sequences\n")); #endif fprintf (stream, _("\ -f skip whitespace and comment preprocessing\n")); fprintf (stream, _("\ -g --gen-debug generate debugging information\n")); fprintf (stream, _("\ --gstabs generate STABS debugging information\n")); fprintf (stream, _("\ --gstabs+ generate STABS debug info with GNU extensions\n")); fprintf (stream, _("\ --gdwarf-2 generate DWARF2 debugging information\n")); fprintf (stream, _("\ --gdwarf-sections generate per-function section names for DWARF line information\n")); fprintf (stream, _("\ --hash-size=<value> set the hash table size close to <value>\n")); fprintf (stream, _("\ --help show this message and exit\n")); fprintf (stream, _("\ --target-help show target specific options\n")); fprintf (stream, _("\ -I DIR add DIR to search list for .include directives\n")); fprintf (stream, _("\ -J don't warn about signed overflow\n")); fprintf (stream, _("\ -K warn when differences altered for long displacements\n")); fprintf (stream, _("\ -L,--keep-locals keep local symbols (e.g. 
starting with `L')\n")); fprintf (stream, _("\ -M,--mri assemble in MRI compatibility mode\n")); fprintf (stream, _("\ --MD FILE write dependency information in FILE (default none)\n")); fprintf (stream, _("\ -nocpp ignored\n")); fprintf (stream, _("\ -no-pad-sections do not pad the end of sections to alignment boundaries\n")); fprintf (stream, _("\ -o OBJFILE name the object-file output OBJFILE (default a.out)\n")); fprintf (stream, _("\ -R fold data section into text section\n")); fprintf (stream, _("\ --reduce-memory-overheads \n\ prefer smaller memory use at the cost of longer\n\ assembly times\n")); fprintf (stream, _("\ --statistics print various measured statistics from execution\n")); fprintf (stream, _("\ --strip-local-absolute strip local absolute symbols\n")); fprintf (stream, _("\ --traditional-format Use same format as native assembler when possible\n")); fprintf (stream, _("\ --version print assembler version number and exit\n")); fprintf (stream, _("\ -W --no-warn suppress warnings\n")); fprintf (stream, _("\ --warn don't suppress warnings\n")); fprintf (stream, _("\ --fatal-warnings treat warnings as errors\n")); #ifdef HAVE_ITBL_CPU fprintf (stream, _("\ --itbl INSTTBL extend instruction set to include instructions\n\ matching the specifications defined in file INSTTBL\n")); #endif fprintf (stream, _("\ -w ignored\n")); fprintf (stream, _("\ -X ignored\n")); fprintf (stream, _("\ -Z generate object file even after errors\n")); fprintf (stream, _("\ --listing-lhs-width set the width in words of the output data column of\n\ the listing\n")); fprintf (stream, _("\ --listing-lhs-width2 set the width in words of the continuation lines\n\ of the output data column; ignored if smaller than\n\ the width of the first line\n")); fprintf (stream, _("\ --listing-rhs-width set the max width in characters of the lines from\n\ the source file\n")); fprintf (stream, _("\ --listing-cont-lines set the maximum number of continuation lines used\n\ for the output data 
column of the listing\n")); fprintf (stream, _("\ @FILE read options from FILE\n")); md_show_usage (stream); fputc ('\n', stream); if (REPORT_BUGS_TO[0] && stream == stdout) fprintf (stream, _("Report bugs to %s\n"), REPORT_BUGS_TO); } /* Since it is easy to do here we interpret the special arg "-" to mean "use stdin" and we set that argv[] pointing to "". After we have munged argv[], the only things left are source file name(s) and ""(s) denoting stdin. These file names are used (perhaps more than once) later. check for new machine-dep cmdline options in md_parse_option definitions in config/tc-*.c. */ static void parse_args (int * pargc, char *** pargv) { int old_argc; int new_argc; char ** old_argv; char ** new_argv; /* Starting the short option string with '-' is for programs that expect options and other ARGV-elements in any order and that care about the ordering of the two. We describe each non-option ARGV-element as if it were the argument of an option with character code 1. */ char *shortopts; extern const char *md_shortopts; static const char std_shortopts[] = { '-', 'J', #ifndef WORKING_DOT_WORD /* -K is not meaningful if .word is not being hacked. */ 'K', #endif 'L', 'M', 'R', 'W', 'Z', 'a', ':', ':', 'D', 'f', 'g', ':',':', 'I', ':', 'o', ':', #ifndef VMS /* -v takes an argument on VMS, so we don't make it a generic option. */ 'v', #endif 'w', 'X', #ifdef HAVE_ITBL_CPU /* New option for extending instruction set (see also --itbl below). */ 't', ':', #endif '\0' }; struct option *longopts; extern struct option md_longopts[]; extern size_t md_longopts_size; /* Codes used for the long options with no short synonyms. 
*/ enum option_values { OPTION_HELP = OPTION_STD_BASE, OPTION_NOCPP, OPTION_STATISTICS, OPTION_VERSION, OPTION_DUMPCONFIG, OPTION_VERBOSE, OPTION_EMULATION, OPTION_DEBUG_PREFIX_MAP, OPTION_DEFSYM, OPTION_LISTING_LHS_WIDTH, OPTION_LISTING_LHS_WIDTH2, OPTION_LISTING_RHS_WIDTH, OPTION_LISTING_CONT_LINES, OPTION_DEPFILE, OPTION_GSTABS, OPTION_GSTABS_PLUS, OPTION_GDWARF2, OPTION_GDWARF_SECTIONS, OPTION_STRIP_LOCAL_ABSOLUTE, OPTION_TRADITIONAL_FORMAT, OPTION_WARN, OPTION_TARGET_HELP, OPTION_EXECSTACK, OPTION_NOEXECSTACK, OPTION_SIZE_CHECK, OPTION_ELF_STT_COMMON, OPTION_SECTNAME_SUBST, OPTION_ALTERNATE, OPTION_AL, OPTION_HASH_TABLE_SIZE, OPTION_REDUCE_MEMORY_OVERHEADS, OPTION_WARN_FATAL, OPTION_COMPRESS_DEBUG, OPTION_NOCOMPRESS_DEBUG, OPTION_NO_PAD_SECTIONS /* = STD_BASE + 40 */ /* When you add options here, check that they do not collide with OPTION_MD_BASE. See as.h. */ }; static const struct option std_longopts[] = { /* Note: commas are placed at the start of the line rather than the end of the preceding line so that it is simpler to selectively add and remove lines from this list. */ {"alternate", no_argument, NULL, OPTION_ALTERNATE} /* The entry for "a" is here to prevent getopt_long_only() from considering that -a is an abbreviation for --alternate. This is necessary because -a=<FILE> is a valid switch but getopt would normally reject it since --alternate does not take an argument. */ ,{"a", optional_argument, NULL, 'a'} /* Handle -al=<FILE>. 
*/ ,{"al", optional_argument, NULL, OPTION_AL} ,{"compress-debug-sections", optional_argument, NULL, OPTION_COMPRESS_DEBUG} ,{"nocompress-debug-sections", no_argument, NULL, OPTION_NOCOMPRESS_DEBUG} ,{"debug-prefix-map", required_argument, NULL, OPTION_DEBUG_PREFIX_MAP} ,{"defsym", required_argument, NULL, OPTION_DEFSYM} ,{"dump-config", no_argument, NULL, OPTION_DUMPCONFIG} ,{"emulation", required_argument, NULL, OPTION_EMULATION} #if defined OBJ_ELF || defined OBJ_MAYBE_ELF ,{"execstack", no_argument, NULL, OPTION_EXECSTACK} ,{"noexecstack", no_argument, NULL, OPTION_NOEXECSTACK} ,{"size-check", required_argument, NULL, OPTION_SIZE_CHECK} ,{"elf-stt-common", required_argument, NULL, OPTION_ELF_STT_COMMON} ,{"sectname-subst", no_argument, NULL, OPTION_SECTNAME_SUBST} #endif ,{"fatal-warnings", no_argument, NULL, OPTION_WARN_FATAL} ,{"gdwarf-2", no_argument, NULL, OPTION_GDWARF2} /* GCC uses --gdwarf-2 but GAS uses to use --gdwarf2, so we keep it here for backwards compatibility. */ ,{"gdwarf2", no_argument, NULL, OPTION_GDWARF2} ,{"gdwarf-sections", no_argument, NULL, OPTION_GDWARF_SECTIONS} ,{"gen-debug", no_argument, NULL, 'g'} ,{"gstabs", no_argument, NULL, OPTION_GSTABS} ,{"gstabs+", no_argument, NULL, OPTION_GSTABS_PLUS} ,{"hash-size", required_argument, NULL, OPTION_HASH_TABLE_SIZE} ,{"help", no_argument, NULL, OPTION_HELP} #ifdef HAVE_ITBL_CPU /* New option for extending instruction set (see also -t above). The "-t file" or "--itbl file" option extends the basic set of valid instructions by reading "file", a text file containing a list of instruction formats. The additional opcodes and their formats are added to the built-in set of instructions, and mnemonics for new registers may also be defined. */ ,{"itbl", required_argument, NULL, 't'} #endif /* getopt allows abbreviations, so we do this to stop it from treating -k as an abbreviation for --keep-locals. Some ports use -k to enable PIC assembly. 
*/ ,{"keep-locals", no_argument, NULL, 'L'} ,{"keep-locals", no_argument, NULL, 'L'} ,{"listing-lhs-width", required_argument, NULL, OPTION_LISTING_LHS_WIDTH} ,{"listing-lhs-width2", required_argument, NULL, OPTION_LISTING_LHS_WIDTH2} ,{"listing-rhs-width", required_argument, NULL, OPTION_LISTING_RHS_WIDTH} ,{"listing-cont-lines", required_argument, NULL, OPTION_LISTING_CONT_LINES} ,{"MD", required_argument, NULL, OPTION_DEPFILE} ,{"mri", no_argument, NULL, 'M'} ,{"nocpp", no_argument, NULL, OPTION_NOCPP} ,{"no-pad-sections", no_argument, NULL, OPTION_NO_PAD_SECTIONS} ,{"no-warn", no_argument, NULL, 'W'} ,{"reduce-memory-overheads", no_argument, NULL, OPTION_REDUCE_MEMORY_OVERHEADS} ,{"statistics", no_argument, NULL, OPTION_STATISTICS} ,{"strip-local-absolute", no_argument, NULL, OPTION_STRIP_LOCAL_ABSOLUTE} ,{"version", no_argument, NULL, OPTION_VERSION} ,{"verbose", no_argument, NULL, OPTION_VERBOSE} ,{"target-help", no_argument, NULL, OPTION_TARGET_HELP} ,{"traditional-format", no_argument, NULL, OPTION_TRADITIONAL_FORMAT} ,{"warn", no_argument, NULL, OPTION_WARN} }; /* Construct the option lists from the standard list and the target dependent list. Include space for an extra NULL option and always NULL terminate. */ shortopts = concat (std_shortopts, md_shortopts, (char *) NULL); longopts = (struct option *) xmalloc (sizeof (std_longopts) + md_longopts_size + sizeof (struct option)); memcpy (longopts, std_longopts, sizeof (std_longopts)); memcpy (((char *) longopts) + sizeof (std_longopts), md_longopts, md_longopts_size); memset (((char *) longopts) + sizeof (std_longopts) + md_longopts_size, 0, sizeof (struct option)); /* Make a local copy of the old argv. */ old_argc = *pargc; old_argv = *pargv; /* Initialize a new argv that contains no options. 
*/ new_argv = XNEWVEC (char *, old_argc + 1); new_argv[0] = old_argv[0]; new_argc = 1; new_argv[new_argc] = NULL; while (1) { /* getopt_long_only is like getopt_long, but '-' as well as '--' can indicate a long option. */ int longind; int optc = getopt_long_only (old_argc, old_argv, shortopts, longopts, &longind); if (optc == -1) break; switch (optc) { default: /* md_parse_option should return 1 if it recognizes optc, 0 if not. */ if (md_parse_option (optc, optarg) != 0) break; /* `-v' isn't included in the general short_opts list, so check for it explicitly here before deciding we've gotten a bad argument. */ if (optc == 'v') { #ifdef VMS /* Telling getopt to treat -v's value as optional can result in it picking up a following filename argument here. The VMS code in md_parse_option can return 0 in that case, but it has no way of pushing the filename argument back. */ if (optarg && *optarg) new_argv[new_argc++] = optarg, new_argv[new_argc] = NULL; else #else case 'v': #endif case OPTION_VERBOSE: print_version_id (); verbose = 1; break; } else as_bad (_("unrecognized option -%c%s"), optc, optarg ? optarg : ""); /* Fall through. */ case '?': exit (EXIT_FAILURE); case 1: /* File name. */ if (!strcmp (optarg, "-")) optarg = (char *) ""; new_argv[new_argc++] = optarg; new_argv[new_argc] = NULL; break; case OPTION_TARGET_HELP: md_show_usage (stdout); exit (EXIT_SUCCESS); case OPTION_HELP: show_usage (stdout); exit (EXIT_SUCCESS); case OPTION_NOCPP: break; case OPTION_NO_PAD_SECTIONS: do_not_pad_sections_to_alignment = 1; break; case OPTION_STATISTICS: flag_print_statistics = 1; break; case OPTION_STRIP_LOCAL_ABSOLUTE: flag_strip_local_absolute = 1; break; case OPTION_TRADITIONAL_FORMAT: flag_traditional_format = 1; break; case OPTION_VERSION: /* This output is intended to follow the GNU standards document. 
*/ printf (_("GNU assembler %s\n"), BFD_VERSION_STRING); printf (_("Copyright (C) 2017 Free Software Foundation, Inc.\n")); printf (_("\ This program is free software; you may redistribute it under the terms of\n\ the GNU General Public License version 3 or later.\n\ This program has absolutely no warranty.\n")); #ifdef TARGET_WITH_CPU printf (_("This assembler was configured for a target of `%s' " "and default,\ncpu type `%s'.\n"), TARGET_ALIAS, TARGET_WITH_CPU); #else printf (_("This assembler was configured for a target of `%s'.\n"), TARGET_ALIAS); #endif exit (EXIT_SUCCESS); case OPTION_EMULATION: #ifdef USE_EMULATIONS if (strcmp (optarg, this_emulation->name)) as_fatal (_("multiple emulation names specified")); #else as_fatal (_("emulations not handled in this configuration")); #endif break; case OPTION_DUMPCONFIG: fprintf (stderr, _("alias = %s\n"), TARGET_ALIAS); fprintf (stderr, _("canonical = %s\n"), TARGET_CANONICAL); fprintf (stderr, _("cpu-type = %s\n"), TARGET_CPU); #ifdef TARGET_OBJ_FORMAT fprintf (stderr, _("format = %s\n"), TARGET_OBJ_FORMAT); #endif #ifdef TARGET_FORMAT fprintf (stderr, _("bfd-target = %s\n"), TARGET_FORMAT); #endif exit (EXIT_SUCCESS); case OPTION_COMPRESS_DEBUG: if (optarg) { #if defined OBJ_ELF || defined OBJ_MAYBE_ELF if (strcasecmp (optarg, "none") == 0) flag_compress_debug = COMPRESS_DEBUG_NONE; else if (strcasecmp (optarg, "zlib") == 0) flag_compress_debug = COMPRESS_DEBUG_GABI_ZLIB; else if (strcasecmp (optarg, "zlib-gnu") == 0) flag_compress_debug = COMPRESS_DEBUG_GNU_ZLIB; else if (strcasecmp (optarg, "zlib-gabi") == 0) flag_compress_debug = COMPRESS_DEBUG_GABI_ZLIB; else as_fatal (_("Invalid --compress-debug-sections option: `%s'"), optarg); #else as_fatal (_("--compress-debug-sections=%s is unsupported"), optarg); #endif } else flag_compress_debug = COMPRESS_DEBUG_GABI_ZLIB; break; case OPTION_NOCOMPRESS_DEBUG: flag_compress_debug = COMPRESS_DEBUG_NONE; break; case OPTION_DEBUG_PREFIX_MAP: add_debug_prefix_map (optarg); 
break; case OPTION_DEFSYM: { char *s; valueT i; struct defsym_list *n; for (s = optarg; *s != '\0' && *s != '='; s++) ; if (*s == '\0') as_fatal (_("bad defsym; format is --defsym name=value")); *s++ = '\0'; i = bfd_scan_vma (s, (const char **) NULL, 0); n = XNEW (struct defsym_list); n->next = defsyms; n->name = optarg; n->value = i; defsyms = n; } break; #ifdef HAVE_ITBL_CPU case 't': { /* optarg is the name of the file containing the instruction formats, opcodes, register names, etc. */ struct itbl_file_list *n; if (optarg == NULL) { as_warn (_("no file name following -t option")); break; } n = XNEW (struct itbl_file_list); n->next = itbl_files; n->name = optarg; itbl_files = n; /* Parse the file and add the new instructions to our internal table. If multiple instruction tables are specified, the information from this table gets appended onto the existing internal table. */ itbl_files->name = xstrdup (optarg); if (itbl_parse (itbl_files->name) != 0) as_fatal (_("failed to read instruction table %s\n"), itbl_files->name); } break; #endif case OPTION_DEPFILE: start_dependencies (optarg); break; case 'g': /* Some backends, eg Alpha and Mips, use the -g switch for their own purposes. So we check here for an explicit -g and allow the backend to decide if it wants to process it. */ if ( old_argv[optind - 1][1] == 'g' && md_parse_option (optc, optarg)) continue; if (md_debug_format_selector) debug_type = md_debug_format_selector (& use_gnu_debug_info_extensions); else if (IS_ELF) debug_type = DEBUG_DWARF2; else debug_type = DEBUG_STABS; break; case OPTION_GSTABS_PLUS: use_gnu_debug_info_extensions = 1; /* Fall through. 
*/ case OPTION_GSTABS: debug_type = DEBUG_STABS; break; case OPTION_GDWARF2: debug_type = DEBUG_DWARF2; break; case OPTION_GDWARF_SECTIONS: flag_dwarf_sections = TRUE; break; case 'J': flag_signed_overflow_ok = 1; break; #ifndef WORKING_DOT_WORD case 'K': flag_warn_displacement = 1; break; #endif case 'L': flag_keep_locals = 1; break; case OPTION_LISTING_LHS_WIDTH: listing_lhs_width = atoi (optarg); if (listing_lhs_width_second < listing_lhs_width) listing_lhs_width_second = listing_lhs_width; break; case OPTION_LISTING_LHS_WIDTH2: { int tmp = atoi (optarg); if (tmp > listing_lhs_width) listing_lhs_width_second = tmp; } break; case OPTION_LISTING_RHS_WIDTH: listing_rhs_width = atoi (optarg); break; case OPTION_LISTING_CONT_LINES: listing_lhs_cont_lines = atoi (optarg); break; case 'M': flag_mri = 1; #ifdef TC_M68K flag_m68k_mri = 1; #endif break; case 'R': flag_readonly_data_in_text = 1; break; case 'W': flag_no_warnings = 1; break; case OPTION_WARN: flag_no_warnings = 0; flag_fatal_warnings = 0; break; case OPTION_WARN_FATAL: flag_no_warnings = 0; flag_fatal_warnings = 1; break; #if defined OBJ_ELF || defined OBJ_MAYBE_ELF case OPTION_EXECSTACK: flag_execstack = 1; flag_noexecstack = 0; break; case OPTION_NOEXECSTACK: flag_noexecstack = 1; flag_execstack = 0; break; case OPTION_SIZE_CHECK: if (strcasecmp (optarg, "error") == 0) flag_allow_nonconst_size = FALSE; else if (strcasecmp (optarg, "warning") == 0) flag_allow_nonconst_size = TRUE; else as_fatal (_("Invalid --size-check= option: `%s'"), optarg); break; case OPTION_ELF_STT_COMMON: if (strcasecmp (optarg, "no") == 0) flag_use_elf_stt_common = 0; else if (strcasecmp (optarg, "yes") == 0) flag_use_elf_stt_common = 1; else as_fatal (_("Invalid --elf-stt-common= option: `%s'"), optarg); break; case OPTION_SECTNAME_SUBST: flag_sectname_subst = 1; break; #endif case 'Z': flag_always_generate_output = 1; break; case OPTION_AL: listing |= LISTING_LISTING; if (optarg) listing_filename = xstrdup (optarg); break; case 
OPTION_ALTERNATE: optarg = old_argv [optind - 1]; while (* optarg == '-') optarg ++; if (strcmp (optarg, "alternate") == 0) { flag_macro_alternate = 1; break; } optarg ++; /* Fall through. */ case 'a': if (optarg) { if (optarg != old_argv[optind] && optarg[-1] == '=') --optarg; if (md_parse_option (optc, optarg) != 0) break; while (*optarg) { switch (*optarg) { case 'c': listing |= LISTING_NOCOND; break; case 'd': listing |= LISTING_NODEBUG; break; case 'g': listing |= LISTING_GENERAL; break; case 'h': listing |= LISTING_HLL; break; case 'l': listing |= LISTING_LISTING; break; case 'm': listing |= LISTING_MACEXP; break; case 'n': listing |= LISTING_NOFORM; break; case 's': listing |= LISTING_SYMBOLS; break; case '=': listing_filename = xstrdup (optarg + 1); optarg += strlen (listing_filename); break; default: as_fatal (_("invalid listing option `%c'"), *optarg); break; } optarg++; } } if (!listing) listing = LISTING_DEFAULT; break; case 'D': /* DEBUG is implemented: it debugs different things from other people's assemblers. */ flag_debug = 1; break; case 'f': flag_no_comments = 1; break; case 'I': { /* Include file directory. */ char *temp = xstrdup (optarg); add_include_dir (temp); break; } case 'o': out_file_name = xstrdup (optarg); break; case 'w': break; case 'X': /* -X means treat warnings as errors. */ break; case OPTION_REDUCE_MEMORY_OVERHEADS: /* The only change we make at the moment is to reduce the size of the hash tables that we use. 
*/ set_gas_hash_table_size (4051); break; case OPTION_HASH_TABLE_SIZE: { unsigned long new_size; new_size = strtoul (optarg, NULL, 0); if (new_size) set_gas_hash_table_size (new_size); else as_fatal (_("--hash-size needs a numeric argument")); break; } } } free (shortopts); free (longopts); *pargc = new_argc; *pargv = new_argv; #ifdef md_after_parse_args md_after_parse_args (); #endif } static void dump_statistics (void) { #ifdef HAVE_SBRK char *lim = (char *) sbrk (0); #endif long run_time = get_run_time () - start_time; fprintf (stderr, _("%s: total time in assembly: %ld.%06ld\n"), myname, run_time / 1000000, run_time % 1000000); #ifdef HAVE_SBRK fprintf (stderr, _("%s: data size %ld\n"), myname, (long) (lim - start_sbrk)); #endif subsegs_print_statistics (stderr); write_print_statistics (stderr); symbol_print_statistics (stderr); read_print_statistics (stderr); #ifdef tc_print_statistics tc_print_statistics (stderr); #endif #ifdef obj_print_statistics obj_print_statistics (stderr); #endif } static void close_output_file (void) { output_file_close (out_file_name); if (!keep_it) unlink_if_ordinary (out_file_name); } /* The interface between the macro code and gas expression handling. */ static size_t macro_expr (const char *emsg, size_t idx, sb *in, offsetT *val) { char *hold; expressionS ex; sb_terminate (in); hold = input_line_pointer; input_line_pointer = in->ptr + idx; expression_and_evaluate (&ex); idx = input_line_pointer - in->ptr; input_line_pointer = hold; if (ex.X_op != O_constant) as_bad ("%s", emsg); *val = ex.X_add_number; return idx; } /* Here to attempt 1 pass over each input file. We scan argv[*] looking for filenames or exactly "" which is shorthand for stdin. Any argv that is NULL is not a file-name. We set need_pass_2 TRUE if, after this, we still have unresolved expressions of the form (unknown value)+-(unknown value). Note the un*x semantics: there is only 1 logical input file, but it may be a catenation of many 'physical' input files. 
*/ static void perform_an_assembly_pass (int argc, char ** argv) { int saw_a_file = 0; #ifndef OBJ_MACH_O flagword applicable; #endif need_pass_2 = 0; #ifndef OBJ_MACH_O /* Create the standard sections, and those the assembler uses internally. */ text_section = subseg_new (TEXT_SECTION_NAME, 0); data_section = subseg_new (DATA_SECTION_NAME, 0); bss_section = subseg_new (BSS_SECTION_NAME, 0); /* @@ FIXME -- we're setting the RELOC flag so that sections are assumed to have relocs, otherwise we don't find out in time. */ applicable = bfd_applicable_section_flags (stdoutput); bfd_set_section_flags (stdoutput, text_section, applicable & (SEC_ALLOC | SEC_LOAD | SEC_RELOC | SEC_CODE | SEC_READONLY)); bfd_set_section_flags (stdoutput, data_section, applicable & (SEC_ALLOC | SEC_LOAD | SEC_RELOC | SEC_DATA)); bfd_set_section_flags (stdoutput, bss_section, applicable & SEC_ALLOC); seg_info (bss_section)->bss = 1; #endif subseg_new (BFD_ABS_SECTION_NAME, 0); subseg_new (BFD_UND_SECTION_NAME, 0); reg_section = subseg_new ("*GAS `reg' section*", 0); expr_section = subseg_new ("*GAS `expr' section*", 0); #ifndef OBJ_MACH_O subseg_set (text_section, 0); #endif /* This may add symbol table entries, which requires having an open BFD, and sections already created. */ md_begin (); #ifdef USING_CGEN gas_cgen_begin (); #endif #ifdef obj_begin obj_begin (); #endif /* Skip argv[0]. */ argv++; argc--; while (argc--) { if (*argv) { /* Is it a file-name argument? */ PROGRESS (1); saw_a_file++; /* argv->"" if stdin desired, else->filename. */ read_a_source_file (*argv); } argv++; /* Completed that argv. 
*/ } if (!saw_a_file) read_a_source_file (""); } int main (int argc, char ** argv) { char ** argv_orig = argv; int macro_strip_at; start_time = get_run_time (); #ifdef HAVE_SBRK start_sbrk = (char *) sbrk (0); #endif #if defined (HAVE_SETLOCALE) && defined (HAVE_LC_MESSAGES) setlocale (LC_MESSAGES, ""); #endif #if defined (HAVE_SETLOCALE) setlocale (LC_CTYPE, ""); #endif bindtextdomain (PACKAGE, LOCALEDIR); textdomain (PACKAGE); if (debug_memory) chunksize = 64; #ifdef HOST_SPECIAL_INIT HOST_SPECIAL_INIT (argc, argv); #endif myname = argv[0]; xmalloc_set_program_name (myname); expandargv (&argc, &argv); START_PROGRESS (myname, 0); #ifndef OBJ_DEFAULT_OUTPUT_FILE_NAME #define OBJ_DEFAULT_OUTPUT_FILE_NAME "a.out" #endif out_file_name = OBJ_DEFAULT_OUTPUT_FILE_NAME; hex_init (); bfd_init (); bfd_set_error_program_name (myname); #ifdef USE_EMULATIONS select_emulation_mode (argc, argv); #endif PROGRESS (1); /* Call parse_args before any of the init/begin functions so that switches like --hash-size can be honored. */ parse_args (&argc, &argv); symbol_begin (); frag_init (); subsegs_begin (); read_begin (); input_scrub_begin (); expr_begin (); /* It has to be called after dump_statistics (). */ xatexit (close_output_file); if (flag_print_statistics) xatexit (dump_statistics); macro_strip_at = 0; #ifdef TC_I960 macro_strip_at = flag_mri; #endif macro_init (flag_macro_alternate, flag_mri, macro_strip_at, macro_expr); PROGRESS (1); output_file_create (out_file_name); gas_assert (stdoutput != 0); dot_symbol_init (); #ifdef tc_init_after_args tc_init_after_args (); #endif itbl_init (); dwarf2_init (); local_symbol_make (".gasversion.", absolute_section, BFD_VERSION / 10000UL, &predefined_address_frag); /* Now that we have fully initialized, and have created the output file, define any symbols requested by --defsym command line arguments. 
*/ while (defsyms != NULL) { symbolS *sym; struct defsym_list *next; sym = symbol_new (defsyms->name, absolute_section, defsyms->value, &zero_address_frag); /* Make symbols defined on the command line volatile, so that they can be redefined inside a source file. This makes this assembler's behaviour compatible with earlier versions, but it may not be completely intuitive. */ S_SET_VOLATILE (sym); symbol_table_insert (sym); next = defsyms->next; free (defsyms); defsyms = next; } PROGRESS (1); /* Assemble it. */ perform_an_assembly_pass (argc, argv); cond_finish_check (-1); #ifdef md_end md_end (); #endif #if defined OBJ_ELF || defined OBJ_MAYBE_ELF if ((flag_execstack || flag_noexecstack) && OUTPUT_FLAVOR == bfd_target_elf_flavour) { segT gnustack; gnustack = subseg_new (".note.GNU-stack", 0); bfd_set_section_flags (stdoutput, gnustack, SEC_READONLY | (flag_execstack ? SEC_CODE : 0)); } #endif /* If we've been collecting dwarf2 .debug_line info, either for assembly debugging or on behalf of the compiler, emit it now. */ dwarf2_finish (); /* If we constructed dwarf2 .eh_frame info, either via .cfi directives from the user or by the backend, emit it now. */ cfi_finish (); keep_it = 0; if (seen_at_least_1_file ()) { int n_warns, n_errs; char warn_msg[50]; char err_msg[50]; write_object_file (); n_warns = had_warnings (); n_errs = had_errors (); if (n_warns == 1) sprintf (warn_msg, _("%d warning"), n_warns); else sprintf (warn_msg, _("%d warnings"), n_warns); if (n_errs == 1) sprintf (err_msg, _("%d error"), n_errs); else sprintf (err_msg, _("%d errors"), n_errs); if (flag_fatal_warnings && n_warns != 0) { if (n_errs == 0) as_bad (_("%s, treating warnings as errors"), warn_msg); n_errs += n_warns; } if (n_errs == 0) keep_it = 1; else if (flag_always_generate_output) { /* The -Z flag indicates that an object file should be generated, regardless of warnings and errors. 
*/ keep_it = 1; fprintf (stderr, _("%s, %s, generating bad object file\n"), err_msg, warn_msg); } } fflush (stderr); #ifndef NO_LISTING listing_print (listing_filename, argv_orig); #endif input_scrub_end (); END_PROGRESS (myname); /* Use xexit instead of return, because under VMS environments they may not place the same interpretation on the value given. */ if (had_errors () != 0) xexit (EXIT_FAILURE); /* Only generate dependency file if assembler was successful. */ print_dependencies (); xexit (EXIT_SUCCESS); }
habemus-papadum/binutils-gdb
gas/as.c
C
gpl-2.0
38,750
/* * empathy-connection-managers.c - Source for EmpathyConnectionManagers * Copyright (C) 2009 Collabora Ltd. * @author Sjoerd Simons <sjoerd.simons@collabora.co.uk> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdio.h> #include <stdlib.h> #include <telepathy-glib/connection-manager.h> #include <telepathy-glib/util.h> #include "empathy-connection-managers.h" #include "empathy-utils.h" #define DEBUG_FLAG EMPATHY_DEBUG_OTHER #include <libempathy/empathy-debug.h> static GObject *managers = NULL; G_DEFINE_TYPE(EmpathyConnectionManagers, empathy_connection_managers, G_TYPE_OBJECT) /* signal enum */ enum { UPDATED, LAST_SIGNAL }; static guint signals[LAST_SIGNAL] = {0}; /* properties */ enum { PROP_READY = 1 }; #define GET_PRIV(obj) EMPATHY_GET_PRIV (obj, EmpathyConnectionManagers) /* private structure */ typedef struct _EmpathyConnectionManagersPriv EmpathyConnectionManagersPriv; struct _EmpathyConnectionManagersPriv { gboolean dispose_has_run; gboolean ready; GList *cms; TpDBusDaemon *dbus; }; static void empathy_connection_managers_init (EmpathyConnectionManagers *obj) { EmpathyConnectionManagersPriv *priv = G_TYPE_INSTANCE_GET_PRIVATE ((obj), \ EMPATHY_TYPE_CONNECTION_MANAGERS, EmpathyConnectionManagersPriv); obj->priv = priv; priv->dbus = tp_dbus_daemon_dup (NULL); g_assert (priv->dbus != 
NULL); empathy_connection_managers_update (obj); /* allocate any data required by the object here */ } static void empathy_connection_managers_dispose (GObject *object); static GObject * empathy_connection_managers_constructor (GType type, guint n_construct_params, GObjectConstructParam *construct_params) { if (managers != NULL) return g_object_ref (managers); managers = G_OBJECT_CLASS (empathy_connection_managers_parent_class)->constructor (type, n_construct_params, construct_params); g_object_add_weak_pointer (managers, (gpointer) &managers); return managers; } static void empathy_connection_managers_get_property (GObject *object, guint prop_id, GValue *value, GParamSpec *pspec) { EmpathyConnectionManagers *self = EMPATHY_CONNECTION_MANAGERS (object); EmpathyConnectionManagersPriv *priv = GET_PRIV (self); switch (prop_id) { case PROP_READY: g_value_set_boolean (value, priv->ready); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); break; } } static void empathy_connection_managers_class_init ( EmpathyConnectionManagersClass *empathy_connection_managers_class) { GObjectClass *object_class = G_OBJECT_CLASS (empathy_connection_managers_class); g_type_class_add_private (empathy_connection_managers_class, sizeof (EmpathyConnectionManagersPriv)); object_class->constructor = empathy_connection_managers_constructor; object_class->dispose = empathy_connection_managers_dispose; object_class->get_property = empathy_connection_managers_get_property; g_object_class_install_property (object_class, PROP_READY, g_param_spec_boolean ("ready", "Ready", "Whether the connection manager information is ready to be used", FALSE, G_PARAM_STATIC_STRINGS | G_PARAM_READABLE)); signals[UPDATED] = g_signal_new ("updated", G_TYPE_FROM_CLASS (object_class), G_SIGNAL_RUN_LAST, 0, NULL, NULL, g_cclosure_marshal_generic, G_TYPE_NONE, 0); } static void empathy_connection_managers_free_cm_list (EmpathyConnectionManagers *self) { EmpathyConnectionManagersPriv *priv = 
GET_PRIV (self); GList *l; for (l = priv->cms ; l != NULL ; l = g_list_next (l)) { g_object_unref (l->data); } g_list_free (priv->cms); priv->cms = NULL; } static void empathy_connection_managers_dispose (GObject *object) { EmpathyConnectionManagers *self = EMPATHY_CONNECTION_MANAGERS (object); EmpathyConnectionManagersPriv *priv = GET_PRIV (self); if (priv->dispose_has_run) return; priv->dispose_has_run = TRUE; if (priv->dbus != NULL) g_object_unref (priv->dbus); priv->dbus = NULL; empathy_connection_managers_free_cm_list (self); /* release any references held by the object here */ if (G_OBJECT_CLASS (empathy_connection_managers_parent_class)->dispose) G_OBJECT_CLASS (empathy_connection_managers_parent_class)->dispose (object); } EmpathyConnectionManagers * empathy_connection_managers_dup_singleton (void) { return EMPATHY_CONNECTION_MANAGERS ( g_object_new (EMPATHY_TYPE_CONNECTION_MANAGERS, NULL)); } gboolean empathy_connection_managers_is_ready (EmpathyConnectionManagers *self) { EmpathyConnectionManagersPriv *priv = GET_PRIV (self); return priv->ready; } static void empathy_connection_managers_listed_cb (TpConnectionManager * const *cms, gsize n_cms, const GError *error, gpointer user_data, GObject *weak_object) { EmpathyConnectionManagers *self = EMPATHY_CONNECTION_MANAGERS (weak_object); EmpathyConnectionManagersPriv *priv = GET_PRIV (self); TpConnectionManager * const *iter; empathy_connection_managers_free_cm_list (self); if (error != NULL) { DEBUG ("Failed to get connection managers: %s", error->message); goto out; } for (iter = cms ; iter != NULL && *iter != NULL; iter++) { /* only list cms that didn't hit errors */ if (tp_connection_manager_is_ready (*iter)) priv->cms = g_list_prepend (priv->cms, g_object_ref (*iter)); } out: g_object_ref (weak_object); if (!priv->ready) { priv->ready = TRUE; g_object_notify (weak_object, "ready"); } g_signal_emit (weak_object, signals[UPDATED], 0); g_object_unref (weak_object); } void empathy_connection_managers_update 
(EmpathyConnectionManagers *self) { EmpathyConnectionManagersPriv *priv = GET_PRIV (self); tp_list_connection_managers (priv->dbus, empathy_connection_managers_listed_cb, NULL, NULL, G_OBJECT (self)); } GList * empathy_connection_managers_get_cms (EmpathyConnectionManagers *self) { EmpathyConnectionManagersPriv *priv = GET_PRIV (self); return priv->cms; } TpConnectionManager * empathy_connection_managers_get_cm (EmpathyConnectionManagers *self, const gchar *cm) { EmpathyConnectionManagersPriv *priv = GET_PRIV (self); GList *l; for (l = priv->cms ; l != NULL; l = g_list_next (l)) { TpConnectionManager *c = TP_CONNECTION_MANAGER (l->data); if (!tp_strdiff (c->name, cm)) return c; } return NULL; } guint empathy_connection_managers_get_cms_num (EmpathyConnectionManagers *self) { EmpathyConnectionManagersPriv *priv; g_return_val_if_fail (EMPATHY_IS_CONNECTION_MANAGERS (self), 0); priv = GET_PRIV (self); return g_list_length (priv->cms); } static void notify_ready_cb (EmpathyConnectionManagers *self, GParamSpec *spec, GSimpleAsyncResult *result) { g_simple_async_result_complete (result); g_object_unref (result); } void empathy_connection_managers_prepare_async ( EmpathyConnectionManagers *self, GAsyncReadyCallback callback, gpointer user_data) { EmpathyConnectionManagersPriv *priv = GET_PRIV (self); GSimpleAsyncResult *result; result = g_simple_async_result_new (G_OBJECT (managers), callback, user_data, empathy_connection_managers_prepare_finish); if (priv->ready) { g_simple_async_result_complete_in_idle (result); g_object_unref (result); return; } g_signal_connect (self, "notify::ready", G_CALLBACK (notify_ready_cb), result); } gboolean empathy_connection_managers_prepare_finish ( EmpathyConnectionManagers *self, GAsyncResult *result, GError **error) { GSimpleAsyncResult *simple = G_SIMPLE_ASYNC_RESULT (result); g_return_val_if_fail (g_simple_async_result_is_valid (result, G_OBJECT (self), empathy_connection_managers_prepare_finish), FALSE); if 
(g_simple_async_result_propagate_error (simple, error)) return FALSE; return TRUE; }
JGulic/empathy
libempathy/empathy-connection-managers.c
C
gpl-2.0
8,756
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Definitions for the IP module. * * Version: @(#)ip.h 1.0.2 05/07/93 * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Alan Cox, <gw4pts@gw4pts.ampr.org> * * Changes: * Mike McLagan : Routing by source * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #ifndef _IP_H #define _IP_H #include <linux/types.h> #include <linux/ip.h> #include <linux/in.h> #include <linux/skbuff.h> #include <net/inet_sock.h> #include <net/route.h> #include <net/snmp.h> #include <net/flow.h> #include <net/flow_keys.h> struct sock; struct inet_skb_parm { struct ip_options opt; /* Compiled IP options */ unsigned char flags; #define IPSKB_FORWARDED 1 #define IPSKB_XFRM_TUNNEL_SIZE 2 #define IPSKB_XFRM_TRANSFORMED 4 #define IPSKB_FRAG_COMPLETE 8 #define IPSKB_REROUTED 16 u16 frag_max_size; }; static inline unsigned int ip_hdrlen(const struct sk_buff *skb) { return ip_hdr(skb)->ihl * 4; } struct ipcm_cookie { __be32 addr; int oif; struct ip_options_rcu *opt; __u8 tx_flags; __u8 ttl; __s16 tos; char priority; }; #define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb)) #define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb)) struct ip_ra_chain { struct ip_ra_chain __rcu *next; struct sock *sk; union { void (*destructor)(struct sock *); struct sock *saved_sk; }; struct rcu_head rcu; }; extern struct ip_ra_chain __rcu *ip_ra_chain; /* IP flags. 
*/ #define IP_CE 0x8000 /* Flag: "Congestion" */ #define IP_DF 0x4000 /* Flag: "Don't Fragment" */ #define IP_MF 0x2000 /* Flag: "More Fragments" */ #define IP_OFFSET 0x1FFF /* "Fragment Offset" part */ #define IP_FRAG_TIME (30 * HZ) /* fragment lifetime */ struct msghdr; struct net_device; struct packet_type; struct rtable; struct sockaddr; int igmp_mc_init(void); /* * Functions provided by ip.c */ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, __be32 saddr, __be32 daddr, struct ip_options_rcu *opt); int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); int ip_local_deliver(struct sk_buff *skb); int ip_mr_input(struct sk_buff *skb); int ip_output(struct sock *sk, struct sk_buff *skb); int ip_mc_output(struct sock *sk, struct sk_buff *skb); int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)); int ip_do_nat(struct sk_buff *skb); void ip_send_check(struct iphdr *ip); int __ip_local_out(struct sk_buff *skb); int ip_local_out_sk(struct sock *sk, struct sk_buff *skb); static inline int ip_local_out(struct sk_buff *skb) { return ip_local_out_sk(skb->sk, skb); } int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl); void ip_init(void); int ip_append_data(struct sock *sk, struct flowi4 *fl4, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int len, int protolen, struct ipcm_cookie *ipc, struct rtable **rt, unsigned int flags); int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb); ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page, int offset, size_t size, int flags); struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4, struct sk_buff_head *queue, struct inet_cork *cork); int ip_send_skb(struct net *net, struct sk_buff *skb); int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4); void ip_flush_pending_frames(struct sock 
*sk); struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, struct ipcm_cookie *ipc, struct rtable **rtp, unsigned int flags); static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4) { return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base); } static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet) { return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos); } static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk) { return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk); } /* datagram.c */ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); void ip4_datagram_release_cb(struct sock *sk); struct ip_reply_arg { struct kvec iov[1]; int flags; __wsum csum; int csumoffset; /* u16 offset of csum in iov[0].iov_base */ /* -1 if not needed */ int bound_dev_if; u8 tos; kuid_t uid; }; #define IP_REPLY_ARG_NOSRCCHECK 1 static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg) { return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? 
FLOWI_FLAG_ANYSRC : 0; } void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, const struct ip_options *sopt, __be32 daddr, __be32 saddr, const struct ip_reply_arg *arg, unsigned int len); #define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field) #define IP_INC_STATS_BH(net, field) SNMP_INC_STATS64_BH((net)->mib.ip_statistics, field) #define IP_ADD_STATS(net, field, val) SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val) #define IP_ADD_STATS_BH(net, field, val) SNMP_ADD_STATS64_BH((net)->mib.ip_statistics, field, val) #define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val) #define IP_UPD_PO_STATS_BH(net, field, val) SNMP_UPD_PO_STATS64_BH((net)->mib.ip_statistics, field, val) #define NET_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.net_statistics, field) #define NET_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.net_statistics, field) #define NET_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->mib.net_statistics, field) #define NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd) #define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd) #define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd) unsigned long snmp_fold_field(void __percpu *mib, int offt); #if BITS_PER_LONG==32 u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off); #else static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off) { return snmp_fold_field(mib, offt); } #endif void inet_get_local_port_range(struct net *net, int *low, int *high); #ifdef CONFIG_SYSCTL static inline int inet_is_local_reserved_port(struct net *net, int port) { if (!net->ipv4.sysctl_local_reserved_ports) return 0; return test_bit(port, net->ipv4.sysctl_local_reserved_ports); } static inline bool sysctl_dev_name_is_allowed(const char *name) { return strcmp(name, 
"default") != 0 && strcmp(name, "all") != 0; } #else static inline int inet_is_local_reserved_port(struct net *net, int port) { return 0; } #endif /* From inetpeer.c */ extern int inet_peer_threshold; extern int inet_peer_minttl; extern int inet_peer_maxttl; /* From ip_input.c */ extern int sysctl_ip_early_demux; /* From ip_output.c */ extern int sysctl_ip_dynaddr; void ipfrag_init(void); void ip_static_sysctl_init(void); #define IP4_REPLY_MARK(net, mark) \ ((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0) static inline bool ip_is_fragment(const struct iphdr *iph) { return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0; } #ifdef CONFIG_INET #include <net/dst.h> /* The function in 2.2 was invalid, producing wrong result for * check=0xFEFF. It was noticed by Arthur Skawina _year_ ago. --ANK(000625) */ static inline int ip_decrease_ttl(struct iphdr *iph) { u32 check = (__force u32)iph->check; check += (__force u32)htons(0x0100); iph->check = (__force __sum16)(check + (check>=0xFFFF)); return --iph->ttl; } static inline int ip_dont_fragment(struct sock *sk, struct dst_entry *dst) { return inet_sk(sk)->pmtudisc == IP_PMTUDISC_DO || (inet_sk(sk)->pmtudisc == IP_PMTUDISC_WANT && !(dst_metric_locked(dst, RTAX_MTU))); } static inline bool ip_sk_accept_pmtu(const struct sock *sk) { return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE && inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT; } static inline bool ip_sk_use_pmtu(const struct sock *sk) { return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE; } static inline bool ip_sk_ignore_df(const struct sock *sk) { return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO || inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT; } static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, bool forwarding) { struct net *net = dev_net(dst->dev); if (net->ipv4.sysctl_ip_fwd_use_pmtu || dst_metric_locked(dst, RTAX_MTU) || !forwarding) return dst_mtu(dst); return min(dst->dev->mtu, IP_MAX_MTU); } static inline unsigned int ip_skb_dst_mtu(const 
struct sk_buff *skb) { if (!skb->sk || ip_sk_use_pmtu(skb->sk)) { bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED; return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding); } else { return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU); } } u32 ip_idents_reserve(u32 hash, int segs); void __ip_select_ident(struct iphdr *iph, int segs); static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs) { struct iphdr *iph = ip_hdr(skb); if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) { /* This is only to work around buggy Windows95/2000 * VJ compression implementations. If the ID field * does not change, they drop every other packet in * a TCP stream using header compression. */ if (sk && inet_sk(sk)->inet_daddr) { iph->id = htons(inet_sk(sk)->inet_id); inet_sk(sk)->inet_id += segs; } else { iph->id = 0; } } else { __ip_select_ident(iph, segs); } } static inline void ip_select_ident(struct sk_buff *skb, struct sock *sk) { ip_select_ident_segs(skb, sk, 1); } static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto) { return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, skb->len, proto, 0); } static inline void inet_set_txhash(struct sock *sk) { struct inet_sock *inet = inet_sk(sk); struct flow_keys keys; keys.src = inet->inet_saddr; keys.dst = inet->inet_daddr; keys.port16[0] = inet->inet_sport; keys.port16[1] = inet->inet_dport; sk->sk_txhash = flow_hash_from_keys(&keys); } static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto) { const struct iphdr *iph = skb_gro_network_header(skb); return csum_tcpudp_nofold(iph->saddr, iph->daddr, skb_gro_len(skb), proto, 0); } /* * Map a multicast IP onto multicast MAC for type ethernet. 
*/ static inline void ip_eth_mc_map(__be32 naddr, char *buf) { __u32 addr=ntohl(naddr); buf[0]=0x01; buf[1]=0x00; buf[2]=0x5e; buf[5]=addr&0xFF; addr>>=8; buf[4]=addr&0xFF; addr>>=8; buf[3]=addr&0x7F; } /* * Map a multicast IP onto multicast MAC for type IP-over-InfiniBand. * Leave P_Key as 0 to be filled in by driver. */ static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf) { __u32 addr; unsigned char scope = broadcast[5] & 0xF; buf[0] = 0; /* Reserved */ buf[1] = 0xff; /* Multicast QPN */ buf[2] = 0xff; buf[3] = 0xff; addr = ntohl(naddr); buf[4] = 0xff; buf[5] = 0x10 | scope; /* scope from broadcast address */ buf[6] = 0x40; /* IPv4 signature */ buf[7] = 0x1b; buf[8] = broadcast[8]; /* P_Key */ buf[9] = broadcast[9]; buf[10] = 0; buf[11] = 0; buf[12] = 0; buf[13] = 0; buf[14] = 0; buf[15] = 0; buf[19] = addr & 0xff; addr >>= 8; buf[18] = addr & 0xff; addr >>= 8; buf[17] = addr & 0xff; addr >>= 8; buf[16] = addr & 0x0f; } static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf) { if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0) memcpy(buf, broadcast, 4); else memcpy(buf, &naddr, sizeof(naddr)); } #if IS_ENABLED(CONFIG_IPV6) #include <linux/ipv6.h> #endif static __inline__ void inet_reset_saddr(struct sock *sk) { inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0; #if IS_ENABLED(CONFIG_IPV6) if (sk->sk_family == PF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); memset(&np->saddr, 0, sizeof(np->saddr)); memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr)); } #endif } #endif static inline int sk_mc_loop(struct sock *sk) { if (!sk) return 1; switch (sk->sk_family) { case AF_INET: return inet_sk(sk)->mc_loop; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: return inet6_sk(sk)->mc_loop; #endif } WARN_ON(1); return 1; } bool ip_call_ra_chain(struct sk_buff *skb); /* * Functions provided by ip_fragment.c */ enum ip_defrag_users { IP_DEFRAG_LOCAL_DELIVER, 
IP_DEFRAG_CALL_RA_CHAIN, IP_DEFRAG_CONNTRACK_IN, __IP_DEFRAG_CONNTRACK_IN_END = IP_DEFRAG_CONNTRACK_IN + USHRT_MAX, IP_DEFRAG_CONNTRACK_OUT, __IP_DEFRAG_CONNTRACK_OUT_END = IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX, IP_DEFRAG_CONNTRACK_BRIDGE_IN, __IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX, IP_DEFRAG_VS_IN, IP_DEFRAG_VS_OUT, IP_DEFRAG_VS_FWD, IP_DEFRAG_AF_PACKET, IP_DEFRAG_MACVLAN, }; int ip_defrag(struct sk_buff *skb, u32 user); #ifdef CONFIG_INET struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user); #else static inline struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user) { return skb; } #endif int ip_frag_mem(struct net *net); /* * Functions provided by ip_forward.c */ int ip_forward(struct sk_buff *skb); /* * Functions provided by ip_options.c */ void ip_options_build(struct sk_buff *skb, struct ip_options *opt, __be32 daddr, struct rtable *rt, int is_frag); int __ip_options_echo(struct ip_options *dopt, struct sk_buff *skb, const struct ip_options *sopt); static inline int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb) { return __ip_options_echo(dopt, skb, &IPCB(skb)->opt); } void ip_options_fragment(struct sk_buff *skb); int ip_options_compile(struct net *net, struct ip_options *opt, struct sk_buff *skb); int ip_options_get(struct net *net, struct ip_options_rcu **optp, unsigned char *data, int optlen); int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp, unsigned char __user *data, int optlen); void ip_options_undo(struct ip_options *opt); void ip_forward_options(struct sk_buff *skb); int ip_options_rcv_srr(struct sk_buff *skb); /* * Functions provided by ip_sockglue.c */ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb); void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb); int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc, bool allow_ipv6); int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, 
unsigned int optlen); int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); int compat_ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); int compat_ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *)); int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len); void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, u32 info, u8 *payload); void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport, u32 info); bool icmp_global_allow(void); extern int sysctl_icmp_msgs_per_sec; extern int sysctl_icmp_msgs_burst; #ifdef CONFIG_PROC_FS int ip_misc_proc_init(void); #endif #endif /* _IP_H */
BORETS24/common.git-android-3.18
include/net/ip.h
C
gpl-2.0
16,250
<?php /** * The main template file. * * This is the most generic template file in a WordPress theme * and one of the two required files for a theme (the other being style.css). * It is used to display a page when nothing more specific matches a query. * E.g., it puts together the home page when no home.php file exists. * Learn more: http://codex.wordpress.org/Template_Hierarchy * * @package Cryout Creations * @subpackage Mantra */ get_header(); if ($mantra_frontpage=="Enable" && is_front_page() ) { mantra_frontpage_generator(); } else { ?> <section id="container"> <div id="content" role="main"> <?php cryout_before_content_hook(); ?><?php if ( have_posts() ) : ?> <?php mantra_content_nav( 'nav-above' ); ?> <?php /* Start the Loop */ ?> <?php while ( have_posts() ) : the_post(); ?> <?php get_template_part( 'content', get_post_format() ); ?> <?php endwhile; ?> <?php if($mantra_pagination=="Enable") mantra_pagination(); else mantra_content_nav( 'nav-below' ); ?> <?php else : ?> <article id="post-0" class="post no-results not-found"> <header class="entry-header"> <h1 class="entry-title"><?php _e( 'Nothing Found', 'mantra' ); ?></h1> </header><!-- .entry-header --> <div class="entry-content"> <p><?php _e( 'Apologies, but no results were found for the requested archive. Perhaps searching will help find a related post.', 'mantra' ); ?></p> <?php get_search_form(); ?> </div><!-- .entry-content --> </article><!-- #post-0 --> <?php endif; ?><?php cryout_after_content_hook(); ?> </div><!-- #content --> <?php get_sidebar(); ?> </section><!-- #container --> <?php } // else get_footer(); ?>
bjohnstonumw/430sga
wp-content/themes/mantra/index.php
PHP
gpl-2.0
1,790
/***************************************************************************** * File: 3dCamera.h * * © 1989 Mark M. Owen. All rights reserved. *****************************************************************************/ #ifndef _3d_ #include "3d.h" #endif #ifndef _Camera_ #define _Camera_ /* Horizontal angles of view for 35mm Camera Lens equivalence * when used on a 640x480 screen. A 35mm film frame has an * aspect ratio of 0.666É whereas the 640x480 screen is 0.75. * For example: using the Lens40mm selection below will cause * 46¡ of a circle centered about the viewer to span the width * of a 640x480 pixel screen. Lens specifications vary, but * often include horizontal, vertical and diagonal field of * view specifications. This implementation considers only the * horizontal specification. */ #define Lens15mm Int2Fix( 105 ) #define Lens20mm Int2Fix( 83 ) #define Lens30mm Int2Fix( 62 ) #define Lens40mm Int2Fix( 46 ) #define Lens50mm Int2Fix( 40 ) #define Lens60mm Int2Fix( 33 ) #define Lens70mm Int2Fix( 29 ) #define Lens80mm Int2Fix( 26 ) #define Lens90mm Int2Fix( 23 ) #define Lens100mm Int2Fix( 20 ) #define Lens150mm Int2Fix( 15 ) #define Lens200mm Int2Fix( 10 ) #define Lens500mm Int2Fix( 4 ) #define Lens1000mm Int2Fix( 2 ) #define AimCamera3d(fx,fy,fz,tx,ty,tz,vAngle)\ { Point3d from,to;\ \ SetPt3d(&from,(fx), (fy),(fz));\ SetPt3d(&to,(tx), (ty),(tz) );\ AimCamera(from,to,(vAngle),0.000);\ } #if XVT_CC_PROTO void AimCamera (Point3d, Point3d, Fixed, Fixed); #else void AimCamera (); #endif #endif
jesserobertson/pynoddy
noddy/3dCamera.h
C
gpl-2.0
1,573
/*************************************************************************** * * * copyright : (C) 2007 The University of Toronto * * netterfield@astro.utoronto.ca * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ #ifndef BUILTINRELATIONS_H #define BUILTINRELATIONS_H #include "kstmath_export.h" namespace Kst { namespace Builtins { KSTMATH_EXPORT void initRelations(); } } #endif // vim: ts=2 sw=2 et
RossWilliamson/kst_old
src/libkstmath/builtinrelations.h
C
gpl-2.0
1,049
/* JPC: An x86 PC Hardware Emulator for a pure Java Virtual Machine Copyright (C) 2012-2013 Ian Preston This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Details (including contact information) can be found at: jpc.sourceforge.net or the developer website sourceforge.net/projects/jpc/ End of licence header */ package org.jpc.emulator.execution.opcodes.pm; import org.jpc.emulator.execution.*; import org.jpc.emulator.execution.decoder.*; import org.jpc.emulator.processor.*; import org.jpc.emulator.processor.fpu64.*; import static org.jpc.emulator.processor.Processor.*; public class fdivp_ST2_ST6 extends Executable { public fdivp_ST2_ST6(int blockStart, int eip, int prefices, PeekableInputStream input) { super(blockStart, eip); int modrm = input.readU8(); } public Branch execute(Processor cpu) { double freg0 = cpu.fpu.ST(2); double freg1 = cpu.fpu.ST(6); if (((freg0 == 0.0) && (freg1 == 0.0)) || (Double.isInfinite(freg0) && Double.isInfinite(freg1))) cpu.fpu.setInvalidOperation(); if ((freg1 == 0.0) && !Double.isNaN(freg0) && !Double.isInfinite(freg0)) cpu.fpu.setZeroDivide(); cpu.fpu.setST(2, freg0/freg1); cpu.fpu.pop(); return Branch.None; } public boolean isBranch() { return false; } public String toString() { return this.getClass().getName(); } }
ianopolous/JPC
src/org/jpc/emulator/execution/opcodes/pm/fdivp_ST2_ST6.java
Java
gpl-2.0
2,070
/* * Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. 
*/ package org.jemmy.fx.control; import javafx.scene.Node; import javafx.scene.control.MenuBar; import javafx.scene.control.MenuButton; import javafx.scene.control.MenuItem; import org.jemmy.control.Wrap; import org.jemmy.fx.AppExecutor; import org.jemmy.fx.ByText; import org.jemmy.fx.Controls; import org.jemmy.fx.Root; import org.jemmy.fx.SceneDock; import org.jemmy.input.StringMenuOwner; import org.jemmy.interfaces.Parent; import org.jemmy.interfaces.Tree; import org.jemmy.lookup.LookupCriteria; import org.jemmy.resources.StringComparePolicy; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; /** * * @author shura */ public class MenuTest { public MenuTest() { } @BeforeClass public static void setUpClass() throws Exception { AppExecutor.executeNoBlock(Controls.class); } @AfterClass public static void tearDownClass() throws Exception { } @Before public void setUp() { } @After public void tearDown() { } @Test public void pushMenu() throws InterruptedException { Parent<Node> parent = Root.ROOT.lookup().as(Parent.class, Node.class); Wrap<? extends MenuBar> bar = parent.lookup(MenuBar.class).wrap(); // Parent<Menu> barParent = bar.as(Parent.class, Menu.class); // Wrap<? extends Menu> menu1 = barParent. // lookup(new ByTextMenuItem<Menu>("menu1", StringComparePolicy.EXACT)).wrap(); // menu1.mouse().move(); // menu1.mouse().move(); // Parent<MenuItem> menu1_parent = menu1.as(Parent.class, MenuItem.class); // Wrap<? extends MenuItem> sub_menu1 = menu1_parent. // lookup(new ByTextMenuItem<MenuItem>("sub-menu1", StringComparePolicy.EXACT)).wrap(); // sub_menu1.mouse().move(); // Parent<MenuItem> sub_menu1_parent = sub_menu1.as(Parent.class, MenuItem.class); // Wrap<? 
extends MenuItem> sub_sub_menu1 = sub_menu1_parent.lookup(new ByTextMenuItem<MenuItem>("sub-sub-menu1", StringComparePolicy.EXACT)).wrap(); // sub_sub_menu1.mouse().click(); // Parent<MenuItem> sub_sub_menu1_parent = sub_sub_menu1.as(Parent.class, MenuItem.class); // Wrap<? extends MenuItem> item = sub_sub_menu1_parent.lookup(new ByTextMenuItem<MenuItem>("item1", StringComparePolicy.EXACT)).wrap(); // item.mouse().click(); StringMenuOwner menu_owner = bar.as(StringMenuOwner.class, MenuItem.class); //menu_owner.push("menu1", "sub-menu1", "sub-sub-menu1", "item1"); // menu_owner.menu().select(new ByTextMenuItem("menu1", StringComparePolicy.EXACT), // new ByTextMenuItem("sub-menu1", StringComparePolicy.EXACT), // new ByTextMenuItem("sub-sub-menu1", StringComparePolicy.EXACT)); Wrap<MenuItem> sub_menu1 = menu_owner.menu().select(new ByTextMenuItem("menu1", StringComparePolicy.EXACT), new ByTextMenuItem("sub-menu1", StringComparePolicy.EXACT)); StringMenuOwner sub_menu1_tree = sub_menu1.as(StringMenuOwner.class, MenuItem.class); sub_menu1_tree.push("sub-sub-menu1", "item1"); Wrap<? 
extends MenuButton> menu_button = parent.lookup(MenuButton.class).wrap(); StringMenuOwner button_menu_owner = menu_button.as(StringMenuOwner.class, MenuItem.class); button_menu_owner.push("menu1", "sub-menu1", "sub-sub-menu1", "item1"); button_menu_owner.push("item0"); button_menu_owner.menu().push(new MenuTextLookupCriteria("menu1"), new MenuTextLookupCriteria("sub-menu1"), new MenuTextLookupCriteria("sub-sub-menu1"), new MenuTextLookupCriteria("item1")); button_menu_owner.menu().push(new ByText("item0")); // MenuBarDock bar = new MenuBarDock(new SceneDock().asParent()); //// bar.asStringMenuOwner().push("menu2"); //// new LabeledDock(new SceneDock().asParent(), "menu2 pushed", StringComparePolicy.SUBSTRING); // bar.asStringMenuOwner().push("menu0", "item0"); // new LabeledDock(new SceneDock().asParent(), "item0 pushed", StringComparePolicy.SUBSTRING); // bar.asStringMenuOwner().push("menu1", "sub-menu1", "item1"); // new LabeledDock(new SceneDock().asParent(), "item1 pushed", StringComparePolicy.SUBSTRING); } class MenuTextLookupCriteria implements LookupCriteria<MenuItem> { String str; public MenuTextLookupCriteria(String str) { this.str = str; } public boolean check(MenuItem cntrl) { return cntrl.getText().contentEquals(str); } } }
teamfx/openjfx-8u-dev-tests
tools/Jemmy/JemmyFX/test/org/jemmy/fx/control/MenuTest.java
Java
gpl-2.0
5,883
# # Chris Lumens <clumens@redhat.com> # # Copyright 2015 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, modify, # copy, or redistribute it subject to the terms and conditions of the GNU # General Public License v.2. This program is distributed in the hope that it # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat # trademarks that are incorporated in the source code or documentation are not # subject to the GNU General Public License and may only be used or replicated # with the express permission of Red Hat, Inc. # import unittest from tests.baseclass import CommandTest, CommandSequenceTest class F23_TestCase(CommandTest): command = "reqpart" def runTest(self): # pass self.assert_parse("reqpart", "reqpart\n") # pass self.assert_parse("reqpart --add-boot", "reqpart --add-boot\n") class F23_AutopartReqpart_TestCase(CommandSequenceTest): def runTest(self): # fail - can't use both autopart and reqpart self.assert_parse_error(""" autopart reqpart""") RHEL7_TestCase = F23_TestCase if __name__ == "__main__": unittest.main()
cgwalters/pykickstart
tests/commands/reqpart.py
Python
gpl-2.0
1,533
/* * \brief Utility for synchronizing the access of interface methods * \author Norman Feske * \date 2013-05-16 */ #ifndef _INCLUDE__BASE__SYNCED_INTERFACE_H_ #define _INCLUDE__BASE__SYNCED_INTERFACE_H_ /* Genode includes */ #include <base/lock.h> namespace Genode { template <typename, typename LOCK = Genode::Lock> class Synced_interface; } /* * Utility for synchronizing the access of interface methods * * The 'Synced_interface' utility makes the serialization of interface * method calls easy. The 'Synced_interface' is a functor that takes a lock * and a pointer to an interface as arguments. When called, the functor * returns a smart pointer to the interface. When this smart pointer gets * dereferenced, the smart pointer takes care of acquiring and releasing * the lock while the interface method is executed. */ template <typename IF, typename LOCK> class Genode::Synced_interface { public: class Guard { private: LOCK &_lock; IF *_interface; Guard(LOCK &lock, IF *interface) : _lock(lock), _interface(interface) { _lock.lock(); } friend class Synced_interface; public: ~Guard() { _lock.unlock(); } IF *operator -> () { return _interface; } }; private: LOCK &_lock; IF *_interface; public: Synced_interface(LOCK &lock, IF *interface) : _lock(lock), _interface(interface) { } Guard operator () () { return Guard(_lock, _interface); } Guard operator () () const { return Guard(_lock, _interface); } }; #endif /* _INCLUDE__BASE__SYNCED_INTERFACE_H_ */
702nADOS/genode-dom0
repos/base/include/base/synced_interface.h
C
gpl-2.0
1,583
/* * Javascript Humane Dates * Copyright (c) 2008 Dean Landolt (deanlandolt.com) * Re-write by Zach Leatherman (zachleat.com) * * Adopted from the John Resig's pretty.js * at http://ejohn.org/blog/javascript-pretty-date * and henrah's proposed modification * at http://ejohn.org/blog/javascript-pretty-date/#comment-297458 * * Licensed under the MIT license. */ function humaneDate(date, compareTo){ var lang = { ago: 'Ago', now: 'Just Now', minute: 'Minute', minutes: 'Minutes', hour: 'Hour', hours: 'Hours', day: 'Day', days: 'Days', week: 'Week', weeks: 'Weeks', month: 'Month', months: 'Months', year: 'Year', years: 'Years' }, formats = [ [60, lang.now], [3600, lang.minute, lang.minutes, 60], // 60 minutes, 1 minute [86400, lang.hour, lang.hours, 3600], // 24 hours, 1 hour [604800, lang.day, lang.days, 86400], // 7 days, 1 day [2628000, lang.week, lang.weeks, 604800], // ~1 month, 1 week [31536000, lang.month, lang.months, 2628000], // 1 year, ~1 month [Infinity, lang.year, lang.years, 31536000], // Infinity, 1 year ], isString = typeof date == 'string', date = isString ? new Date(('' + date).replace(/-/g,"/").replace(/[TZ]/g," ")) : date, compareTo = compareTo || new Date, seconds = (compareTo - date + (compareTo.getTimezoneOffset() - // if we received a GMT time from a string, doesn't include time zone bias // if we got a date object, the time zone is built in, we need to remove it. (isString ? 0 : date.getTimezoneOffset()) ) * 60000 ) / 1000, token; if(seconds < 0) { seconds = Math.abs(seconds); token = ''; } else { token = ' ' + lang.ago; } /* * 0 seconds && < 60 seconds Now * 60 seconds 1 Minute * > 60 seconds && < 60 minutes X Minutes * 60 minutes 1 Hour * > 60 minutes && < 24 hours X Hours * 24 hours 1 Day * > 24 hours && < 7 days X Days * 7 days 1 Week * > 7 days && < ~ 1 Month X Weeks * ~ 1 Month 1 Month * > ~ 1 Month && < 1 Year X Months * 1 Year 1 Year * > 1 Year X Years * * Single units are +10%. 
1 Year shows first at 1 Year + 10% */ function normalize(val, single) { var margin = 0.1; if(val >= single && val <= single * (1+margin)) { return single; } return val; } for(var i = 0, format = formats[0]; formats[i]; format = formats[++i]) { if(seconds < format[0]) { if(i === 0) { // Now return format[1]; } var val = Math.ceil(normalize(seconds, format[3]) / (format[3])); return val + ' ' + (val != 1 ? format[2] : format[1]) + (i > 0 ? token : ''); } } }; if(typeof jQuery != 'undefined') { jQuery.fn.humaneDates = function() { return this.each(function() { var $t = jQuery(this), date = humaneDate($t.attr('title')); if(date && $t.html() != date) { // don't modify the dom if we don't have to $t.html(date); } }); }; } /** * Shortcut to parse a date in ISO format, then format it */ function dateHuman(obj) { var date = new Date(Date.parse(obj)); return humaneDate(date); } /* * Date Format 1.2.3 * (c) 2007-2009 Steven Levithan <stevenlevithan.com> * MIT license * * Includes enhancements by Scott Trenda <scott.trenda.net> * and Kris Kowal <cixar.com/~kris.kowal/> * * Accepts a date, a mask, or a date and a mask. * Returns a formatted version of the given date. * The date defaults to the current date/time. * The mask defaults to dateFormat.masks.default. 
*/ var dateFormat = function () { var token = /d{1,4}|m{1,4}|yy(?:yy)?|([HhMsTt])\1?|[LloSZ]|"[^"]*"|'[^']*'/g, timezone = /\b(?:[PMCEA][SDP]T|(?:Pacific|Mountain|Central|Eastern|Atlantic) (?:Standard|Daylight|Prevailing) Time|(?:GMT|UTC)(?:[-+]\d{4})?)\b/g, timezoneClip = /[^-+\dA-Z]/g, pad = function (val, len) { val = String(val); len = len || 2; while (val.length < len) val = "0" + val; return val; }; // Regexes and supporting functions are cached through closure return function (date, mask, utc) { var dF = dateFormat; // You can't provide utc if you skip other args (use the "UTC:" mask prefix) if (arguments.length == 1 && Object.prototype.toString.call(date) == "[object String]" && !/\d/.test(date)) { mask = date; date = undefined; } // Passing date through Date applies Date.parse, if necessary date = date ? new Date(date) : new Date; if (isNaN(date)) throw SyntaxError("invalid date"); mask = String(dF.masks[mask] || mask || dF.masks["default"]); // Allow setting the utc argument via the mask if (mask.slice(0, 4) == "UTC:") { mask = mask.slice(4); utc = true; } var _ = utc ? "getUTC" : "get", d = date[_ + "Date"](), D = date[_ + "Day"](), m = date[_ + "Month"](), y = date[_ + "FullYear"](), H = date[_ + "Hours"](), M = date[_ + "Minutes"](), s = date[_ + "Seconds"](), L = date[_ + "Milliseconds"](), o = utc ? 0 : date.getTimezoneOffset(), flags = { d: d, dd: pad(d), ddd: dF.i18n.dayNames[D], dddd: dF.i18n.dayNames[D + 7], m: m + 1, mm: pad(m + 1), mmm: dF.i18n.monthNames[m], mmmm: dF.i18n.monthNames[m + 12], yy: String(y).slice(2), yyyy: y, h: H % 12 || 12, hh: pad(H % 12 || 12), H: H, HH: pad(H), M: M, MM: pad(M), s: s, ss: pad(s), l: pad(L, 3), L: pad(L > 99 ? Math.round(L / 10) : L), t: H < 12 ? "a" : "p", tt: H < 12 ? "am" : "pm", T: H < 12 ? "A" : "P", TT: H < 12 ? "AM" : "PM", Z: utc ? "UTC" : (String(date).match(timezone) || [""]).pop().replace(timezoneClip, ""), o: (o > 0 ? 
"-" : "+") + pad(Math.floor(Math.abs(o) / 60) * 100 + Math.abs(o) % 60, 4), S: ["th", "st", "nd", "rd"][d % 10 > 3 ? 0 : (d % 100 - d % 10 != 10) * d % 10] }; return mask.replace(token, function ($0) { return $0 in flags ? flags[$0] : $0.slice(1, $0.length - 1); }); }; }(); // Some common format strings dateFormat.masks = { "default": "ddd mmm dd yyyy HH:MM:ss", shortDate: "m/d/yy", mediumDate: "mmm d, yyyy", longDate: "mmmm d, yyyy", fullDate: "dddd, mmmm d, yyyy", shortTime: "h:MM TT", mediumTime: "h:MM:ss TT", longTime: "h:MM:ss TT Z", isoDate: "yyyy-mm-dd", isoTime: "HH:MM:ss", isoDateTime: "yyyy-mm-dd'T'HH:MM:ss", isoUtcDateTime: "UTC:yyyy-mm-dd'T'HH:MM:ss'Z'" }; // Internationalization strings dateFormat.i18n = { dayNames: [ "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday" ], monthNames: [ "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec", "January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December" ] }; // For convenience... Date.prototype.format = function (mask, utc) { return dateFormat(this, mask, utc); }; /** * Date.parse with progressive enhancement for ISO-8601, version 2 * © 2010 Colin Snover <http://zetafleet.com> * Released under MIT license. */ (function () { var origParse = Date.parse; Date.parse = function (date) { var timestamp = origParse(date), minutesOffset = 0, struct; if (isNaN(timestamp) && (struct = /^(\d{4}|[+\-]\d{6})-(\d{2})-(\d{2})(?:[T ](\d{2}):(\d{2})(?::(\d{2})(?:\.(\d{3,}))?)?(?:(Z)|([+\-])(\d{2})(?::?(\d{2}))?))?/.exec(date))) { if (struct[8] !== 'Z') { minutesOffset = +struct[10] * 60 + (+struct[11]); if (struct[9] === '+') { minutesOffset = 0 - minutesOffset; } } timestamp = Date.UTC(+struct[1], +struct[2] - 1, +struct[3], +struct[4], +struct[5] + minutesOffset, +struct[6], +struct[7].substr(0, 3)); } return timestamp; }; }());
josmas/openwonderland
modules/tools/error-report/web/scripts/date.js
JavaScript
gpl-2.0
8,838
/* arch/arm/mach-rk30/board-rk30-sdk-sdmmc.c * * Copyright (C) 2012 ROCKCHIP, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #ifdef CONFIG_SDMMC_RK29 #if !defined(CONFIG_SDMMC_RK29_OLD) static void rk29_sdmmc_gpio_open(int device_id, int on) { switch(device_id) { case 0://mmc0 { #ifdef CONFIG_SDMMC0_RK29 if(on) { gpio_direction_output(GPIO3B_GPIO3B0,GPIO_HIGH);//set mmc0-clk to high gpio_direction_output(GPIO3B_GPIO3B1,GPIO_HIGH);// set mmc0-cmd to high. gpio_direction_output(GPIO3B_GPIO3B2,GPIO_HIGH);//set mmc0-data0 to high. gpio_direction_output(GPIO3B_GPIO3B3,GPIO_HIGH);//set mmc0-data1 to high. gpio_direction_output(GPIO3B_GPIO3B4,GPIO_HIGH);//set mmc0-data2 to high. gpio_direction_output(GPIO3B_GPIO3B5,GPIO_HIGH);//set mmc0-data3 to high. mdelay(30); } else { rk30_mux_api_set(GPIO3B0_SDMMC0CLKOUT_NAME, GPIO3B_GPIO3B0); gpio_request(RK30_PIN3_PB0, "mmc0-clk"); gpio_direction_output(RK30_PIN3_PB0,GPIO_LOW);//set mmc0-clk to low. rk30_mux_api_set(GPIO3B1_SDMMC0CMD_NAME, GPIO3B_GPIO3B1); gpio_request(RK30_PIN3_PB1, "mmc0-cmd"); gpio_direction_output(RK30_PIN3_PB1,GPIO_LOW);//set mmc0-cmd to low. rk30_mux_api_set(GPIO3B2_SDMMC0DATA0_NAME, GPIO3B_GPIO3B2); gpio_request(RK30_PIN3_PB2, "mmc0-data0"); gpio_direction_output(RK30_PIN3_PB2,GPIO_LOW);//set mmc0-data0 to low. rk30_mux_api_set(GPIO3B3_SDMMC0DATA1_NAME, GPIO3B_GPIO3B3); gpio_request(RK30_PIN3_PB3, "mmc0-data1"); gpio_direction_output(RK30_PIN3_PB3,GPIO_LOW);//set mmc0-data1 to low. 
rk30_mux_api_set(GPIO3B4_SDMMC0DATA2_NAME, GPIO3B_GPIO3B4); gpio_request(RK30_PIN3_PB4, "mmc0-data2"); gpio_direction_output(RK30_PIN3_PB4,GPIO_LOW);//set mmc0-data2 to low. rk30_mux_api_set(GPIO3B5_SDMMC0DATA3_NAME, GPIO3B_GPIO3B5); gpio_request(RK30_PIN3_PB5, "mmc0-data3"); gpio_direction_output(RK30_PIN3_PB5,GPIO_LOW);//set mmc0-data3 to low. mdelay(30); } #endif } break; case 1://mmc1 { #ifdef CONFIG_SDMMC1_RK29 if(on) { gpio_direction_output(RK30_PIN3_PC5,GPIO_HIGH);//set mmc1-clk to high gpio_direction_output(RK30_PIN3_PC0,GPIO_HIGH);//set mmc1-cmd to high. gpio_direction_output(RK30_PIN3_PC1,GPIO_HIGH);//set mmc1-data0 to high. gpio_direction_output(RK30_PIN3_PC2,GPIO_HIGH);//set mmc1-data1 to high. gpio_direction_output(RK30_PIN3_PC3,GPIO_HIGH);//set mmc1-data2 to high. gpio_direction_output(RK30_PIN3_PC5,GPIO_HIGH);//set mmc1-data3 to high. mdelay(100); } else { rk30_mux_api_set(GPIO3C5_SDMMC1CLKOUT_NAME, GPIO3C_GPIO3C5); gpio_request(RK30_PIN3_PC5, "mmc1-clk"); gpio_direction_output(RK30_PIN3_PC5,GPIO_LOW);//set mmc1-clk to low. rk30_mux_api_set(GPIO3C0_SMMC1CMD_NAME, GPIO3C_GPIO3C0); gpio_request(RK30_PIN3_PC0, "mmc1-cmd"); gpio_direction_output(RK30_PIN3_PC0,GPIO_LOW);//set mmc1-cmd to low. rk30_mux_api_set(GPIO3C1_SDMMC1DATA0_NAME, GPIO3C_GPIO3C1); gpio_request(RK30_PIN3_PC1, "mmc1-data0"); gpio_direction_output(RK30_PIN3_PC1,GPIO_LOW);//set mmc1-data0 to low. 
mdelay(100); } #endif } break; case 2: //mmc2 break; default: break; } } static void rk29_sdmmc_set_iomux_mmc0(unsigned int bus_width) { switch (bus_width) { case 1://SDMMC_CTYPE_4BIT: { rk30_mux_api_set(GPIO3B3_SDMMC0DATA1_NAME, GPIO3B_SDMMC0_DATA1); rk30_mux_api_set(GPIO3B4_SDMMC0DATA2_NAME, GPIO3B_SDMMC0_DATA2); rk30_mux_api_set(GPIO3B5_SDMMC0DATA3_NAME, GPIO3B_SDMMC0_DATA3); } break; case 0x10000://SDMMC_CTYPE_8BIT: break; case 0xFFFF: //gpio_reset { rk30_mux_api_set(GPIO3A7_SDMMC0PWREN_NAME, GPIO3A_GPIO3A7); gpio_request(RK30_PIN3_PA7,"sdmmc-power"); gpio_direction_output(RK30_PIN3_PA7,GPIO_HIGH); //power-off rk29_sdmmc_gpio_open(0, 0); gpio_direction_output(RK30_PIN3_PA7,GPIO_LOW); //power-on rk29_sdmmc_gpio_open(0, 1); } break; default: //case 0://SDMMC_CTYPE_1BIT: { rk30_mux_api_set(GPIO3B1_SDMMC0CMD_NAME, GPIO3B_SDMMC0_CMD); rk30_mux_api_set(GPIO3B0_SDMMC0CLKOUT_NAME, GPIO3B_SDMMC0_CLKOUT); rk30_mux_api_set(GPIO3B2_SDMMC0DATA0_NAME, GPIO3B_SDMMC0_DATA0); rk30_mux_api_set(GPIO3B3_SDMMC0DATA1_NAME, GPIO3B_GPIO3B3); gpio_request(RK30_PIN3_PB3, "mmc0-data1"); gpio_direction_output(RK30_PIN3_PB3,GPIO_HIGH);//set mmc0-data1 to high. rk30_mux_api_set(GPIO3B4_SDMMC0DATA2_NAME, GPIO3B_GPIO3B4); gpio_request(RK30_PIN3_PB4, "mmc0-data2"); gpio_direction_output(RK30_PIN3_PB4,GPIO_HIGH);//set mmc0-data2 to high. rk30_mux_api_set(GPIO3B5_SDMMC0DATA3_NAME, GPIO3B_GPIO3B5); gpio_request(RK30_PIN3_PB5, "mmc0-data3"); gpio_direction_output(RK30_PIN3_PB5,GPIO_HIGH);//set mmc0-data3 to high. 
} break; } } static void rk29_sdmmc_set_iomux_mmc1(unsigned int bus_width) { rk30_mux_api_set(GPIO3C0_SMMC1CMD_NAME, GPIO3C_SMMC1_CMD); rk30_mux_api_set(GPIO3C5_SDMMC1CLKOUT_NAME, GPIO3C_SDMMC1_CLKOUT); rk30_mux_api_set(GPIO3C1_SDMMC1DATA0_NAME, GPIO3C_SDMMC1_DATA0); rk30_mux_api_set(GPIO3C2_SDMMC1DATA1_NAME, GPIO3C_SDMMC1_DATA1); rk30_mux_api_set(GPIO3C3_SDMMC1DATA2_NAME, GPIO3C_SDMMC1_DATA2); rk30_mux_api_set(GPIO3C4_SDMMC1DATA3_NAME, GPIO3C_SDMMC1_DATA3); } static void rk29_sdmmc_set_iomux_mmc2(unsigned int bus_width) { ;// } static void rk29_sdmmc_set_iomux(int device_id, unsigned int bus_width) { switch(device_id) { case 0: #ifdef CONFIG_SDMMC0_RK29 rk29_sdmmc_set_iomux_mmc0(bus_width); #endif break; case 1: #ifdef CONFIG_SDMMC1_RK29 rk29_sdmmc_set_iomux_mmc1(bus_width); #endif break; case 2: rk29_sdmmc_set_iomux_mmc2(bus_width); break; default: break; } } #endif //int rk29sdk_wifi_power_state = 0; //int rk29sdk_bt_power_state = 0; #ifdef CONFIG_WIFI_CONTROL_FUNC //#define RK29SDK_WIFI_BT_GPIO_POWER_N RK30_PIN3_PD0 //#define RK29SDK_WIFI_GPIO_RESET_N RK30_PIN3_PD0 //#define RK29SDK_BT_GPIO_RESET_N RK30_PIN3_PD1 #define RK30SDK_WIFI_GPIO_POWER_N RK30_PIN3_PD0 //#define RK30SDK_BT_GPIO_POWER_N RK30_PIN3_PD1 #define PREALLOC_WLAN_SEC_NUM 4 #define PREALLOC_WLAN_BUF_NUM 160 #define PREALLOC_WLAN_SECTION_HEADER 24 #define WLAN_SECTION_SIZE_0 (PREALLOC_WLAN_BUF_NUM * 128) #define WLAN_SECTION_SIZE_1 (PREALLOC_WLAN_BUF_NUM * 128) #define WLAN_SECTION_SIZE_2 (PREALLOC_WLAN_BUF_NUM * 512) #define WLAN_SECTION_SIZE_3 (PREALLOC_WLAN_BUF_NUM * 1024) #define WLAN_SKB_BUF_NUM 16 static struct sk_buff *wlan_static_skb[WLAN_SKB_BUF_NUM]; struct wifi_mem_prealloc { void *mem_ptr; unsigned long size; }; static struct wifi_mem_prealloc wifi_mem_array[PREALLOC_WLAN_SEC_NUM] = { {NULL, (WLAN_SECTION_SIZE_0 + PREALLOC_WLAN_SECTION_HEADER)}, {NULL, (WLAN_SECTION_SIZE_1 + PREALLOC_WLAN_SECTION_HEADER)}, {NULL, (WLAN_SECTION_SIZE_2 + PREALLOC_WLAN_SECTION_HEADER)}, {NULL, 
(WLAN_SECTION_SIZE_3 + PREALLOC_WLAN_SECTION_HEADER)} }; static void *rk29sdk_mem_prealloc(int section, unsigned long size) { if (section == PREALLOC_WLAN_SEC_NUM) return wlan_static_skb; if ((section < 0) || (section > PREALLOC_WLAN_SEC_NUM)) return NULL; if (wifi_mem_array[section].size < size) return NULL; return wifi_mem_array[section].mem_ptr; } static int __init rk29sdk_init_wifi_mem(void) { int i; int j; for (i = 0 ; i < WLAN_SKB_BUF_NUM ; i++) { wlan_static_skb[i] = dev_alloc_skb( ((i < (WLAN_SKB_BUF_NUM / 2)) ? 4096 : 8192)); if (!wlan_static_skb[i]) goto err_skb_alloc; } for (i = 0 ; i < PREALLOC_WLAN_SEC_NUM ; i++) { wifi_mem_array[i].mem_ptr = kmalloc(wifi_mem_array[i].size, GFP_KERNEL); if (!wifi_mem_array[i].mem_ptr) goto err_mem_alloc; } return 0; err_mem_alloc: pr_err("Failed to mem_alloc for WLAN\n"); for (j = 0 ; j < i ; j++) kfree(wifi_mem_array[j].mem_ptr); i = WLAN_SKB_BUF_NUM; err_skb_alloc: pr_err("Failed to skb_alloc for WLAN\n"); for (j = 0 ; j < i ; j++) dev_kfree_skb(wlan_static_skb[j]); return -ENOMEM; } static int rk29sdk_wifi_cd = 0; /* wifi virtual 'card detect' status */ static void (*wifi_status_cb)(int card_present, void *dev_id); static void *wifi_status_cb_devid; static int rk29sdk_wifi_status(struct device *dev) { return rk29sdk_wifi_cd; } static int rk29sdk_wifi_status_register(void (*callback)(int card_present, void *dev_id), void *dev_id) { if(wifi_status_cb) return -EAGAIN; wifi_status_cb = callback; wifi_status_cb_devid = dev_id; return 0; } static int __init rk29sdk_wifi_bt_gpio_control_init(void) { rk29sdk_init_wifi_mem(); rk29_mux_api_set(GPIO3D0_SDMMC1PWREN_NAME, GPIO3D_GPIO3D0); if (gpio_request(RK30SDK_WIFI_GPIO_POWER_N, "wifi_power")) { pr_info("%s: request wifi power gpio failed\n", __func__); return -1; } /*if (gpio_request(RK29SDK_WIFI_GPIO_RESET_N, "wifi reset")) { pr_info("%s: request wifi reset gpio failed\n", __func__); gpio_free(RK30SDK_WIFI_GPIO_POWER_N); return -1; } if 
(gpio_request(RK29SDK_BT_GPIO_RESET_N, "bt reset")) { pr_info("%s: request bt reset gpio failed\n", __func__); gpio_free(RK29SDK_WIFI_GPIO_RESET_N); return -1; }*/ gpio_direction_output(RK30SDK_WIFI_GPIO_POWER_N, GPIO_LOW); //gpio_direction_output(RK29SDK_WIFI_GPIO_RESET_N, GPIO_LOW); //gpio_direction_output(RK29SDK_BT_GPIO_RESET_N, GPIO_LOW); #if defined(CONFIG_SDMMC1_RK29) && !defined(CONFIG_SDMMC_RK29_OLD) rk29_mux_api_set(GPIO3C2_SDMMC1DATA1_NAME, GPIO3C_GPIO3C2); gpio_request(RK30_PIN3_PC2, "mmc1-data1"); gpio_direction_output(RK30_PIN3_PC2,GPIO_LOW);//set mmc1-data1 to low. rk29_mux_api_set(GPIO3C3_SDMMC1DATA2_NAME, GPIO3C_GPIO3C3); gpio_request(RK30_PIN3_PC3, "mmc1-data2"); gpio_direction_output(RK30_PIN3_PC3,GPIO_LOW);//set mmc1-data2 to low. rk29_mux_api_set(GPIO3C4_SDMMC1DATA3_NAME, GPIO3C_GPIO3C4); gpio_request(RK30_PIN3_PC4, "mmc1-data3"); gpio_direction_output(RK30_PIN3_PC4,GPIO_LOW);//set mmc1-data3 to low. rk29_sdmmc_gpio_open(1, 0); //added by xbw at 2011-10-13 #endif pr_info("%s: init finished\n",__func__); return 0; } static int rk29sdk_wifi_power(int on) { pr_info("%s: %d\n", __func__, on); if (on){ gpio_set_value(RK30SDK_WIFI_GPIO_POWER_N, GPIO_HIGH); #if defined(CONFIG_SDMMC1_RK29) && !defined(CONFIG_SDMMC_RK29_OLD) rk29_sdmmc_gpio_open(1, 1); //added by xbw at 2011-10-13 #endif //gpio_set_value(RK29SDK_WIFI_GPIO_RESET_N, GPIO_HIGH); mdelay(100); pr_info("wifi turn on power\n"); }else{ // if (!rk29sdk_bt_power_state){ gpio_set_value(RK30SDK_WIFI_GPIO_POWER_N, GPIO_LOW); #if defined(CONFIG_SDMMC1_RK29) && !defined(CONFIG_SDMMC_RK29_OLD) rk29_sdmmc_gpio_open(1, 0); //added by xbw at 2011-10-13 #endif mdelay(100); pr_info("wifi shut off power\n"); // }else // { // pr_info("wifi shouldn't shut off power, bt is using it!\n"); // } //gpio_set_value(RK29SDK_WIFI_GPIO_RESET_N, GPIO_LOW); } // rk29sdk_wifi_power_state = on; return 0; } static int rk29sdk_wifi_reset_state; static int rk29sdk_wifi_reset(int on) { pr_info("%s: %d\n", __func__, on); 
//gpio_set_value(RK29SDK_WIFI_GPIO_RESET_N, on); //mdelay(100); rk29sdk_wifi_reset_state = on; return 0; } int rk29sdk_wifi_set_carddetect(int val) { pr_info("%s:%d\n", __func__, val); rk29sdk_wifi_cd = val; if (wifi_status_cb){ wifi_status_cb(val, wifi_status_cb_devid); }else { pr_warning("%s, nobody to notify\n", __func__); } return 0; } EXPORT_SYMBOL(rk29sdk_wifi_set_carddetect); /// ----- #define WIFI_HOST_WAKE RK30_PIN3_PD2 #ifdef CONFIG_RK903 #define WIFI_HOST_WAKE RK30_PIN6_PA7 #else #define WIFI_HOST_WAKE RK30_PIN3_PD2 #endif static struct resource resources[] = { { .start = WIFI_HOST_WAKE, .flags = IORESOURCE_IRQ, .name = "bcmdhd_wlan_irq", }, }; static struct wifi_platform_data rk29sdk_wifi_control = { .set_power = rk29sdk_wifi_power, .set_reset = rk29sdk_wifi_reset, .set_carddetect = rk29sdk_wifi_set_carddetect, .mem_prealloc = rk29sdk_mem_prealloc, }; static struct platform_device rk29sdk_wifi_device = { .name = "bcmdhd_wlan", .id = 1, .num_resources = ARRAY_SIZE(resources), .resource = resources, .dev = { .platform_data = &rk29sdk_wifi_control, }, }; #endif #endif // endif --#ifdef CONFIG_SDMMC_RK29
Myria-de/rk3066-kernel-minix-neo-x5
arch/arm/mach-rk30/board-rk30-sdk-sdmmc.c
C
gpl-2.0
14,944
/** * To create a new color scheme see the details about the new themes file structure: http://www.codeflavors.com/documentation/theme-file-structure/ */ .FA_overall_container_classic { display:block; position:relative; clear:both; background:#282828; border:1px solid #282828; z-index:1; margin:0px auto; } .FA_overall_container_classic a:focus { outline:0px; } focus { outline:0px; } .FA_overall_container_classic h3.FA_title_section { padding:0px; margin:0px; font-size:30px; color:#003333 !important; font-weight:normal; font-style:italic; } .FA_overall_container_classic .FA_featured_articles { display:block; position:relative; clear:both; height:300px; width:100%; overflow:hidden; } .FA_overall_container_classic .FA_featured_articles .FA_article{ display:block; position:relative; clear:both; /* if you plan on using left slide morph, set width on the elements */ width:100%; padding:0px; color:#CCCCCC; overflow:hidden; text-align:justify; } .FA_overall_container_classic .FA_featured_articles .FA_article .FA_wrap{ padding:30px 40px; } .FA_overall_container_classic .FA_featured_articles .FA_article .image_container{ display:block; position:relative; float:left; margin:0px 30px 30px 0px; background-color:#333333; padding:6px; border:1px #666666 solid; z-index:2000; } .FA_overall_container_classic .FA_featured_articles .FA_article .image_container img{ margin:0px; } .FA_overall_container_classic .FA_featured_articles .FA_article h2{ margin:0px; padding:0px; font-size:22px; color:#eeeeee !important; font-weight:normal; border:none; clear:none; text-align:left; background-color:#282828; z-index:1; } .FA_overall_container_classic .FA_featured_articles .FA_article h2 a{ color:inherit; } .FA_overall_container_classic .FA_featured_articles .FA_article span.FA_date{ font-size:14px; color:#a8a8a8; font-style:italic; } .FA_overall_container_classic .FA_featured_articles .FA_article p{ font-size:12px; color:#FFFFFF; margin:0px; padding:30px 0px 30px; line-height:18px; 
text-align:justify; } .FA_overall_container_classic .FA_featured_articles .FA_article p a{ color:#FFFFFF !important; } .FA_overall_container_classic .FA_featured_articles .FA_article a.FA_read_more{ font-size:16px; color:#ffffff; background:#3b3b3b; padding:4px 6px; font-style:italic; font-family:Georgia, "Times New Roman", Times, serif; text-decoration:none; } /* Bottom navigation */ .FA_overall_container_classic ul.FA_navigation { display:block; position:relative; background:#282828; padding:0px !important; list-style-type:none !important; margin:0px !important; text-align:left; width:100%; height:17px; border:none; } .FA_overall_container_classic ul.FA_navigation li{ display:inline; border:none; padding:0px !important; list-style-type:none !important; background:none; } .FA_overall_container_classic ul.FA_navigation li.first{ margin-left:35px !important; } .FA_overall_container_classic ul.FA_navigation li a{ display:inline-block; width:12px; height:17px; line-height:17px; font-size:0px; background-position:bottom center; background-repeat:no-repeat; margin:0px 0px 0px 5px; } .FA_overall_container_classic ul.FA_navigation li span{ display:none; position:absolute; top:-20px; left:0px; color:#CCC; text-align:left; font-family:Georgia, "Times New Roman", Times, serif; width:100%; padding-left:35px; } .FA_overall_container_classic ul.FA_navigation a.active{ background-position:top center; color:#EEE; } .FA_overall_container_classic ul.FA_navigation a:hover{ background-position:top center; } /* Sideways navigation */ .FA_overall_container_classic .FA_back{ display:block; position:absolute; left:0px; top:45%; width:20px; height:45px; background-position:top center; background-repeat:no-repeat; z-index:110; } .FA_overall_container_classic .FA_next{ display:block; position:absolute; right:0px; top:45%; width:20px; height:45px; background-position:top center; background-repeat:no-repeat; z-index:110; } .FA_overall_container_classic .FA_back:HOVER, 
.FA_overall_container_classic .FA_next:HOVER{ background-position:bottom center; }
msamricth/occupyourhomesdc.org
wp-content/plugins/featured-articles-lite/themes/classic/stylesheet.css
CSS
gpl-2.0
4,259
#ifndef NEWLANGUAGESERVERDLG_H #define NEWLANGUAGESERVERDLG_H #include "UI.h" #include "LanguageServerEntry.h" #include <wx/arrstr.h> class LanguageServerPage; class NewLanguageServerDlg : public NewLanguageServerDlgBase { LanguageServerPage* m_page = nullptr; public: NewLanguageServerDlg(wxWindow* parent); virtual ~NewLanguageServerDlg(); LanguageServerEntry GetData() const; protected: virtual void OnOKUI(wxUpdateUIEvent& event); }; #endif // NEWLANGUAGESERVERDLG_H
eranif/codelite
LanguageServer/NewLanguageServerDlg.h
C
gpl-2.0
496
#!/usr/bin/env python # # Plugin.py # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307 USA class Plugin(dict): """A dictionary with attribute-style access. It maps attribute access to the real dictionary. """ def __init__(self, init = None): if init is None: init = dict() dict.__init__(self, init) def __getstate__(self): return list(self.__dict__.items()) def __setstate__(self, items): for key, val in items: self.__dict__[key] = val def __repr__(self): return "%s(%s)" % (self.__class__.__name__, dict.__repr__(self)) def __setitem__(self, key, value): return super(Plugin, self).__setitem__(key, value) def __getitem__(self, name): return super(Plugin, self).__getitem__(name) def __delitem__(self, name): return super(Plugin, self).__delitem__(name) __getattr__ = __getitem__ __setattr__ = __setitem__
tweemeterjop/thug
thug/DOM/Plugin.py
Python
gpl-2.0
1,536
/* * Copyright (c) 2002-2003, Intel Corporation. All rights reserved. * Created by: rusty.lynch REMOVE-THIS AT intel DOT com * This file is licensed under the GPL license. For the full content * of this license, see the COPYING file at the top level of this * source tree. Test case for assertion #3 of the sigaction system call that shows calling sigaction with a null act argument does not change the signal handler. Steps: 1. Initialize global variable to indicate handler has not been called 2. Set the signal handler for SIGCONT to handler 3. Call sigaction with a null act 4. raise SIGCONT 5. Verify handler was called. */ #include <signal.h> #include <stdio.h> #include "posixtest.h" static volatile int handler_called; static void handler(int signo PTS_ATTRIBUTE_UNUSED) { handler_called = 1; } int main(void) { struct sigaction act; struct sigaction oact; act.sa_handler = handler; act.sa_flags = 0; sigemptyset(&act.sa_mask); if (sigaction(SIGCONT, &act, 0) == -1) { perror("Unexpected error while attempting to setup test " "pre-conditions"); return PTS_UNRESOLVED; } if (sigaction(SIGCONT, 0, &oact) == -1) { perror("Unexpected error while attempting to setup test " "pre-conditions"); return PTS_UNRESOLVED; } if (raise(SIGCONT) == -1) { perror("Unexpected error while attempting to setup test " "pre-conditions"); return PTS_UNRESOLVED; } if (handler_called) { printf("Test PASSED\n"); return PTS_PASS; } printf("Test FAILED\n"); return PTS_FAIL; }
linux-test-project/ltp
testcases/open_posix_testsuite/conformance/interfaces/sigaction/3-5.c
C
gpl-2.0
1,559
//>>built define("dojox/widget/nls/hu/FilePicker",({name:"Név",path:"Elérési út",size:"Méret (byte)"}));
hariomkumarmth/champaranexpress
wp-content/plugins/dojo/dojox/widget/nls/hu/FilePicker.js
JavaScript
gpl-2.0
112
/* * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ package com.oracle.graal.asm.ptx; import sun.misc.*; import com.oracle.graal.api.code.*; import com.oracle.graal.api.meta.*; import com.oracle.graal.compiler.common.*; import com.oracle.graal.lir.*; public class PTXMacroAssembler extends PTXAssembler { public PTXMacroAssembler(TargetDescription target, RegisterConfig registerConfig) { super(target, registerConfig); } public static class LoadAddr extends LoadStoreFormat { public LoadAddr(PTXStateSpace space, Variable dst, Variable src1, Value src2) { super(space, dst, src1, src2); } public void emit(PTXMacroAssembler masm) { String ldAddrStr = "ld." 
+ space.getStateName(); if (Unsafe.ADDRESS_SIZE == 8) { ldAddrStr = ldAddrStr + ".u64"; } else { ldAddrStr = ldAddrStr + ".u32"; } masm.emitString(ldAddrStr + " " + emitRegister(dest, false) + ", " + emitAddress(source1, source2) + ";"); } } public static class Ld extends LoadStoreFormat { public Ld(PTXStateSpace space, Variable dst, Variable src1, Value src2) { super(space, dst, src1, src2); } public void emit(PTXMacroAssembler asm) { asm.emitString("ld." + super.emit(true)); } } public static class St extends LoadStoreFormat { public St(PTXStateSpace space, Variable dst, Variable src1, Value src2) { super(space, dst, src1, src2); } public void emit(PTXMacroAssembler asm) { asm.emitString("st." + super.emit(false)); } } public static class LoadParam extends Ld { // Type of the operation is dependent on src1's type. public LoadParam(PTXStateSpace space, Variable dst, Variable src1, Value src2) { super(space, dst, src1, src2); setKind(src1.getKind()); } } public static class Add extends StandardFormat { public Add(Variable dst, Value src1, Value src2) { super(dst, src1, src2); } public void emit(PTXMacroAssembler asm) { asm.emitString("add." + super.emit()); } } public static class And extends LogicInstructionFormat { public And(Variable dst, Value src1, Value src2) { super(dst, src1, src2); } public void emit(PTXMacroAssembler asm) { asm.emitString("and." + super.emit()); } } public static class Div extends StandardFormat { public Div(Variable dst, Value src1, Value src2) { super(dst, src1, src2); } public void emit(PTXMacroAssembler asm) { asm.emitString("div." + super.emit()); } } public static class Mul extends StandardFormat { public Mul(Variable dst, Value src1, Value src2) { super(dst, src1, src2); } public void emit(PTXMacroAssembler asm) { asm.emitString("mul.lo." 
+ super.emit()); } } public static class Or extends LogicInstructionFormat { public Or(Variable dst, Value src1, Value src2) { super(dst, src1, src2); } public void emit(PTXMacroAssembler asm) { asm.emitString("or." + super.emit()); } } public static class Rem extends StandardFormat { public Rem(Variable dst, Value src1, Value src2) { super(dst, src1, src2); } public void emit(PTXMacroAssembler asm) { asm.emitString("rem." + super.emit()); } } public static class Shl extends LogicInstructionFormat { public Shl(Variable dst, Value src1, Value src2) { super(dst, src1, src2); } public void emit(PTXMacroAssembler asm) { asm.emitString("shl." + super.emit()); } } public static class Shr extends StandardFormat { public Shr(Variable dst, Value src1, Value src2) { super(dst, src1, src2); } public void emit(PTXMacroAssembler asm) { asm.emitString("shr." + super.emit()); } } public static class Sub extends StandardFormat { public Sub(Variable dst, Value src1, Value src2) { super(dst, src1, src2); } public void emit(PTXMacroAssembler asm) { asm.emitString("sub." + super.emit()); } } public static class Ushr extends StandardFormat { public Ushr(Variable dst, Value src1, Value src2) { super(dst, src1, src2); setKind(Kind.Illegal); // get around not having an Unsigned Kind } public void emit(PTXMacroAssembler asm) { asm.emitString("shr." + super.emit()); } } public static class Xor extends LogicInstructionFormat { public Xor(Variable dst, Value src1, Value src2) { super(dst, src1, src2); } public void emit(PTXMacroAssembler asm) { asm.emitString("xor." + super.emit()); } } public static class Cvt extends ConversionFormat { public Cvt(Variable dst, Variable src, Kind dstKind, Kind srcKind) { super(dst, src, dstKind, srcKind); } public void emit(PTXMacroAssembler asm) { if (dest.getKind() == Kind.Float || dest.getKind() == Kind.Double) { // round-to-zero - might not be right asm.emitString("cvt.rz." + super.emit()); } else { asm.emitString("cvt." 
+ super.emit()); } } } public static class Mov extends SingleOperandFormat { private int predicateRegisterNumber = -1; public Mov(Variable dst, Value src) { super(dst, src); } public Mov(Variable dst, Value src, int predicate) { super(dst, src); this.predicateRegisterNumber = predicate; } /* * public Mov(Variable dst, AbstractAddress src) { throw * GraalInternalError.unimplemented("AbstractAddress Mov"); } */ public void emit(PTXMacroAssembler asm) { if (predicateRegisterNumber >= 0) { asm.emitString("@%p" + String.valueOf(predicateRegisterNumber) + " mov." + super.emit()); } else { asm.emitString("mov." + super.emit()); } } } public static class Neg extends SingleOperandFormat { public Neg(Variable dst, Variable src) { super(dst, src); } public void emit(PTXMacroAssembler asm) { asm.emitString("neg." + super.emit()); } } public static class Not extends BinarySingleOperandFormat { public Not(Variable dst, Variable src) { super(dst, src); } public void emit(PTXMacroAssembler asm) { asm.emitString("not." + super.emit()); } } public static class Param extends SingleOperandFormat { boolean isReturnParameter; public Param(Variable d, boolean isRet) { super(d, null); isReturnParameter = isRet; } public String emitParameter(Variable v) { return (" param" + v.index); } public void emit(PTXMacroAssembler asm, boolean isLastParam) { asm.emitString(".param ." + paramForKind(dest.getKind()) + emitParameter(dest) + (isLastParam ? "" : ",")); } public String paramForKind(Kind k) { if (isReturnParameter) { if (Unsafe.ADDRESS_SIZE == 8) { return "u64"; } else { return "u32"; } } else { switch (k.getTypeChar()) { case 'z': case 'f': return "s32"; case 'b': return "b8"; case 's': return "s16"; case 'c': return "u16"; case 'i': return "s32"; case 'j': return "s64"; case 'd': return "f64"; case 'a': return "u64"; default: throw GraalInternalError.shouldNotReachHere(); } } } } }
lewurm/graal
graal/com.oracle.graal.asm.ptx/src/com/oracle/graal/asm/ptx/PTXMacroAssembler.java
Java
gpl-2.0
9,613
/* * * Copyright (C) 2011-2013 ArkCORE <http://www.arkania.net/> * * Copyright (C) 2008-2013 TrinityCore <http://www.trinitycore.org/> * * * Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/> * * * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ /* Name: Boss_Darkmaster_Gandling %Complete: 90 Comment: Doors Not yet reopening. Category: Scholomance */ #include "ScriptMgr.h" #include "ScriptedCreature.h" #include "scholomance.h" #include "SpellScript.h" enum Says { YELL_SUMMONED = 0 }; enum Spells { SPELL_ARCANEMISSILES = 15790, SPELL_SHADOWSHIELD = 12040, SPELL_CURSE = 18702, SPELL_SHADOW_PORTAL = 17950 }; enum Events { EVENT_ARCANEMISSILES = 1, EVENT_SHADOWSHIELD = 2, EVENT_CURSE = 3, EVENT_SHADOW_PORTAL = 4 }; class boss_darkmaster_gandling : public CreatureScript { public: boss_darkmaster_gandling() : CreatureScript("boss_darkmaster_gandling") { } struct boss_darkmaster_gandlingAI : public BossAI { boss_darkmaster_gandlingAI(Creature* creature) : BossAI(creature, DATA_DARKMASTERGANDLING) {} void Reset() { _Reset(); if (GameObject* gate = me->GetMap()->GetGameObject(instance->GetData64(GO_GATE_GANDLING))) gate->SetGoState(GO_STATE_ACTIVE); } void JustDied(Unit* /*killer*/) { _JustDied(); if (GameObject* gate = me->GetMap()->GetGameObject(instance->GetData64(GO_GATE_GANDLING))) gate->SetGoState(GO_STATE_ACTIVE); } void EnterCombat(Unit* /*who*/) { _EnterCombat(); 
events.ScheduleEvent(EVENT_ARCANEMISSILES, 4500); events.ScheduleEvent(EVENT_SHADOWSHIELD, 12000); events.ScheduleEvent(EVENT_CURSE, 2000); events.ScheduleEvent(EVENT_SHADOW_PORTAL, 16000); if (GameObject* gate = me->GetMap()->GetGameObject(instance->GetData64(GO_GATE_GANDLING))) gate->SetGoState(GO_STATE_READY); } void IsSummonedBy(Unit* /*summoner*/) { Talk(YELL_SUMMONED); me->GetMotionMaster()->MoveRandom(5); } void UpdateAI(uint32 diff) { if (!UpdateVictim()) return; events.Update(diff); if (me->HasUnitState(UNIT_STATE_CASTING)) return; while (uint32 eventId = events.ExecuteEvent()) { switch (eventId) { case EVENT_ARCANEMISSILES: DoCastVictim(SPELL_ARCANEMISSILES, true); events.ScheduleEvent(EVENT_ARCANEMISSILES, 8000); break; case EVENT_SHADOWSHIELD: DoCast(me, SPELL_SHADOWSHIELD); events.ScheduleEvent(EVENT_SHADOWSHIELD, urand(14000, 28000)); break; case EVENT_CURSE: DoCastVictim(SPELL_CURSE, true); events.ScheduleEvent(EVENT_CURSE, urand(15000, 27000)); break; case EVENT_SHADOW_PORTAL: if (HealthAbovePct(3)) { DoCast(SelectTarget(SELECT_TARGET_RANDOM, 0, 100, true), SPELL_SHADOW_PORTAL, true); events.ScheduleEvent(EVENT_SHADOW_PORTAL, urand(17000, 27000)); } } } DoMeleeAttackIfReady(); } }; CreatureAI* GetAI(Creature* creature) const { return new boss_darkmaster_gandlingAI (creature); } }; // Script for Shadow Portal spell 17950 enum Rooms { ROOM_HALL_OF_SECRETS = 0, ROOM_HALL_OF_THE_DAMNED = 1, ROOM_THE_COVEN = 2, ROOM_THE_SHADOW_VAULT = 3, ROOM_BAROV_FAMILY_VAULT = 4, ROOM_VAULT_OF_THE_RAVENIAN = 5 }; enum SPSpells { SPELL_SHADOW_PORTAL_HALLOFSECRETS = 17863, SPELL_SHADOW_PORTAL_HALLOFTHEDAMNED = 17939, SPELL_SHADOW_PORTAL_THECOVEN = 17943, SPELL_SHADOW_PORTAL_THESHADOWVAULT = 17944, SPELL_SHADOW_PORTAL_BAROVFAMILYVAULT = 17946, SPELL_SHADOW_PORTAL_VAULTOFTHERAVENIAN = 17948 }; class spell_shadow_portal : public SpellScriptLoader { public: spell_shadow_portal() : SpellScriptLoader("spell_shadow_portal") { } class spell_shadow_portal_SpellScript : public 
SpellScript { PrepareSpellScript(spell_shadow_portal_SpellScript); void HandleCast(SpellEffIndex /*effIndex*/) { Creature* caster = GetCaster()->ToCreature(); int8 attempts = 0; int32 spell_to_cast =0; while (!spell_to_cast) { if (attempts++ >= 6) break; switch (urand(0, 5)) { case ROOM_HALL_OF_SECRETS: if (InstanceScript* instance = GetCaster()->GetInstanceScript()) if (GameObject::GetGameObject(*caster, instance->GetData64(GO_GATE_RAVENIAN))->GetGoState() == GO_STATE_ACTIVE) spell_to_cast = SPELL_SHADOW_PORTAL_HALLOFSECRETS; break; case ROOM_HALL_OF_THE_DAMNED: if (InstanceScript* instance = GetCaster()->GetInstanceScript()) if (GameObject::GetGameObject(*caster, instance->GetData64(GO_GATE_THEOLEN))->GetGoState() == GO_STATE_ACTIVE) spell_to_cast = SPELL_SHADOW_PORTAL_HALLOFTHEDAMNED; break; case ROOM_THE_COVEN: if (InstanceScript* instance = GetCaster()->GetInstanceScript()) if (GameObject::GetGameObject(*caster, instance->GetData64(GO_GATE_MALICIA))->GetGoState() == GO_STATE_ACTIVE) spell_to_cast = SPELL_SHADOW_PORTAL_THECOVEN; break; case ROOM_THE_SHADOW_VAULT: if (InstanceScript* instance = GetCaster()->GetInstanceScript()) if (GameObject::GetGameObject(*caster, instance->GetData64(GO_GATE_ILLUCIA))->GetGoState() == GO_STATE_ACTIVE) spell_to_cast = SPELL_SHADOW_PORTAL_THESHADOWVAULT; break; case ROOM_BAROV_FAMILY_VAULT: if (InstanceScript* instance = GetCaster()->GetInstanceScript()) if (GameObject::GetGameObject(*caster, instance->GetData64(GO_GATE_BAROV))->GetGoState() == GO_STATE_ACTIVE) spell_to_cast = SPELL_SHADOW_PORTAL_BAROVFAMILYVAULT; break; case ROOM_VAULT_OF_THE_RAVENIAN: if (InstanceScript* instance = GetCaster()->GetInstanceScript()) if (GameObject::GetGameObject(*caster, instance->GetData64(GO_GATE_POLKELT))->GetGoState() == GO_STATE_ACTIVE) spell_to_cast = SPELL_SHADOW_PORTAL_VAULTOFTHERAVENIAN; break; } if (spell_to_cast) GetHitUnit()->CastSpell(GetHitUnit(), spell_to_cast); } } void Register() { OnEffectHitTarget += 
SpellEffectFn(spell_shadow_portal_SpellScript::HandleCast, EFFECT_0, SPELL_EFFECT_DUMMY); } }; SpellScript* GetSpellScript() const { return new spell_shadow_portal_SpellScript(); } }; // Script for Shadow Portal spells 17863, 17939, 17943, 17944, 17946, 17948 Position const SummonPos[18] = { // Hall of Secrects // The Hall of the damned { 177.9624f, -68.23893f, 84.95197f, 3.228859f }, { 183.7705f, -61.43489f, 84.92424f, 5.148721f }, { 184.7035f, -77.74805f, 84.92424f, 4.660029f }, // The Coven { 111.7203f, -1.105035f, 85.45985f, 3.961897f }, { 118.0079f, 6.430664f, 85.31169f, 2.408554f }, { 120.0276f, -7.496636f, 85.31169f, 2.984513f }, // The Shadow Vault { 245.3716f, 0.628038f, 72.73877f, 0.01745329f }, { 240.9920f, 3.405653f, 72.73877f, 6.143559f }, { 240.9543f, -3.182943f, 72.73877f, 0.2268928f }, // Barov Family Vault { 181.8245f, -42.58117f, 75.4812f, 4.660029f }, { 177.7456f, -42.74745f, 75.4812f, 4.886922f }, { 185.6157f, -42.91200f, 75.4812f, 4.45059f }, // Vault of the Ravenian }; enum Creatures { NPC_RISEN_GUARDIAN = 11598 }; enum ScriptEventId { SPELL_EVENT_HALLOFSECRETS = 5618, SPELL_EVENT_HALLOFTHEDAMNED = 5619, SPELL_EVENT_THECOVEN = 5620, SPELL_EVENT_THESHADOWVAULT = 5621, SPELL_EVENT_BAROVFAMILYVAULT = 5622, SPELL_EVENT_VAULTOFTHERAVENIAN = 5623 }; class spell_shadow_portal_rooms : public SpellScriptLoader { public: spell_shadow_portal_rooms() : SpellScriptLoader("spell_shadow_portal_rooms") { } class spell_shadow_portal_rooms_SpellScript : public SpellScript { PrepareSpellScript(spell_shadow_portal_rooms_SpellScript); void HandleSendEvent(SpellEffIndex effIndex) { // If only one player in threat list fail spell Creature* Summoned = NULL; Creature* caster = GetCaster()->ToCreature(); int8 pos_to_summon = 0; int8 phase_to_set = 0; int32 gate_to_close = 0; switch (GetSpellInfo()->Effects[effIndex].MiscValue) { case SPELL_EVENT_HALLOFSECRETS: pos_to_summon = 0; // Not yet spawned phase_to_set = 1; gate_to_close = GO_GATE_RAVENIAN; break; case 
SPELL_EVENT_HALLOFTHEDAMNED: pos_to_summon = 0; phase_to_set = 2; gate_to_close = GO_GATE_THEOLEN; break; case SPELL_EVENT_THECOVEN: pos_to_summon = 3; phase_to_set = 3; gate_to_close = GO_GATE_MALICIA; break; case SPELL_EVENT_THESHADOWVAULT: pos_to_summon = 6; phase_to_set = 4; gate_to_close = GO_GATE_ILLUCIA; break; case SPELL_EVENT_BAROVFAMILYVAULT: pos_to_summon = 9; phase_to_set = 5; gate_to_close = GO_GATE_BAROV; break; case SPELL_EVENT_VAULTOFTHERAVENIAN: pos_to_summon = 0; // Not yet spawned phase_to_set = 6; gate_to_close = GO_GATE_POLKELT; break; default: break; } if (gate_to_close && (GetCaster()->GetMap()->GetId() == 289)) { for (uint8 i = 0; i < 3; ++i) { Summoned = GetCaster()->SummonCreature(NPC_RISEN_GUARDIAN, SummonPos[pos_to_summon++], TEMPSUMMON_TIMED_DESPAWN_OUT_OF_COMBAT, 120000); if (Summoned) { Summoned->GetMotionMaster()->MoveRandom(5); Summoned->AI()->SetData(0, phase_to_set); } } if (InstanceScript* instance = GetCaster()->GetInstanceScript()) if (GameObject* gate = GameObject::GetGameObject(*caster, instance->GetData64(gate_to_close))) gate->SetGoState(GO_STATE_READY); } } void Register() { OnEffectHit += SpellEffectFn(spell_shadow_portal_rooms_SpellScript::HandleSendEvent, EFFECT_1, SPELL_EFFECT_SEND_EVENT); } }; SpellScript* GetSpellScript() const { return new spell_shadow_portal_rooms_SpellScript(); } }; void AddSC_boss_darkmaster_gandling() { new boss_darkmaster_gandling(); new spell_shadow_portal(); new spell_shadow_portal_rooms(); }
blitztech/master434
src/server/scripts/EasternKingdoms/Scholomance/boss_darkmaster_gandling.cpp
C++
gpl-2.0
13,844
/* * Authored By Neil Roberts <neil@linux.intel.com> * and Jasper St. Pierre <jstpierre@mecheye.net> * * Copyright (C) 2008 Intel Corporation * Copyright (C) 2012 Red Hat, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. */ /** * SECTION:meta-shaped-texture * @title: MetaShapedTexture * @short_description: An actor to draw a masked texture. */ #include <config.h> #include <meta/meta-shaped-texture.h> #include "meta-shaped-texture-private.h" #include <cogl/cogl.h> #include <gdk/gdk.h> /* for gdk_rectangle_intersect() */ #include "clutter-utils.h" #include "meta-texture-tower.h" #include "meta-cullable.h" static void meta_shaped_texture_dispose (GObject *object); static void meta_shaped_texture_paint (ClutterActor *actor); static void meta_shaped_texture_get_preferred_width (ClutterActor *self, gfloat for_height, gfloat *min_width_p, gfloat *natural_width_p); static void meta_shaped_texture_get_preferred_height (ClutterActor *self, gfloat for_width, gfloat *min_height_p, gfloat *natural_height_p); static gboolean meta_shaped_texture_get_paint_volume (ClutterActor *self, ClutterPaintVolume *volume); static void cullable_iface_init (MetaCullableInterface *iface); G_DEFINE_TYPE_WITH_CODE (MetaShapedTexture, meta_shaped_texture, CLUTTER_TYPE_ACTOR, G_IMPLEMENT_INTERFACE (META_TYPE_CULLABLE, cullable_iface_init)); #define META_SHAPED_TEXTURE_GET_PRIVATE(obj) \ (G_TYPE_INSTANCE_GET_PRIVATE 
((obj), META_TYPE_SHAPED_TEXTURE, \ MetaShapedTexturePrivate)) enum { SIZE_CHANGED, LAST_SIGNAL, }; static guint signals[LAST_SIGNAL]; struct _MetaShapedTexturePrivate { MetaTextureTower *paint_tower; CoglTexture *texture; CoglTexture *mask_texture; /* The region containing only fully opaque pixels */ cairo_region_t *opaque_region; /* MetaCullable regions, see that documentation for more details */ cairo_region_t *clip_region; cairo_region_t *unobscured_region; guint tex_width, tex_height; guint fallback_width, fallback_height; guint create_mipmaps : 1; }; static void meta_shaped_texture_class_init (MetaShapedTextureClass *klass) { GObjectClass *gobject_class = (GObjectClass *) klass; ClutterActorClass *actor_class = (ClutterActorClass *) klass; gobject_class->dispose = meta_shaped_texture_dispose; actor_class->get_preferred_width = meta_shaped_texture_get_preferred_width; actor_class->get_preferred_height = meta_shaped_texture_get_preferred_height; actor_class->paint = meta_shaped_texture_paint; actor_class->get_paint_volume = meta_shaped_texture_get_paint_volume; signals[SIZE_CHANGED] = g_signal_new ("size-changed", G_TYPE_FROM_CLASS (gobject_class), G_SIGNAL_RUN_LAST, 0, NULL, NULL, NULL, G_TYPE_NONE, 0); g_type_class_add_private (klass, sizeof (MetaShapedTexturePrivate)); } static void meta_shaped_texture_init (MetaShapedTexture *self) { MetaShapedTexturePrivate *priv; priv = self->priv = META_SHAPED_TEXTURE_GET_PRIVATE (self); priv->paint_tower = meta_texture_tower_new (); priv->texture = NULL; priv->mask_texture = NULL; priv->create_mipmaps = TRUE; } static void set_unobscured_region (MetaShapedTexture *self, cairo_region_t *unobscured_region) { MetaShapedTexturePrivate *priv = self->priv; g_clear_pointer (&priv->unobscured_region, (GDestroyNotify) cairo_region_destroy); if (unobscured_region) { guint width, height; if (priv->texture) { width = priv->tex_width; height = priv->tex_height; } else { width = priv->fallback_width; height = priv->fallback_height; } 
cairo_rectangle_int_t bounds = { 0, 0, width, height }; priv->unobscured_region = cairo_region_copy (unobscured_region); cairo_region_intersect_rectangle (priv->unobscured_region, &bounds); } } static void set_clip_region (MetaShapedTexture *self, cairo_region_t *clip_region) { MetaShapedTexturePrivate *priv = self->priv; g_clear_pointer (&priv->clip_region, (GDestroyNotify) cairo_region_destroy); if (clip_region) priv->clip_region = cairo_region_copy (clip_region); } static void meta_shaped_texture_dispose (GObject *object) { MetaShapedTexture *self = (MetaShapedTexture *) object; MetaShapedTexturePrivate *priv = self->priv; if (priv->paint_tower) meta_texture_tower_free (priv->paint_tower); priv->paint_tower = NULL; g_clear_pointer (&priv->texture, cogl_object_unref); g_clear_pointer (&priv->opaque_region, cairo_region_destroy); meta_shaped_texture_set_mask_texture (self, NULL); set_unobscured_region (self, NULL); set_clip_region (self, NULL); G_OBJECT_CLASS (meta_shaped_texture_parent_class)->dispose (object); } static CoglPipeline * get_base_pipeline (CoglContext *ctx) { static CoglPipeline *template = NULL; if (G_UNLIKELY (template == NULL)) { template = cogl_pipeline_new (ctx); cogl_pipeline_set_layer_wrap_mode_s (template, 0, COGL_PIPELINE_WRAP_MODE_CLAMP_TO_EDGE); cogl_pipeline_set_layer_wrap_mode_t (template, 0, COGL_PIPELINE_WRAP_MODE_CLAMP_TO_EDGE); cogl_pipeline_set_layer_wrap_mode_s (template, 1, COGL_PIPELINE_WRAP_MODE_CLAMP_TO_EDGE); cogl_pipeline_set_layer_wrap_mode_t (template, 1, COGL_PIPELINE_WRAP_MODE_CLAMP_TO_EDGE); } return template; } static CoglPipeline * get_unmasked_pipeline (CoglContext *ctx) { return get_base_pipeline (ctx); } static CoglPipeline * get_masked_pipeline (CoglContext *ctx) { static CoglPipeline *template = NULL; if (G_UNLIKELY (template == NULL)) { template = cogl_pipeline_copy (get_base_pipeline (ctx)); cogl_pipeline_set_layer_combine (template, 1, "RGBA = MODULATE (PREVIOUS, TEXTURE[A])", NULL); } return template; } 
static CoglPipeline * get_unblended_pipeline (CoglContext *ctx) { static CoglPipeline *template = NULL; if (G_UNLIKELY (template == NULL)) { CoglColor color; template = cogl_pipeline_copy (get_base_pipeline (ctx)); cogl_color_init_from_4ub (&color, 255, 255, 255, 255); cogl_pipeline_set_blend (template, "RGBA = ADD (SRC_COLOR, 0)", NULL); cogl_pipeline_set_color (template, &color); } return template; } static void paint_clipped_rectangle (CoglFramebuffer *fb, CoglPipeline *pipeline, cairo_rectangle_int_t *rect, ClutterActorBox *alloc) { float coords[8]; float x1, y1, x2, y2; x1 = rect->x; y1 = rect->y; x2 = rect->x + rect->width; y2 = rect->y + rect->height; coords[0] = rect->x / (alloc->x2 - alloc->x1); coords[1] = rect->y / (alloc->y2 - alloc->y1); coords[2] = (rect->x + rect->width) / (alloc->x2 - alloc->x1); coords[3] = (rect->y + rect->height) / (alloc->y2 - alloc->y1); coords[4] = coords[0]; coords[5] = coords[1]; coords[6] = coords[2]; coords[7] = coords[3]; cogl_framebuffer_draw_multitextured_rectangle (fb, pipeline, x1, y1, x2, y2, &coords[0], 8); } static void set_cogl_texture (MetaShapedTexture *stex, CoglTexture *cogl_tex) { MetaShapedTexturePrivate *priv; guint width, height; g_return_if_fail (META_IS_SHAPED_TEXTURE (stex)); priv = stex->priv; if (priv->texture) cogl_object_unref (priv->texture); priv->texture = cogl_tex; if (cogl_tex != NULL) { cogl_object_ref (cogl_tex); width = cogl_texture_get_width (COGL_TEXTURE (cogl_tex)); height = cogl_texture_get_height (COGL_TEXTURE (cogl_tex)); } else { width = 0; height = 0; } if (priv->tex_width != width || priv->tex_height != height) { priv->tex_width = width; priv->tex_height = height; clutter_actor_queue_relayout (CLUTTER_ACTOR (stex)); g_signal_emit (stex, signals[SIZE_CHANGED], 0); } /* NB: We don't queue a redraw of the actor here because we don't * know how much of the buffer has changed with respect to the * previous buffer. We only queue a redraw in response to surface * damage. 
*/ if (priv->create_mipmaps) meta_texture_tower_set_base_texture (priv->paint_tower, cogl_tex); } static void meta_shaped_texture_paint (ClutterActor *actor) { MetaShapedTexture *stex = (MetaShapedTexture *) actor; MetaShapedTexturePrivate *priv = stex->priv; guint tex_width, tex_height; guchar opacity; CoglContext *ctx; CoglFramebuffer *fb; CoglTexture *paint_tex; ClutterActorBox alloc; CoglPipelineFilter filter; if (priv->clip_region && cairo_region_is_empty (priv->clip_region)) return; if (!CLUTTER_ACTOR_IS_REALIZED (CLUTTER_ACTOR (stex))) clutter_actor_realize (CLUTTER_ACTOR (stex)); /* The GL EXT_texture_from_pixmap extension does allow for it to be * used together with SGIS_generate_mipmap, however this is very * rarely supported. Also, even when it is supported there * are distinct performance implications from: * * - Updating mipmaps that we don't need * - Having to reallocate pixmaps on the server into larger buffers * * So, we just unconditionally use our mipmap emulation code. If we * wanted to use SGIS_generate_mipmap, we'd have to query COGL to * see if it was supported (no API currently), and then if and only * if that was the case, set the clutter texture quality to HIGH. * Setting the texture quality to high without SGIS_generate_mipmap * support for TFP textures will result in fallbacks to XGetImage. */ if (priv->create_mipmaps) paint_tex = meta_texture_tower_get_paint_texture (priv->paint_tower); else paint_tex = COGL_TEXTURE (priv->texture); if (paint_tex == NULL) return; tex_width = priv->tex_width; tex_height = priv->tex_height; if (tex_width == 0 || tex_height == 0) /* no contents yet */ return; cairo_rectangle_int_t tex_rect = { 0, 0, tex_width, tex_height }; /* Use nearest-pixel interpolation if the texture is unscaled. This * improves performance, especially with software rendering. 
*/ filter = COGL_PIPELINE_FILTER_LINEAR; if (meta_actor_painting_untransformed (tex_width, tex_height, NULL, NULL)) filter = COGL_PIPELINE_FILTER_NEAREST; ctx = clutter_backend_get_cogl_context (clutter_get_default_backend ()); fb = cogl_get_draw_framebuffer (); opacity = clutter_actor_get_paint_opacity (actor); clutter_actor_get_allocation_box (actor, &alloc); cairo_region_t *blended_region; gboolean use_opaque_region = (priv->opaque_region != NULL && opacity == 255); if (use_opaque_region) { if (priv->clip_region != NULL) blended_region = cairo_region_copy (priv->clip_region); else blended_region = cairo_region_create_rectangle (&tex_rect); cairo_region_subtract (blended_region, priv->opaque_region); } else { if (priv->clip_region != NULL) blended_region = cairo_region_reference (priv->clip_region); else blended_region = NULL; } /* Limit to how many separate rectangles we'll draw; beyond this just * fall back and draw the whole thing */ #define MAX_RECTS 16 if (blended_region != NULL) { int n_rects = cairo_region_num_rectangles (blended_region); if (n_rects > MAX_RECTS) { /* Fall back to taking the fully blended path. */ use_opaque_region = FALSE; cairo_region_destroy (blended_region); blended_region = NULL; } } /* First, paint the unblended parts, which are part of the opaque region. 
*/ if (use_opaque_region) { CoglPipeline *opaque_pipeline; cairo_region_t *region; int n_rects; int i; if (priv->clip_region != NULL) { region = cairo_region_copy (priv->clip_region); cairo_region_intersect (region, priv->opaque_region); } else { region = cairo_region_reference (priv->opaque_region); } if (!cairo_region_is_empty (region)) { opaque_pipeline = get_unblended_pipeline (ctx); cogl_pipeline_set_layer_texture (opaque_pipeline, 0, paint_tex); cogl_pipeline_set_layer_filters (opaque_pipeline, 0, filter, filter); n_rects = cairo_region_num_rectangles (region); for (i = 0; i < n_rects; i++) { cairo_rectangle_int_t rect; cairo_region_get_rectangle (region, i, &rect); paint_clipped_rectangle (fb, opaque_pipeline, &rect, &alloc); } } cairo_region_destroy (region); } /* Now, go ahead and paint the blended parts. */ /* We have three cases: * 1) blended_region has rectangles - paint the rectangles. * 2) blended_region is empty - don't paint anything * 3) blended_region is NULL - paint fully-blended. * * 1) and 3) are the times where we have to paint stuff. This tests * for 1) and 3). */ if (blended_region == NULL || !cairo_region_is_empty (blended_region)) { CoglPipeline *blended_pipeline; if (priv->mask_texture == NULL) { blended_pipeline = get_unmasked_pipeline (ctx); } else { blended_pipeline = get_masked_pipeline (ctx); cogl_pipeline_set_layer_texture (blended_pipeline, 1, priv->mask_texture); cogl_pipeline_set_layer_filters (blended_pipeline, 1, filter, filter); } cogl_pipeline_set_layer_texture (blended_pipeline, 0, paint_tex); cogl_pipeline_set_layer_filters (blended_pipeline, 0, filter, filter); CoglColor color; cogl_color_init_from_4ub (&color, opacity, opacity, opacity, opacity); cogl_pipeline_set_color (blended_pipeline, &color); if (blended_region != NULL) { /* 1) blended_region is not empty. Paint the rectangles. 
*/ int i; int n_rects = cairo_region_num_rectangles (blended_region); for (i = 0; i < n_rects; i++) { cairo_rectangle_int_t rect; cairo_region_get_rectangle (blended_region, i, &rect); if (!gdk_rectangle_intersect (&tex_rect, &rect, &rect)) continue; paint_clipped_rectangle (fb, blended_pipeline, &rect, &alloc); } } else { /* 3) blended_region is NULL. Do a full paint. */ cogl_framebuffer_draw_rectangle (fb, blended_pipeline, 0, 0, alloc.x2 - alloc.x1, alloc.y2 - alloc.y1); } } if (blended_region != NULL) cairo_region_destroy (blended_region); } static void meta_shaped_texture_get_preferred_width (ClutterActor *self, gfloat for_height, gfloat *min_width_p, gfloat *natural_width_p) { MetaShapedTexturePrivate *priv = META_SHAPED_TEXTURE (self)->priv; guint width; if (priv->texture) width = priv->tex_width; else width = priv->fallback_width; if (min_width_p) *min_width_p = width; if (natural_width_p) *natural_width_p = width; } static void meta_shaped_texture_get_preferred_height (ClutterActor *self, gfloat for_width, gfloat *min_height_p, gfloat *natural_height_p) { MetaShapedTexturePrivate *priv = META_SHAPED_TEXTURE (self)->priv; guint height; if (priv->texture) height = priv->tex_height; else height = priv->fallback_height; if (min_height_p) *min_height_p = height; if (natural_height_p) *natural_height_p = height; } static cairo_region_t * effective_unobscured_region (MetaShapedTexture *self) { MetaShapedTexturePrivate *priv = self->priv; ClutterActor *actor; /* Fail if we have any mapped clones. 
*/ actor = CLUTTER_ACTOR (self); do { if (clutter_actor_has_mapped_clones (actor)) return NULL; actor = clutter_actor_get_parent (actor); } while (actor != NULL); return priv->unobscured_region; } static gboolean get_unobscured_bounds (MetaShapedTexture *self, cairo_rectangle_int_t *unobscured_bounds) { cairo_region_t *unobscured_region = effective_unobscured_region (self); if (unobscured_region) { cairo_region_get_extents (unobscured_region, unobscured_bounds); return TRUE; } else return FALSE; } static gboolean meta_shaped_texture_get_paint_volume (ClutterActor *actor, ClutterPaintVolume *volume) { MetaShapedTexture *self = META_SHAPED_TEXTURE (actor); ClutterActorBox box; cairo_rectangle_int_t unobscured_bounds; if (!clutter_actor_has_allocation (actor)) return FALSE; clutter_actor_get_allocation_box (actor, &box); if (get_unobscured_bounds (self, &unobscured_bounds)) { box.x1 = MAX (unobscured_bounds.x, box.x1); box.x2 = MIN (unobscured_bounds.x + unobscured_bounds.width, box.x2); box.y1 = MAX (unobscured_bounds.y, box.y1); box.y2 = MIN (unobscured_bounds.y + unobscured_bounds.height, box.y2); } box.x2 = MAX (box.x2, box.x1); box.y2 = MAX (box.y2, box.y1); clutter_paint_volume_union_box (volume, &box); return TRUE; } void meta_shaped_texture_set_create_mipmaps (MetaShapedTexture *stex, gboolean create_mipmaps) { MetaShapedTexturePrivate *priv; g_return_if_fail (META_IS_SHAPED_TEXTURE (stex)); priv = stex->priv; create_mipmaps = create_mipmaps != FALSE; if (create_mipmaps != priv->create_mipmaps) { CoglTexture *base_texture; priv->create_mipmaps = create_mipmaps; base_texture = create_mipmaps ? 
priv->texture : NULL; meta_texture_tower_set_base_texture (priv->paint_tower, base_texture); } } void meta_shaped_texture_set_mask_texture (MetaShapedTexture *stex, CoglTexture *mask_texture) { MetaShapedTexturePrivate *priv; g_return_if_fail (META_IS_SHAPED_TEXTURE (stex)); priv = stex->priv; g_clear_pointer (&priv->mask_texture, cogl_object_unref); if (mask_texture != NULL) { priv->mask_texture = mask_texture; cogl_object_ref (priv->mask_texture); } clutter_actor_queue_redraw (CLUTTER_ACTOR (stex)); } gboolean meta_shaped_texture_is_obscured (MetaShapedTexture *self) { cairo_region_t *unobscured_region = effective_unobscured_region (self); if (unobscured_region) return cairo_region_is_empty (unobscured_region); else return FALSE; } /** * meta_shaped_texture_update_area: * @stex: #MetaShapedTexture * @x: the x coordinate of the damaged area * @y: the y coordinate of the damaged area * @width: the width of the damaged area * @height: the height of the damaged area * * Repairs the damaged area indicated by @x, @y, @width and @height * and potentially queues a redraw. 
* * Return value: Whether a redraw have been queued or not */ gboolean meta_shaped_texture_update_area (MetaShapedTexture *stex, int x, int y, int width, int height) { MetaShapedTexturePrivate *priv; cairo_region_t *unobscured_region; const cairo_rectangle_int_t clip = { x, y, width, height }; priv = stex->priv; if (priv->texture == NULL) return FALSE; meta_texture_tower_update_area (priv->paint_tower, x, y, width, height); unobscured_region = effective_unobscured_region (stex); if (unobscured_region) { cairo_region_t *intersection; if (cairo_region_is_empty (unobscured_region)) return FALSE; intersection = cairo_region_copy (unobscured_region); cairo_region_intersect_rectangle (intersection, &clip); if (!cairo_region_is_empty (intersection)) { cairo_rectangle_int_t damage_rect; cairo_region_get_extents (intersection, &damage_rect); clutter_actor_queue_redraw_with_clip (CLUTTER_ACTOR (stex), &damage_rect); cairo_region_destroy (intersection); return TRUE; } cairo_region_destroy (intersection); return FALSE; } else { clutter_actor_queue_redraw_with_clip (CLUTTER_ACTOR (stex), &clip); return TRUE; } } /** * meta_shaped_texture_set_texture: * @stex: The #MetaShapedTexture * @pixmap: The #CoglTexture to display */ void meta_shaped_texture_set_texture (MetaShapedTexture *stex, CoglTexture *texture) { g_return_if_fail (META_IS_SHAPED_TEXTURE (stex)); set_cogl_texture (stex, texture); } /** * meta_shaped_texture_get_texture: * @stex: The #MetaShapedTexture * * Returns: (transfer none): the unshaped texture */ CoglTexture * meta_shaped_texture_get_texture (MetaShapedTexture *stex) { g_return_val_if_fail (META_IS_SHAPED_TEXTURE (stex), NULL); return COGL_TEXTURE (stex->priv->texture); } /** * meta_shaped_texture_set_opaque_region: * @stex: a #MetaShapedTexture * @opaque_region: (transfer full): the region of the texture that * can have blending turned off. 
* * As most windows have a large portion that does not require blending, * we can easily turn off blending if we know the areas that do not * require blending. This sets the region where we will not blend for * optimization purposes. */ void meta_shaped_texture_set_opaque_region (MetaShapedTexture *stex, cairo_region_t *opaque_region) { MetaShapedTexturePrivate *priv; g_return_if_fail (META_IS_SHAPED_TEXTURE (stex)); priv = stex->priv; if (priv->opaque_region) cairo_region_destroy (priv->opaque_region); if (opaque_region) priv->opaque_region = cairo_region_reference (opaque_region); else priv->opaque_region = NULL; } /** * meta_shaped_texture_get_image: * @stex: A #MetaShapedTexture * @clip: A clipping rectangle, to help prevent extra processing. * In the case that the clipping rectangle is partially or fully * outside the bounds of the texture, the rectangle will be clipped. * * Flattens the two layers of the shaped texture into one ARGB32 * image by alpha blending the two images, and returns the flattened * image. * * Returns: (transfer full): a new cairo surface to be freed with * cairo_surface_destroy(). */ cairo_surface_t * meta_shaped_texture_get_image (MetaShapedTexture *stex, cairo_rectangle_int_t *clip) { CoglTexture *texture, *mask_texture; cairo_rectangle_int_t texture_rect = { 0, 0, 0, 0 }; cairo_surface_t *surface; g_return_val_if_fail (META_IS_SHAPED_TEXTURE (stex), NULL); texture = COGL_TEXTURE (stex->priv->texture); if (texture == NULL) return NULL; texture_rect.width = cogl_texture_get_width (texture); texture_rect.height = cogl_texture_get_height (texture); if (clip != NULL) { /* GdkRectangle is just a typedef of cairo_rectangle_int_t, * so we can use the gdk_rectangle_* APIs on these. 
*/ if (!gdk_rectangle_intersect (&texture_rect, clip, clip)) return NULL; } if (clip != NULL) texture = cogl_texture_new_from_sub_texture (texture, clip->x, clip->y, clip->width, clip->height); surface = cairo_image_surface_create (CAIRO_FORMAT_ARGB32, cogl_texture_get_width (texture), cogl_texture_get_height (texture)); cogl_texture_get_data (texture, CLUTTER_CAIRO_FORMAT_ARGB32, cairo_image_surface_get_stride (surface), cairo_image_surface_get_data (surface)); cairo_surface_mark_dirty (surface); if (clip != NULL) cogl_object_unref (texture); mask_texture = stex->priv->mask_texture; if (mask_texture != NULL) { cairo_t *cr; cairo_surface_t *mask_surface; if (clip != NULL) mask_texture = cogl_texture_new_from_sub_texture (mask_texture, clip->x, clip->y, clip->width, clip->height); mask_surface = cairo_image_surface_create (CAIRO_FORMAT_A8, cogl_texture_get_width (mask_texture), cogl_texture_get_height (mask_texture)); cogl_texture_get_data (mask_texture, COGL_PIXEL_FORMAT_A_8, cairo_image_surface_get_stride (mask_surface), cairo_image_surface_get_data (mask_surface)); cairo_surface_mark_dirty (mask_surface); cr = cairo_create (surface); cairo_set_source_surface (cr, mask_surface, 0, 0); cairo_set_operator (cr, CAIRO_OPERATOR_DEST_IN); cairo_paint (cr); cairo_destroy (cr); cairo_surface_destroy (mask_surface); if (clip != NULL) cogl_object_unref (mask_texture); } return surface; } void meta_shaped_texture_set_fallback_size (MetaShapedTexture *self, guint fallback_width, guint fallback_height) { MetaShapedTexturePrivate *priv = self->priv; priv->fallback_width = fallback_width; priv->fallback_height = fallback_height; } static void meta_shaped_texture_cull_out (MetaCullable *cullable, cairo_region_t *unobscured_region, cairo_region_t *clip_region) { MetaShapedTexture *self = META_SHAPED_TEXTURE (cullable); MetaShapedTexturePrivate *priv = self->priv; set_unobscured_region (self, unobscured_region); set_clip_region (self, clip_region); if 
(clutter_actor_get_paint_opacity (CLUTTER_ACTOR (self)) == 0xff) { if (priv->opaque_region) { if (unobscured_region) cairo_region_subtract (unobscured_region, priv->opaque_region); if (clip_region) cairo_region_subtract (clip_region, priv->opaque_region); } } } static void meta_shaped_texture_reset_culling (MetaCullable *cullable) { MetaShapedTexture *self = META_SHAPED_TEXTURE (cullable); set_clip_region (self, NULL); } static void cullable_iface_init (MetaCullableInterface *iface) { iface->cull_out = meta_shaped_texture_cull_out; iface->reset_culling = meta_shaped_texture_reset_culling; } ClutterActor * meta_shaped_texture_new (void) { return g_object_new (META_TYPE_SHAPED_TEXTURE, NULL); }
mchalupa/mutter
src/compositor/meta-shaped-texture.c
C
gpl-2.0
28,539
/* JPC: An x86 PC Hardware Emulator for a pure Java Virtual Machine Copyright (C) 2012-2013 Ian Preston This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Details (including contact information) can be found at: jpc.sourceforge.net or the developer website sourceforge.net/projects/jpc/ End of licence header */ package org.jpc.emulator.execution.opcodes.rm; import org.jpc.emulator.execution.*; import org.jpc.emulator.execution.decoder.*; import org.jpc.emulator.processor.*; import org.jpc.emulator.processor.fpu64.*; import static org.jpc.emulator.processor.Processor.*; public class adc_Ed_Ib extends Executable { final int op1Index; final int immb; public adc_Ed_Ib(int blockStart, int eip, int prefices, PeekableInputStream input) { super(blockStart, eip); int modrm = input.readU8(); op1Index = Modrm.Ed(modrm); immb = Modrm.Ib(input); } public Branch execute(Processor cpu) { Reg op1 = cpu.regs[op1Index]; boolean incf = cpu.cf(); cpu.flagOp1 = op1.get32(); cpu.flagOp2 = immb; cpu.flagResult = (cpu.flagOp1 + cpu.flagOp2 + (incf ? 1 : 0)); op1.set32(cpu.flagResult); cpu.flagIns = UCodes.ADC32; cpu.flagStatus = OSZAPC; return Branch.None; } public boolean isBranch() { return false; } public String toString() { return this.getClass().getName(); } }
ysangkok/JPC
src/org/jpc/emulator/execution/opcodes/rm/adc_Ed_Ib.java
Java
gpl-2.0
2,085
/* $Id: vboxext.h $ */ /** @file * * VBox extension to Wine D3D * * Copyright (C) 2011-2015 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; * you can redistribute it and/or modify it under the terms of the GNU * General Public License (GPL) as published by the Free Software * Foundation, in version 2 as it comes in the "COPYING" file of the * VirtualBox OSE distribution. VirtualBox OSE is distributed in the * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind. */ #ifndef ___VBOXEXT_H__ #define ___VBOXEXT_H__ #ifdef VBOX_WINE_WITHOUT_LIBWINE # include <windows.h> #endif #include <iprt/list.h> HRESULT VBoxExtCheckInit(); HRESULT VBoxExtCheckTerm(); #if defined(VBOX_WINE_WITH_SINGLE_CONTEXT) || defined(VBOX_WINE_WITH_SINGLE_SWAPCHAIN_CONTEXT) # ifndef VBOX_WITH_WDDM /* Windows destroys HDC created by a given thread when the thread is terminated * this leads to a mess-up in Wine & Chromium code in some situations, e.g. * D3D device is created in one thread, then the thread is terminated, * then device is started to be used in another thread */ HDC VBoxExtGetDC(HWND hWnd); int VBoxExtReleaseDC(HWND hWnd, HDC hDC); # endif /* We need to do a VBoxTlsRefRelease for the current thread context on thread exit to avoid memory leaking * Calling VBoxTlsRefRelease may result in a call to context dtor callback, which is supposed to be run under wined3d lock. * We can not acquire a wined3d lock in DllMain since this would result in a lock order violation, which may result in a deadlock. * In other words, wined3d may internally call Win32 API functions which result in a DLL lock acquisition while holding wined3d lock. * So lock order should always be "wined3d lock" -> "dll lock". * To avoid possible deadlocks we make an asynchronous call to a worker thread to make a context release from there. 
*/ struct wined3d_context; void VBoxExtReleaseContextAsync(struct wined3d_context *context); #endif /* API for creating & destroying windows */ HRESULT VBoxExtWndDestroy(HWND hWnd, HDC hDC); HRESULT VBoxExtWndCreate(DWORD width, DWORD height, HWND *phWnd, HDC *phDC); /* hashmap */ typedef DECLCALLBACK(uint32_t) FNVBOXEXT_HASHMAP_HASH(void *pvKey); typedef FNVBOXEXT_HASHMAP_HASH *PFNVBOXEXT_HASHMAP_HASH; typedef DECLCALLBACK(bool) FNVBOXEXT_HASHMAP_EQUAL(void *pvKey1, void *pvKey2); typedef FNVBOXEXT_HASHMAP_EQUAL *PFNVBOXEXT_HASHMAP_EQUAL; struct VBOXEXT_HASHMAP; struct VBOXEXT_HASHMAP_ENTRY; typedef DECLCALLBACK(bool) FNVBOXEXT_HASHMAP_VISITOR(struct VBOXEXT_HASHMAP *pMap, void *pvKey, struct VBOXEXT_HASHMAP_ENTRY *pValue, void *pvVisitor); typedef FNVBOXEXT_HASHMAP_VISITOR *PFNVBOXEXT_HASHMAP_VISITOR; typedef struct VBOXEXT_HASHMAP_ENTRY { RTLISTNODE ListNode; void *pvKey; uint32_t u32Hash; } VBOXEXT_HASHMAP_ENTRY, *PVBOXEXT_HASHMAP_ENTRY; typedef struct VBOXEXT_HASHMAP_BUCKET { RTLISTNODE EntryList; } VBOXEXT_HASHMAP_BUCKET, *PVBOXEXT_HASHMAP_BUCKET; #define VBOXEXT_HASHMAP_NUM_BUCKETS 29 typedef struct VBOXEXT_HASHMAP { PFNVBOXEXT_HASHMAP_HASH pfnHash; PFNVBOXEXT_HASHMAP_EQUAL pfnEqual; uint32_t cEntries; VBOXEXT_HASHMAP_BUCKET aBuckets[VBOXEXT_HASHMAP_NUM_BUCKETS]; } VBOXEXT_HASHMAP, *PVBOXEXT_HASHMAP; void VBoxExtHashInit(PVBOXEXT_HASHMAP pMap, PFNVBOXEXT_HASHMAP_HASH pfnHash, PFNVBOXEXT_HASHMAP_EQUAL pfnEqual); PVBOXEXT_HASHMAP_ENTRY VBoxExtHashPut(PVBOXEXT_HASHMAP pMap, void *pvKey, PVBOXEXT_HASHMAP_ENTRY pEntry); PVBOXEXT_HASHMAP_ENTRY VBoxExtHashGet(PVBOXEXT_HASHMAP pMap, void *pvKey); PVBOXEXT_HASHMAP_ENTRY VBoxExtHashRemove(PVBOXEXT_HASHMAP pMap, void *pvKey); void* VBoxExtHashRemoveEntry(PVBOXEXT_HASHMAP pMap, PVBOXEXT_HASHMAP_ENTRY pEntry); void VBoxExtHashVisit(PVBOXEXT_HASHMAP pMap, PFNVBOXEXT_HASHMAP_VISITOR pfnVisitor, void *pvVisitor); void VBoxExtHashCleanup(PVBOXEXT_HASHMAP pMap, PFNVBOXEXT_HASHMAP_VISITOR pfnVisitor, void *pvVisitor); 
DECLINLINE(uint32_t) VBoxExtHashSize(PVBOXEXT_HASHMAP pMap) { return pMap->cEntries; } DECLINLINE(void*) VBoxExtHashEntryKey(PVBOXEXT_HASHMAP_ENTRY pEntry) { return pEntry->pvKey; } struct VBOXEXT_HASHCACHE_ENTRY; typedef DECLCALLBACK(void) FNVBOXEXT_HASHCACHE_CLEANUP_ENTRY(void *pvKey, struct VBOXEXT_HASHCACHE_ENTRY *pEntry); typedef FNVBOXEXT_HASHCACHE_CLEANUP_ENTRY *PFNVBOXEXT_HASHCACHE_CLEANUP_ENTRY; typedef struct VBOXEXT_HASHCACHE_ENTRY { VBOXEXT_HASHMAP_ENTRY MapEntry; uint32_t u32Usage; } VBOXEXT_HASHCACHE_ENTRY, *PVBOXEXT_HASHCACHE_ENTRY; typedef struct VBOXEXT_HASHCACHE { VBOXEXT_HASHMAP Map; uint32_t cMaxElements; PFNVBOXEXT_HASHCACHE_CLEANUP_ENTRY pfnCleanupEntry; } VBOXEXT_HASHCACHE, *PVBOXEXT_HASHCACHE; #define VBOXEXT_HASHCACHE_FROM_MAP(_pMap) RT_FROM_MEMBER((_pMap), VBOXEXT_HASHCACHE, Map) #define VBOXEXT_HASHCACHE_ENTRY_FROM_MAP(_pEntry) RT_FROM_MEMBER((_pEntry), VBOXEXT_HASHCACHE_ENTRY, MapEntry) DECLINLINE(void) VBoxExtCacheInit(PVBOXEXT_HASHCACHE pCache, uint32_t cMaxElements, PFNVBOXEXT_HASHMAP_HASH pfnHash, PFNVBOXEXT_HASHMAP_EQUAL pfnEqual, PFNVBOXEXT_HASHCACHE_CLEANUP_ENTRY pfnCleanupEntry) { VBoxExtHashInit(&pCache->Map, pfnHash, pfnEqual); pCache->cMaxElements = cMaxElements; pCache->pfnCleanupEntry = pfnCleanupEntry; } DECLINLINE(PVBOXEXT_HASHCACHE_ENTRY) VBoxExtCacheGet(PVBOXEXT_HASHCACHE pCache, void *pvKey) { PVBOXEXT_HASHMAP_ENTRY pEntry = VBoxExtHashRemove(&pCache->Map, pvKey); return VBOXEXT_HASHCACHE_ENTRY_FROM_MAP(pEntry); } DECLINLINE(void) VBoxExtCachePut(PVBOXEXT_HASHCACHE pCache, void *pvKey, PVBOXEXT_HASHCACHE_ENTRY pEntry) { PVBOXEXT_HASHMAP_ENTRY pOldEntry = VBoxExtHashPut(&pCache->Map, pvKey, &pEntry->MapEntry); PVBOXEXT_HASHCACHE_ENTRY pOld; if (!pOldEntry) return; pOld = VBOXEXT_HASHCACHE_ENTRY_FROM_MAP(pOldEntry); if (pOld != pEntry) pCache->pfnCleanupEntry(pvKey, pOld); } void VBoxExtCacheCleanup(PVBOXEXT_HASHCACHE pCache); DECLINLINE(void) VBoxExtCacheTerm(PVBOXEXT_HASHCACHE pCache) { VBoxExtCacheCleanup(pCache); } 
#endif /* #ifndef ___VBOXEXT_H__*/
sobomax/virtualbox_64bit_edd
src/VBox/Devices/Graphics/shaderlib/vboxext.h
C
gpl-2.0
6,144
/* * Xylon DRM driver plane header * * Copyright (C) 2014 Xylon d.o.o. * Author: Davor Joja <davor.joja@logicbricks.com> * * Based on Xilinx DRM plane header. * Copyright (C) 2013 Xilinx, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifndef _XYLON_DRM_PLANE_H_ #define _XYLON_DRM_PLANE_H_ enum xylon_drm_plane_op_id { XYLON_DRM_PLANE_OP_ID_BACKGROUND_COLOR, XYLON_DRM_PLANE_OP_ID_COLOR_TRANSPARENCY, XYLON_DRM_PLANE_OP_ID_INTERLACE, XYLON_DRM_PLANE_OP_ID_TRANSPARENCY, XYLON_DRM_PLANE_OP_ID_TRANSPARENT_COLOR }; struct xylon_drm_plane_op { enum xylon_drm_plane_op_id id; u32 param; }; struct xylon_drm_plane_manager; void xylon_drm_plane_dpms(struct drm_plane *base_plane, int dpms); int xylon_drm_plane_fb_set(struct drm_plane *base_plane, struct drm_framebuffer *fb, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, u32 src_x, u32 src_y, u32 src_w, u32 src_h); void xylon_drm_plane_commit(struct drm_plane *base_plane); void xylon_drm_plane_destroy(struct drm_plane *base_plane); struct drm_plane * xylon_drm_plane_create(struct xylon_drm_plane_manager *manager, unsigned int possible_crtcs, bool priv, int priv_id); void xylon_drm_plane_destroy_all(struct xylon_drm_plane_manager *manager); int xylon_drm_plane_create_all(struct xylon_drm_plane_manager *manager, unsigned int possible_crtcs); bool xylon_drm_plane_check_format(struct xylon_drm_plane_manager *manager, u32 format); unsigned int xylon_drm_plane_get_bits_per_pixel(struct drm_plane *base_plane); int xylon_drm_plane_op(struct drm_plane *base_plane, struct xylon_drm_plane_op *op); struct 
xylon_drm_plane_manager * xylon_drm_plane_probe_manager(struct drm_device *dev, struct xylon_cvc *cvc); void xylon_drm_plane_remove_manager(struct xylon_drm_plane_manager *manager); #endif /* _XYLON_DRM_PLANE_H_ */
sandeshghimire/xlnx-3.17
drivers/gpu/drm/xylon/xylon_plane.h
C
gpl-2.0
2,306
#!/bin/bash # called by dracut check() { require_binaries wicked || return 1 # do not add this module by default return 255 } # called by dracut depends() { echo systemd dbus return 0 } # called by dracut installkernel() { return 0 } # called by dracut install() { local -a wicked_units inst_hook cmdline 99 "$moddir/wicked-config.sh" # Seems to not execute if in initqueue/settled inst_hook pre-udev 99 "$moddir/wicked-run.sh" # even with wicked configuring the interface, ip is useful inst_multiple ip inst_dir /etc/wicked/extensions inst_dir /usr/share/wicked/schema if [ -d /usr/lib/wicked/bin ]; then inst_dir /usr/lib/wicked/bin inst_multiple "/usr/lib/wicked/bin/*" elif [ -d /usr/libexec/wicked/bin ]; then inst_dir /usr/libexec/wicked/bin inst_multiple "/usr/libexec/wicked/bin/*" fi inst_dir /var/lib/wicked inst_multiple "/etc/wicked/*.xml" inst_multiple "/etc/wicked/extensions/*" if [ -f /etc/dbus-1/system.d/org.opensuse.Network.conf ]; then inst_multiple "/etc/dbus-1/system.d/org.opensuse.Network*" elif [ -f /usr/share/dbus-1/system.d/org.opensuse.Network.conf ]; then inst_multiple "/usr/share/dbus-1/system.d/org.opensuse.Network*" fi inst_multiple "/usr/share/wicked/schema/*" inst_multiple "/usr/sbin/wicked*" wicked_units=( "$systemdsystemunitdir"/wickedd.service "$systemdsystemunitdir"/wickedd-auto4.service "$systemdsystemunitdir"/wickedd-dhcp4.service "$systemdsystemunitdir"/wickedd-dhcp6.service "$systemdsystemunitdir"/wickedd-nanny.service ) inst_multiple "${wicked_units[@]}" for unit in "${wicked_units[@]}"; do sed -i 's/^After=.*/After=dbus.service/g' "$initdir/$unit" sed -i 's/^Before=\(.*\)/Before=\1 dracut-pre-udev.service/g' "$initdir/$unit" sed -i 's/^Wants=\(.*\)/Wants=\1 dbus.service/g' "$initdir/$unit" # shellcheck disable=SC1004 sed -i -e \ '/^\[Unit\]/aDefaultDependencies=no\ Conflicts=shutdown.target\ Before=shutdown.target' \ "$initdir/$unit" done }
FGrose/dracut
modules.d/35network-wicked/module-setup.sh
Shell
gpl-2.0
2,211
ifeq ($(subdir),misc) sysdep_routines += ioperm iopl vm86 setfsgid setfsuid setresgid setresuid sysdep_headers += sys/elf.h sys/perm.h sys/reg.h sys/vm86.h sys/debugreg.h sys/io.h endif ifeq ($(subdir),elf) sysdep-others += lddlibc4 install-bin += lddlibc4 # extra shared linker files to link into dl-allobjs.so and libc sysdep-dl-routines += dl-procinfo sysdep_routines += dl-procinfo # extra shared linker files to link only into dl-allobjs.so sysdep-rtld-routines += dl-procinfo ifeq (yes,$(build-shared)) # This is needed to support g++ v2 and v3. sysdep_routines += framestate shared-only-routines += framestate endif endif ifeq ($(subdir),resource) sysdep_routines += oldgetrlimit64 endif
nslu2/glibc
sysdeps/unix/sysv/linux/i386/Makefile
Makefile
gpl-2.0
699
// D3D11RenderFactory.hpp // KlayGE D3D11äÖȾÒýÇæ³éÏ󹤳§ Í·Îļþ // Ver 3.8.0 // °æÈ¨ËùÓÐ(C) ¹¨ÃôÃô, 2009 // Homepage: http://www.klayge.org // // 3.8.0 // ³õ´Î½¨Á¢ (2009.1.30) // // Ð޸ļǼ ///////////////////////////////////////////////////////////////////////////////// #ifndef _D3D11RENDERFACTORY_HPP #define _D3D11RENDERFACTORY_HPP #pragma once #include <KlayGE/PreDeclare.hpp> #ifdef KLAYGE_HAS_DECLSPEC #ifdef KLAYGE_D3D11_RE_SOURCE // Build dll #define KLAYGE_D3D11_RE_API __declspec(dllexport) #else // Use dll #define KLAYGE_D3D11_RE_API __declspec(dllimport) #endif #else #define KLAYGE_D3D11_RE_API #endif // KLAYGE_HAS_DECLSPEC extern "C" { KLAYGE_D3D11_RE_API void MakeRenderFactory(KlayGE::RenderFactoryPtr& ptr); } #endif // _D3D11RENDERFACTORY_HPP
qiankanglai/KlayGE
KlayGE/Plugins/Include/KlayGE/D3D11/D3D11RenderFactory.hpp
C++
gpl-2.0
830
#ifndef COLOURS_H_ #define COLOURS_H_ #include "colours_external.h" extern int Colours_table[256]; typedef enum { COLOURS_PRESET_STANDARD, COLOURS_PRESET_DEEPBLACK, COLOURS_PRESET_VIBRANT, COLOURS_PRESET_CUSTOM, /* Number of "normal" (not including CUSTOM) values in enumerator */ COLOURS_PRESET_SIZE = COLOURS_PRESET_CUSTOM } Colours_preset_t; /* Contains controls for palette adjustment. These controls are available for NTSC and PAL palettes. */ typedef struct Colours_setup_t { double hue; /* TV tint control */ double saturation; double contrast; double brightness; double gamma; /* Delay between phases of two consecutive chromas, in degrees. Corresponds to the color adjustment potentiometer on the bottom of Atari computers. */ double color_delay; int black_level; /* 0..255. ITU-R Recommendation BT.601 advises it to be 16. */ int white_level; /* 0..255. ITU-R Recommendation BT.601 advises it to be 235. */ } Colours_setup_t; /* Limits for the adjustable values. */ #define COLOURS_HUE_MIN -1.0 #define COLOURS_HUE_MAX 1.0 #define COLOURS_SATURATION_MIN -1.0 #define COLOURS_SATURATION_MAX 1.0 #define COLOURS_CONTRAST_MIN -2.0 #define COLOURS_CONTRAST_MAX 2.0 #define COLOURS_BRIGHTNESS_MIN -2.0 #define COLOURS_BRIGHTNESS_MAX 2.0 #define COLOURS_GAMMA_MIN 1.0 #define COLOURS_GAMMA_MAX 3.5 #define COLOURS_DELAY_MIN 10 #define COLOURS_DELAY_MAX 50 /* Pointer to the current palette setup. Depending on the current TV system, it points to the NTSC setup, or the PAL setup. (See COLOURS_NTSC_setup and COLOURS_PAL_setup.) */ extern Colours_setup_t *Colours_setup; #define Colours_GetR(x) ((UBYTE) (Colours_table[x] >> 16)) #define Colours_GetG(x) ((UBYTE) (Colours_table[x] >> 8)) #define Colours_GetB(x) ((UBYTE) Colours_table[x]) /* Packs R, G, B into palette COLORTABLE_PTR for colour number I. */ void Colours_SetRGB(int i, int r, int g, int b, int *colortable_ptr); /* Called when the TV system changes, it updates the current palette accordingly. 
*/ void Colours_SetVideoSystem(int mode); /* Updates the current palette - should be called after changing palette setup or loading/unloading an external palette. */ void Colours_Update(void); /* Restores default setup for the current palette (NTSC or PAL one). Colours_Update should be called afterwards to apply changes. */ void Colours_RestoreDefaults(void); /* Save the current colours, including adjustments, to a palette file. Returns TRUE on success or FALSE on error. */ int Colours_Save(const char *filename); /* Pointer to an externally-loaded palette. Depending on the current TV system, it points to the external NTSC or PAL palette - they can be loaded independently. (See COLOURS_NTSC_external and COLOURS_PAL_external.) */ extern COLOURS_EXTERNAL_t *Colours_external; /* Initialise variables before loading from config file. */ void Colours_PreInitialise(void); /* Read/write to configuration file. */ int Colours_ReadConfig(char *option, char *ptr); void Colours_WriteConfig(FILE *fp); /* Colours initialisation and processing of command-line arguments. */ int Colours_Initialise(int *argc, char *argv[]); /* Functions for setting and getting the color preset. PRESET cannot equal COLOURS_PRESET_CUSTOM. */ void Colours_SetPreset(Colours_preset_t preset); Colours_preset_t Colours_GetPreset(void); /* Convert given R, G and B values to corresponding Y, U, V values (all in the 0.0 - 1.0 range). */ void Colours_RGB2YUV(double r, double g, double b, double *y, double *u, double *v); /* Convert given Y, U and V values to corresponding R, G, B values (all in the 0.0 - 1.0 range). */ void Colours_YUV2RGB(double y, double u, double v, double *r, double *g, double *b); /* Converts a gamma-adjusted color value c (0 <= c <= 1) into linear value. */ double Colours_Gamma2Linear(double c, double gamma_adj); /* Converts a linear color value c (0 <= c <= 1) into sRGB gamma-corrected value. */ double Colours_Linear2sRGB(double c); #endif /* COLOURS_H_ */
robmcmullen/atari800
src/colours.h
C
gpl-2.0
4,009
<table class="form-table"><tbody> <tr><th scope="row"><?php _e("Rating", "gd-star-rating"); ?></th> <td> <table cellpadding="0" cellspacing="0" class="previewtable"> <tr> <td width="150"><?php _e("Thumbs Set", "gd-star-rating"); ?>:</td> <td width="200" align="left"> <select style="width: 180px;" name="gdsr_thumb_style" id="gdsr_thumb_style"> <?php GDSRHelper::render_styles_select($gdsr_gfx->thumbs, $gdsr_options["thumb_style"]); ?> </select> </td> <td width="10"></td> <td width="150" align="left"> <?php GDSRHelper::render_thumbs_sizes("gdsr_thumb_size", $gdsr_options["thumb_size"]); ?> </td> </tr> </table> <table cellpadding="0" cellspacing="0" class="previewtable"> <tr> <td width="150">MSIE 6:</td> <td width="200" align="left"> <select style="width: 180px;" name="gdsr_thumb_style_ie6" id="gdsr_thumb_style_ie6"> <?php GDSRHelper::render_styles_select($gdsr_gfx->thumbs, $gdsr_options["thumb_style_ie6"]); ?> </select> </td> <td width="10"></td> <td width="150"><?php _e("Rating header", "gd-star-rating"); ?>:</td> <td><input type="text" name="gdsr_thumb_header_text" id="gdsr_thumb_header_text" value="<?php echo wp_specialchars($gdsr_options["thumb_header_text"]); ?>" style="width: 170px" /></td> </tr> </table> <div class="gdsr-table-split"></div> <table cellpadding="0" cellspacing="0" class="previewtable"> <tr> <td width="150" valign="top"><?php _e("Auto insert rating code", "gd-star-rating"); ?>:</td> <td width="200" valign="top"> <input type="checkbox" name="gdsr_thumb_posts" id="gdsr_thumb_posts"<?php if ($gdsr_options["thumb_display_posts"] == 1) echo " checked"; ?> /><label style="margin-left: 5px;" for="gdsr_posts"><?php _e("For individual posts.", "gd-star-rating"); ?></label> <br /> <input type="checkbox" name="gdsr_thumb_pages" id="gdsr_thumb_pages"<?php if ($gdsr_options["thumb_display_pages"] == 1) echo " checked"; ?> /><label style="margin-left: 5px;" for="gdsr_pages"><?php _e("For individual pages.", "gd-star-rating"); ?></label> </td> <td 
width="10"></td> <td valign="top"> <input type="checkbox" name="gdsr_thumb_archive" id="gdsr_thumb_archive"<?php if ($gdsr_options["thumb_display_archive"] == 1) echo " checked"; ?> /><label style="margin-left: 5px;" for="gdsr_archive"><?php _e("For posts displayed in Archives.", "gd-star-rating"); ?></label> <br /> <input type="checkbox" name="gdsr_thumb_home" id="gdsr_thumb_home"<?php if ($gdsr_options["thumb_display_home"] == 1) echo " checked"; ?> /><label style="margin-left: 5px;" for="gdsr_home"><?php _e("For posts displayed on Front Page.", "gd-star-rating"); ?></label> <br /> <input type="checkbox" name="gdsr_thumb_search" id="gdsr_thumb_search"<?php if ($gdsr_options["thumb_display_search"] == 1) echo " checked"; ?> /><label style="margin-left: 5px;" for="gdsr_search"><?php _e("For posts displayed on Search results.", "gd-star-rating"); ?></label> </td> </tr> </table> <div class="gdsr-table-split"></div> <table cellpadding="0" cellspacing="0" class="previewtable"> <tr> <td width="150"><?php _e("Auto insert location", "gd-star-rating"); ?>:</td> <td width="200" valign="top"><?php GDSRHelper::render_insert_position("gdsr_thumb_auto_display_position", $gdsr_options["thumb_auto_display_position"]); ?></td> </tr> </table> </td> </tr> <tr><th scope="row"><?php _e("Defaults", "gd-star-rating"); ?></th> <td> <table cellpadding="0" cellspacing="0" class="previewtable"> <tr> <td width="150"><?php _e("Vote rule", "gd-star-rating"); ?>:</td> <td width="200" align="left"> <?php GDSRHelper::render_rules_combo("gdsr_recc_default_vote_articles", $gdsr_options["recc_default_voterules_articles"]); ?> </td> <td width="10"></td> <?php if ($gdsr_options["moderation_active"] == 1) { ?> <td width="150"><?php _e("Moderation rule", "gd-star-rating"); ?>:</td> <td width="200" align="left"> <?php GDSRHelper::render_moderation_combo("gdsr_recc_default_mod_articles", $gdsr_options["recc_default_moderation_articles"]); ?> </td> <?php } ?> </tr> </table> <div 
class="gdsr-table-split"></div> <table cellpadding="0" cellspacing="0" class="previewtable"> <tr> <td width="150"><?php _e("Rating Template", "gd-star-rating"); ?>:</td> <td align="left"><?php gdTemplateHelper::render_templates_section("TAB", "gdsr_default_tab_template", $gdsr_options["default_tab_template"], 350); ?></td> </tr> <tr> <td width="150"><?php _e("Animation indicator", "gd-star-rating"); ?>:</td> <td align="left"><?php GDSRHelper::render_loaders("gdsr_wait_loader_artthumb", $gdsr_options["wait_loader_artthumb"], 'jqloaderartthumb', 180, '', true); ?></td> </tr> </table> </td> </tr> </tbody></table>
kkirsche/WobbleNation
wp-content/plugins/gd-star-rating/options/settings/settings_thumbs_articles.php
PHP
gpl-2.0
5,704
<?php /** * @group MobileFrontend */ class MobileFrontendHooksTest extends MediaWikiTestCase { /** * Test headers and alternate/canonical links to be set or not * * @dataProvider onBeforePageDisplayDataProvider */ public function testOnBeforePageDisplay( $mobileUrlTemplate, $mfNoindexPages, $mfEnableXAnalyticsLogging, $mfXAnalyticsItems, $isAlternateCanonical, $isXAnalytics ) { // set globals $this->setMwGlobals( array( 'wgMobileUrlTemplate' => $mobileUrlTemplate, 'wgMFNoindexPages' => $mfNoindexPages, 'wgMFEnableXAnalyticsLogging' => $mfEnableXAnalyticsLogging, ) ); // test with forced mobile view $param = $this->getContextSetup( 'mobile', $mfXAnalyticsItems ); $out = $param['out']; $sk = $param['sk']; // run the test MobileFrontendHooks::onBeforePageDisplay( $out, $sk ); // test, if alternate or canonical link is added, but not both $links = $out->getLinkTags(); $this->assertEquals( $isAlternateCanonical, count( $links ) ); // if there should be an alternate or canonical link, check, if it's the correct one if ( $isAlternateCanonical ) { // should be canonical link, not alternate in mobile view $this->assertEquals( 'canonical', $links[0]['rel'] ); } // in mobile view, a vary cookie header should always be set $this->assertEquals( true, (bool)strpos( $out->getVaryHeader(), 'Cookie' ) ); // check, if XAnalytics is set, if it should be $resp = $param['context']->getRequest()->response(); $this->assertEquals( $isXAnalytics, (bool)$resp->getHeader( 'X-Analytics' ) ); // test with forced desktop view $param = $this->getContextSetup( 'desktop', $mfXAnalyticsItems ); $out = $param['out']; $sk = $param['sk']; // run the test MobileFrontendHooks::onBeforePageDisplay( $out, $sk ); // test, if alternate or canonical link is added, but not both $links = $out->getLinkTags(); $this->assertEquals( $isAlternateCanonical, count( $links ) ); // if there should be an alternate or canonical link, check, if it's the correct one if ( $isAlternateCanonical ) { // should be alternate 
link, not canonical in desktop view $this->assertEquals( 'alternate', $links[0]['rel'] ); } // in desktop view the cookie vary header should never be set $this->assertEquals( false, (bool)strpos( $out->getVaryHeader(), 'Cookie' ) ); // there should never be an XAnalytics header in desktop mode $resp = $param['context']->getRequest()->response(); $this->assertEquals( false, (bool)$resp->getHeader( 'X-Analytics' ) ); } /** * Creates a new set of object for the actual test context, including a new * outputpage and skintemplate. * * @param string $mode The mode for the test cases (desktop, mobile) * @return array Array of objects, including MobileContext (context), * SkinTemplate (sk) and OutputPage (out) */ protected function getContextSetup( $mode, $mfXAnalyticsItems ) { // Create a new MobileContext object for this test MobileContext::setInstance( null ); // create a new instance of MobileContext $context = MobileContext::singleton(); // create a DerivativeContext to use in MobileContext later $mainContext = new DerivativeContext( RequestContext::getMain() ); // create a new, empty OutputPage $out = new OutputPage( $context ); // create a new, empty SkinTemplate $sk = new SkinTemplate(); // create a new Title (main page) $title = Title::newMainPage(); // create a FauxRequest to use instead of a WebRequest object (FauxRequest forces // the creation of a FauxResponse, which allows to investigate sent header values) $request = new FauxRequest(); // set the new request object to the context $mainContext->setRequest( $request ); // set the main page title to the context $mainContext->setTitle( $title ); // set the context to the SkinTemplate $sk->setContext( $mainContext ); // set the OutputPage to the context $mainContext->setOutput( $out ); // set the DerivativeContext as a base to MobileContext $context->setContext( $mainContext ); // set the mode to MobileContext $context->setUseFormat( $mode ); // if there are any XAnalytics items, add them foreach ( 
$mfXAnalyticsItems as $key => $val ) { $context->addAnalyticsLogItem( $key, $val ); } // set the newly created MobileContext object as the current instance to use MobileContext::setInstance( $context ); // return the stuff return array( 'out' => $out, 'sk' => $sk, 'context' => $context, ); } /** * Dataprovider fro testOnBeforePageDisplay */ public function onBeforePageDisplayDataProvider() { return array( // wgMobileUrlTemplate, wgMFNoindexPages, wgMFEnableXAnalyticsLogging, // XanalyticsItems, alternate & canonical link, XAnalytics array( true, true, true, array( 'mf-m' => 'a', 'zero' => '502-13' ), 1, true ), array( true, false, true, array( 'mf-m' => 'a', 'zero' => '502-13' ), 0, true ), array( false, true, true, array( 'mf-m' => 'a', 'zero' => '502-13' ), 0, true ), array( false, false, true, array( 'mf-m' => 'a', 'zero' => '502-13' ), 0, true ), array( true, true, false, array(), 1, false ), array( true, false, false, array(), 0, false ), array( false, true, false, array(), 0, false ), array( false, false, false, array(), 0, false ), ); } }
paladox/mediawiki-extensions-MobileFrontend
tests/phpunit/MobileFrontend.hooksTest.php
PHP
gpl-2.0
5,303
<?php /* ----------------------------------------------------------------------------------------- $Id: xtc_validate_email.inc.php 2085 2011-08-03 15:25:38Z web28 $ modified eCommerce Shopsoftware http://www.modified-shop.org Copyright (c) 2009 - 2013 [www.modified-shop.org] ----------------------------------------------------------------------------------------- based on: (c) 2000-2001 The Exchange Project (earlier name of osCommerce) (c) 2002-2003 osCommerce(validations.php,v 1.11 2003/02/11); www.oscommerce.com (c) 2003 nextcommerce (xtc_validate_email.inc.php,v 1.5 2003/08/14); www.nextcommerce.org (c) 2003 XT-Commerce (xtc_validate_email.inc.php 899 2005-04-29) (c) 2010 osCommerce (validations.php) Released under the GNU General Public License ---------------------------------------------------------------------------------------*/ //////////////////////////////////////////////////////////////////////////////////////////////// // // Function : xtc_validate_email // // Arguments : email email address to be checked // // Return : true - valid email address // false - invalid email address // // Description : function for validating email address that conforms to RFC 822 specs // // This function will first attempt to validate the Email address using the filter // extension for performance. If this extension is not available it will // fall back to a regex based validator which doesn't validate all RFC822 // addresses but catches 99.9% of them. The regex is based on the code found at // http://www.regular-expressions.info/email.html // // Optional validation for validating the domain name is also valid is supplied // and can be enabled using the administration tool. 
// // Sample Valid Addresses: // // first.last@host.com // firstlast@host.to // first-last@host.com // first_last@host.com // // Invalid Addresses: // // first last@host.com // first@last@host.com // //////////////////////////////////////////////////////////////////////////////////////////////// function xtc_validate_email($email) { //BOF - web28 - 2011-07-31 - SQL nullbyte injection fix 16.02.2011 if (strpos($email,"\0")!== false) {return false;} if (strpos($email,"\x00")!== false) {return false;} if (strpos($email,"\u0000")!== false) {return false;} if (strpos($email,"\000")!== false) {return false;} //EOF - web28 - 2011-07-31 - SQL nullbyte injection fix 16.02.2011 $email = trim($email); $valid_address = false; if (strlen($email) > 255) { $valid_address = false; } else { if ( substr_count( $email, '@' ) > 1 ) { $valid_address = false; } //web28 - 2014-02-28 - new $regex see http://www.regular-expressions.info/email.html $regex = "/^[a-z0-9!#$%&'*+\/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+\/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+(?:[a-z]{2,15})$/i"; $valid_address = preg_match($regex, $email); } if ($valid_address && ENTRY_EMAIL_ADDRESS_CHECK == 'true') { $domain = explode('@', $email); if (!checkdnsrr($domain[1], "MX") && !checkdnsrr($domain[1], "A")) { $valid_address = false; } } return $valid_address; } ?>
ReichardtIT/modified-inkl-bootstrap-by-karl
inc/xtc_validate_email.inc.php
PHP
gpl-2.0
3,451
/////////////////////////////////////////////////////////////////////////////// // For information as to what this class does, see the Javadoc, below. // // Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, // // 2007, 2008, 2009, 2010, 2014, 2015 by Peter Spirtes, Richard Scheines, Joseph // // Ramsey, and Clark Glymour. // // // // This program is free software; you can redistribute it and/or modify // // it under the terms of the GNU General Public License as published by // // the Free Software Foundation; either version 2 of the License, or // // (at your option) any later version. // // // // This program is distributed in the hope that it will be useful, // // but WITHOUT ANY WARRANTY; without even the implied warranty of // // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // // GNU General Public License for more details. // // // // You should have received a copy of the GNU General Public License // // along with this program; if not, write to the Free Software // // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA // /////////////////////////////////////////////////////////////////////////////// package edu.cmu.tetrad.search; import edu.cmu.tetrad.data.*; import edu.cmu.tetrad.graph.*; import edu.cmu.tetrad.sem.SemEstimator; import edu.cmu.tetrad.sem.SemIm; import edu.cmu.tetrad.sem.SemPm; import edu.cmu.tetrad.sem.StandardizedSemIm; import edu.cmu.tetrad.util.*; import java.beans.PropertyChangeEvent; import java.beans.PropertyChangeListener; import java.text.DecimalFormat; import java.text.NumberFormat; import java.util.*; /** * GesSearch is an implementation of the GES algorithm, as specified in Chickering (2002) "Optimal structure * identification with greedy search" Journal of Machine Learning Research. It works for both BayesNets and SEMs. * <p> * Some code optimization could be done for the scoring part of the graph for discrete models (method scoreGraphChange). 
* Some of Andrew Moore's approaches for caching sufficient statistics, for instance. * * @author Ricardo Silva, Summer 2003 * @author Joseph Ramsey, Revisions 10/2005 */ public final class Images implements GraphSearch, IImages { /** * The data set, various variable subsets of which are to be scored. */ private List<DataSet> dataSets; /** * The covariance matrix for the data set. */ private List<TetradMatrix> covariances; /** * Sample size, either from the data set or from the variances. */ private int sampleSize; /** * Specification of forbidden and required edges. */ private IKnowledge knowledge = new Knowledge2(); /** * Map from variables to their column indices in the data set. */ private HashMap<Node, Integer> hashIndices; /** * Array of variable names from the data set, in order. */ private String varNames[]; /** * List of variables in the data set, in order. */ private List<Node> variables; /** * True iff the data set is discrete. */ private boolean discrete; /** * The true graph, if known. If this is provided, asterisks will be printed out next to false positive added edges * (that is, edges added that aren't adjacencies in the true graph). */ private Graph trueGraph; /** * For formatting printed numbers. */ private final NumberFormat nf = NumberFormatUtil.getInstance().getNumberFormat(); /** * Caches scores for discrete search. */ private final LocalScoreCache localScoreCache = new LocalScoreCache(); /** * Elapsed time of the most recent search. */ private long elapsedTime; /** * True if cycles are to be aggressively prevented. May be expensive for large graphs (but also useful for large * graphs). */ private boolean aggressivelyPreventCycles = false; /** * Listeners for graph change events. */ private transient List<PropertyChangeListener> listeners; /** * Penalty discount--the BIC penalty is multiplied by this (for continuous variables). */ private double penaltyDiscount = 1.0; /** * The score for discrete searches. 
*/ private LocalDiscreteScore discreteScore; /** * The logger for this class. The config needs to be set. */ private TetradLogger logger = TetradLogger.getInstance(); /** * The top n graphs found by the algorithm, where n is <code>numPatternsToStore</code>. */ private SortedSet<ScoredGraph> topGraphs = new TreeSet<ScoredGraph>(); /** * The number of top patterns to store. */ private int numPatternsToStore = 10; private SortedSet<Arrow> sortedArrows = new TreeSet<Arrow>(); private Set<Arrow>[][] lookupArrows; private Map<Node, Map<Set<Node>, Double>> scoreHash; private Map<Node, Integer> nodesHash; /** * True if graphs should be stored. */ private boolean storeGraphs = true; private double bic; private Map<DataSet, Set<Node>> missingVariables; private Graph returnGraph; private int maxNumEdges = -1; private int subsetBound = Integer.MAX_VALUE; private ArrayList<CovarianceMatrix> covarianceMatrices = new ArrayList<CovarianceMatrix>(); private boolean log = true; private double trimAlpha = 0.0; private boolean useKnowledgeBackwards; private Graph adjacencyGraph; private boolean verbose = true; private boolean containsMissingVariables; //===========================CONSTRUCTORS=============================// private Images() { // private } public Images(List<DataSet> dataSets) { setDataSets(dataSets); } public static Images covarianceInstance(List<CovarianceMatrix> covarianceMatrixes) { Images images = new Images(); images.setCovariances(covarianceMatrixes); return images; } //==========================PUBLIC METHODS==========================// public boolean isAggressivelyPreventCycles() { return this.aggressivelyPreventCycles; } public void setAggressivelyPreventCycles(boolean aggressivelyPreventCycles) { this.aggressivelyPreventCycles = aggressivelyPreventCycles; } /** * Greedy equivalence search: Start from the empty graph, add edges till model is significant. Then start deleting * edges till a minimum is achieved. * * @return the resulting Pattern. 
*/ public Graph search() { long startTime = System.currentTimeMillis(); topGraphs = new TreeSet<ScoredGraph>(); Graph graph = new EdgeListGraph(new LinkedList<Node>(getVariables())); scoreHash = new WeakHashMap<Node, Map<Set<Node>, Double>>(); for (Node node : graph.getNodes()) { scoreHash.put(node, new HashMap<Set<Node>, Double>()); } fireGraphChange(graph); buildIndexing(graph); addRequiredEdges(graph); double score = 0; //scoreGraph(SearchGraphUtils.dagFromPattern(graph)); storeGraph(new EdgeListGraph(graph), score); List<Node> nodes = graph.getNodes(); nodesHash = new HashMap<Node, Integer>(); int index = -1; for (Node node : nodes) { nodesHash.put(node, ++index); } // Do forward search. score = fes(graph, nodes, score); // Do backward search. score = bes(graph, nodes, score); long endTime = System.currentTimeMillis(); this.elapsedTime = endTime - startTime; this.logger.log("graph", "\nReturning this graph: " + graph); TetradLogger.getInstance().log("info", "Final Model BIC = " + nf.format(score)); this.logger.log("info", "Elapsed time = " + (elapsedTime) / 1000. + " s"); this.logger.flush(); bic = score; return graph; } public Graph search(List<Node> nodes) { long startTime = System.currentTimeMillis(); localScoreCache.clear(); if (!variables.containsAll(nodes)) { throw new IllegalArgumentException( "All of the nodes must be in the supplied data set."); } Graph graph = new EdgeListGraph(nodes); buildIndexing(graph); addRequiredEdges(graph); double score = 0; //scoreGraph(graph); // Do forward search. score = fes(graph, nodes, score); // Do backward search. score = bes(graph, nodes, score); long endTime = System.currentTimeMillis(); this.elapsedTime = endTime - startTime; this.logger.log("graph", "\nReturning this graph: " + graph); this.logger.log("info", "Elapsed time = " + (elapsedTime) / 1000. + " s"); this.logger.flush(); bic = score; return graph; } public IKnowledge getKnowledge() { return knowledge; } /** * Sets the background knowledge. 
* * @param knowledge the knowledge object, specifying forbidden and required edges. */ public void setKnowledge(IKnowledge knowledge) { if (knowledge == null) { throw new NullPointerException("Knowledge must not be null."); } this.knowledge = knowledge; } public void setElapsedTime(long elapsedTime) { this.elapsedTime = elapsedTime; } public long getElapsedTime() { return elapsedTime; } public void addPropertyChangeListener(PropertyChangeListener l) { getListeners().add(l); } public double getPenaltyDiscount() { return penaltyDiscount; } public void setPenaltyDiscount(double penaltyDiscount) { if (penaltyDiscount < 0) { throw new IllegalArgumentException("Penalty discount must be >= 0: " + penaltyDiscount); } this.penaltyDiscount = penaltyDiscount; } public void setTrueGraph(Graph trueGraph) { this.trueGraph = trueGraph; } public double getScore(Graph dag) { return scoreDag(dag); } public SortedSet<ScoredGraph> getTopGraphs() { return topGraphs; } public int getNumPatternsToStore() { return numPatternsToStore; } public void setNumPatternsToStore(int numPatternsToStore) { if (numPatternsToStore < 0) { throw new IllegalArgumentException("Number of patterns to store must be >= 0: " + numPatternsToStore); } storeGraphs = numPatternsToStore != 0; this.numPatternsToStore = numPatternsToStore; } //===========================PRIVATE METHODS========================// /** * Forward equivalence search. * * @param graph The graph in the state prior to the forward equivalence search. * @param score The score in the state prior to the forward equivalence search * @return the score in the state after the forward equivelance search. Note that the graph is changed as a * side-effect to its state after the forward equivelance search. 
*/ private double fes(Graph graph, List<Node> nodes, double score) { TetradLogger.getInstance().log("info", "** FORWARD EQUIVALENCE SEARCH"); TetradLogger.getInstance().log("info", "Initial Model BIC = " + nf.format(score)); initializeArrowsForward(nodes); while (!sortedArrows.isEmpty()) { Arrow arrow = sortedArrows.first(); sortedArrows.remove(arrow); Node x = nodes.get(arrow.getX()); Node y = nodes.get(arrow.getY()); if (!validInsert(x, y, arrow.getHOrT(), arrow.getNaYX(), graph)) { continue; } List<Node> t = arrow.getHOrT(); double bump = arrow.getBump(); score = score + bump; Set<Edge> edges = new HashSet<Edge>(graph.getEdges()); insert(x, y, t, graph, score, bump); rebuildPattern(graph); // Try to avoid duplicating scoring calls. First clear out all of the edges that need to be changed, // then change them, checking to see if they're already been changed. I know, roundabout, but there's // a performance boost. for (Edge edge : graph.getEdges()) { Node _x = edge.getNode1(); Node _y = edge.getNode2(); if (!edges.contains(edge)) { clearForward(graph, nodes, nodesHash.get(_x), nodesHash.get(_y)); } } for (Edge edge : graph.getEdges()) { Node _x = edge.getNode1(); Node _y = edge.getNode2(); if (!edges.contains(edge)) { reevaluateForward(graph, nodes, nodesHash.get(_x), nodesHash.get(_y)); } } storeGraph(graph, score); } return score; } private double bes(Graph graph, List<Node> nodes, double score) { TetradLogger.getInstance().log("info", "** BACKWARD EQUIVALENCE SEARCH"); TetradLogger.getInstance().log("info", "Initial Model BIC = " + nf.format(score)); if (!useKnowledgeBackwards) { knowledge = new Knowledge2(); } initializeArrowsBackward(graph); while (!sortedArrows.isEmpty()) { Arrow arrow = sortedArrows.first(); sortedArrows.remove(arrow); Node x = nodes.get(arrow.getX()); Node y = nodes.get(arrow.getY()); if (!validDelete(arrow.getHOrT(), arrow.getNaYX(), graph)) { continue; } List<Node> h = arrow.getHOrT(); double bump = arrow.getBump(); delete(x, y, h, 
graph, score, bump); score = score + bump; rebuildPattern(graph); storeGraph(graph, score); initializeArrowsBackward(graph); // Rebuilds Arrows from scratch each time. Fast enough for backwards. } return score; } // Expensive. private void initializeArrowsForward(List<Node> nodes) { sortedArrows.clear(); lookupArrows = new HashSet[nodes.size()][nodes.size()]; List<Node> emptyList = Collections.EMPTY_LIST; Set<Node> emptySet = Collections.EMPTY_SET; for (int j = 0; j < nodes.size(); j++) { if (verbose) { if ((j + 1) % 10 == 0) System.out.println("Initializing arrows forward: " + (j + 1)); } for (int i = 0; i < nodes.size(); i++) { if (j == i) continue; Node _x = nodes.get(i); Node _y = nodes.get(j); if (getAdjacencyGraph() != null && !getAdjacencyGraph().isAdjacentTo(_x, _y)) { continue; } if (getKnowledge().isForbidden(_x.getName(), _y.getName())) { continue; } if (!validSetByKnowledge(_y, emptyList)) { continue; } double bump = scoreGraphChange(_y, Collections.singleton(_x), emptySet); if (bump > 0.0) { Arrow arrow = new Arrow(bump, i, j, emptyList, emptyList, nodes); lookupArrows[i][j] = new HashSet<Arrow>(); sortedArrows.add(arrow); lookupArrows[i][j].add(arrow); } } } } private void initializeArrowsBackward(Graph graph) { List<Node> nodes = graph.getNodes(); sortedArrows.clear(); lookupArrows = new HashSet[nodes.size()][nodes.size()]; for (Edge edge : graph.getEdges()) { Node x = edge.getNode1(); Node y = edge.getNode2(); int i = nodesHash.get(edge.getNode1()); int j = nodesHash.get(edge.getNode2()); if (!getKnowledge().noEdgeRequired(x.getName(), y.getName())) { continue; } if (Edges.isDirectedEdge(edge)) { calculateArrowsBackward(i, j, nodes, graph); } else { calculateArrowsBackward(i, j, nodes, graph); calculateArrowsBackward(j, i, nodes, graph); } } } private void clearForward(Graph graph, List<Node> nodes, int i, int j) { Node x = nodes.get(i); Node y = nodes.get(j); if (!graph.isAdjacentTo(x, y)) throw new IllegalArgumentException(); clearArrow(i, j); 
clearArrow(j, i); for (int _w = 0; _w < nodes.size(); _w++) { Node w = nodes.get(_w); if (w == x) continue; if (w == y) continue; if (!graph.isAdjacentTo(w, x)) { clearArrow(_w, i); if (graph.isAdjacentTo(w, y)) { clearArrow(i, _w); } } if (!graph.isAdjacentTo(w, y)) { clearArrow(_w, j); if (graph.isAdjacentTo(w, x)) { clearArrow(j, _w); } } } } private void reevaluateForward(Graph graph, List<Node> nodes, int i, int j) { Node x = nodes.get(i); Node y = nodes.get(j); if (!graph.isAdjacentTo(x, y)) throw new IllegalArgumentException(); for (int _w = 0; _w < nodes.size(); _w++) { Node w = nodes.get(_w); if (w == x) continue; if (w == y) continue; if (!graph.isAdjacentTo(w, x)) { if (lookupArrows[_w][i] == null) { calculateArrowsForward(_w, i, nodes, graph); } if (graph.isAdjacentTo(w, y)) { if (lookupArrows[i][_w] == null) { calculateArrowsForward(i, _w, nodes, graph); } } } if (!graph.isAdjacentTo(w, y)) { if (lookupArrows[_w][j] == null) { calculateArrowsForward(_w, j, nodes, graph); } if (graph.isAdjacentTo(w, x)) { if (lookupArrows[j][_w] == null) { calculateArrowsForward(j, _w, nodes, graph); } } } } } private void clearArrow(int i, int j) { if (lookupArrows[i][j] != null) { // removeall is slower for (Arrow arrow : lookupArrows[i][j]) { sortedArrows.remove(arrow); } lookupArrows[i][j] = null; } } private void calculateArrowsForward(int i, int j, List<Node> nodes, Graph graph) { if (i == j) { return; } Node x = nodes.get(i); Node y = nodes.get(j); // if (graph.isAdjacentTo(x, y)) { // return; // } if (getKnowledge().isForbidden(x.getName(), y.getName())) { return; } // clearArrow(i, j); List<Node> naYX = getNaYX(x, y, graph); List<Node> tNeighbors = getTNeighbors(x, y, graph); lookupArrows[i][j] = new HashSet<Arrow>(); DepthChoiceGenerator gen = new DepthChoiceGenerator(tNeighbors.size(), 1); int[] choice; while ((choice = gen.next()) != null) { List<Node> t = GraphUtils.asList(choice, tNeighbors); if (!validSetByKnowledge(y, t)) { continue; } double bump = 
insertEval(x, y, t, naYX, graph); if (bump > 0.0) { Arrow arrow = new Arrow(bump, i, j, t, naYX, nodes); sortedArrows.add(arrow); lookupArrows[i][j].add(arrow); } } } private void calculateArrowsBackward(int i, int j, List<Node> nodes, Graph graph) { if (i == j) { return; } Node x = nodes.get(i); Node y = nodes.get(j); if (!graph.isAdjacentTo(x, y)) { return; } if (!getKnowledge().noEdgeRequired(x.getName(), y.getName())) { return; } List<Node> naYX = getNaYX(x, y, graph); clearArrow(i, j); List<Node> _naYX = new ArrayList<Node>(naYX); DepthChoiceGenerator gen = new DepthChoiceGenerator(_naYX.size(), _naYX.size()); int[] choice; lookupArrows[i][j] = new HashSet<Arrow>(); while ((choice = gen.next()) != null) { List<Node> H = GraphUtils.asList(choice, _naYX); if (!validSetByKnowledge(y, H)) { continue; } double bump = deleteEval(x, y, H, naYX, graph); if (bump > 0.0) { Arrow arrow = new Arrow(bump, i, j, H, naYX, nodes); sortedArrows.add(arrow); lookupArrows[i][j].add(arrow); } } } /** * True iff log output should be produced. 
*/ public ArrayList<CovarianceMatrix> getCovarianceMatrices() { return covarianceMatrices; } public boolean isLog() { return log; } public void setLog(boolean log) { this.log = log; } public double getTrimAlpha() { return trimAlpha; } public void setTrimAlpha(double trimAlpha) { if (trimAlpha < 0.0) {// || trimAlpha > 0.5) { throw new IllegalArgumentException("Clip must be in [0, 1]"); } this.trimAlpha = trimAlpha; } public void setAdjacencyGraph(Graph adjacencyGraph) { Graph graph = GraphUtils.undirectedGraph(adjacencyGraph); graph = GraphUtils.replaceNodes(graph, getVariables()); this.adjacencyGraph = graph; } public Graph getAdjacencyGraph() { return adjacencyGraph; } public void setVerbose(boolean verbose) { this.verbose = verbose; } public boolean isUseKnowledgeBackwards() { return useKnowledgeBackwards; } public void setUseKnowledgeBackwards(boolean useKnowledgeBackwards) { this.useKnowledgeBackwards = useKnowledgeBackwards; } private static class Arrow implements Comparable { private double bump; private int x; private int y; private List<Node> hOrT; private List<Node> naYX; private List<Node> nodes; public Arrow(double bump, int x, int y, List<Node> hOrT, List<Node> naYX, List<Node> nodes) { this.bump = bump; this.x = x; this.y = y; this.hOrT = hOrT; this.naYX = naYX; this.nodes = nodes; } public double getBump() { return bump; } public int getX() { return x; } public int getY() { return y; } public List<Node> getHOrT() { return hOrT; } public List<Node> getNaYX() { return naYX; } // Sorting is by bump, high to low. public int compareTo(Object o) { Arrow arrow = (Arrow) o; return Double.compare(arrow.getBump(), getBump()); } public String toString() { return "Arrow<" + nodes.get(x) + "->" + nodes.get(y) + " bump = " + bump + " t/h = " + hOrT + " naYX = " + naYX + ">"; } } /** * Get all nodes that are connected to Y by an undirected edge and not adjacent to X. 
*/ private static List<Node> getTNeighbors(Node x, Node y, Graph graph) { List<Edge> yEdges = graph.getEdges(y); List<Node> tNeighbors = new ArrayList<Node>(); for (Edge edge : yEdges) { if (!Edges.isUndirectedEdge(edge)) { continue; } Node z = edge.getDistalNode(y); if (graph.isAdjacentTo(z, x)) { continue; } tNeighbors.add(z); } return tNeighbors; } /** * Evaluate the Insert(X, Y, T) operator (Definition 12 from Chickering, 2002). */ private double insertEval(Node x, Node y, List<Node> t, List<Node> naYX, Graph graph) { List<Node> paY = graph.getParents(y); Set<Node> paYPlusX = new HashSet<Node>(paY); paYPlusX.add(x); Set<Node> set1 = new HashSet<Node>(naYX); set1.addAll(t); set1.addAll(paYPlusX); Set<Node> set2 = new HashSet<Node>(naYX); set2.addAll(t); set2.addAll(paY); return scoreGraphChange(y, set1, set2); } /** * Evaluate the Delete(X, Y, T) operator (Definition 12 from Chickering, 2002). */ private double deleteEval(Node x, Node y, List<Node> h, List<Node> naYX, Graph graph) { List<Node> paY = graph.getParents(y); paY.add(x); Set<Node> paYMinuxX = new HashSet<Node>(paY); paYMinuxX.remove(x); Set<Node> set1 = new HashSet<Node>(naYX); set1.removeAll(h); set1.addAll(paYMinuxX); Set<Node> set2 = new HashSet<Node>(naYX); set2.removeAll(h); set2.addAll(paY); return scoreGraphChange(y, set1, set2); } /* * Do an actual insertion * (Definition 12 from Chickering, 2002). 
**/ private void insert(Node x, Node y, List<Node> t, Graph graph, double score, double bump) { if (graph.isAdjacentTo(x, y)) { Edge edge = graph.getEdge(x, y); if (Edges.isUndirectedEdge(edge)) { graph.removeEdge(x, y); } else { return; } } if (graph.isAdjacentTo(x, y)) { return; // knowledge required // throw new IllegalArgumentException(x + " and " + y + " are already adjacent in the graph."); } Edge trueEdge = null; if (trueGraph != null) { Node _x = trueGraph.getNode(x.getName()); Node _y = trueGraph.getNode(y.getName()); trueEdge = trueGraph.getEdge(_x, _y); } graph.addDirectedEdge(x, y); if (log) { String label = trueGraph != null && trueEdge != null ? "*" : ""; TetradLogger.getInstance().log("insertedEdges", graph.getNumEdges() + ". INSERT " + graph.getEdge(x, y) + " " + t + " (" + nf.format(score) + ") " + label); if (verbose) { System.out.println(graph.getNumEdges() + ". INSERT " + graph.getEdge(x, y) + " " + t + "\t" + nf.format(score) + "\t" + bump + "\t" + label); } } else { int numEdges = graph.getNumEdges() - 1; if (numEdges % 50 == 0) System.out.println(numEdges); } for (Node _t : t) { Edge oldEdge = graph.getEdge(_t, y); if (oldEdge == null) throw new IllegalArgumentException("Not adjacent: " + _t + ", " + y); if (!Edges.isUndirectedEdge(oldEdge)) { throw new IllegalArgumentException("Should be undirected: " + oldEdge); } graph.removeEdge(_t, y); graph.addDirectedEdge(_t, y); if (log) { TetradLogger.getInstance().log("directedEdges", "--- Directing " + oldEdge + " to " + graph.getEdge(_t, y)); if (verbose) { // System.out.println("--- Directing " + oldEdge + " to " + // graph.getEdge(_t, y)); } } } } /** * Do an actual deletion (Definition 13 from Chickering, 2002). 
*/ private void delete(Node x, Node y, List<Node> subset, Graph graph, double score, double bump) { Edge trueEdge = null; if (trueGraph != null) { Node _x = trueGraph.getNode(x.getName()); Node _y = trueGraph.getNode(y.getName()); trueEdge = trueGraph.getEdge(_x, _y); } if (log) { Edge oldEdge = graph.getEdge(x, y); String label = trueGraph != null && trueEdge != null ? "*" : ""; TetradLogger.getInstance().log("deletedEdges", (graph.getNumEdges() - 1) + ". DELETE " + oldEdge + " " + subset + " (" + nf.format(score) + ") " + label); if (verbose) { System.out.println((graph.getNumEdges() - 1) + ". DELETE " + oldEdge + " " + subset + "\t" + nf.format(score) + "\t" + bump + "\t" + label); } } else { int numEdges = graph.getNumEdges() - 1; if (verbose) { if (numEdges % 50 == 0) System.out.println(numEdges); } } graph.removeEdge(x, y); for (Node h : subset) { Edge oldEdge = graph.getEdge(y, h); graph.removeEdge(y, h); graph.addDirectedEdge(y, h); if (log) { TetradLogger.getInstance().log("directedEdges", "--- Directing " + oldEdge + " to " + graph.getEdge(y, h)); if (verbose) { // System.out.println("--- Directing " + oldEdge + " to " + // graph.getEdge(y, h)); } } if (Edges.isUndirectedEdge(graph.getEdge(x, h))) { if (!graph.isAdjacentTo(x, h)) throw new IllegalArgumentException("Not adjacent: " + x + ", " + h); oldEdge = graph.getEdge(x, h); graph.removeEdge(x, h); graph.addDirectedEdge(x, h); if (log) { TetradLogger.getInstance().log("directedEdges", "--- Directing " + oldEdge + " to " + graph.getEdge(x, h)); if (verbose) { System.out.println("--- Directing " + oldEdge + " to " + graph.getEdge(x, h)); } } } } } /* * Test if the candidate insertion is a valid operation * (Theorem 15 from Chickering, 2002). 
**/ private boolean validInsert(Node x, Node y, List<Node> t, List<Node> naYX, Graph graph) { List<Node> union = new ArrayList<Node>(t); // t and nayx are disjoint union.addAll(naYX); if (!isClique(union, graph)) { return false; } return !existsUnblockedSemiDirectedPath(y, x, union, graph); } /** * Test if the candidate deletion is a valid operation (Theorem 17 from Chickering, 2002). */ private static boolean validDelete(List<Node> h, List<Node> naXY, Graph graph) { List<Node> list = new ArrayList<Node>(naXY); list.removeAll(h); return isClique(list, graph); } //---Background knowledge methods. private void addRequiredEdges(Graph graph) { for (Iterator<KnowledgeEdge> it = getKnowledge().requiredEdgesIterator(); it.hasNext(); ) { KnowledgeEdge next = it.next(); String a = next.getFrom(); String b = next.getTo(); Node nodeA = null, nodeB = null; Iterator<Node> itn = graph.getNodes().iterator(); while (itn.hasNext() && (nodeA == null || nodeB == null)) { Node nextNode = itn.next(); if (nextNode.getName().equals(a)) { nodeA = nextNode; } if (nextNode.getName().equals(b)) { nodeB = nextNode; } } if (graph.containsEdge(Edges.directedEdge(nodeB, nodeA))) { graph.removeEdge(nodeB, nodeA); graph.addEdge(Edges.undirectedEdge(nodeA, nodeB)); } else if (!graph.isAncestorOf(nodeB, nodeA)) { graph.removeEdges(nodeA, nodeB); graph.addDirectedEdge(nodeA, nodeB); TetradLogger.getInstance().log("insertedEdges", "Adding edge by knowledge: " + graph.getEdge(nodeA, nodeB)); } } for (Iterator<KnowledgeEdge> it = getKnowledge().forbiddenEdgesIterator(); it.hasNext(); ) { KnowledgeEdge next = it.next(); String a = next.getFrom(); String b = next.getTo(); Node nodeA = null, nodeB = null; Iterator<Node> itn = graph.getNodes().iterator(); while (itn.hasNext() && (nodeA == null || nodeB == null)) { Node nextNode = itn.next(); if (nextNode.getName().equals(a)) { nodeA = nextNode; } if (nextNode.getName().equals(b)) { nodeB = nextNode; } } if (nodeA != null && nodeB != null && 
graph.isAdjacentTo(nodeA, nodeB) && !graph.isChildOf(nodeA, nodeB)) { if (!graph.isAncestorOf(nodeA, nodeB)) { graph.removeEdges(nodeA, nodeB); graph.addDirectedEdge(nodeB, nodeA); TetradLogger.getInstance().log("insertedEdges", "Adding edge by knowledge: " + graph.getEdge(nodeB, nodeA)); } } } } /** * Use background knowledge to decide if an insert or delete operation does not orient edges in a forbidden * direction according to prior knowledge. If some orientation is forbidden in the subset, the whole subset is * forbidden. */ private boolean validSetByKnowledge(Node y, List<Node> subset) { for (Node node : subset) { if (getKnowledge().isForbidden(node.getName(), y.getName())) { return false; } } return true; } //--Auxiliary methods. /** * Find all nodes that are connected to Y by an undirected edge that are adjacent to X (that is, by undirected or * directed edge). */ private static List<Node> getNaYX(Node x, Node y, Graph graph) { List<Edge> yEdges = graph.getEdges(y); List<Node> nayx = new ArrayList<Node>(); for (Edge edge : yEdges) { if (!Edges.isUndirectedEdge(edge)) { continue; } Node z = edge.getDistalNode(y); if (!graph.isAdjacentTo(z, x)) { continue; } nayx.add(z); } return nayx; } /** * @return true iif the given set forms a clique in the given graph. 
*/ private static boolean isClique(List<Node> nodes, Graph graph) { for (int i = 0; i < nodes.size() - 1; i++) { for (int j = i; j < nodes.size(); j++) { if (i == j && graph.isAdjacentTo(nodes.get(i), nodes.get(j))) { throw new IllegalArgumentException(); } if (!graph.isAdjacentTo(nodes.get(i), nodes.get(j))) { return false; } } } return true; } private boolean existsUnblockedSemiDirectedPath(Node from, Node to, List<Node> cond, Graph G) { Queue<Node> Q = new LinkedList<Node>(); Set<Node> V = new HashSet<Node>(); Q.offer(from); V.add(from); while (!Q.isEmpty()) { Node t = Q.remove(); if (t == to) return true; for (Node u : G.getAdjacentNodes(t)) { Edge edge = G.getEdge(t, u); Node c = Edges.traverseSemiDirected(t, edge); if (c == null) continue; if (cond.contains(c)) continue; if (!V.contains(c)) { V.add(c); Q.offer(c); } } } return false; } /** * Completes a pattern that was modified by an insertion/deletion operator Based on the algorithm described on * Appendix C of (Chickering, 2002). */ private void rebuildPattern(Graph graph) { SearchGraphUtils.basicPattern(graph, false); addRequiredEdges(graph); meekOrient(graph, getKnowledge()); TetradLogger.getInstance().log("rebuiltPatterns", "Rebuilt pattern = " + graph); } /** * Fully direct a graph with background knowledge. I am not sure how to adapt Chickering's suggested algorithm above * (dagToPdag) to incorporate background knowledge, so I am also implementing this algorithm based on Meek's 1995 * UAI paper. Notice it is the same implemented in PcSearch. 
</p> *IMPORTANT!* *It assumes all colliders are * oriented, as well as arrows dictated by time order.* */ private void meekOrient(Graph graph, IKnowledge knowledge) { MeekRules rules = new MeekRules(); rules.setKnowledge(knowledge); rules.orientImplied(graph); } private void setDataSets(List<DataSet> dataSets) { List<String> varNames = dataSets.get(0).getVariableNames(); for (int i = 2; i < dataSets.size(); i++) { List<String> _varNames = dataSets.get(i).getVariableNames(); if (!varNames.equals(_varNames)) { throw new IllegalArgumentException("Variable names not consistent."); } } this.varNames = varNames.toArray(new String[varNames.size()]); this.sampleSize = dataSets.get(0).getNumRows(); this.variables = dataSets.get(0).getVariables(); this.dataSets = dataSets; this.discrete = dataSets.get(0).isDiscrete(); if (!isDiscrete()) { this.covariances = new ArrayList<TetradMatrix>(); for (DataSet dataSet : dataSets) { CovarianceMatrix cov = new CovarianceMatrix(dataSet); this.covarianceMatrices.add(cov); this.covariances.add(cov.getMatrix()); } } missingVariables = new HashMap<DataSet, Set<Node>>(); containsMissingVariables = false; for (DataSet dataSet : dataSets) { missingVariables.put(dataSet, new HashSet<Node>()); } for (DataSet dataSet : dataSets) { for (Node node : dataSet.getVariables()) { int index = dataSet.getVariables().indexOf(node); boolean missing = true; for (int i = 0; i < dataSet.getNumRows(); i++) { if (!Double.isNaN(dataSet.getDouble(i, index))) { missing = false; break; } } if (missing) { missingVariables.get(dataSet).add(node); containsMissingVariables = true; } } } setKnowledge(dataSets.get(0).getKnowledge()); } private void setCovariances(List<CovarianceMatrix> dataSets) { List<String> varNames = dataSets.get(0).getVariableNames(); for (int i = 2; i < dataSets.size(); i++) { List<String> _varNames = dataSets.get(i).getVariableNames(); if (!varNames.equals(_varNames)) { throw new IllegalArgumentException("Variable names not consistent."); } } 
this.varNames = varNames.toArray(new String[varNames.size()]); this.sampleSize = dataSets.get(0).getSampleSize(); this.variables = dataSets.get(0).getVariables(); this.covariances = new ArrayList<TetradMatrix>(); for (CovarianceMatrix cov : dataSets) { this.covarianceMatrices.add(cov); this.covariances.add(cov.getMatrix()); } setKnowledge(dataSets.get(0).getKnowledge()); } private void buildIndexing(Graph graph) { this.hashIndices = new HashMap<Node, Integer>(); for (Node next : graph.getNodes()) { for (int i = 0; i < this.varNames.length; i++) { if (this.varNames[i].equals(next.getName())) { this.hashIndices.put(next, i); break; } } } } //===========================SCORING METHODS public double scoreDag(Graph graph) { Dag dag = new Dag(graph); double score = 0.0; for (Node y : dag.getNodes()) { Set<Node> parents = new HashSet<Node>(dag.getParents(y)); int nextIndex = -1; for (int i = 0; i < getVariables().size(); i++) { if (this.varNames[i].equals(y.getName())) { nextIndex = i; break; } } int parentIndices[] = new int[parents.size()]; Iterator<Node> pi = parents.iterator(); int count = 0; while (pi.hasNext()) { Node nextParent = pi.next(); for (int i = 0; i < getVariables().size(); i++) { if (this.varNames[i].equals(nextParent.getName())) { parentIndices[count++] = i; break; } } } score += localSemScore(nextIndex, parentIndices); } return score; } private double scoreGraphChange(Node y, Set<Node> parents1, Set<Node> parents2) { int yIndex = hashIndices.get(y); int parentIndices1[] = new int[parents1.size()]; int count = 0; for (Node aParents1 : parents1) { parentIndices1[count++] = (hashIndices.get(aParents1)); } int parentIndices2[] = new int[parents2.size()]; int count2 = 0; for (Node aParents2 : parents2) { parentIndices2[count2++] = (hashIndices.get(aParents2)); } List<Double> diffs = new ArrayList<Double>(); int numDataSets = numDataSets(); for (int d = 0; d < numDataSets; d++) { double score1 = localSemScoreOneDataSet(d, yIndex, parentIndices1); double 
score2 = localSemScoreOneDataSet(d, yIndex, parentIndices2); double diff = score1 - score2; diffs.add(diff); } Collections.sort(diffs); double sum = 0.0; int _count = 0; int from = (int) Math.floor(((double) (numDataSets - 1)) * (trimAlpha)); // int to = (int) Math.ceil(((double) (numDataSets - 1)) * (1.0 - trimAlpha)); int to = numDataSets - 1; //(int) Math.ceil(((double) (numDataSets - 1)) * (1.0 - trimAlpha)); for (int m = from; m <= to; m++) { double diff = diffs.get(m); if (diff != 0) { sum += diff; _count++; } } return sum / _count; } /** * Calculates the sample likelihood and BIC score for i given its parents in a simple SEM model. */ private double localSemScore(int i, int[] parents) { double sum = 0.0; for (int d = 0; d < numDataSets(); d++) { sum += localSemScoreOneDataSet(d, i, parents); } return sum; } private int numDataSets() { return getCovMatrices().size(); } private double localSemScoreOneDataSet(int dataIndex, int i, int[] parents) { TetradMatrix cov = getCovMatrices().get(dataIndex); double residualVariance = cov.get(i, i); int n = sampleSize(); int p = parents.length; if (containsMissingVariables) { DataSet data = dataSets.get(dataIndex); if (missingVariables.get(data).contains(data.getVariable(i))) { return 0; } } try { TetradMatrix covxx = cov.getSelection(parents, parents); TetradMatrix covxxInv = covxx.inverse(); TetradVector covxy = cov.getSelection(parents, new int[]{i}).getColumn(0); TetradVector b = covxxInv.times(covxy); residualVariance -= covxy.dotProduct(b); } catch (Exception e) { e.printStackTrace(); throwMinimalLinearDependentSet(parents, cov); } if (residualVariance <= 0) { if (verbose) { System.out.println("Negative residual variance: " + residualVariance); } return Double.NaN; } double c = getPenaltyDiscount(); return score(residualVariance, n, p, c); } // Calculates the BIC score. 
private double score(double residualVariance, int n, int p, double c) { return -n * Math.log(residualVariance) - n * Math.log(2 * Math.PI) - n - dof(p) * c * (Math.log(n)); // return -n * Math.log(residualVariance) - c * dof(p) * Math.log(n); } private int dof(int p) { return (p + 1) * (p + 2) / 2; // return p + 1; } private void throwMinimalLinearDependentSet(int[] parents, TetradMatrix cov) { List<Node> _parents = new ArrayList<Node>(); for (int p : parents) _parents.add(variables.get(p)); DepthChoiceGenerator gen = new DepthChoiceGenerator(_parents.size(), _parents.size()); int[] choice; while ((choice = gen.next()) != null) { int[] sel = new int[choice.length]; List<Node> _sel = new ArrayList<Node>(); for (int m = 0; m < choice.length; m++) { sel[m] = parents[m]; _sel.add(variables.get(sel[m])); } TetradMatrix m = cov.getSelection(sel, sel); try { m.inverse(); } catch (Exception e2) { throw new RuntimeException("Linear dependence among variables: " + _sel); } } } private int sampleSize() { return this.sampleSize; } private List<Node> getVariables() { return variables; } private List<TetradMatrix> getCovMatrices() { return covariances; } private boolean isDiscrete() { return discrete; } private void fireGraphChange(Graph graph) { for (PropertyChangeListener l : getListeners()) { l.propertyChange(new PropertyChangeEvent(this, "graph", null, graph)); } } private List<PropertyChangeListener> getListeners() { if (listeners == null) { listeners = new ArrayList<PropertyChangeListener>(); } return listeners; } private void storeGraph(Graph graph, double score) { if (!storeGraphs) return; if (topGraphs.isEmpty() || score > topGraphs.first().getScore()) { Graph graphCopy = new EdgeListGraph(graph); topGraphs.add(new ScoredGraph(graphCopy, score)); if (topGraphs.size() > getNumPatternsToStore()) { topGraphs.remove(topGraphs.first()); } } } public Map<Edge, Integer> getBoostrapCounts(int numBootstraps) { if (returnGraph == null) { returnGraph = search(); } return 
bootstrapImagesCounts(dataSets, returnGraph.getNodes(), getKnowledge(), numBootstraps, getPenaltyDiscount()); } public String bootstrapPercentagesString(int numBootstraps) { if (returnGraph == null) { returnGraph = search(); } StringBuilder builder = new StringBuilder( "For " + numBootstraps + " repetitions, the percentage of repetitions in which each " + "edge occurs in the IMaGES pattern for that repetition. In each repetition, for each " + "input data set, a sample the size of that data set chosen randomly and with replacement. " + "Images is run on the collection of these data sets. 100% for an edge means that that " + "edge occurs in all such randomly chosen samples, over " + numBootstraps + " repetitions; " + "0% means it never occurs. Edges not mentioned occur in 0% of the random samples.\n\n" ); Map<Edge, Integer> counts = getBoostrapCounts(numBootstraps); builder.append(edgePercentagesString(counts, new ArrayList<Edge>(returnGraph.getEdges()), null, numBootstraps)); return builder.toString(); } public String gesCountsString() { if (returnGraph == null) { returnGraph = search(); } Map<Edge, Integer> counts = getGesCounts(dataSets, returnGraph.getNodes(), getKnowledge(), getPenaltyDiscount()); return gesEdgesString(counts, dataSets); } private Map<Edge, Integer> getGesCounts(List<DataSet> dataSets, List<Node> nodes, IKnowledge knowledge, double penalty) { if (returnGraph == null) { returnGraph = search(); } Map<Edge, Integer> counts = new HashMap<Edge, Integer>(); for (DataSet dataSet : dataSets) { Ges ges = new Ges(dataSet); ges.setKnowledge(knowledge); ges.setPenaltyDiscount(penalty); Graph pattern = ges.search(); incrementCounts(counts, pattern, nodes); } return counts; } public Map<Edge, Double> averageStandardizedCoefficients() { if (returnGraph == null) { returnGraph = search(); } return averageStandardizedCoefficients(returnGraph); } public Map<Edge, Double> averageStandardizedCoefficients(Graph graph) { Graph dag = 
SearchGraphUtils.dagFromPattern(graph); Map<Edge, Double> coefs = new HashMap<Edge, Double>(); for (DataSet dataSet : dataSets) { SemPm pm = new SemPm(dag); Graph _graph = pm.getGraph(); SemEstimator estimator = new SemEstimator(dataSet, pm); SemIm im = estimator.estimate(); StandardizedSemIm im2 = new StandardizedSemIm(im); for (Edge edge : _graph.getEdges()) { edge = translateEdge(edge, dag); if (coefs.get(edge) == null) { coefs.put(edge, 0.0); } coefs.put(edge, coefs.get(edge) + im2.getParameterValue(edge)); } } for (Edge edge : coefs.keySet()) { coefs.put(edge, coefs.get(edge) / (double) numDataSets()); } return coefs; } public String averageStandardizedCoefficientsString() { if (returnGraph == null) { returnGraph = search(); } Graph graph = new Dag(GraphUtils.randomDag(returnGraph.getNodes(), 0, 12, 30, 15, 15, true)); return averageStandardizedCoefficientsString(graph); } public String averageStandardizedCoefficientsString(Graph graph) { Map<Edge, Double> coefs = averageStandardizedCoefficients(graph); return edgeCoefsString(coefs, new ArrayList<Edge>(graph.getEdges()), "Estimated adjacencyGraph", "Average standardized coefficient"); } public String logEdgeBayesFactorsString(Graph dag) { Map<Edge, Double> coefs = logEdgeBayesFactors(dag); return logBayesPosteriorFactorsString(coefs, scoreDag(dag)); } public Map<Edge, Double> logEdgeBayesFactors(Graph dag) { Map<Edge, Double> logBayesFactors = new HashMap<Edge, Double>(); double withEdge = scoreDag(dag); for (Edge edge : dag.getEdges()) { dag.removeEdge(edge); double withoutEdge = scoreDag(dag); double difference = withoutEdge - withEdge; logBayesFactors.put(edge, difference); dag.addEdge(edge); } return logBayesFactors; } private Edge translateEdge(Edge edge, Graph graph) { Node node1 = graph.getNode(edge.getNode1().getName()); Node node2 = graph.getNode(edge.getNode2().getName()); return new Edge(node1, node2, edge.getEndpoint1(), edge.getEndpoint2()); } private String gesEdgesString(Map<Edge, Integer> 
counts, List<DataSet> dataSets) { if (returnGraph == null) { returnGraph = search(); } return edgePercentagesString(counts, new ArrayList<Edge>(returnGraph.getEdges()), "Percentage of GES results each edge participates in", dataSets.size()); } /** * Bootstraps images coefs at a particular penalty level. * * @param dataSets The data sets from which bootstraps are drawn. These must share the same variable set, be * continuous, but may have different sample sizes. * @param nodes The nodes over which edge coefs are to be done. Why not specify this in advance? * @param knowledge Knowledge under which IMaGES should operate. * @param numBootstraps The number of bootstrap samples to be drawn. * @param penalty The penalty discount at which the bootstrap analysis is to be done. * @return A map from edges to coefs, where the edges are over the nodes of the datasets. */ private Map<Edge, Integer> bootstrapImagesCounts(List<DataSet> dataSets, List<Node> nodes, IKnowledge knowledge, int numBootstraps, double penalty) { List<Node> dataVars = dataSets.get(0).getVariables(); for (DataSet dataSet : dataSets) { if (!dataSet.getVariables().equals(dataVars)) { throw new IllegalArgumentException("Data sets must share the same variable set."); } } Map<Edge, Integer> counts = new HashMap<Edge, Integer>(); for (int i = 0; i < numBootstraps; i++) { List<DataSet> bootstraps = new ArrayList<DataSet>(); for (DataSet dataSet : dataSets) { bootstraps.add(DataUtils.getBootstrapSample(dataSet, dataSet.getNumRows())); // bootstraps.add(dataSet); } Images images = new Images(bootstraps); images.setPenaltyDiscount(penalty); // ImagesFirstNontriangular images = new ImagesFirstNontriangular(bootstraps); images.setKnowledge(knowledge); Graph pattern = images.search(); incrementCounts(counts, pattern, nodes); } return counts; } private void incrementCounts(Map<Edge, Integer> counts, Graph pattern, List<Node> nodes) { Graph _pattern = GraphUtils.replaceNodes(pattern, nodes); for (Edge e : 
_pattern.getEdges()) { if (counts.get(e) == null) { counts.put(e, 0); } counts.put(e, counts.get(e) + 1); } } /** * Prints edge coefs, with edges in the order of the adjacencies in <code>edgeList</code>. * * @param counts A map from edges to coefs. * @param edgeList A list of edges, the true edges or estimated edges. */ private String edgePercentagesString(Map<Edge, Integer> counts, List<Edge> edgeList, String percentagesLabel, int numBootstraps) { NumberFormat nf = new DecimalFormat("0"); StringBuilder builder = new StringBuilder(); if (percentagesLabel != null) { builder.append("\n").append(percentagesLabel).append(":\n\n"); } for (int i = 0; i < edgeList.size(); i++) { Edge edge = edgeList.get(i); int total = 0; for (Edge _edge : new HashMap<Edge, Integer>(counts).keySet()) { if (_edge.getNode1() == edge.getNode1() && _edge.getNode2() == edge.getNode2() || _edge.getNode1() == edge.getNode2() && _edge.getNode2() == edge.getNode1()) { total += counts.get(_edge); double percentage = counts.get(_edge) / (double) numBootstraps * 100.; builder.append(i + 1).append(". ").append(_edge).append(" ").append(nf.format(percentage)).append("%\n"); counts.remove(_edge); } } double percentage = total / (double) numBootstraps * 100.; builder.append(" (Sum = ").append(nf.format(percentage)).append("%)\n\n"); } // The left over edges. 
builder.append("Edges not adjacent in the estimated pattern:\n\n"); // for (Edge edge : coefs.keySet()) { // double percentage = coefs.get(edge) / (double) numBootstraps * 100.; // builder.append(edge + " " + nf.format(percentage) + "%\n"); // } for (Edge edge : new ArrayList<Edge>(counts.keySet())) { if (!counts.keySet().contains(edge)) continue; int total = 0; for (Edge _edge : new HashMap<Edge, Integer>(counts).keySet()) { if (_edge.getNode1() == edge.getNode1() && _edge.getNode2() == edge.getNode2() || _edge.getNode1() == edge.getNode2() && _edge.getNode2() == edge.getNode1()) { total += counts.get(_edge); double percentage = counts.get(_edge) / (double) numBootstraps * 100.; builder.append(_edge).append(" ").append(nf.format(percentage)).append("%\n"); counts.remove(_edge); } } double percentage = total / (double) numBootstraps * 100.; builder.append(" (Sum = ").append(nf.format(percentage)).append("%)\n\n"); } builder.append("\nThe estimated pattern, for reference:\n\n"); for (int i = 0; i < edgeList.size(); i++) { Edge edge = edgeList.get(i); builder.append(i + 1).append(". ").append(edge).append("\n"); } return builder.toString(); } private String edgeCoefsString(Map<Edge, Double> coefs, List<Edge> edgeList, String edgeListLabel, String percentagesLabel) { NumberFormat nf = new DecimalFormat("0.00"); StringBuilder builder = new StringBuilder(); builder.append("\n").append(edgeListLabel).append(":\n\n"); for (int i = 0; i < edgeList.size(); i++) { Edge edge = edgeList.get(i); builder.append(i + 1).append(". ").append(edge).append("\n"); } builder.append("\n").append(percentagesLabel).append(":\n\n"); for (int i = 0; i < edgeList.size(); i++) { Edge edge = edgeList.get(i); for (Edge _edge : new HashMap<Edge, Double>(coefs).keySet()) { if (_edge.getNode1() == edge.getNode1() && _edge.getNode2() == edge.getNode2() || _edge.getNode1() == edge.getNode2() && _edge.getNode2() == edge.getNode1()) { double coef = coefs.get(_edge); builder.append(i + 1).append(". 
").append(_edge).append(" ").append(nf.format(coef)).append("\n"); coefs.remove(_edge); } } } return builder.toString(); } private String logBayesPosteriorFactorsString(final Map<Edge, Double> coefs, double modelScore) { NumberFormat nf = new DecimalFormat("0.00"); StringBuilder builder = new StringBuilder(); SortedMap<Edge, Double> sortedCoefs = new TreeMap<Edge, Double>(new Comparator<Edge>() { public int compare(Edge edge1, Edge edge2) { return coefs.get(edge1).compareTo(coefs.get(edge2)); } }); sortedCoefs.putAll(coefs); builder.append("Model score: ").append(nf.format(modelScore)).append("\n\n"); builder.append("Edge Posterior Log Bayes Factors:\n\n"); builder.append("For a DAG in the IMaGES pattern with model score m, for each edge e in the " + "DAG, the model score that would result from removing each edge, calculating " + "the resulting model score m(e), and then reporting m(e) - m. The score used is " + "the IMScore, L - SUM_i{kc ln n(i)}, L is the maximum likelihood of the model, " + "k isthe number of parameters of the model, n(i) is the sample size of the ith " + "data set, and c is the penalty discount. Note that the more negative the score, " + "the more important the edge is to the posterior probability of the IMaGES model. " + "Edges are given in order of their importance so measured.\n\n"); int i = 0; for (Edge edge : sortedCoefs.keySet()) { builder.append(++i).append(". ").append(edge).append(" ").append(nf.format(sortedCoefs.get(edge))).append("\n"); } return builder.toString(); } public double getModelScore() { return bic; } public int getMaxNumEdges() { return maxNumEdges; } public void setMaxNumEdges(int maxNumEdges) { if (maxNumEdges < -1) throw new IllegalArgumentException(); this.maxNumEdges = maxNumEdges; } }
jdramsey/tetrad
tetrad-lib/src/main/java/edu/cmu/tetrad/search/Images.java
Java
gpl-2.0
61,462
<?php /** * The header for our theme. * * Displays all of the <head> section and everything up till <div id="content"> * * @package Daily Cooking Custom */ ?><!DOCTYPE html> <html <?php language_attributes(); ?>> <head> <meta charset="<?php bloginfo('charset'); ?>"> <meta name="viewport" content="width=device-width, initial-scale=1"> <link rel="profile" href="http://gmpg.org/xfn/11"> <link rel="pingback" href="<?php bloginfo('pingback_url'); ?>"> <?php wp_head(); ?> </head> <body <?php body_class(); ?>> <div id="page" class="hfeed site"> <a class="skip-link screen-reader-text" href="#content"><?php _e( 'Skip to content', 'daily-cooking-custom' ); ?></a> <header id="masthead" class="site-header" role="banner"> <div class="site-branding"> <h1 class="site-title"><a href="<?php echo esc_url(home_url('/')); ?>" rel="home"><?php bloginfo('name'); ?></a></h1> <h2 class="site-description"><?php bloginfo('description'); ?></h2> </div><!-- .site-branding --> <nav id="site-navigation" class="main-navigation" role="navigation"> <button class="menu-toggle" aria-controls="menu" aria-expanded="false"><?php _e( 'Primary Menu', 'daily-cooking-custom' ); ?></button> <?php wp_nav_menu(array('theme_location' => 'primary')); ?> </nav><!-- #site-navigation --> </header><!-- #masthead --> <div id="content" class="site-content">
csp5096/PBR-World.com
wp-examples/chapter 7/0907_07_code/phase 3/header.php
PHP
gpl-2.0
1,362