code
stringlengths
3
1.01M
repo_name
stringlengths
5
116
path
stringlengths
3
311
language
stringclasses
30 values
license
stringclasses
15 values
size
int64
3
1.01M
# -*- coding: utf-8 -*- """ *************************************************************************** CreateWorkspace.py --------------------- Date : October 2012 Copyright : (C) 2012 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Victor Olaya' __date__ = 'October 2012' __copyright__ = '(C) 2012, Victor Olaya' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' from qgis.core import * from GeoServerToolsAlgorithm import \ GeoServerToolsAlgorithm from processing.parameters.ParameterString import ParameterString from processing.outputs.OutputString import OutputString class CreateWorkspace(GeoServerToolsAlgorithm): WORKSPACE = 'WORKSPACE' WORKSPACEURI = 'WORKSPACEURI' def processAlgorithm(self, progress): self.createCatalog() workspaceName = self.getParameterValue(self.WORKSPACE) workspaceUri = self.getParameterValue(self.WORKSPACEURI) self.catalog.create_workspace(workspaceName, workspaceUri) def defineCharacteristics(self): self.addBaseParameters() self.name = 'Create workspace' self.group = 'GeoServer management tools' self.addParameter(ParameterString(self.WORKSPACE, 'Workspace')) self.addParameter(ParameterString(self.WORKSPACEURI, 'Workspace URI')) self.addOutput(OutputString(self.WORKSPACE, 'Workspace'))
mhugent/Quantum-GIS
python/plugins/processing/algs/admintools/CreateWorkspace.py
Python
gpl-2.0
2,079
/* Copyright (C) 2007-2014 Open Information Security Foundation * * You can copy, redistribute or modify this Program under the terms of * the GNU General Public License version 2 as published by the Free * Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * version 2 along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ /** * \file * * \author Victor Julien <victor@inliniac.net> * * AppLayer File Logger Output registration functions */ #include "suricata-common.h" #include "tm-modules.h" #include "output-file.h" #include "app-layer.h" #include "app-layer-parser.h" #include "detect-filemagic.h" #include "util-profiling.h" typedef struct OutputLoggerThreadStore_ { void *thread_data; struct OutputLoggerThreadStore_ *next; } OutputLoggerThreadStore; /** per thread data for this module, contains a list of per thread * data for the packet loggers. */ typedef struct OutputLoggerThreadData_ { OutputLoggerThreadStore *store; } OutputLoggerThreadData; /* logger instance, a module + a output ctx, * it's perfectly valid that have multiple instances of the same * log module (e.g. http.log) with different output ctx'. 
*/ typedef struct OutputFileLogger_ { FileLogger LogFunc; OutputCtx *output_ctx; struct OutputFileLogger_ *next; const char *name; TmmId module_id; } OutputFileLogger; static OutputFileLogger *list = NULL; int OutputRegisterFileLogger(const char *name, FileLogger LogFunc, OutputCtx *output_ctx) { int module_id = TmModuleGetIdByName(name); if (module_id < 0) return -1; OutputFileLogger *op = SCMalloc(sizeof(*op)); if (op == NULL) return -1; memset(op, 0x00, sizeof(*op)); op->LogFunc = LogFunc; op->output_ctx = output_ctx; op->name = name; op->module_id = (TmmId) module_id; if (list == NULL) list = op; else { OutputFileLogger *t = list; while (t->next) t = t->next; t->next = op; } SCLogDebug("OutputRegisterTxLogger happy"); return 0; } static TmEcode OutputFileLog(ThreadVars *tv, Packet *p, void *thread_data, PacketQueue *pq, PacketQueue *postpq) { BUG_ON(thread_data == NULL); BUG_ON(list == NULL); OutputLoggerThreadData *op_thread_data = (OutputLoggerThreadData *)thread_data; OutputFileLogger *logger = list; OutputLoggerThreadStore *store = op_thread_data->store; BUG_ON(logger == NULL && store != NULL); BUG_ON(logger != NULL && store == NULL); BUG_ON(logger == NULL && store == NULL); uint8_t flags = 0; Flow * const f = p->flow; /* no flow, no files */ if (f == NULL) { SCReturnInt(TM_ECODE_OK); } if (p->flowflags & FLOW_PKT_TOCLIENT) flags |= STREAM_TOCLIENT; else flags |= STREAM_TOSERVER; int file_close = (p->flags & PKT_PSEUDO_STREAM_END) ? 
1 : 0; int file_trunc = 0; FLOWLOCK_WRLOCK(f); // < need write lock for FilePrune below file_trunc = StreamTcpReassembleDepthReached(p); FileContainer *ffc = AppLayerParserGetFiles(p->proto, f->alproto, f->alstate, flags); SCLogDebug("ffc %p", ffc); if (ffc != NULL) { File *ff; for (ff = ffc->head; ff != NULL; ff = ff->next) { if (ff->flags & FILE_LOGGED) continue; SCLogDebug("ff %p", ff); if (file_trunc && ff->state < FILE_STATE_CLOSED) ff->state = FILE_STATE_TRUNCATED; if (file_close && ff->state < FILE_STATE_CLOSED) ff->state = FILE_STATE_TRUNCATED; if (ff->state == FILE_STATE_CLOSED || ff->state == FILE_STATE_TRUNCATED || ff->state == FILE_STATE_ERROR) { int file_logged = 0; if (FileForceMagic() && ff->magic == NULL) { FilemagicGlobalLookup(ff); } logger = list; store = op_thread_data->store; while (logger && store) { BUG_ON(logger->LogFunc == NULL); SCLogDebug("logger %p", logger); PACKET_PROFILING_TMM_START(p, logger->module_id); logger->LogFunc(tv, store->thread_data, (const Packet *)p, (const File *)ff); PACKET_PROFILING_TMM_END(p, logger->module_id); file_logged = 1; logger = logger->next; store = store->next; BUG_ON(logger == NULL && store != NULL); BUG_ON(logger != NULL && store == NULL); } if (file_logged) { ff->flags |= FILE_LOGGED; } } } FilePrune(ffc); } FLOWLOCK_UNLOCK(f); return TM_ECODE_OK; } /** \brief thread init for the tx logger * This will run the thread init functions for the individual registered * loggers */ static TmEcode OutputFileLogThreadInit(ThreadVars *tv, void *initdata, void **data) { OutputLoggerThreadData *td = SCMalloc(sizeof(*td)); if (td == NULL) return TM_ECODE_FAILED; memset(td, 0x00, sizeof(*td)); *data = (void *)td; SCLogDebug("OutputFileLogThreadInit happy (*data %p)", *data); OutputFileLogger *logger = list; while (logger) { TmModule *tm_module = TmModuleGetByName((char *)logger->name); if (tm_module == NULL) { SCLogError(SC_ERR_INVALID_ARGUMENT, "TmModuleGetByName for %s failed", logger->name); exit(EXIT_FAILURE); } if 
(tm_module->ThreadInit) { void *retptr = NULL; if (tm_module->ThreadInit(tv, (void *)logger->output_ctx, &retptr) == TM_ECODE_OK) { OutputLoggerThreadStore *ts = SCMalloc(sizeof(*ts)); /* todo */ BUG_ON(ts == NULL); memset(ts, 0x00, sizeof(*ts)); /* store thread handle */ ts->thread_data = retptr; if (td->store == NULL) { td->store = ts; } else { OutputLoggerThreadStore *tmp = td->store; while (tmp->next != NULL) tmp = tmp->next; tmp->next = ts; } SCLogDebug("%s is now set up", logger->name); } } logger = logger->next; } return TM_ECODE_OK; } static TmEcode OutputFileLogThreadDeinit(ThreadVars *tv, void *thread_data) { OutputLoggerThreadData *op_thread_data = (OutputLoggerThreadData *)thread_data; OutputLoggerThreadStore *store = op_thread_data->store; OutputFileLogger *logger = list; while (logger && store) { TmModule *tm_module = TmModuleGetByName((char *)logger->name); if (tm_module == NULL) { SCLogError(SC_ERR_INVALID_ARGUMENT, "TmModuleGetByName for %s failed", logger->name); exit(EXIT_FAILURE); } if (tm_module->ThreadDeinit) { tm_module->ThreadDeinit(tv, store->thread_data); } OutputLoggerThreadStore *next_store = store->next; SCFree(store); store = next_store; logger = logger->next; } SCFree(op_thread_data); return TM_ECODE_OK; } static void OutputFileLogExitPrintStats(ThreadVars *tv, void *thread_data) { OutputLoggerThreadData *op_thread_data = (OutputLoggerThreadData *)thread_data; OutputLoggerThreadStore *store = op_thread_data->store; OutputFileLogger *logger = list; while (logger && store) { TmModule *tm_module = TmModuleGetByName((char *)logger->name); if (tm_module == NULL) { SCLogError(SC_ERR_INVALID_ARGUMENT, "TmModuleGetByName for %s failed", logger->name); exit(EXIT_FAILURE); } if (tm_module->ThreadExitPrintStats) { tm_module->ThreadExitPrintStats(tv, store->thread_data); } logger = logger->next; store = store->next; } } void TmModuleFileLoggerRegister (void) { tmm_modules[TMM_FILELOGGER].name = "__file_logger__"; 
tmm_modules[TMM_FILELOGGER].ThreadInit = OutputFileLogThreadInit; tmm_modules[TMM_FILELOGGER].Func = OutputFileLog; tmm_modules[TMM_FILELOGGER].ThreadExitPrintStats = OutputFileLogExitPrintStats; tmm_modules[TMM_FILELOGGER].ThreadDeinit = OutputFileLogThreadDeinit; tmm_modules[TMM_FILELOGGER].cap_flags = 0; } void OutputFileShutdown(void) { OutputFileLogger *logger = list; while (logger) { OutputFileLogger *next_logger = logger->next; SCFree(logger); logger = next_logger; } list = NULL; }
jasonish/endace-suricata
src/output-file.c
C
gpl-2.0
9,171
/* * Driver for Renesas R-Car VIN * * Copyright (C) 2016 Renesas Electronics Corp. * Copyright (C) 2011-2013 Renesas Solutions Corp. * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com> * Copyright (C) 2008 Magnus Damm * * Based on the soc-camera rcar_vin driver * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/pm_runtime.h> #include <media/v4l2-event.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-rect.h> #include "rcar-vin.h" #define RVIN_DEFAULT_FORMAT V4L2_PIX_FMT_YUYV #define RVIN_MAX_WIDTH 2048 #define RVIN_MAX_HEIGHT 2048 /* ----------------------------------------------------------------------------- * Format Conversions */ static const struct rvin_video_format rvin_formats[] = { { .fourcc = V4L2_PIX_FMT_NV16, .bpp = 1, }, { .fourcc = V4L2_PIX_FMT_YUYV, .bpp = 2, }, { .fourcc = V4L2_PIX_FMT_UYVY, .bpp = 2, }, { .fourcc = V4L2_PIX_FMT_RGB565, .bpp = 2, }, { .fourcc = V4L2_PIX_FMT_XRGB555, .bpp = 2, }, { .fourcc = V4L2_PIX_FMT_XBGR32, .bpp = 4, }, }; const struct rvin_video_format *rvin_format_from_pixel(u32 pixelformat) { int i; for (i = 0; i < ARRAY_SIZE(rvin_formats); i++) if (rvin_formats[i].fourcc == pixelformat) return rvin_formats + i; return NULL; } static u32 rvin_format_bytesperline(struct v4l2_pix_format *pix) { const struct rvin_video_format *fmt; fmt = rvin_format_from_pixel(pix->pixelformat); if (WARN_ON(!fmt)) return -EINVAL; return pix->width * fmt->bpp; } static u32 rvin_format_sizeimage(struct v4l2_pix_format *pix) { if (pix->pixelformat == V4L2_PIX_FMT_NV16) return pix->bytesperline * pix->height * 2; return pix->bytesperline * pix->height; } /* ----------------------------------------------------------------------------- * V4L2 */ static int __rvin_try_format_source(struct rvin_dev *vin, u32 which, 
struct v4l2_pix_format *pix, struct rvin_source_fmt *source) { struct v4l2_subdev *sd; struct v4l2_subdev_pad_config *pad_cfg; struct v4l2_subdev_format format = { .which = which, }; int ret; sd = vin_to_source(vin); v4l2_fill_mbus_format(&format.format, pix, vin->digital.code); pad_cfg = v4l2_subdev_alloc_pad_config(sd); if (pad_cfg == NULL) return -ENOMEM; format.pad = vin->src_pad_idx; ret = v4l2_subdev_call(sd, pad, set_fmt, pad_cfg, &format); if (ret < 0 && ret != -ENOIOCTLCMD) goto done; v4l2_fill_pix_format(pix, &format.format); source->width = pix->width; source->height = pix->height; vin_dbg(vin, "Source resolution: %ux%u\n", source->width, source->height); done: v4l2_subdev_free_pad_config(pad_cfg); return ret; } static int __rvin_try_format(struct rvin_dev *vin, u32 which, struct v4l2_pix_format *pix, struct rvin_source_fmt *source) { const struct rvin_video_format *info; u32 rwidth, rheight, walign; /* Requested */ rwidth = pix->width; rheight = pix->height; /* * Retrieve format information and select the current format if the * requested format isn't supported. */ info = rvin_format_from_pixel(pix->pixelformat); if (!info) { vin_dbg(vin, "Format %x not found, keeping %x\n", pix->pixelformat, vin->format.pixelformat); *pix = vin->format; pix->width = rwidth; pix->height = rheight; } /* Always recalculate */ pix->bytesperline = 0; pix->sizeimage = 0; /* Limit to source capabilities */ __rvin_try_format_source(vin, which, pix, source); /* If source can't match format try if VIN can scale */ if (source->width != rwidth || source->height != rheight) rvin_scale_try(vin, pix, rwidth, rheight); /* HW limit width to a multiple of 32 (2^5) for NV16 else 2 (2^1) */ walign = vin->format.pixelformat == V4L2_PIX_FMT_NV16 ? 
5 : 1; /* Limit to VIN capabilities */ v4l_bound_align_image(&pix->width, 2, RVIN_MAX_WIDTH, walign, &pix->height, 4, RVIN_MAX_HEIGHT, 2, 0); switch (pix->field) { case V4L2_FIELD_NONE: case V4L2_FIELD_TOP: case V4L2_FIELD_BOTTOM: case V4L2_FIELD_INTERLACED_TB: case V4L2_FIELD_INTERLACED_BT: case V4L2_FIELD_INTERLACED: break; default: pix->field = V4L2_FIELD_NONE; break; } pix->bytesperline = max_t(u32, pix->bytesperline, rvin_format_bytesperline(pix)); pix->sizeimage = max_t(u32, pix->sizeimage, rvin_format_sizeimage(pix)); if (vin->chip == RCAR_M1 && pix->pixelformat == V4L2_PIX_FMT_XBGR32) { vin_err(vin, "pixel format XBGR32 not supported on M1\n"); return -EINVAL; } vin_dbg(vin, "Requested %ux%u Got %ux%u bpl: %d size: %d\n", rwidth, rheight, pix->width, pix->height, pix->bytesperline, pix->sizeimage); return 0; } static int rvin_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct rvin_dev *vin = video_drvdata(file); strlcpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver)); strlcpy(cap->card, "R_Car_VIN", sizeof(cap->card)); snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", dev_name(vin->dev)); return 0; } static int rvin_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct rvin_dev *vin = video_drvdata(file); struct rvin_source_fmt source; return __rvin_try_format(vin, V4L2_SUBDEV_FORMAT_TRY, &f->fmt.pix, &source); } static int rvin_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct rvin_dev *vin = video_drvdata(file); struct rvin_source_fmt source; int ret; if (vb2_is_busy(&vin->queue)) return -EBUSY; ret = __rvin_try_format(vin, V4L2_SUBDEV_FORMAT_ACTIVE, &f->fmt.pix, &source); if (ret) return ret; vin->source.width = source.width; vin->source.height = source.height; vin->format = f->fmt.pix; return 0; } static int rvin_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct rvin_dev *vin = video_drvdata(file); f->fmt.pix = vin->format; return 0; } 
static int rvin_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { if (f->index >= ARRAY_SIZE(rvin_formats)) return -EINVAL; f->pixelformat = rvin_formats[f->index].fourcc; return 0; } static int rvin_g_selection(struct file *file, void *fh, struct v4l2_selection *s) { struct rvin_dev *vin = video_drvdata(file); if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; switch (s->target) { case V4L2_SEL_TGT_CROP_BOUNDS: case V4L2_SEL_TGT_CROP_DEFAULT: s->r.left = s->r.top = 0; s->r.width = vin->source.width; s->r.height = vin->source.height; break; case V4L2_SEL_TGT_CROP: s->r = vin->crop; break; case V4L2_SEL_TGT_COMPOSE_BOUNDS: case V4L2_SEL_TGT_COMPOSE_DEFAULT: s->r.left = s->r.top = 0; s->r.width = vin->format.width; s->r.height = vin->format.height; break; case V4L2_SEL_TGT_COMPOSE: s->r = vin->compose; break; default: return -EINVAL; } return 0; } static int rvin_s_selection(struct file *file, void *fh, struct v4l2_selection *s) { struct rvin_dev *vin = video_drvdata(file); const struct rvin_video_format *fmt; struct v4l2_rect r = s->r; struct v4l2_rect max_rect; struct v4l2_rect min_rect = { .width = 6, .height = 2, }; if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; v4l2_rect_set_min_size(&r, &min_rect); switch (s->target) { case V4L2_SEL_TGT_CROP: /* Can't crop outside of source input */ max_rect.top = max_rect.left = 0; max_rect.width = vin->source.width; max_rect.height = vin->source.height; v4l2_rect_map_inside(&r, &max_rect); v4l_bound_align_image(&r.width, 2, vin->source.width, 1, &r.height, 4, vin->source.height, 2, 0); r.top = clamp_t(s32, r.top, 0, vin->source.height - r.height); r.left = clamp_t(s32, r.left, 0, vin->source.width - r.width); vin->crop = s->r = r; vin_dbg(vin, "Cropped %dx%d@%d:%d of %dx%d\n", r.width, r.height, r.left, r.top, vin->source.width, vin->source.height); break; case V4L2_SEL_TGT_COMPOSE: /* Make sure compose rect fits inside output format */ max_rect.top = max_rect.left = 0; 
max_rect.width = vin->format.width; max_rect.height = vin->format.height; v4l2_rect_map_inside(&r, &max_rect); /* * Composing is done by adding a offset to the buffer address, * the HW wants this address to be aligned to HW_BUFFER_MASK. * Make sure the top and left values meets this requirement. */ while ((r.top * vin->format.bytesperline) & HW_BUFFER_MASK) r.top--; fmt = rvin_format_from_pixel(vin->format.pixelformat); while ((r.left * fmt->bpp) & HW_BUFFER_MASK) r.left--; vin->compose = s->r = r; vin_dbg(vin, "Compose %dx%d@%d:%d in %dx%d\n", r.width, r.height, r.left, r.top, vin->format.width, vin->format.height); break; default: return -EINVAL; } /* HW supports modifying configuration while running */ rvin_crop_scale_comp(vin); return 0; } static int rvin_cropcap(struct file *file, void *priv, struct v4l2_cropcap *crop) { struct rvin_dev *vin = video_drvdata(file); struct v4l2_subdev *sd = vin_to_source(vin); if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; return v4l2_subdev_call(sd, video, g_pixelaspect, &crop->pixelaspect); } static int rvin_enum_input(struct file *file, void *priv, struct v4l2_input *i) { struct rvin_dev *vin = video_drvdata(file); struct v4l2_subdev *sd = vin_to_source(vin); int ret; if (i->index != 0) return -EINVAL; ret = v4l2_subdev_call(sd, video, g_input_status, &i->status); if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) return ret; i->type = V4L2_INPUT_TYPE_CAMERA; i->std = vin->vdev.tvnorms; if (v4l2_subdev_has_op(sd, pad, dv_timings_cap)) i->capabilities = V4L2_IN_CAP_DV_TIMINGS; strlcpy(i->name, "Camera", sizeof(i->name)); return 0; } static int rvin_g_input(struct file *file, void *priv, unsigned int *i) { *i = 0; return 0; } static int rvin_s_input(struct file *file, void *priv, unsigned int i) { if (i > 0) return -EINVAL; return 0; } static int rvin_querystd(struct file *file, void *priv, v4l2_std_id *a) { struct rvin_dev *vin = video_drvdata(file); struct v4l2_subdev *sd = vin_to_source(vin); return 
v4l2_subdev_call(sd, video, querystd, a); } static int rvin_s_std(struct file *file, void *priv, v4l2_std_id a) { struct rvin_dev *vin = video_drvdata(file); struct v4l2_subdev *sd = vin_to_source(vin); struct v4l2_subdev_format fmt = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; struct v4l2_mbus_framefmt *mf = &fmt.format; int ret = v4l2_subdev_call(sd, video, s_std, a); if (ret < 0) return ret; /* Changing the standard will change the width/height */ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt); if (ret) { vin_err(vin, "Failed to get initial format\n"); return ret; } vin->format.width = mf->width; vin->format.height = mf->height; vin->crop.top = vin->crop.left = 0; vin->crop.width = mf->width; vin->crop.height = mf->height; vin->compose.top = vin->compose.left = 0; vin->compose.width = mf->width; vin->compose.height = mf->height; return 0; } static int rvin_g_std(struct file *file, void *priv, v4l2_std_id *a) { struct rvin_dev *vin = video_drvdata(file); struct v4l2_subdev *sd = vin_to_source(vin); return v4l2_subdev_call(sd, video, g_std, a); } static int rvin_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub) { switch (sub->type) { case V4L2_EVENT_SOURCE_CHANGE: return v4l2_event_subscribe(fh, sub, 4, NULL); } return v4l2_ctrl_subscribe_event(fh, sub); } static int rvin_enum_dv_timings(struct file *file, void *priv_fh, struct v4l2_enum_dv_timings *timings) { struct rvin_dev *vin = video_drvdata(file); struct v4l2_subdev *sd = vin_to_source(vin); int pad, ret; pad = timings->pad; timings->pad = vin->src_pad_idx; ret = v4l2_subdev_call(sd, pad, enum_dv_timings, timings); timings->pad = pad; return ret; } static int rvin_s_dv_timings(struct file *file, void *priv_fh, struct v4l2_dv_timings *timings) { struct rvin_dev *vin = video_drvdata(file); struct v4l2_subdev *sd = vin_to_source(vin); int ret; ret = v4l2_subdev_call(sd, video, s_dv_timings, timings); if (ret) return ret; vin->source.width = timings->bt.width; vin->source.height = 
timings->bt.height; vin->format.width = timings->bt.width; vin->format.height = timings->bt.height; return 0; } static int rvin_g_dv_timings(struct file *file, void *priv_fh, struct v4l2_dv_timings *timings) { struct rvin_dev *vin = video_drvdata(file); struct v4l2_subdev *sd = vin_to_source(vin); return v4l2_subdev_call(sd, video, g_dv_timings, timings); } static int rvin_query_dv_timings(struct file *file, void *priv_fh, struct v4l2_dv_timings *timings) { struct rvin_dev *vin = video_drvdata(file); struct v4l2_subdev *sd = vin_to_source(vin); return v4l2_subdev_call(sd, video, query_dv_timings, timings); } static int rvin_dv_timings_cap(struct file *file, void *priv_fh, struct v4l2_dv_timings_cap *cap) { struct rvin_dev *vin = video_drvdata(file); struct v4l2_subdev *sd = vin_to_source(vin); int pad, ret; pad = cap->pad; cap->pad = vin->src_pad_idx; ret = v4l2_subdev_call(sd, pad, dv_timings_cap, cap); cap->pad = pad; return ret; } static const struct v4l2_ioctl_ops rvin_ioctl_ops = { .vidioc_querycap = rvin_querycap, .vidioc_try_fmt_vid_cap = rvin_try_fmt_vid_cap, .vidioc_g_fmt_vid_cap = rvin_g_fmt_vid_cap, .vidioc_s_fmt_vid_cap = rvin_s_fmt_vid_cap, .vidioc_enum_fmt_vid_cap = rvin_enum_fmt_vid_cap, .vidioc_g_selection = rvin_g_selection, .vidioc_s_selection = rvin_s_selection, .vidioc_cropcap = rvin_cropcap, .vidioc_enum_input = rvin_enum_input, .vidioc_g_input = rvin_g_input, .vidioc_s_input = rvin_s_input, .vidioc_dv_timings_cap = rvin_dv_timings_cap, .vidioc_enum_dv_timings = rvin_enum_dv_timings, .vidioc_g_dv_timings = rvin_g_dv_timings, .vidioc_s_dv_timings = rvin_s_dv_timings, .vidioc_query_dv_timings = rvin_query_dv_timings, .vidioc_querystd = rvin_querystd, .vidioc_g_std = rvin_g_std, .vidioc_s_std = rvin_s_std, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_create_bufs = vb2_ioctl_create_bufs, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_expbuf = vb2_ioctl_expbuf, .vidioc_prepare_buf = 
vb2_ioctl_prepare_buf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, .vidioc_log_status = v4l2_ctrl_log_status, .vidioc_subscribe_event = rvin_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; /* ----------------------------------------------------------------------------- * File Operations */ static int rvin_power_on(struct rvin_dev *vin) { int ret; struct v4l2_subdev *sd = vin_to_source(vin); pm_runtime_get_sync(vin->v4l2_dev.dev); ret = v4l2_subdev_call(sd, core, s_power, 1); if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) return ret; return 0; } static int rvin_power_off(struct rvin_dev *vin) { int ret; struct v4l2_subdev *sd = vin_to_source(vin); ret = v4l2_subdev_call(sd, core, s_power, 0); pm_runtime_put(vin->v4l2_dev.dev); if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) return ret; return 0; } static int rvin_initialize_device(struct file *file) { struct rvin_dev *vin = video_drvdata(file); int ret; struct v4l2_format f = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .fmt.pix = { .width = vin->format.width, .height = vin->format.height, .field = vin->format.field, .colorspace = vin->format.colorspace, .pixelformat = vin->format.pixelformat, }, }; ret = rvin_power_on(vin); if (ret < 0) return ret; pm_runtime_enable(&vin->vdev.dev); ret = pm_runtime_resume(&vin->vdev.dev); if (ret < 0 && ret != -ENOSYS) goto eresume; /* * Try to configure with default parameters. Notice: this is the * very first open, so, we cannot race against other calls, * apart from someone else calling open() simultaneously, but * .host_lock is protecting us against it. 
*/ ret = rvin_s_fmt_vid_cap(file, NULL, &f); if (ret < 0) goto esfmt; v4l2_ctrl_handler_setup(&vin->ctrl_handler); return 0; esfmt: pm_runtime_disable(&vin->vdev.dev); eresume: rvin_power_off(vin); return ret; } static int rvin_open(struct file *file) { struct rvin_dev *vin = video_drvdata(file); int ret; mutex_lock(&vin->lock); file->private_data = vin; ret = v4l2_fh_open(file); if (ret) goto unlock; if (!v4l2_fh_is_singular_file(file)) goto unlock; if (rvin_initialize_device(file)) { v4l2_fh_release(file); ret = -ENODEV; } unlock: mutex_unlock(&vin->lock); return ret; } static int rvin_release(struct file *file) { struct rvin_dev *vin = video_drvdata(file); bool fh_singular; int ret; mutex_lock(&vin->lock); /* Save the singular status before we call the clean-up helper */ fh_singular = v4l2_fh_is_singular_file(file); /* the release helper will cleanup any on-going streaming */ ret = _vb2_fop_release(file, NULL); /* * If this was the last open file. * Then de-initialize hw module. */ if (fh_singular) { pm_runtime_suspend(&vin->vdev.dev); pm_runtime_disable(&vin->vdev.dev); rvin_power_off(vin); } mutex_unlock(&vin->lock); return ret; } static const struct v4l2_file_operations rvin_fops = { .owner = THIS_MODULE, .unlocked_ioctl = video_ioctl2, .open = rvin_open, .release = rvin_release, .poll = vb2_fop_poll, .mmap = vb2_fop_mmap, .read = vb2_fop_read, }; void rvin_v4l2_remove(struct rvin_dev *vin) { v4l2_info(&vin->v4l2_dev, "Removing %s\n", video_device_node_name(&vin->vdev)); /* Checks internaly if handlers have been init or not */ v4l2_ctrl_handler_free(&vin->ctrl_handler); /* Checks internaly if vdev have been init or not */ video_unregister_device(&vin->vdev); } static void rvin_notify(struct v4l2_subdev *sd, unsigned int notification, void *arg) { struct rvin_dev *vin = container_of(sd->v4l2_dev, struct rvin_dev, v4l2_dev); switch (notification) { case V4L2_DEVICE_NOTIFY_EVENT: v4l2_event_queue(&vin->vdev, arg); break; default: break; } } int 
rvin_v4l2_probe(struct rvin_dev *vin) { struct v4l2_subdev_format fmt = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; struct v4l2_mbus_framefmt *mf = &fmt.format; struct video_device *vdev = &vin->vdev; struct v4l2_subdev *sd = vin_to_source(vin); int pad_idx, ret; v4l2_set_subdev_hostdata(sd, vin); vin->v4l2_dev.notify = rvin_notify; ret = v4l2_subdev_call(sd, video, g_tvnorms, &vin->vdev.tvnorms); if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) return ret; if (vin->vdev.tvnorms == 0) { /* Disable the STD API if there are no tvnorms defined */ v4l2_disable_ioctl(&vin->vdev, VIDIOC_G_STD); v4l2_disable_ioctl(&vin->vdev, VIDIOC_S_STD); v4l2_disable_ioctl(&vin->vdev, VIDIOC_QUERYSTD); v4l2_disable_ioctl(&vin->vdev, VIDIOC_ENUMSTD); } /* Add the controls */ /* * Currently the subdev with the largest number of controls (13) is * ov6550. So let's pick 16 as a hint for the control handler. Note * that this is a hint only: too large and you waste some memory, too * small and there is a (very) small performance hit when looking up * controls in the internal hash. 
*/ ret = v4l2_ctrl_handler_init(&vin->ctrl_handler, 16); if (ret < 0) return ret; ret = v4l2_ctrl_add_handler(&vin->ctrl_handler, sd->ctrl_handler, NULL); if (ret < 0) return ret; /* video node */ vdev->fops = &rvin_fops; vdev->v4l2_dev = &vin->v4l2_dev; vdev->queue = &vin->queue; strlcpy(vdev->name, KBUILD_MODNAME, sizeof(vdev->name)); vdev->release = video_device_release_empty; vdev->ioctl_ops = &rvin_ioctl_ops; vdev->lock = &vin->lock; vdev->ctrl_handler = &vin->ctrl_handler; vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING | V4L2_CAP_READWRITE; vin->src_pad_idx = 0; for (pad_idx = 0; pad_idx < sd->entity.num_pads; pad_idx++) if (sd->entity.pads[pad_idx].flags == MEDIA_PAD_FL_SOURCE) break; if (pad_idx >= sd->entity.num_pads) return -EINVAL; vin->src_pad_idx = pad_idx; fmt.pad = vin->src_pad_idx; /* Try to improve our guess of a reasonable window format */ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt); if (ret) { vin_err(vin, "Failed to get initial format\n"); return ret; } /* Set default format */ vin->format.width = mf->width; vin->format.height = mf->height; vin->format.colorspace = mf->colorspace; vin->format.field = mf->field; vin->format.pixelformat = RVIN_DEFAULT_FORMAT; /* Set initial crop and compose */ vin->crop.top = vin->crop.left = 0; vin->crop.width = mf->width; vin->crop.height = mf->height; vin->compose.top = vin->compose.left = 0; vin->compose.width = mf->width; vin->compose.height = mf->height; ret = video_register_device(&vin->vdev, VFL_TYPE_GRABBER, -1); if (ret) { vin_err(vin, "Failed to register video device\n"); return ret; } video_set_drvdata(&vin->vdev, vin); v4l2_info(&vin->v4l2_dev, "Device registered as %s\n", video_device_node_name(&vin->vdev)); return ret; }
bluecherrydvr/linux
drivers/media/platform/rcar-vin/rcar-v4l2.c
C
gpl-2.0
21,330
module molpro_manage
    !==============================================================
    ! This code is part of FCC_TOOLS
    !==============================================================
    ! Description
    !  This MODULE contains subroutines to get molecular information
    !  from molpro out files
    !
    ! Notes
    !  All subroutines rewind the file after using it
    !==============================================================

    !Common declarations:
    !===================
    use constants
    use line_preprocess
    implicit none

    contains

    subroutine read_molpro_natoms(unt,Nat,error_flag)
        !==============================================================
        ! This code is part of FCC_TOOLS
        !==============================================================
        !Description
        ! Get the number of atoms from a molpro output file. The value
        ! is read from the xyz-format block that follows the line
        ! "Current geometry (xyz format, in Angstrom)".
        !
        !Arguments
        ! unt        (inp) int /scalar   unit for the file
        ! Nat        (out) int /scalar   Number of atoms
        ! error_flag (out) int /scalar   Error flag:
        !                                 0 : Success
        !                                -i : Read error on line i
        !==============================================================

        integer,intent(in)  :: unt
        integer,intent(out) :: Nat
        integer,intent(out) :: error_flag

        !Local variables
        !=============
        character(len=240) :: line=""
        !I/O
        integer :: IOstatus
        !Counters
        integer :: ii

        ! Search section
        error_flag = 0
        ii = 0
        do
            ii = ii + 1
            read(unt,'(A)',IOSTAT=IOstatus) line
            ! Two possible scenarios while reading:
            ! 1) End of file (header never found)
            if ( IOstatus < 0 ) then
                error_flag = -ii
                rewind(unt)
                return
            endif
            ! 2) Found what looked for!
            if ( adjustl(line) == "Current geometry (xyz format, in Angstrom)" ) then
                !One empty line separates the header from the xyz block
                read(unt,'(A)',IOSTAT=IOstatus) line
                exit
            endif
        enddo

        ! First line of the xyz block holds the number of atoms
        read(unt,*,IOSTAT=IOstatus) Nat

        rewind(unt)
        return

    end subroutine read_molpro_natoms

    subroutine read_molpro_geom(unt,Nat,AtName,X,Y,Z,error_flag)
        !==============================================================
        ! This code is part of FCC_TOOLS
        !==============================================================
        !Description
        ! Get geometry and atom names from a molpro output file. The
        ! geometry is read from the xyz-format block that follows the
        ! line "Current geometry (xyz format, in Angstrom)". The number
        ! of atoms is also taken.
        !
        !Arguments
        ! unt        (inp) int /scalar    unit for the file
        ! Nat        (out) int /scalar    Number of atoms
        ! AtName     (out) char/vector    Atom names
        ! X,Y,Z      (out) real/vectors   Coordinate vectors (ANGSTRONG)
        ! error_flag (out) int /scalar    Error flag:
        !                                  0 : Success
        !                                 -i : Read error on line i
        !                                  1 : Truncated/malformed geometry
        !==============================================================

        integer,intent(in)  :: unt
        integer,intent(out) :: Nat
        character(len=*), dimension(:), intent(out) :: AtName
        real(kind=8), dimension(:), intent(out)     :: X,Y,Z
        integer,intent(out) :: error_flag

        !Local variables
        !=============
        character(len=240) :: line=""
        !I/O
        integer :: IOstatus
        !Counters
        integer :: i, ii

        ! Search section
        error_flag = 0
        ii = 0
        do
            ii = ii + 1
            read(unt,'(A)',IOSTAT=IOstatus) line
            ! Two possible scenarios while reading:
            ! 1) End of file (header never found)
            if ( IOstatus < 0 ) then
                error_flag = -ii
                rewind(unt)
                return
            endif
            ! 2) Found what looked for!
            if ( adjustl(line) == "Current geometry (xyz format, in Angstrom)" ) then
                !One empty line separates the header from the xyz block
                read(unt,'(A)',IOSTAT=IOstatus) line
                exit
            endif
        enddo

        ! First line of the xyz block holds the number of atoms
        read(unt,*,IOSTAT=IOstatus) Nat
        !Title/comment line of the xyz block
        read(unt,'(A)',IOSTAT=IOstatus) line

        !Start reading geometry
        do i=1,Nat
            ! BUGFIX: the original read had no IOSTAT= specifier, so the
            ! subsequent IOstatus test was dead code and a truncated file
            ! aborted the program instead of setting error_flag.
            read(unt,*,IOSTAT=IOstatus) AtName(i), &
                                        X(i),      &
                                        Y(i),      &
                                        Z(i)
            if ( IOstatus /= 0 ) then
                print*, "Unexpected end of file while reading Geometry"
                error_flag = 1
                rewind(unt)
                return
            endif
        enddo

        rewind(unt)
        return

    end subroutine read_molpro_geom

    subroutine read_molpro_hess(unt,Nat,Hlt,error_flag)
        !==============================================================
        ! This code is part of FCC_TOOLS
        !==============================================================
        !Description
        ! Read Hessian from molpro output. Returns the triangular part of the
        ! Hessian matrix in AU
        !
        !Arguments
        ! unt        (inp) scalar   unit for the file
        ! Nat        (inp) scalar   Number of atoms
        ! Hlt        (out) vector   Lower triangular part of Hessian matrix (AU)
        ! error_flag (out) scalar   error_flag :
        !                            0 : Success
        !                           -i : Read error on line i
        !                            2 : Wrong number of elements for Hlt
        ! Notes
        !  The Hessian is printed by molpro in blocks of 5 columns;
        !  only the lower triangle is listed.
        !==============================================================

        integer,intent(in) :: unt
        integer,intent(in) :: Nat
        real(kind=8), dimension(:), intent(out) :: Hlt
        integer,intent(out) :: error_flag

        !Local stuff
        !=============
        character(len=240) :: line=""
        character(len=1)   :: cnull
        integer :: N
        !I/O
        integer :: IOstatus
        !Counters
        integer :: i, j, k, ii, jini, jfin, &
                   iblock, nblocks
        !Auxiliar arrays
        real(kind=8),dimension(3*Nat,3*Nat) :: Hpart

        !Use N to store 3*Nat
        N = 3*Nat

        ! Search section
        ii = 0
        error_flag = 0
        do
            ii = ii + 1
            read(unt,'(A)',IOSTAT=IOstatus) line
            ! Two possible scenarios while reading:
            ! 1) End of file (header never found)
            if ( IOstatus < 0 ) then
                error_flag = -ii
                rewind(unt)
                return
            endif
            ! 2) Found what looked for!
            if ( INDEX(line,"Force Constants (Second Derivatives of the Energy) in [a.u.]") /= 0 ) then
                exit
            endif
        enddo

        !Hessian elements arranged in blocks of 5 columns each
        !Only triangular part is shown
        nblocks = N/5
        if (N /= nblocks*5) nblocks=nblocks+1
        do iblock=1,nblocks
            !First line is the column-label header
            read(unt,'(A)') line
            jini=(iblock-1)*5+1
            do i=jini,N
                jfin=min(i,iblock*5)
                !cnull discards the row label at the start of each line
                read(unt,*) cnull, Hpart(i,jini:jfin)
            enddo
        enddo

        !Get Hlt from the half matrix in Hpart
        k = 0
        do i=1,N
            do j=1,i
                k = k + 1
                Hlt(k) = Hpart(i,j)
            enddo
        enddo
        if (k /= (N*(N+1)/2)) then
            error_flag = 2
        endif

        rewind(unt)
        return

    end subroutine read_molpro_hess

end module molpro_manage
jcerezochem/fcc_tools
src/modules/molpro_manage.f90
FORTRAN
gpl-2.0
8,474
from __future__ import division """ These functions are for BOSSANOVA (BOss Survey of Satellites Around Nearby Optically obserVable milky way Analogs) """ import numpy as np from matplotlib import pyplot as plt import targeting def count_targets(hsts, verbose=True, remove_cached=True, rvir=300, targetingkwargs={}): """ Generates a count of targets for each field. Parameters ---------- hsts A list of `NSAHost` objects verbose : bool Whether or not to print a message when each host is examined remove_cached : bool Whether or not to remove the cached sdss catalog for each host after counting. This may be necessary to prevent running out of memory, depending on the number of hosts involved. rvir : float "virial radius" in kpc for the arcmin transform targetingkwargs : dict or list of dicts passed into ` targeting.select_targets` if a single dictionary, otherwise the targeting will Returns ------- ntargs : astropy.Table a table object with the names of the hosts and the target counts. """ import sys import collections from astropy import table if isinstance(targetingkwargs, collections.Mapping): colnames = ['ntarg'] targetingkwargs = [targetingkwargs.copy()] else: colnames = [('ntarg_' + t.get('colname', str(i))) for i, t in enumerate(targetingkwargs)] targetingkwargs = [t.copy() for t in targetingkwargs] for t in targetingkwargs: t.setdefault('outercutrad', 300) t.setdefault('removegama', False) if 'colname' in t: del t['colname'] nms = [] dists = [] rvs = [] cnts = [[] for t in targetingkwargs] for i, h in enumerate(hsts): if verbose: print 'Generating target count for', h.name, '#', i + 1, 'of', len(hsts) sys.stdout.flush() nms.append(h.name) dists.append(h.distmpc) rvs.append(h.physical_to_projected(300)) for j, t in enumerate(targetingkwargs): if verbose: print 'Targeting parameters:', t sys.stdout.flush() tcat = targeting.select_targets(h, **t) cnts[j].append(len(tcat)) if remove_cached: h._cached_sdss = None t = table.Table() 
t.add_column(table.Column(name='name', data=nms)) t.add_column(table.Column(name='distmpc', data=dists, units='Mpc')) t.add_column(table.Column(name='rvirarcmin', data=rvs, units='arcmin')) for cnm, cnt in zip(colnames, cnts): t.add_column(table.Column(name=cnm, data=cnt)) return t _Vabs_mw_sats = {'Bootes I': -6.3099999999999987, 'Bootes II': -2.7000000000000011, 'Bootes III': -5.7500000000000018, 'Canes Venatici I': -8.5900000000000016, 'Canes Venatici II': -4.9199999999999982, 'Canis Major': -14.389999999999999, 'Carina': -9.1099999999999994, 'Coma Berenices': -4.0999999999999996, 'Draco': -8.7999999999999989, 'Fornax': -13.44, 'Hercules': -6.6000000000000014, 'LMC': -18.120000000000001, 'Leo I': -12.02, 'Leo II': -9.8399999999999999, 'Leo IV': -5.8400000000000016, 'Leo V': -5.25, 'Pisces II': -5.0, 'SMC': -16.830000000000002, 'Sagittarius dSph': -13.500000000000002, 'Sculptor': -11.070000000000002, 'Segue I': -1.5, 'Segue II': -2.5, 'Sextans I': -9.2700000000000014, 'Ursa Major I': -5.5299999999999994, 'Ursa Major II': -4.1999999999999993, 'Ursa Minor': -8.7999999999999989, 'Willman 1': -2.6999999999999993} #now just assume they are all g-r=0.5, ~right for Draco ... 
Apply Jester+ transforms _rabs_mw_sats = dict([(k, v + (-0.41 * (0.5) + 0.01)) for k, v in _Vabs_mw_sats.iteritems()]) _sorted_mw_rabs = np.sort(_rabs_mw_sats.values()) def count_mw_sats(h, maglim, mwsatsrmags=_sorted_mw_rabs): appmags = mwsatsrmags + h.distmod return np.sum(appmags < maglim) def generate_count_table(hsts, fnout=None, maglims=[21, 20.5, 20], outercutrad=-90,remove_cached=True): from astropy.io import ascii from astropy import table targetingkwargs = [] for m in maglims: targetingkwargs.append({'faintlimit': m, 'outercutrad': outercutrad, 'colname': str(m)}) tab = count_targets(hsts, targetingkwargs=targetingkwargs, remove_cached=remove_cached) for m in maglims: satcnt = [] for hs in hsts: satcnt.append(count_mw_sats(hs, m)) tab.add_column(table.Column(name='nsat_' + str(m), data=satcnt)) for m in maglims: nsatstr = 'nsat_' + str(m) ntargstr = 'ntarg_' + str(m) tab.add_column(table.Column(name='ntargpersat_' + str(m), data=tab[ntargstr] / tab[nsatstr])) if fnout: ascii.write(tab, fnout) return tab
saga-survey/saga-code
bossanova/bossanova.py
Python
gpl-2.0
4,779
<?php
/*
 +--------------------------------------------------------------------+
 | CiviCRM version 4.6                                                |
 +--------------------------------------------------------------------+
 | Copyright CiviCRM LLC (c) 2004-2015                                |
 +--------------------------------------------------------------------+
 | This file is a part of CiviCRM.                                    |
 |                                                                    |
 | CiviCRM is free software; you can copy, modify, and distribute it  |
 | under the terms of the GNU Affero General Public License           |
 | Version 3, 19 November 2007 and the CiviCRM Licensing Exception.   |
 |                                                                    |
 | CiviCRM is distributed in the hope that it will be useful, but     |
 | WITHOUT ANY WARRANTY; without even the implied warranty of         |
 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.               |
 | See the GNU Affero General Public License for more details.        |
 |                                                                    |
 | You should have received a copy of the GNU Affero General Public   |
 | License and the CiviCRM Licensing Exception along                  |
 | with this program; if not, contact CiviCRM LLC                     |
 | at info[AT]civicrm[DOT]org. If you have questions about the        |
 | GNU Affero General Public License or the licensing of CiviCRM,     |
 | see the CiviCRM license FAQ at http://civicrm.org/licensing        |
 +--------------------------------------------------------------------+
 */

/**
 *
 * @package CRM
 * @copyright CiviCRM LLC (c) 2004-2015
 * $Id$
 *
 */

/**
 * Form for updating the billing details (credit card + billing address) of a
 * recurring contribution or auto-renew membership subscription.
 *
 * Entry is via one of three request parameters: crid (recurring contribution
 * id), coid (contribution id) or mid (membership id).
 */
class CRM_Contribute_Form_UpdateBilling extends CRM_Core_Form {
  protected $_crid = NULL;

  protected $_coid = NULL;

  // Membership id; previously only created dynamically in preProcess().
  protected $_mid = NULL;

  protected $_mode = NULL;

  protected $_subscriptionDetails = NULL;

  protected $_selfService = FALSE;

  public $_bltID = NULL;

  /**
   * @var array current payment processor including a copy of the object in 'object' key
   */
  public $_paymentProcessor = array();

  public $_paymentProcessorObj = NULL;

  /**
   * Set variables up before form is built.
   *
   * Resolves the subscription from crid/coid/mid, loads the payment
   * processor (info array + object), enforces permissions/checksum for
   * self-service access, and assigns template variables.
   *
   * @return void
   */
  public function preProcess() {
    $this->_mid = CRM_Utils_Request::retrieve('mid', 'Integer', $this, FALSE);
    $this->_crid = CRM_Utils_Request::retrieve('crid', 'Integer', $this, FALSE);
    if ($this->_crid) {
      $this->_paymentProcessor = CRM_Financial_BAO_PaymentProcessor::getProcessorForEntity($this->_crid, 'recur', 'info');
      $this->_paymentProcessorObj = CRM_Financial_BAO_PaymentProcessor::getProcessorForEntity($this->_crid, 'recur', 'obj');
      // BUGFIX: keep the 'object' key populated too - the capability check
      // below dereferences $this->_paymentProcessor['object'], which this
      // branch previously never set.
      $this->_paymentProcessor['object'] = $this->_paymentProcessorObj;
      $this->_subscriptionDetails = CRM_Contribute_BAO_ContributionRecur::getSubscriptionDetails($this->_crid);

      // Are we cancelling a recurring contribution that is linked to an auto-renew membership?
      if ($this->_subscriptionDetails->membership_id) {
        $this->_mid = $this->_subscriptionDetails->membership_id;
      }
    }

    $this->_coid = CRM_Utils_Request::retrieve('coid', 'Integer', $this, FALSE);
    if ($this->_coid) {
      $this->_paymentProcessor = CRM_Financial_BAO_PaymentProcessor::getProcessorForEntity($this->_coid, 'contribute', 'info');
      $this->_paymentProcessor['object'] = CRM_Financial_BAO_PaymentProcessor::getProcessorForEntity($this->_coid, 'contribute', 'obj');
      // BUGFIX: postProcess() calls $this->_paymentProcessorObj, which was
      // previously only set in the crid branch (NULL here -> fatal).
      $this->_paymentProcessorObj = $this->_paymentProcessor['object'];
      $this->_subscriptionDetails = CRM_Contribute_BAO_ContributionRecur::getSubscriptionDetails($this->_coid, 'contribution');
    }

    if ($this->_mid) {
      $this->_paymentProcessor = CRM_Financial_BAO_PaymentProcessor::getProcessorForEntity($this->_mid, 'membership', 'info');
      $this->_paymentProcessor['object'] = CRM_Financial_BAO_PaymentProcessor::getProcessorForEntity($this->_mid, 'membership', 'obj');
      // BUGFIX: same as above - keep both references in sync.
      $this->_paymentProcessorObj = $this->_paymentProcessor['object'];
      $this->_subscriptionDetails = CRM_Contribute_BAO_ContributionRecur::getSubscriptionDetails($this->_mid, 'membership');
      $membershipTypes = CRM_Member_PseudoConstant::membershipType();
      $membershipTypeId = CRM_Core_DAO::getFieldValue('CRM_Member_DAO_Membership', $this->_mid, 'membership_type_id');
      $this->assign('membershipType', CRM_Utils_Array::value($membershipTypeId, $membershipTypes));
      $this->_mode = 'auto_renew';
    }

    if ((!$this->_crid && !$this->_coid && !$this->_mid) ||
      ($this->_subscriptionDetails == CRM_Core_DAO::$_nullObject)
    ) {
      CRM_Core_Error::fatal('Required information missing.');
    }

    // Without 'edit contributions' the caller must present a valid contact
    // checksum (self-service link from an email).
    if (!CRM_Core_Permission::check('edit contributions')) {
      $userChecksum = CRM_Utils_Request::retrieve('cs', 'String', $this, FALSE);
      if (!CRM_Contact_BAO_Contact_Utils::validChecksum($this->_subscriptionDetails->contact_id, $userChecksum)) {
        CRM_Core_Error::fatal(ts('You do not have permission to cancel subscription.'));
      }
      $this->_selfService = TRUE;
    }

    if (!$this->_paymentProcessor['object']->isSupported('updateSubscriptionBillingInfo')) {
      CRM_Core_Error::fatal(ts("%1 processor doesn't support updating subscription billing details.",
        array(1 => $this->_paymentProcessor['object']->_processorName)
      ));
    }
    $this->assign('paymentProcessor', $this->_paymentProcessor);

    // get the billing location type
    $locationTypes = CRM_Core_PseudoConstant::get('CRM_Core_DAO_Address', 'location_type_id', array(), 'validate');
    $this->_bltID = array_search('Billing', $locationTypes);
    $this->assign('bltID', $this->_bltID);
    if (!$this->_bltID) {
      CRM_Core_Error::fatal(ts('Please set a location type of %1', array(1 => 'Billing')));
    }

    $this->assign('frequency_unit', $this->_subscriptionDetails->frequency_unit);
    $this->assign('frequency_interval', $this->_subscriptionDetails->frequency_interval);
    $this->assign('amount', $this->_subscriptionDetails->amount);
    $this->assign('installments', $this->_subscriptionDetails->installments);
    $this->assign('mode', $this->_mode);

    // handle context redirection
    CRM_Contribute_BAO_ContributionRecur::setSubscriptionContext();
  }

  /**
   * Set the default values of various form elements from the subscription
   * contact's existing billing profile (falling back to the primary email
   * and the configured default country).
   *
   * @return array
   *   reference to the array of default values
   */
  public function setDefaultValues() {
    $this->_defaults = array();

    if ($this->_subscriptionDetails->contact_id) {
      $fields = array();
      $names = array(
        'first_name',
        'middle_name',
        'last_name',
        "street_address-{$this->_bltID}",
        "city-{$this->_bltID}",
        "postal_code-{$this->_bltID}",
        "country_id-{$this->_bltID}",
        "state_province_id-{$this->_bltID}",
      );
      foreach ($names as $name) {
        $fields[$name] = 1;
      }
      $fields["state_province-{$this->_bltID}"] = 1;
      $fields["country-{$this->_bltID}"] = 1;
      $fields["email-{$this->_bltID}"] = 1;
      $fields['email-Primary'] = 1;
      CRM_Core_BAO_UFGroup::setProfileDefaults($this->_subscriptionDetails->contact_id, $fields, $this->_defaults);

      // use primary email address if billing email address is empty
      if (empty($this->_defaults["email-{$this->_bltID}"]) &&
        !empty($this->_defaults['email-Primary'])
      ) {
        $this->_defaults["email-{$this->_bltID}"] = $this->_defaults['email-Primary'];
      }

      // Duplicate the profile values under the 'billing_' prefixed element
      // names used on this form.
      foreach ($names as $name) {
        if (!empty($this->_defaults[$name])) {
          $this->_defaults['billing_' . $name] = $this->_defaults[$name];
        }
      }
    }

    $config = CRM_Core_Config::singleton();
    // set default country from config if no country set
    if (empty($this->_defaults["billing_country_id-{$this->_bltID}"])) {
      $this->_defaults["billing_country_id-{$this->_bltID}"] = $config->defaultContactCountry;
    }

    return $this->_defaults;
  }

  /**
   * Build the form object: Save/Cancel buttons plus the standard payment
   * (credit card + billing address) block.
   *
   * @return void
   */
  public function buildQuickForm() {
    // For self-service access use 'submit' so no QFC controller state is
    // required between requests.
    $type = 'next';
    if ($this->_selfService) {
      $type = 'submit';
    }

    $this->addButtons(array(
        array(
          'type' => $type,
          'name' => ts('Save'),
          'isDefault' => TRUE,
        ),
        array(
          'type' => 'cancel',
          'name' => ts('Cancel'),
        ),
      )
    );

    CRM_Core_Payment_Form::buildPaymentForm($this, $this->_paymentProcessor, TRUE, TRUE);
    $this->addFormRule(array('CRM_Contribute_Form_UpdateBilling', 'formRule'), $this);
  }

  /**
   * Global form rule.
   *
   * @param array $fields
   *   The input form values.
   * @param array $files
   *   The uploaded files if any.
   * @param $self
   *
   * @return bool|array
   *   true if no errors, else array of errors
   */
  public static function formRule($fields, $files, $self) {
    $errors = array();
    CRM_Core_Form::validateMandatoryFields($self->_fields, $fields, $errors);

    // validate the payment instrument values (e.g. credit card number)
    CRM_Core_Payment_Form::validatePaymentInstrument($self->_paymentProcessor['id'], $fields, $errors, $self);

    return empty($errors) ? TRUE : $errors;
  }

  /**
   * Process the form: push the new billing details to the payment
   * processor, update the contact's billing profile, record an activity
   * with the old/new address, and send a confirmation email.
   *
   * @return void
   */
  public function postProcess() {
    $params = $this->controller->exportValues($this->_name);
    $status = NULL;

    // now set the values for the billing location.
    $fields = array();
    foreach ($this->_fields as $name => $value) {
      $fields[$name] = 1;
    }
    $fields["email-{$this->_bltID}"] = 1;

    // Strip the 'billing_' prefix and the '-<bltID>' suffix to get the
    // plain field names the processor API expects.
    $processorParams = array();
    foreach ($params as $key => $val) {
      $key = str_replace('billing_', '', $key);
      list($key) = explode('-', $key);
      $processorParams[$key] = $val;
    }
    $processorParams['state_province'] = CRM_Core_PseudoConstant::stateProvince($params["billing_state_province_id-{$this->_bltID}"], FALSE);
    $processorParams['country'] = CRM_Core_PseudoConstant::country($params["billing_country_id-{$this->_bltID}"], FALSE);
    $processorParams['month'] = $processorParams['credit_card_exp_date']['M'];
    $processorParams['year'] = $processorParams['credit_card_exp_date']['Y'];
    $processorParams['subscriptionId'] = $this->_subscriptionDetails->subscription_id;
    $processorParams['amount'] = $this->_subscriptionDetails->amount;
    // $message is filled by reference by the processor; initialize it so we
    // never pass (or later append to) an undefined variable.
    $message = NULL;
    $updateSubscription = $this->_paymentProcessorObj->updateSubscriptionBillingInfo($message, $processorParams);
    if (is_a($updateSubscription, 'CRM_Core_Error')) {
      CRM_Core_Error::displaySessionError($updateSubscription);
    }
    elseif ($updateSubscription) {
      $ctype = CRM_Core_DAO::getFieldValue('CRM_Contact_DAO_Contact', $this->_subscriptionDetails->contact_id, 'contact_type');
      $contact = &CRM_Contact_BAO_Contact::createProfileContact($params,
        $fields,
        $this->_subscriptionDetails->contact_id,
        NULL, NULL, $ctype
      );

      // build tpl params
      if ($this->_subscriptionDetails->membership_id) {
        $inputParams = array('id' => $this->_subscriptionDetails->membership_id);
        CRM_Member_BAO_Membership::getValues($inputParams, $tplParams);
        $tplParams = $tplParams[$this->_subscriptionDetails->membership_id];
        $tplParams['membership_status'] = CRM_Core_DAO::getFieldValue('CRM_Member_DAO_MembershipStatus', $tplParams['status_id']);
        $tplParams['membershipType'] = CRM_Core_DAO::getFieldValue('CRM_Member_DAO_MembershipType', $tplParams['membership_type_id']);
        $status = ts('Billing details for your automatically renewed %1 membership have been updated.',
          array(1 => $tplParams['membershipType'])
        );
        $msgTitle = ts('Details Updated');
        $msgType = 'success';
      }
      else {
        $status = ts('Billing details for the recurring contribution of %1, every %2 %3 have been updated.',
          array(
            1 => $this->_subscriptionDetails->amount,
            2 => $this->_subscriptionDetails->frequency_interval,
            3 => $this->_subscriptionDetails->frequency_unit,
          )
        );
        $msgTitle = ts('Details Updated');
        $msgType = 'success';

        $tplParams = array(
          'recur_frequency_interval' => $this->_subscriptionDetails->frequency_interval,
          'recur_frequency_unit' => $this->_subscriptionDetails->frequency_unit,
          'amount' => $this->_subscriptionDetails->amount,
        );
      }

      // format new address for display
      // NOTE(review): foreach iterates the original 5 numeric entries while
      // string keys are added alongside them; CRM_Utils_Address::format()
      // reads the string keys, so the leftover numeric entries are ignored.
      $addressParts = array("street_address", "city", "postal_code", "state_province", "country");
      foreach ($addressParts as $part) {
        $addressParts[$part] = CRM_Utils_Array::value($part, $processorParams);
      }
      $tplParams['address'] = CRM_Utils_Address::format($addressParts);

      // format old address to store in activity details
      $this->_defaults["state_province-{$this->_bltID}"] = CRM_Core_PseudoConstant::stateProvince($this->_defaults["state_province-{$this->_bltID}"], FALSE);
      $this->_defaults["country-{$this->_bltID}"] = CRM_Core_PseudoConstant::country($this->_defaults["country-{$this->_bltID}"], FALSE);
      $addressParts = array("street_address", "city", "postal_code", "state_province", "country");
      foreach ($addressParts as $part) {
        $key = "{$part}-{$this->_bltID}";
        $addressParts[$part] = CRM_Utils_Array::value($key, $this->_defaults);
      }
      $this->_defaults['address'] = CRM_Utils_Address::format($addressParts);

      // format new billing name
      $name = $processorParams['first_name'];
      if (!empty($processorParams['middle_name'])) {
        $name .= " {$processorParams['middle_name']}";
      }
      $name .= ' ' . $processorParams['last_name'];
      $name = trim($name);
      $tplParams['billingName'] = $name;

      // format old billing name
      $name = $this->_defaults['first_name'];
      if (!empty($this->_defaults['middle_name'])) {
        $name .= " {$this->_defaults['middle_name']}";
      }
      $name .= ' ' . $this->_defaults['last_name'];
      $name = trim($name);
      $this->_defaults['billingName'] = $name;

      $message .= "
<br/><br/>New Billing Name and Address
<br/>==============================
<br/>{$tplParams['billingName']}
<br/>{$tplParams['address']}

<br/><br/>Previous Billing Name and Address
<br/>==================================
<br/>{$this->_defaults['billingName']}
<br/>{$this->_defaults['address']}";

      $activityParams = array(
        'source_contact_id' => $this->_subscriptionDetails->contact_id,
        'activity_type_id' => CRM_Core_OptionGroup::getValue('activity_type',
          'Update Recurring Contribution Billing Details',
          'name'
        ),
        'subject' => ts('Recurring Contribution Billing Details Updated'),
        'details' => $message,
        'activity_date_time' => date('YmdHis'),
        'status_id' => CRM_Core_OptionGroup::getValue('activity_status',
          'Completed',
          'name'
        ),
      );
      $session = CRM_Core_Session::singleton();
      $cid = $session->get('userID');
      if ($cid) {
        $activityParams['target_contact_id'][] = $activityParams['source_contact_id'];
        $activityParams['source_contact_id'] = $cid;
      }
      CRM_Activity_BAO_Activity::create($activityParams);

      // send notification
      if ($this->_subscriptionDetails->contribution_page_id) {
        CRM_Core_DAO::commonRetrieveAll('CRM_Contribute_DAO_ContributionPage',
          'id',
          $this->_subscriptionDetails->contribution_page_id,
          $value,
          array(
            'title',
            'receipt_from_name',
            'receipt_from_email',
          )
        );
        $receiptFrom = '"' . CRM_Utils_Array::value('receipt_from_name', $value[$this->_subscriptionDetails->contribution_page_id]) . '" <' . $value[$this->_subscriptionDetails->contribution_page_id]['receipt_from_email'] . '>';
      }
      else {
        $domainValues = CRM_Core_BAO_Domain::getNameAndEmail();
        $receiptFrom = "$domainValues[0] <$domainValues[1]>";
      }

      list($donorDisplayName, $donorEmail) = CRM_Contact_BAO_Contact::getContactDetails($this->_subscriptionDetails->contact_id);

      $tplParams['contact'] = array('display_name' => $donorDisplayName);

      $date = CRM_Utils_Date::format($processorParams['credit_card_exp_date']);
      $tplParams['credit_card_exp_date'] = CRM_Utils_Date::mysqlToIso($date);
      $tplParams['credit_card_number'] = CRM_Utils_System::mungeCreditCard($processorParams['credit_card_number']);
      $tplParams['credit_card_type'] = $processorParams['credit_card_type'];

      $sendTemplateParams = array(
        'groupName' => $this->_subscriptionDetails->membership_id ? 'msg_tpl_workflow_membership' : 'msg_tpl_workflow_contribution',
        'valueName' => $this->_subscriptionDetails->membership_id ? 'membership_autorenew_billing' : 'contribution_recurring_billing',
        'contactId' => $this->_subscriptionDetails->contact_id,
        'tplParams' => $tplParams,
        'isTest' => $this->_subscriptionDetails->is_test,
        'PDFFilename' => 'receipt.pdf',
        'from' => $receiptFrom,
        'toName' => $donorDisplayName,
        'toEmail' => $donorEmail,
      );
      list($sent) = CRM_Core_BAO_MessageTemplate::sendTemplate($sendTemplateParams);
    }
    else {
      $status = ts('There was some problem updating the billing details.');
      $msgTitle = ts('Update Error');
      $msgType = 'error';
    }

    $session = CRM_Core_Session::singleton();
    $userID = $session->get('userID');
    if ($userID && $status) {
      $session->setStatus($status, $msgTitle, $msgType);
    }
    elseif (!$userID) {
      if ($status) {
        CRM_Utils_System::setUFMessage($status);
      }
      $result = (int) ($updateSubscription && isset($ctype));
      if (isset($tplParams)) {
        $session->set('resultParams', $tplParams);
      }
      return CRM_Utils_System::redirect(CRM_Utils_System::url('civicrm/contribute/subscriptionstatus',
        "reset=1&task=billing&result={$result}"
      ));
    }
  }

}
creativecommons/donate.creativecommons.org
sites/all/modules/civicrm/CRM/Contribute/Form/UpdateBilling.php
PHP
gpl-2.0
18,155
<?php

namespace common\assets;

use Yii;
use yii\web\AssetBundle;

/**
 * Asset bundle for the DHTMLX scheduler widget.
 *
 * Registers the flat skin stylesheet, the core scheduler sources plus the
 * recurring/readonly/multiselect/editors/tooltip extensions, and appends a
 * locale file matching the application language at init time.
 */
class DHTMLXSchedulerAsset extends AssetBundle
{
    public $sourcePath = '@vendor/DHTMLX/scheduler/codebase';
    public $css = [
        'dhtmlxscheduler_flat.css',
    ];
    public $js = [
        'sources/dhtmlxscheduler.js',
        'sources/ext/dhtmlxscheduler_recurring.js',
        'sources/ext/dhtmlxscheduler_readonly.js',
        'sources/ext/dhtmlxscheduler_multiselect.js',
        'sources/ext/dhtmlxscheduler_editors.js',
        'sources/ext/dhtmlxscheduler_tooltip.js'
    ];
    public $jsOptions = [
        'position' => \yii\web\View::POS_HEAD,
    ];
    public $depends = [
        'yii\web\YiiAsset',
    ];

    /**
     * Appends the scheduler locale script for the current application
     * language. Latvian uses a project-local translation; every other
     * language falls back to the bundled DHTMLX locale files.
     */
    public function init()
    {
        parent::init();

        $language = Yii::$app->language;
        $localeScript = ($language === 'lv')
            ? '@common/assets/scheduler/locale_lv.js'
            : 'locale/locale_' . $language . '.js';
        $this->js[] = $localeScript;
    }
}
schtanislau/tan
common/assets/DHTMLXSchedulerAsset.php
PHP
gpl-2.0
996
<?php /** * Helper functions for the admin - plugin links and help tabs. * * @package bbPress Search Widget * @subpackage Admin * @author David Decker - DECKERWEB * @copyright Copyright (c) 2011-2013, David Decker - DECKERWEB * @license http://www.opensource.org/licenses/gpl-license.php GPL-2.0+ * @link http://genesisthemes.de/en/wp-plugins/bbpress-search-widget/ * @link http://deckerweb.de/twitter * * @since 1.0.0 */ /** * Prevent direct access to this file. * * @since 1.1.0 */ if ( ! defined( 'ABSPATH' ) ) { exit( 'Sorry, you are not allowed to access this file directly.' ); } /** * Setting helper links constant * * @since 1.2.0 * * @uses get_locale() */ define( 'BBPSW_URL_TRANSLATE', 'http://translate.wpautobahn.com/projects/wordpress-plugins-deckerweb/bbpress-search-widget' ); define( 'BBPSW_URL_WPORG_FAQ', 'http://wordpress.org/extend/plugins/bbpress-search-widget/faq/' ); define( 'BBPSW_URL_WPORG_FORUM', 'http://wordpress.org/support/plugin/bbpress-search-widget' ); define( 'BBPSW_URL_WPORG_PROFILE', 'http://profiles.wordpress.org/daveshine/' ); define( 'BBPSW_URL_SNIPPETS', 'https://gist.github.com/???' ); define( 'BBPSW_PLUGIN_LICENSE', 'GPL-2.0+' ); if ( get_locale() == 'de_DE' || get_locale() == 'de_AT' || get_locale() == 'de_CH' || get_locale() == 'de_LU' ) { define( 'BBPSW_URL_DONATE', 'http://genesisthemes.de/spenden/' ); define( 'BBPSW_URL_PLUGIN', 'http://genesisthemes.de/plugins/bbpress-search-widget/' ); } else { define( 'BBPSW_URL_DONATE', 'http://genesisthemes.de/en/donate/' ); define( 'BBPSW_URL_PLUGIN', 'http://genesisthemes.de/en/wp-plugins/bbpress-search-widget/' ); } /** * Add "Widgets Page" link to plugin page. 
* * @since 1.0.0 * * @param $bbpsw_links * @param $bbpsw_widgets_link * * @return strings widgets link */ function ddw_bbpsw_widgets_page_link( $bbpsw_links ) { /** Widgets Admin link */ $bbpsw_widgets_link = sprintf( '<a href="%s" title="%s">%s</a>', admin_url( 'widgets.php' ), __( 'Go to the Widgets settings page', 'bbpress-search-widget' ), __( 'Widgets', 'bbpress-search-widget' ) ); /** Set the order of the links */ array_unshift( $bbpsw_links, $bbpsw_widgets_link ); /** Display plugin settings links */ return apply_filters( 'bbpsw_filter_settings_page_link', $bbpsw_links ); } // end of function ddw_bbpsw_widgets_page_link add_filter( 'plugin_row_meta', 'ddw_bbpsw_plugin_links', 10, 2 ); /** * Add various support links to plugin page. * * @since 1.0.0 * * @param $bbpsw_links * @param $bbpsw_file * * @return strings plugin links */ function ddw_bbpsw_plugin_links( $bbpsw_links, $bbpsw_file ) { /** Capability check */ if ( ! current_user_can( 'install_plugins' ) ) { return $bbpsw_links; } // end-if cap check /** List additional links only for this plugin */ if ( $bbpsw_file == BBPSW_PLUGIN_BASEDIR . '/bbpress-search-widget.php' ) { $bbpsw_links[] = '<a href="' . esc_url( BBPSW_URL_WPORG_FAQ ) . '" target="_new" title="' . __( 'FAQ', 'bbpress-search-widget' ) . '">' . __( 'FAQ', 'bbpress-search-widget' ) . '</a>'; $bbpsw_links[] = '<a href="' . esc_url( BBPSW_URL_WPORG_FORUM ) . '" target="_new" title="' . __( 'Support', 'bbpress-search-widget' ) . '">' . __( 'Support', 'bbpress-search-widget' ) . '</a>'; $bbpsw_links[] = '<a href="' . esc_url( BBPSW_URL_TRANSLATE ) . '" target="_new" title="' . __( 'Translations', 'bbpress-search-widget' ) . '">' . __( 'Translations', 'bbpress-search-widget' ) . '</a>'; $bbpsw_links[] = '<a href="' . esc_url( BBPSW_URL_DONATE ) . '" target="_new" title="' . __( 'Donate', 'bbpress-search-widget' ) . '"><strong>' . __( 'Donate', 'bbpress-search-widget' ) . 
'</strong></a>'; } // end-if plugin links /** Output the links */ return apply_filters( 'bbpsw_filter_plugin_links', $bbpsw_links ); } // end of function ddw_bbpsw_plugin_links add_action( 'sidebar_admin_setup', 'ddw_bbpsw_widgets_help' ); /** * Load plugin help tab after core help tabs on Widget admin page. * * @since 1.2.0 * * @global mixed $pagenow */ function ddw_bbpsw_widgets_help() { global $pagenow; add_action( 'admin_head-' . $pagenow, 'ddw_bbpsw_widgets_help_tab' ); } // end of function ddw_bbpsw_widgets_help add_action( 'load-settings_page_bbpress', 'ddw_bbpsw_widgets_help_tab', 20 ); /** * Create and display plugin help tab content. * * @since 1.2.0 * * @uses get_current_screen() * @uses WP_Screen::add_help_tab() * @uses WP_Screen::set_help_sidebar() * @uses ddw_bbpsw_help_sidebar_content() * * @global mixed $bbpsw_widgets_screen, $pagenow */ function ddw_bbpsw_widgets_help_tab() { global $bbpsw_widgets_screen, $pagenow; $bbpsw_widgets_screen = get_current_screen(); /** Display help tabs only for WordPress 3.3 or higher */ if( ! class_exists( 'WP_Screen' ) || ! $bbpsw_widgets_screen || ! class_exists( 'bbPress' ) ) { return; } /** Add the new help tab */ $bbpsw_widgets_screen->add_help_tab( array( 'id' => 'bbpsw-widgets-help', 'title' => __( 'bbPress Search Widget', 'bbpress-search-widget' ), 'callback' => apply_filters( 'bbpsw_filter_help_tab_content', 'ddw_bbpsw_help_tab_content' ), ) ); /** Add help sidebar */ if ( $pagenow != 'widgets.php' ) { $bbpsw_widgets_screen->set_help_sidebar( ddw_bbpsw_help_sidebar_content() ); } // end-if $pagehook check } // end of function ddw_bbpsw_widgets_help_tab /** * Create and display plugin help tab content. * * @since 1.0.0 * * @uses ddw_bbpsw_plugin_get_data() * * @param bool $bbpsw_noresults_widgetized */ function ddw_bbpsw_help_tab_content() { /** Helper variable */ $bbpsw_noresults_widgetized = (bool) apply_filters( 'bbpsw_filter_noresults_widgetized', '__return_true' ); /** Headline */ echo '<h3>' . 
__( 'Plugin', 'bbpress-search-widget' ) . ': ' . __( 'bbPress Search Widget', 'bbpress-search-widget' ) . ' <small>v' . esc_attr( ddw_bbpsw_plugin_get_data( 'Version' ) ) . '</small></h3>'; /** Search widget info */ echo '<p><strong>' . sprintf( __( 'Added Widget by the plugin: %s', 'bbpress-search-widget' ), '<em>' . __( 'bbPress: Forum Search Extended', 'bbpress-search-widget' ) . '</em>' ) . '</strong></p>' . '<ul>' . '<li>' . __( 'All search form relevent strings, for example the search button, can easily be changed.', 'bbpress-search-widget' ) . ' ' . sprintf( __( 'Also, the widget comes lots of visibility options so you can setup it really fast. This should work for most use cases. However, if you still need more, use plugins like %1$s or %2$s (then leave all options unchecked and set visibility to %3$s).', 'bbpress-search-widget' ), '<em>Widget Logic</em>', '<em>Widget Display</em>', '<code>' . __( 'Global (default)', 'bbpress-search-widget' ) . '</code>' ) . '</li>' . '<li>' . sprintf( __( 'It searches only in the bbPress specific post types %1$s, %2$s and %3$s and outputs the results formatted like the other regular views of bbPress.', 'bbpress-search-widget' ), '<em>' . __( 'Forum', 'bbpress-search-widget' ) . '</em>', '<em>' . __( 'Topic', 'bbpress-search-widget' ) . '</em>', '<em>' . __( 'Reply', 'bbpress-search-widget' ) . '</em>' ) . '</li>' . '<li>' . __( 'Please note: This plugin does not mix up its displayed search results with WordPress built-in search. It is limited to the bbPress forum post types. For enhanced styling of the widget and/or the search results please have a look on the FAQ page linked below.', 'bbpress-search-widget' ) . '</li>' . '</ul>'; /** Shortcode info, plus parameters */ echo '<p><strong>' . sprintf( __( 'Provided Shortcode by the plugin: %s', 'bbpress-search-widget' ), '<code>[bbpress-searchbox]</code>' ) . '</strong></p>' . '<ul>' . '<li><em>' . __( 'Supporting the following parameters', 'bbpress-search-widget' ) . 
':</em></li>' . '<li><code>label_text</code> &mdash; ' . __( 'Label text before the input field', 'bbpress-search-widget' ) . '</li>' . '<li><code>placeholder_text</code> &mdash; ' . __( 'Input field placeholder text', 'bbpress-search-widget' ) . '</li>' . '<li><code>button_text</code> &mdash; ' . __( 'Submit button text', 'bbpress-search-widget' ) . '</li>' . '<li><code>class</code> &mdash; ' . sprintf( __( 'Can be a custom class, added to the wrapper %s container', 'bbpress-search-widget' ), '<code>div</code>' ) . '</li>' . '</ul>'; /** Only show help info if widgetized area is enabled */ if ( $bbpsw_noresults_widgetized && ! function_exists( 'ddw_gwnf_bbpress_search_actions' ) ) { echo '<p><strong>' . sprintf( __( 'Widgetized content area for %s forum search results:', 'bbpress-search-widget' ), '<em>*' . __( 'Not found', 'bbpress-search-widget' ) . '*</em>' ) . '</strong></p>' . '<ul>' . '<li>' . __( 'Registered widget area, only if bbPress is active:', 'bbpress-search-widget' ) . ' <em>' . __( 'bbPress: Forum Search No Results', 'bbpress-search-widget' ) . '</em></li>' . '<li>' . sprintf( __( 'Actually used on the frontend, only if there are %s widgets in this area!', 'bbpress-search-widget' ), '<em>' . __( 'active', 'bbpress-search-widget' ) . '</em>' ) . '</li>' . '</ul>'; } // end-if filter check /** Help footer: plugin info */ echo '<p><strong>' . __( 'Important plugin links:', 'bbpress-search-widget' ) . '</strong>' . '<blockquote><a href="' . esc_url( BBPSW_URL_PLUGIN ) . '" target="_new" title="' . __( 'Plugin Homepage', 'bbpress-search-widget' ) . '">' . __( 'Plugin Homepage', 'bbpress-search-widget' ) . '</a> | <a href="' . esc_url( BBPSW_URL_WPORG_FAQ ) . '" target="_new" title="' . __( 'FAQ', 'bbpress-search-widget' ) . '">' . __( 'FAQ', 'bbpress-search-widget' ) . '</a> | <a href="' . esc_url( BBPSW_URL_WPORG_FORUM ) . '" target="_new" title="' . __( 'Support', 'bbpress-search-widget' ) . '">' . __( 'Support', 'bbpress-search-widget' ) . 
'</a> | <a href="' . esc_url( BBPSW_URL_TRANSLATE ) . '" target="_new" title="' . __( 'Translations', 'bbpress-search-widget' ) . '">' . __( 'Translations', 'bbpress-search-widget' ) . '</a> | <a href="' . esc_url( BBPSW_URL_DONATE ) . '" target="_new" title="' . __( 'Donate', 'bbpress-search-widget' ) . '"><strong>' . __( 'Donate', 'bbpress-search-widget' ) . '</strong></a></blockquote>'; echo '<blockquote><a href="http://www.opensource.org/licenses/gpl-license.php" target="_new" title="' . esc_attr( BBPSW_PLUGIN_LICENSE ). '">' . esc_attr( BBPSW_PLUGIN_LICENSE ). '</a> &copy; 2011-' . date( 'Y' ) . ' <a href="' . esc_url( ddw_bbpsw_plugin_get_data( 'AuthorURI' ) ) . '" target="_new" title="' . esc_attr__( ddw_bbpsw_plugin_get_data( 'Author' ) ) . '">' . esc_attr__( ddw_bbpsw_plugin_get_data( 'Author' ) ) . '</a></blockquote></p>'; } // end of function ddw_bbpsw_help_tab_content /** * Helper function for returning the Help Sidebar content. * * @since 2.0.0 * * @uses ddw_bbpsw_plugin_get_data() * * @param $bbpsw_help_sidebar * * @return string HTML content for help sidebar. */ function ddw_bbpsw_help_sidebar_content() { $bbpsw_help_sidebar = '<p><strong>' . __( 'More about the plugin author', 'bbpress-search-widget' ) . '</strong></p>' . '<p>' . __( 'Social:', 'bbpress-search-widget' ) . '<br /><a href="http://twitter.com/#!/deckerweb" target="_blank">Twitter</a> | <a href="http://www.facebook.com/deckerweb.service" target="_blank">Facebook</a> | <a href="http://deckerweb.de/gplus" target="_blank">Google+</a> | <a href="' . esc_url( ddw_bbpsw_plugin_get_data( 'AuthorURI' ) ) . '" target="_blank" title="@ deckerweb.de">deckerweb</a></p>' . '<p><a href="' . esc_url( BBPSW_URL_WPORG_PROFILE ) . '" target="_blank" title="@ WordPress.org">@ WordPress.org</a></p>'; return apply_filters( 'bbpsw_filter_help_sidebar_content', $bbpsw_help_sidebar ); } // end of function ddw_bbpsw_help_sidebar_content
giventadevelop/gabon-project
wp-content/wp-content/plugins/bbpress-search-widget/includes/bbpsw-admin.php
PHP
gpl-2.0
11,867
require_dependency 'search/search_result' require_dependency 'search/search_result_type' require_dependency 'search/grouped_search_results' class Search def self.per_facet 5 end # Sometimes we want more topics than are returned due to exclusion of dupes. This is the # factor of extra results we'll ask for. def self.burst_factor 3 end def self.facets %w(topic category user) end def self.long_locale # if adding a language see: # /usr/share/postgresql/9.3/tsearch_data for possible options # Do not add languages that are missing without amending the # base docker config # case SiteSetting.default_locale.to_sym when :da then 'danish' when :de then 'german' when :en then 'english' when :es then 'spanish' when :fr then 'french' when :it then 'italian' when :nl then 'dutch' when :nb_NO then 'norwegian' when :pt then 'portuguese' when :pt_BR then 'portuguese' when :sv then 'swedish' when :ru then 'russian' else 'simple' # use the 'simple' stemmer for other languages end end def self.rebuild_problem_posts(limit = 10000) posts = Post.joins(:topic) .where('posts.id IN ( SELECT p2.id FROM posts p2 LEFT JOIN post_search_data pd ON locale = ? AND p2.id = pd.post_id WHERE pd.post_id IS NULL )', SiteSetting.default_locale).limit(10000) posts.each do |post| # force indexing post.cooked += " " SearchObserver.index(post) end nil end def self.prepare_data(search_data) data = search_data.squish # TODO rmmseg is designed for chinese, we need something else for Korean / Japanese if ['zh_TW', 'zh_CN', 'ja', 'ko'].include?(SiteSetting.default_locale) unless defined? RMMSeg require 'rmmseg' RMMSeg::Dictionary.load_dictionaries end algo = RMMSeg::Algorithm.new(search_data) data = "" while token = algo.next_token data << token.text << " " end end data.force_encoding("UTF-8") data end def initialize(term, opts=nil) if term.present? 
@term = Search.prepare_data(term.to_s) @original_term = PG::Connection.escape_string(@term) end @opts = opts || {} @guardian = @opts[:guardian] || Guardian.new @search_context = @opts[:search_context] @include_blurbs = @opts[:include_blurbs] || false @limit = Search.per_facet * Search.facets.size @results = GroupedSearchResults.new(@opts[:type_filter]) end # Query a term def execute return nil if @term.blank? || @term.length < (@opts[:min_search_term_length] || SiteSetting.min_search_term_length) # If the term is a number or url to a topic, just include that topic if @results.type_filter == 'topic' begin route = Rails.application.routes.recognize_path(@term) return single_topic(route[:topic_id]).as_json if route[:topic_id].present? rescue ActionController::RoutingError end return single_topic(@term.to_i).as_json if @term =~ /^\d+$/ end find_grouped_results.as_json end private def find_grouped_results if @results.type_filter.present? raise Discourse::InvalidAccess.new("invalid type filter") unless Search.facets.include?(@results.type_filter) send("#{@results.type_filter}_search") else @limit = Search.per_facet + 1 unless @search_context user_search category_search end topic_search end add_more_topics_if_expected @results rescue ActiveRecord::StatementInvalid # In the event of a PG:Error return nothing, it is likely they used a foreign language whose # locale is not supported by postgres end # Add more topics if we expected them def add_more_topics_if_expected expected_topics = 0 expected_topics = Search.facets.size unless @results.type_filter.present? expected_topics = Search.per_facet * Search.facets.size if @results.type_filter == 'topic' expected_topics -= @results.topic_count if expected_topics > 0 extra_posts = posts_query(expected_topics * Search.burst_factor) extra_posts = extra_posts.where("posts.topic_id NOT in (?)", @results.topic_ids) if @results.topic_ids.present? 
extra_posts.each do |p| @results.add_result(SearchResult.from_post(p, @search_context, @term, @include_blurbs)) end end end # If we're searching for a single topic def single_topic(id) topic = Topic.find_by(id: id) return nil unless @guardian.can_see?(topic) @results.add_result(SearchResult.from_topic(topic)) @results end def secure_category_ids return @secure_category_ids unless @secure_category_ids.nil? @secure_category_ids = @guardian.secure_category_ids end def category_search # scope is leaking onto Category, this is not good and probably a bug in Rails # the secure_category_ids will invoke the same method on User, it calls Category.where # however the scope from the query below is leaking in to Category, this works around # the issue while we figure out what is up in Rails secure_category_ids categories = Category.includes(:category_search_data) .where("category_search_data.search_data @@ #{ts_query}") .references(:category_search_data) .order("topics_month DESC") .secured(@guardian) .limit(@limit) categories.each do |c| @results.add_result(SearchResult.from_category(c)) end end def user_search users = User.includes(:user_search_data) .where("user_search_data.search_data @@ #{ts_query("simple")}") .order("CASE WHEN username_lower = '#{@original_term.downcase}' THEN 0 ELSE 1 END") .order("last_posted_at DESC") .limit(@limit) .references(:user_search_data) users.each do |u| @results.add_result(SearchResult.from_user(u)) end end def posts_query(limit) posts = Post.includes(:post_search_data, {:topic => :category}) .where("topics.deleted_at" => nil) .where("topics.visible") .where("topics.archetype <> ?", Archetype.private_message) .references(:post_search_data, {:topic => :category}) if @search_context.present? && @search_context.is_a?(Topic) posts = posts.where("posts.raw ilike ?", "%#{@term}%") else posts = posts.where("post_search_data.search_data @@ #{ts_query}") end # If we have a search context, prioritize those posts first if @search_context.present? 
if @search_context.is_a?(User) posts = posts.where("posts.user_id = #{@search_context.id}") elsif @search_context.is_a?(Category) posts = posts.where("topics.category_id = #{@search_context.id}") elsif @search_context.is_a?(Topic) posts = posts.where("topics.id = #{@search_context.id}") .order("posts.post_number") end end posts = posts.order("TS_RANK_CD(TO_TSVECTOR(#{query_locale}, topics.title), #{ts_query}) DESC") .order("TS_RANK_CD(post_search_data.search_data, #{ts_query}) DESC") .order("topics.bumped_at DESC") if secure_category_ids.present? posts = posts.where("(categories.id IS NULL) OR (NOT categories.read_restricted) OR (categories.id IN (?))", secure_category_ids).references(:categories) else posts = posts.where("(categories.id IS NULL) OR (NOT categories.read_restricted)").references(:categories) end posts.limit(limit) end def self.query_locale @query_locale ||= Post.sanitize(Search.long_locale) end def query_locale self.class.query_locale end def self.ts_query(term, locale = nil) locale = Post.sanitize(locale) if locale all_terms = term.gsub(/[:()&!'"]/,'').split query = Post.sanitize(all_terms.map {|t| "#{PG::Connection.escape_string(t)}:*"}.join(" & ")) "TO_TSQUERY(#{locale || query_locale}, #{query})" end def ts_query(locale=nil) if !locale @ts_query ||= begin Search.ts_query(@term, locale) end else Search.ts_query(@term, locale) end end def topic_search posts = if @search_context.is_a?(User) # If we have a user filter, search all posts by default with a higher limit posts_query(@limit * Search.burst_factor) elsif @search_context.is_a?(Topic) posts_query(@limit).where('posts.post_number = 1 OR posts.topic_id = ?', @search_context.id) else posts_query(@limit).where(post_number: 1) end posts.each do |p| @results.add_result(SearchResult.from_post(p, @search_context, @term, @include_blurbs)) end end end
pkgr/discourse
lib/search.rb
Ruby
gpl-2.0
9,282
include $(TOPDIR)/rules.mk PKG_NAME:=pdnsd PKG_VERSION:=1.2.9 PKG_RELEASE=$(PKG_SOURCE_VERSION) PKG_SOURCE_PROTO:=git PKG_SOURCE_URL:=https://github.com/mengskysama/pdnsd.git PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION) PKG_SOURCE_VERSION:=e02a81d9e63927e93dc49d218535c880623bcd77 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz # PKG_MIRROR_MD5SUM:= # CMAKE_INSTALL:=1 include $(INCLUDE_DIR)/package.mk define Package/pdnsd SECTION:=net CATEGORY:=Network SUBMENU:=Web Servers/Proxies DEPENDS:=+libpthread TITLE:=Proxy DNS Server endef define Package/pdnsd/description pdnsd, is an IPv6 capable proxy DNS server with permanent caching (the cache contents are written to hard disk on exit) that is designed to cope with unreachable or down DNS servers (for example in dial-in networking). pdnsd can be used with applications that do dns lookups, eg on startup, and can't be configured to change that behaviour, to prevent the often minute-long hangs (or even crashes) that result from stalled dns queries. endef TARGET_CFLAGS += -I$(STAGING_DIR)/usr/include #TARGET_CFLAGS += -ggdb3 CMAKE_OPTIONS += -DDEBUG=1 CONFIGURE_ARGS += \ --with-cachedir=/var/pdnsd define Package/pdnsd/install $(INSTALL_DIR) $(1)/usr/sbin $(INSTALL_BIN) $(PKG_BUILD_DIR)/src/pdnsd $(1)/usr/sbin/ $(INSTALL_DIR) $(1)/usr/bin $(INSTALL_BIN) $(PKG_BUILD_DIR)/src/pdnsd-ctl/pdnsd-ctl $(1)/usr/bin/ $(INSTALL_DIR) $(1)/etc/init.d #$(INSTALL_BIN) ./files/pdnsd.init $(1)/etc/init.d/pdnsd $(INSTALL_DIR) $(1)/etc $(INSTALL_CONF) $(PKG_BUILD_DIR)/doc/pdnsd.conf $(1)/etc/ endef $(eval $(call BuildPackage,pdnsd))
openwrt-stuff/openwrt-extra
net/pdnsd/Makefile
Makefile
gpl-2.0
1,623
/* Copyright (C) 2003-2015 JabRef contributors. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ package net.sf.jabref.logic.autocompleter; import net.sf.jabref.model.entry.BibtexEntry; import java.util.StringTokenizer; /** * Stores all words which are separated by Globals.SEPARATING_CHARS. This * autocompleter only processes the field which is given by the fieldname. * * @author kahlert, cordes */ class DefaultAutoCompleter extends AbstractAutoCompleter { private final String fieldName; private final String SEPARATING_CHARS = ";,\n "; /** * @see AutoCompleterFactory */ DefaultAutoCompleter(String fieldName) { this.fieldName = fieldName; } @Override public boolean isSingleUnitField() { return false; } @Override public void addBibtexEntry(BibtexEntry entry) { if (entry == null) { return; } String fieldValue = entry.getField(fieldName); if (fieldValue != null) { StringTokenizer tok = new StringTokenizer(fieldValue, SEPARATING_CHARS); while (tok.hasMoreTokens()) { String word = tok.nextToken(); addWordToIndex(word); } } } }
robymus/jabref
src/main/java/net/sf/jabref/logic/autocompleter/DefaultAutoCompleter.java
Java
gpl-2.0
1,924
<?php class optionsModelBup extends modelBup { protected $_allOptions = array(); public function saveGroup($d = array()) { $clearArr = array('opt_values' => array('full' => 0, 'plugins' => 0, 'themes' =>0 , 'uploads' => 0, 'database' => 0, 'any_directories' => 0, 'wp_core' => 0)); if(isset($d['opt_values']) && is_array($d['opt_values']) && !empty($d['opt_values'])) { foreach($clearArr['opt_values'] as $code=>$value) { // set 0 to all array $clearArr['code'] = $code; $this->save($clearArr); } if (isset($d['opt_values']['email_ch']) && !empty($d['opt_values']['email_ch'])) { $this->setValueType('email', $d['opt_values']['email_ch']); } else { $this->setValueType('email', ''); } foreach($d['opt_values'] as $code => $value) { $d['code'] = $code; // if ($code == 'email_ch') continue; if ($code == 'email') { if (!$this->validateEmail($value)) { $this->pushError(__('Incorrect email address', BUP_LANG_CODE)); continue; } } $this->save($d); if ($code == 'full') { //frameBup::_()->getTable('options')->update(array('value' => 1), array('code' => 'database')); //outdated } } return !$this->haveErrors(); } else $this->pushError(__('No change', BUP_LANG_CODE)); } public function set($value, $code){ frameBup::_()->getTable('options')->update(array('value' => $value), array('code' => $code)); } public function getEvery(){ return frameBup::_()->getTable('options')->get('*', "value_type = 'every'", BUP_WPDB_PREF.BUP_DB_PREF.'options', 'all'); } public function saveMainFromDestGroup($d = array()) { if (isset($d['dest_opt']) && !empty($d['dest_opt'])){ if(isset($d['opt_values']['warehouse']) && isset($d['opt_values']['warehouse_abs'])) $this->saveBackupPath(array('warehouse' => $d['opt_values']['warehouse'], 'warehouse_abs' => (int)$d['opt_values']['warehouse_abs'])); if (utilsBup::checkPRO() || $d['dest_opt'] == 0){ $this->set($d['dest_opt'], 'glb_dest'); } else { $this->pushError(__('PRO version is not activated', BUP_LANG_CODE)); } return !$this->haveErrors(); } else 
$this->pushError(__('No selected options', BUP_LANG_CODE)); } // ---- old ----- public function get($d = array()) { $this->_loadOptions(); $code = false; if(is_string($d)) $code = $d; elseif(is_array($d) && isset($d['code'])) $code = $d['code']; if($code) { $opt = $this->_getByCode($code); if(isset($d['what']) && isset($opt[$d['what']])) return $opt[$d['what']]; else return $opt['value']; } else { return $this->_allOptions; } } public function getValueType($d) { $ret = frameBup::_()->getTable('options')->get('value_type', "code = '".$d."'", BUP_WPDB_PREF.BUP_DB_PREF.'options', 'all'); return $ret[0]['value_type']; } public function setValueType($code, $value) { frameBup::_()->getTable('options')->update(array('value_type' => $value), array('code' => $code)); } public function validateEmail($email){ if ($this->getValueType('email')){ if (preg_match('~^([a-z0-9_-]+\.)*[a-z0-9_-]+@[a-z0-9_-]+(\.[a-z0-9_-]+)*\.[a-z]{2,6}$~', $email, $regs)) { return true; } else { return false; } } else return true; // if email_ch disabled } public function isEmpty($d = array()) { $value = $this->get($d); return empty($value); } public function getByCategories($category = '') { $this->_loadOptions(); $categories = array(); $returnForCat = !empty($category); // If this is not empty - will be returned anly for one category foreach($this->_allOptions as $opt) { if(empty($category) || (is_numeric($category) && $category == $opt['cat_id']) || ($category == $opt['cat_label']) ) { if(empty($categories[ (int)$opt['cat_id'] ])) $categories[ (int)$opt['cat_id'] ] = array('cat_id' => $opt['cat_id'], 'cat_label' => $opt['cat_label'], 'opts' => array()); $categories[ (int)$opt['cat_id'] ]['opts'][] = $opt; if($returnForCat) // Save category ID for returning $returnForCat = (int)$opt['cat_id']; } } if($returnForCat) return $categories[ $returnForCat ]; ksort($categories); return $categories; } public function getByCode($d = array()) { $res = array(); $codeData = $this->get($d); if(empty($d)) { // 
Sort by code foreach($codeData as $opt) { $res[ $opt['code'] ] = $opt; } } else $res = $codeData; return $res; } /** * Load all options data into protected array */ protected function _loadOptions() { if(empty($this->_allOptions)) { $options = frameBup::_()->getTable('options'); $htmltype = frameBup::_()->getTable('htmltype'); $optionsCategories = frameBup::_()->getTable('options_categories'); $this->_allOptions = $options->innerJoin($htmltype, 'htmltype_id') ->leftJoin($optionsCategories, 'cat_id') ->orderBy(array('cat_id', 'sort_order')) ->getAll($options->alias(). '.*, '. $htmltype->alias(). '.label AS htmltype, '. $optionsCategories->alias(). '.label AS cat_label'); foreach($this->_allOptions as $i => $opt) { if(!empty($this->_allOptions[$i]['params'])) { $this->_allOptions[$i]['params'] = utilsBup::unserialize($this->_allOptions[$i]['params']); } if($this->_allOptions[$i]['value_type'] == 'array') { $this->_allOptions[$i]['value'] = utilsBup::unserialize($this->_allOptions[$i]['value']); if(!is_array($this->_allOptions[$i]['value'])) $this->_allOptions[$i]['value'] = array(); } if(empty($this->_allOptions[$i]['cat_id'])) { // Move all options that have no category - to Other $this->_allOptions[$i]['cat_id'] = 6; $this->_allOptions[$i]['cat_label'] = 'Other'; } } } } /** * Refresh all options data into protected array */ public function refreshOptions() { $options = frameBup::_()->getTable('options'); $htmltype = frameBup::_()->getTable('htmltype'); $optionsCategories = frameBup::_()->getTable('options_categories'); $this->_allOptions = $options->innerJoin($htmltype, 'htmltype_id') ->leftJoin($optionsCategories, 'cat_id') ->orderBy(array('cat_id', 'sort_order')) ->getAll($options->alias(). '.*, '. $htmltype->alias(). '.label AS htmltype, '. $optionsCategories->alias(). 
'.label AS cat_label'); foreach($this->_allOptions as $i => $opt) { if(!empty($this->_allOptions[$i]['params'])) { $this->_allOptions[$i]['params'] = utilsBup::unserialize($this->_allOptions[$i]['params']); } if($this->_allOptions[$i]['value_type'] == 'array') { $this->_allOptions[$i]['value'] = utilsBup::unserialize($this->_allOptions[$i]['value']); if(!is_array($this->_allOptions[$i]['value'])) $this->_allOptions[$i]['value'] = array(); } if(empty($this->_allOptions[$i]['cat_id'])) { // Move all options that have no category - to Other $this->_allOptions[$i]['cat_id'] = 6; $this->_allOptions[$i]['cat_label'] = 'Other'; } } } /** * Returns option data by it's code * @param string $code option's code * @return array option's data */ protected function _getByCode($code) { $this->_loadOptions(); if(!empty($this->_allOptions)) { foreach($this->_allOptions as $opt) { if($opt['code'] == $code) return $opt; } } return false; } /** * Set option value by code, do no changes in database * @param string $code option's code * @param string $value option's new value */ protected function _setByCode($code, $value) { $this->_loadOptions(); if(!empty($this->_allOptions)) { foreach($this->_allOptions as $id => $opt) { if($opt['code'] == $code) { $this->_allOptions[ $id ]['value'] = $value; break; } } } } public function save($d = array()) { $id = 0; if(isset($d['opt_values']) && is_array($d['opt_values']) && !empty($d['opt_values'])) { if(isset($d['code']) && !empty($d['code'])) { $d['what'] = 'id'; $id = $this->get($d); $id = intval($id); //echo $id.'|'; } if($id) { $updateData = array('value' => $d['opt_values'][ $d['code'] ]); $checkArr = $this->get(array('code' => $d['code'], 'what' => 'value_type')); if($checkArr == 'array' && !empty($checkArr)) { $updateData['value'] = utilsBup::serialize( $updateData['value'] ); } if(frameBup::_()->getTable('options')->update($updateData, array('id' => $id))) { // Let's update data in current options params to avoid reload it from database 
if(isset($d['code'])) $this->_setByCode($d['code'], $d['opt_values'][ $d['code'] ]); return true; } else $this->pushError(__('Option '. $d['code']. ' update Failed', BUP_LANG_CODE)); } else { $this->pushError(__('Invalid option ID or Code', BUP_LANG_CODE)); } } else $this->pushError(__('Empty data to save option', BUP_LANG_CODE)); return false; } public function saveCodeVal($code, $val) { if(frameBup::_()->getTable('options')->exists($code, 'code')) { frameBup::_()->getTable('options')->update(array( 'value' => $val, ), array('code' => $code)); $this->_setByCode($code, $val); } else { frameBup::_()->getTable('options')->insert(array( 'code' => $code, 'value' => $val, )); } } /*public function saveGroup($d = array()) { if(isset($d['opt_values']) && is_array($d['opt_values']) && !empty($d['opt_values'])) { foreach($d['opt_values'] as $code => $value) { $d['code'] = $code; $this->save($d); } return !$this->haveErrors(); } else $this->pushError(__('Empty data to setup', BUP_LANG_CODE)); }*/ public function saveBgImg($d = array()) { if(!empty($d) && isset($d['bg_image']) && !empty($d['bg_image'])) { $uploader = toeCreateObjBup('fileuploader', array()); if($uploader->validate('bg_image', frameBup::_()->getModule('options')->getBgImgDir()) && $uploader->upload()) { // Remove prev. image utilsBup::deleteFile( frameBup::_()->getModule('options')->getBgImgFullDir() ); $fileInfo = $uploader->getFileInfo(); // Save info for this option $this->save(array('code' => 'bg_image', 'opt_values' => array('bg_image' => $fileInfo['path']))); return true; } else $this->pushError( $uploader->getError() ); } else $this->pushError(__('Empty data to setup', BUP_LANG_CODE)); return false; } public function saveLogoImg($d = array()) { if(!empty($d) && isset($d['logo_image']) && !empty($d['logo_image'])) { $uploader = toeCreateObjBup('fileuploader', array()); if($uploader->validate('logo_image', frameBup::_()->getModule('options')->getLogoImgDir()) && $uploader->upload()) { // Remove prev. 
image utilsBup::deleteFile( frameBup::_()->getModule('options')->getLogoImgFullDir() ); $fileInfo = $uploader->getFileInfo(); // Save info for this option $this->save(array('code' => 'logo_image', 'opt_values' => array('logo_image' => $fileInfo['path']))); return true; } else $this->pushError( $uploader->getError() ); } else $this->pushError(__('Empty data to setup', BUP_LANG_CODE)); return false; } public function setTplDefault($d = array()) { $code = isset($d['code']) ? $d['code'] : ''; if(!empty($code)) { $plTemplate = $this->get('template'); // Current plugin template if($plTemplate && frameBup::_()->getModule($plTemplate)) { $newValue = frameBup::_()->getModule($plTemplate)->getDefOptions($code); if($newValue !== NULL) { if($this->save(array('opt_values' => array($code => $newValue), 'code' => $code))) { return $newValue; } } else $this->pushError(__('There is no default for this option and current template', BUP_LANG_CODE)); } else $this->pushError(__('There is no default for this option and current template', BUP_LANG_CODE)); } else $this->pushError(__('Empty option code', BUP_LANG_CODE)); return false; } public function setBgImgDefault($d = array()) { $code = isset($d['code']) ? $d['code'] : ''; if(!empty($code)) { $plTemplate = $this->get('template'); // Current plugin template if($plTemplate && frameBup::_()->getModule($plTemplate)) { $newValue = frameBup::_()->getModule($plTemplate)->getDefOptions($code); if($newValue !== NULL && file_exists(frameBup::_()->getModule($plTemplate)->getModDir(). $newValue)) { // Remove prev. image utilsBup::deleteFile( frameBup::_()->getModule('options')->getBgImgFullDir() ); // Copy new image from tpl module directory to uploads dirctory copy( frameBup::_()->getModule($plTemplate)->getModDir(). $newValue, utilsBup::getUploadsDir(). DS. $this->getModule()->getBgImgDir(). DS. 
$newValue); if($this->save(array('opt_values' => array($code => $newValue), 'code' => $code))) { return $this->getModule()->getBgImgFullPath(); } } else $this->pushError(__('There is no default for this option and current template', BUP_LANG_CODE)); } else $this->pushError(__('There is no default for this option and current template', BUP_LANG_CODE)); } else $this->pushError(__('Empty option code', BUP_LANG_CODE)); return false; } public function removeBgImg($d = array()) { $bgImgDirPath = frameBup::_()->getModule('options')->getBgImgFullDir(); if($this->save(array('opt_values' => array('bg_image' => ''), 'code' => 'bg_image')) && utilsBup::deleteFile( $bgImgDirPath ) ) { return true; } else $this->pushError(__('Unable to remove image', BUP_LANG_CODE)); } public function setLogoDefault($d = array()) { $code = isset($d['code']) ? $d['code'] : ''; if(!empty($code)) { $plTemplate = $this->get('template'); // Current plugin template if($plTemplate && frameBup::_()->getModule($plTemplate)) { $newValue = frameBup::_()->getModule($plTemplate)->getDefOptions($code); if($newValue !== NULL && file_exists(frameBup::_()->getModule($plTemplate)->getModDir(). $newValue)) { // Remove prev. image utilsBup::deleteFile( frameBup::_()->getModule('options')->getLogoImgFullDir() ); // Copy new image from tpl module directory to uploads dirctory copy( frameBup::_()->getModule($plTemplate)->getModDir(). $newValue, utilsBup::getUploadsDir(). DS. $this->getModule()->getLogoImgDir(). DS. 
$newValue); if($this->save(array('opt_values' => array($code => $newValue), 'code' => $code))) { return $this->getModule()->getLogoImgFullPath(); } } else $this->pushError(__('There is no default for this option and current template', BUP_LANG_CODE)); } else $this->pushError(__('There is no default for this option and current template', BUP_LANG_CODE)); } else $this->pushError(__('Empty option code', BUP_LANG_CODE)); return false; } public function removeLogoImg($d = array()) { $logoImgDirPath = frameBup::_()->getModule('options')->getLogoImgFullDir(); if($this->save(array('opt_values' => array('logo_image' => ''), 'code' => 'logo_image')) && utilsBup::deleteFile( $logoImgDirPath ) ) { return true; } else $this->pushError(__('Unable to remove image', BUP_LANG_CODE)); } public function setTitleParamsDefault($d = array()) { $res = true; $plTemplate = $this->get('template'); // Current plugin template if($plTemplate && frameBup::_()->getModule($plTemplate)) { $msgTitleColor = frameBup::_()->getModule($plTemplate)->getDefOptions('msg_title_color'); if($msgTitleColor !== NULL) { $this->save(array('opt_values' => array('msg_title_color' => $msgTitleColor), 'code' => 'msg_title_color')); } $msgTitleFont = frameBup::_()->getModule($plTemplate)->getDefOptions('msg_title_font'); if($msgTitleFont !== NULL) { $this->save(array('opt_values' => array('msg_title_font' => $msgTitleFont), 'code' => 'msg_title_font')); } if($msgTitleColor !== NULL || $msgTitleFont !== NULL) { $res = array('msg_title_color' => $msgTitleColor, 'msg_title_font' => $msgTitleFont); } } // good in any case return $res; } public function setTextParamsDefault($d = array()) { $res = true; $plTemplate = $this->get('template'); // Current plugin template if($plTemplate && frameBup::_()->getModule($plTemplate)) { $msgTextColor = frameBup::_()->getModule($plTemplate)->getDefOptions('msg_text_color'); if($msgTextColor !== NULL) { $this->save(array('opt_values' => array('msg_text_color' => $msgTextColor), 'code' 
=> 'msg_text_color')); } $msgTextFont = frameBup::_()->getModule($plTemplate)->getDefOptions('msg_text_font'); if($msgTextFont !== NULL) { $this->save(array('opt_values' => array('msg_text_font' => $msgTextFont), 'code' => 'msg_text_font')); } if($msgTextColor !== NULL || $msgTextFont !== NULL) { $res = array('msg_text_color' => $msgTextColor, 'msg_text_font' => $msgTextFont); } } // good in any case return $res; } public function saveBackupPath($newBackupPathArray) { $backupsPath = frameBup::_()->getTable('options')->get('value', array('code' => 'serialized_backups_path'), '', 'row'); $backupsPath = !empty($backupsPath['value']) ? unserialize($backupsPath['value']) : null; if(is_array($backupsPath)) { $newPathExist = false; $serializedNewPath = serialize($newBackupPathArray); foreach($backupsPath as $path) { if($serializedNewPath === serialize($path)) $newPathExist=true; } if(!$newPathExist) { $backupsPath[] = $newBackupPathArray; frameBup::_()->getTable('options')->update(array('value' => serialize($backupsPath)), array('code' => 'serialized_backups_path')); } } elseif (is_array($newBackupPathArray) && isset($newBackupPathArray['warehouse']) && isset($newBackupPathArray['warehouse_abs'])) { $backupsPath = array(); $backupsPath[] = $newBackupPathArray; frameBup::_()->getTable('options')->update(array('value' => serialize($backupsPath)), array('code' => 'serialized_backups_path')); } } }
VitaliyProdan/bmm
wp-content/plugins/backup-by-supsystic/modules/options/models/options.php
PHP
gpl-2.0
18,749
/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   copyright (C) 2002-2006                                               *
 *   Umbrello UML Modeller Authors <uml-devel@uml.sf.net>                  *
 ***************************************************************************/

#ifndef LINEPATH_H
#define LINEPATH_H

#include <qobject.h>
#include <qptrlist.h>
#include <qpoint.h>
#include <qpointarray.h>
#include <qcanvas.h>
#include <qpainter.h>

#include "umlnamespace.h"

/* how many pixels a user could click around a point */
#define POINT_DELTA 5

class AssociationWidget;
class UMLView;

// Qt forward declarations
class QDataStream;
class QDomDocument;
class QDomElement;

// typedefs — non-owning Qt3 pointer-list aliases for the canvas items
// that together render one association line.
typedef QPtrList<QCanvasLine> LineList;
typedef QPtrListIterator<QCanvasLine> LineListIt;
typedef QPtrList<QCanvasRectangle> RectList;
typedef QPtrListIterator<QCanvasRectangle> RectListIt;

/**
 * Manages the polyline that renders an AssociationWidget on the UML
 * canvas: the line segments themselves, the arrow/diamond head, the
 * selection handles, and (for collaboration messages) a parallel line.
 *
 * @author Paul Hensgen
 * Bugs and comments to uml-devel@lists.sf.net or http://bugs.kde.org
 */
class LinePath : public QObject {
    Q_OBJECT
public:

    /**
     * Constructor.
     */
    LinePath();

    /**
     * Deconstructor.
     */
    ~LinePath();

    /**
     * Equality (==) operator.
     */
    bool operator==( LinePath & rhs );

    /**
     * Copy ( = ) operator.
     */
    LinePath & operator=( LinePath & rhs );

    /**
     * Enum to tell whether the line docks top/bottom or left/right.
     */
    enum Region { TopBottom, LeftRight };

    /**
     * Tell the line where the line docks.
     */
    void setDockRegion( Region region );

    /// True when at least one line segment exists.
    bool hasPoints ();

    /// Debug helper: dumps the path's points.
    void dumpPoints ();

    /**
     * Returns the point at the point index.
     */
    QPoint getPoint( int pointIndex );

    /**
     * Sets the position of an already set point.
     */
    bool setPoint( int pointIndex, const QPoint &point );

    /**
     * Checks if we are at an end of a segment or somewhere in the middle.
     * We use the delta because with the mouse it is hard to hit the
     * exact point.
     */
    bool isPoint( int pointIndex, const QPoint &point, unsigned short delta = 0 );

    /**
     * Inserts a point at the given index.
     */
    bool insertPoint( int pointIndex, const QPoint &point );

    /**
     * Removes the point on the line given by the index, at the coordinates
     * given by point, with a fuzz of delta.
     */
    bool removePoint( int pointIndex, const QPoint &point, unsigned short delta = 0 );

    /**
     * Sets the start and end points.
     */
    bool setStartEndPoints( const QPoint &start, const QPoint &end );

    /**
     * Returns the amount of POINTS on the line.
     * Includes start and end points.
     */
    int count();

    /**
     * Returns -1 if the given point is not on the line,
     * else returns the line segment the point is on.
     * Use the value to insert points at the point position.
     */
    int onLinePath( const QPoint &position );

    /**
     * Sets the canvas to be used.
     */
    void setCanvas( QCanvas * canvas );

    /**
     * Sets the Association type (determines pen style and head shape).
     */
    void setAssocType( Uml::Association_Type type );

    /**
     * Calls a group of methods to update the line.
     * Used to save you calling multiple methods.
     */
    void update();

    /**
     * This will setup the class ready to display the line correctly.
     * This MUST be called before you can use this class.
     */
    void setAssociation( AssociationWidget * association );

    /**
     * Returns the Association this class is linked to.
     */
    AssociationWidget * getAssociation() { return m_pAssociation; }

    /**
     * Sets the status of whether the line is selected or not.
     */
    void setSelected( bool select );

    /// Serializes the path to XMI.
    void saveToXMI( QDomDocument & qDoc, QDomElement & qElement );

    /// Restores the path from XMI; call activate() afterwards (see below).
    bool loadFromXMI( QDomElement & qElement );

    /**
     * Activates the line list.
     * This is needed because m_pAssociation does not yet exist at the
     * time of the LinePath::loadFromXMI call, so the points in
     * m_LineList have no parent when loaded.  They are reparented by
     * calling LinePath::activate() once m_pAssociation exists.
     */
    void activate();

    /**
     * Removes any items created that are no longer needed.
     */
    void cleanup();

    /**
     * Returns the type of pen to use depending on the type of Association.
     */
    QPen getPen();

    /**
     * Sets the line color used by the line.
     */
    void setLineColor( const QColor &color );

    /**
     * Sets the line width used by the line.
     */
    void setLineWidth( uint width );

protected:

    /**
     * Draw a (hollow) circle.
     * We can't use QCanvasEllipse directly for this because it doesn't
     * use the pen, i.e. QCanvasEllipse only draws filled ellipses.
     */
    class Circle : public QCanvasEllipse {
    public:
        explicit Circle(QCanvas * canvas, int radius = 0);
        void setRadius(int radius);
        int getRadius() const;
        void setX(int x);
        void setY(int y);

        /**
         * The beef: override method from QCanvasEllipse.
         */
        void drawShape(QPainter& p);
    };

    /**
     * Returns the canvas being used.
     * Will return zero if the Association hasn't been set.
     *
     * This class doesn't hold this information but is a wrapper
     * method to stop calls to an undefined variable like m_pAssociation.
     */
    QCanvas * getCanvas();

    /**
     * Returns the Association type.
     * Returns Uml::at_Association if the association hasn't been set.
     */
    Uml::Association_Type getAssocType();

    /**
     * Returns the Line Color to use.
     * Returns black if the association is not set.
     */
    QColor getLineColor();

    /**
     * Returns the Line Width to use.
     * Returns 0 if the association is not set.
     */
    uint getLineWidth();

    /**
     * Moves the selected canvas widgets.
     */
    void moveSelected( int pointIndex );

    /**
     * Sets up the selection handles.
     */
    void setupSelected();

    /**
     * Calculates the head points.
     */
    void calculateHead();

    /**
     * Creates the head lines to display the head.
     */
    void createHeadLines();

    /**
     * Create a number of new lines and append them to the given list.
     *
     * @param list The list into which to append lines.
     * @param by   The number of lines to insert into the given list.
     */
    void growList(LineList &list, int by);

    /**
     * Updates the head lines.  Call after calculating the new points.
     */
    void updateHead();

    /**
     * Creates the line objects to display the parallel line.
     */
    void setupParallelLine();

    /**
     * Calculates the position of the parallel line.
     */
    void calculateParallelLine();

    /**
     * Updates the parallel line.  Call after calculating the new position.
     */
    void updateParallelLine();

    /********Attributes*************/

    /// The association we are representing.
    AssociationWidget * m_pAssociation;

    /// Contains all the lines of the association.
    LineList m_LineList;

    /// Selection-handle boxes.
    RectList m_RectList;

    /// Head lines.
    LineList m_HeadList;

    /// The parallel line.
    LineList m_ParallelList;

    /// Selected status.
    bool m_bSelected;

    /// Calculated points used to draw the line head.
    QPointArray m_PointArray;

    /// Calculated head geometry (m_EgdePoint: sic — "Egde" typo is part
    /// of the API; renaming would touch the .cpp as well).
    QPoint m_ArrowPointA, m_ArrowPointB, m_MidPoint, m_EgdePoint;

    /// A polygon object to blank out any lines we don't want to see.
    QCanvasPolygon * m_pClearPoly;

    /// The transparent circle required by containment associations.
    Circle * m_pCircle;

    /// Calculated points for the parallel line on a collaboration message.
    QPointArray m_ParallelLines;

    /// Region where the line docks.
    Region m_DockRegion;

    bool m_bHeadCreated;
    bool m_bParallelLineCreated;

public slots:

    /**
     * Sets the line color used by the line.
     *
     * @param viewID The id of the object behind the widget.
     */
    void slotLineColorChanged( Uml::IDType viewID );

    /**
     * Sets the line width used by the line.
     *
     * @param viewID The id of the object behind the widget.
     */
    void slotLineWidthChanged( Uml::IDType viewID );
};

#endif
serghei/kde-kdesdk
umbrello/umbrello/linepath.h
C
gpl-2.0
9,617
<?php
/**
 * Base shortcode class shared by all LessTheme/Zo shortcodes.
 *
 * Extends WPBakery's shortcode class with theme-overridable template
 * lookup: a file named "<shortcode>.php" or "<shortcode>--variant.php"
 * in the active theme's vc_templates directory wins over the plugin copy.
 */
class ZoShortcode extends WPBakeryShortCode {

    /**
     * Render the shortcode through its template file.
     *
     * Honours an explicit `zo_template` attribute when a matching file
     * exists; otherwise falls back to WPBakery's stock template lookup.
     *
     * @param array       $atts    Shortcode attributes.
     * @param string|null $content Inner shortcode content.
     * @return string Rendered markup, passed through js_composer filters.
     */
    protected function loadTemplate($atts, $content = null) {
        $output = '';
        $files = $this->findShortcodeTemplates();
        if (isset($atts['zo_template'])) {
            $zo_template = $atts['zo_template'];
        } else {
            $zo_template = $this->shortcode . '.php';
        }
        if ($zo_template && isset($files[$zo_template])) {
            // A matching template was found by the scan: use it.
            $this->setTemplate($files[$zo_template]->uri);
        } else {
            // Fall back to WPBakery's default template resolution.
            $this->findShortcodeTemplate();
        }
        if ($content !== null) {
            $content = apply_filters('vc_shortcode_content_filter', $content, $this->shortcode);
        }
        if ($this->html_template) {
            // Capture the template's output; the included file sees the
            // locals of this method ($atts, $content, ...).
            ob_start();
            include( $this->html_template );
            $output = ob_get_clean();
        } else {
            trigger_error(sprintf(__('Template file is missing for `%s` shortcode. Make sure you have `%s` file in your theme folder.', 'js_composer'), $this->shortcode, 'wp-content/themes/your_theme/vc_templates/' . $this->shortcode . '.php'));
        }
        return apply_filters('vc_shortcode_content_filter_after', $output, $this->shortcode);
    }

    /**
     * Collect candidate template files for this shortcode.
     *
     * Theme entries are merged last so that, on a file-name clash, the
     * theme's copy overrides the plugin's.
     *
     * @return array Map of file name => file object (exposes ->uri).
     */
    protected function findShortcodeTemplates() {
        $pattern = "/^({$this->shortcode}\.php|{$this->shortcode}--.*\.php)/";
        $themeTemplates = zoFileScanDirectory(get_template_directory() . '/vc_templates', $pattern);
        $pluginTemplates = zoFileScanDirectory(ZO_TEMPLATES, $pattern);
        return array_merge($pluginTemplates, $themeTemplates);
    }
}
lenguyenitc/donations
wp-content/plugins/zotheme/shortcodes/zo_base.php
PHP
gpl-2.0
1,648
/**
 * \file   l4re/lib/src/env.c
 * \brief  Environment
 */
/*
 * (c) 2008-2009 Alexander Warg <warg@os.inf.tu-dresden.de>
 *     economic rights: Technische Universität Dresden (Germany)
 *
 * This file is part of TUD:OS and distributed under the terms of the
 * GNU General Public License 2.
 * Please see the COPYING-GPL-2 file for details.
 *
 * As a special exception, you may use this file as part of a free software
 * library without restriction.  Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you compile
 * this file and link it with other files to produce an executable, this
 * file does not by itself cause the resulting executable to be covered by
 * the GNU General Public License.  This exception does not however
 * invalidate any other reasons why the executable file might be covered by
 * the GNU General Public License.
 */
#include <l4/re/env.h>

/*
 * Definition of the global L4Re environment pointer declared in
 * <l4/re/env.h>.  Presumably assigned by the runtime/startup code before
 * application code runs — the initializer is not visible in this file.
 */
l4re_env_t *l4re_global_env;
MicroTrustRepos/microkernel
src/l4/pkg/l4re/lib/src/env.c
C
gpl-2.0
961
<?php
/**
 * Admin dashboard controller: renders the admin home page.
 *
 * @author mr.v
 * @copyright http://okvee.net
 * @version okv web starter kit 0.1b
 */
class index extends admin_controller {

    /**
     * Initialise the controller.  The model/helper lists are currently
     * empty; add names to the arrays below as they become needed.
     */
    function __construct() {
        parent::__construct();
        $this->load->model(array());
        $this->load->helper(array());
    }

    /**
     * Render the admin dashboard inside the admin layout view.
     */
    function index() {
        $data = array();
        // Inner dashboard content, rendered to a string (third arg = true).
        $data['admin_content'] = $this->load->view("site-admin/admin_home_view", "", true);
        // Page <title>: "<site name><separator><dashboard label>".
        $data['page_title'] = $this->config_model->load("site_name")
            . $this->config_model->load("page_title_separator")
            . $this->lang->line("admin_dashboard");
        // Extra head tags (meta/link/script) may be appended here via
        // $data['page_metatag'][], $data['page_linktag'][],
        // $data['page_scripttag'][] when required by the layout.
        $this->load->view("site-admin/index_view", $data);
    }
}
OkveeNet/vee-manga-reader-pro
application/controllers/site-admin/index.php
PHP
gpl-2.0
1,376
#!/usr/bin/env python
# coding:utf-8
import hashlib
import logging

from x1category import X1Category

# Common prefix for tool class names.
TOOL_PREFIX = 'X1Tool'


class X1Tool(object):
    """Base class for tools.

    appid:6376477c731a89e3280657eb88422645f2d1e2a684541222e21371f3110110d2
    """

    # Fallback metadata used when the caller supplies none.
    DEFAULT_METADATA = {'name': "X1Tool",
                        'author': "admin",
                        'comments': "default",
                        'template': "default/index.html",
                        'category': X1Category.DEFAULT}

    def __init__(self, metadata=None):
        """Create a tool, optionally with caller-provided metadata.

        :param metadata: dict of tool metadata; defaults to a *copy* of
            DEFAULT_METADATA.
        """
        if metadata is None:
            # BUGFIX: copy the default so per-instance writes via
            # metadata(key, value) cannot mutate the shared class-level
            # DEFAULT_METADATA (previously every instance aliased it).
            metadata = dict(self.DEFAULT_METADATA)
        self.__metadata = metadata

    @classmethod
    def appid(cls):
        """Per tool GUID: hex SHA-224 of the class name.

        Returns the sentinel "0000000000" if hashing fails.
        """
        try:
            # encode() so this also works on Python 3, where sha224()
            # requires bytes (on Python 2 str.encode('utf-8') is a no-op).
            return hashlib.sha224(cls.__name__.encode('utf-8')).hexdigest()
        except Exception as e:
            logging.error('Fail to get appid: %s' % e)
            return "0000000000"

    def run(self, args):
        """Default run implementation: echo args back; subclasses override."""
        return args

    def metadata(self, attr_key=None, attr_value=None):
        """Read or update metadata.

        - metadata()           -> the whole metadata dict
        - metadata(key)        -> value for key (logs, returns None if absent)
        - metadata(key, value) -> set key, then return the stored value
        """
        try:
            if attr_key is None:
                return self.__metadata
            if attr_value is not None:
                self.__metadata[attr_key] = attr_value
            return self.__metadata[attr_key]
        except Exception as e:
            logging.error('Fail to set attr: %s(%s)' % (attr_key, e))


if __name__ == '__main__':
    app = X1Tool()
    print(app.appid())
    print(app.metadata())
cshzc/X1Tool
src/server/apps/x1tool.py
Python
gpl-2.0
1,304
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/REC-html40/loose.dtd"> <!-- Generated by phpdocgen 0.17-rc3 on Sun Oct 10 22:21:22 2010 --> <HTML> <HEAD> <TITLE>All Elements</TITLE> <META http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"> <link REL ='stylesheet' TYPE='text/css' HREF='./stylesheet.css' TITLE='Style'> <script language='JavaScript1.2'> <!-- function swaptreecontent(elID) { b = document.getElementById(elID).innerHTML; a = document.getElementById(elID + "off__").innerHTML; if (a.length == 0) { document.getElementById(elID).innerHTML = ""; } else { document.getElementById(elID).innerHTML = a; } document.getElementById(elID + "off__").innerHTML = b; } function swaptreeicon(elID) { a = document.getElementById(elID + "plusminus").src; if (a.indexOf("minus.gif") != -1) a = "./plus.gif"; else a = "./minus.gif"; document.getElementById(elID + "plusminus").src = a; } function swaptree(elID) { swaptreecontent(elID); swaptreeicon(elID); } function swaptree2(elID1,elID2) { swaptreecontent(elID1); swaptreecontent(elID2); swaptreeicon(elID1); } //--> </script> </HEAD> <BODY> <P><DIV id="treeid2off__" style="visibility : hidden; position: absolute"></DIV> <A HREF="javascript:swaptree('treeid2')"><img src="./minus.gif" alt="-" id="treeid2plusminus" width="11" height="11" border="0"></A>&nbsp;All Classes<BR> <div id="treeid2"><TABLE border=0 cellspacing=0 cellpadding=0><TR><TD valign='top' align='right'><img src="./child.gif" alt="-" width="11" height="17" border="0"></TD><TD class='TreeText' valign='middle' align='left'>&nbsp;<A HREF="MDBApi/BrowserEmulator.html" target="classFrame">BrowserEmulator</A></TD></TR></TABLE> <TABLE border=0 cellspacing=0 cellpadding=0><TR><TD valign='top' align='right'><img src="./child.gif" alt="-" width="11" height="17" border="0"></TD><TD class='TreeText' valign='middle' align='left'>&nbsp;<A HREF="IMDB/imdb.html" target="classFrame">imdb</A></TD></TR></TABLE> <TABLE border=0 
cellspacing=0 cellpadding=0><TR><TD valign='top' align='right'><img src="./child.gif" alt="-" width="11" height="17" border="0"></TD><TD class='TreeText' valign='middle' align='left'>&nbsp;<A HREF="IMDB/imdb_movielist.html" target="classFrame">imdb_movielist</A></TD></TR></TABLE> <TABLE border=0 cellspacing=0 cellpadding=0><TR><TD valign='top' align='right'><img src="./child.gif" alt="-" width="11" height="17" border="0"></TD><TD class='TreeText' valign='middle' align='left'>&nbsp;<A HREF="IMDB/imdb_nowplaying.html" target="classFrame">imdb_nowplaying</A></TD></TR></TABLE> <TABLE border=0 cellspacing=0 cellpadding=0><TR><TD valign='top' align='right'><img src="./child.gif" alt="-" width="11" height="17" border="0"></TD><TD class='TreeText' valign='middle' align='left'>&nbsp;<A HREF="IMDB/imdb_person.html" target="classFrame">imdb_person</A></TD></TR></TABLE> <TABLE border=0 cellspacing=0 cellpadding=0><TR><TD valign='top' align='right'><img src="./child.gif" alt="-" width="11" height="17" border="0"></TD><TD class='TreeText' valign='middle' align='left'>&nbsp;<A HREF="IMDB/imdb_topcharts.html" target="classFrame">imdb_topcharts</A></TD></TR></TABLE> <TABLE border=0 cellspacing=0 cellpadding=0><TR><TD valign='top' align='right'><img src="./child.gif" alt="-" width="11" height="17" border="0"></TD><TD class='TreeText' valign='middle' align='left'>&nbsp;<A HREF="IMDB/imdb_trailers.html" target="classFrame">imdb_trailers</A></TD></TR></TABLE> <TABLE border=0 cellspacing=0 cellpadding=0><TR><TD valign='top' align='right'><img src="./child.gif" alt="-" width="11" height="17" border="0"></TD><TD class='TreeText' valign='middle' align='left'>&nbsp;<A HREF="IMDB/imdbpsearch.html" target="classFrame">imdbpsearch</A></TD></TR></TABLE> <TABLE border=0 cellspacing=0 cellpadding=0><TR><TD valign='top' align='right'><img src="./child.gif" alt="-" width="11" height="17" border="0"></TD><TD class='TreeText' valign='middle' align='left'>&nbsp;<A HREF="IMDB/imdbsearch.html" 
target="classFrame">imdbsearch</A></TD></TR></TABLE> <TABLE border=0 cellspacing=0 cellpadding=0><TR><TD valign='top' align='right'><img src="./child.gif" alt="-" width="11" height="17" border="0"></TD><TD class='TreeText' valign='middle' align='left'>&nbsp;<A HREF="IMDB/imdbXML.html" target="classFrame">imdbXML</A></TD></TR></TABLE> <TABLE border=0 cellspacing=0 cellpadding=0><TR><TD valign='top' align='right'><img src="./child.gif" alt="-" width="11" height="17" border="0"></TD><TD class='TreeText' valign='middle' align='left'>&nbsp;<A HREF="MDBApi/mdb_base.html" target="classFrame">mdb_base</A></TD></TR></TABLE> <TABLE border=0 cellspacing=0 cellpadding=0><TR><TD valign='top' align='right'><img src="./child.gif" alt="-" width="11" height="17" border="0"></TD><TD class='TreeText' valign='middle' align='left'>&nbsp;<A HREF="MDBApi/mdb_config.html" target="classFrame">mdb_config</A></TD></TR></TABLE> <TABLE border=0 cellspacing=0 cellpadding=0><TR><TD valign='top' align='right'><img src="./child.gif" alt="-" width="11" height="17" border="0"></TD><TD class='TreeText' valign='middle' align='left'>&nbsp;<A HREF="MDBApi/MDB_Request.html" target="classFrame">MDB_Request</A></TD></TR></TABLE> <TABLE border=0 cellspacing=0 cellpadding=0><TR><TD valign='top' align='right'><img src="./child.gif" alt="-" width="11" height="17" border="0"></TD><TD class='TreeText' valign='middle' align='left'>&nbsp;<A HREF="MDBApi/movie_base.html" target="classFrame">movie_base</A></TD></TR></TABLE> <TABLE border=0 cellspacing=0 cellpadding=0><TR><TD valign='top' align='right'><img src="./child.gif" alt="-" width="11" height="17" border="0"></TD><TD class='TreeText' valign='middle' align='left'>&nbsp;<A HREF="MDBApi/movieposterdb.html" target="classFrame">movieposterdb</A></TD></TR></TABLE> <TABLE border=0 cellspacing=0 cellpadding=0><TR><TD valign='top' align='right'><img src="./child.gif" alt="-" width="11" height="17" border="0"></TD><TD class='TreeText' valign='middle' 
align='left'>&nbsp;<A HREF="MDBApi/person_base.html" target="classFrame">person_base</A></TD></TR></TABLE> <TABLE border=0 cellspacing=0 cellpadding=0><TR><TD valign='top' align='right'><img src="./child.gif" alt="-" width="11" height="17" border="0"></TD><TD class='TreeText' valign='middle' align='left'>&nbsp;<A HREF="MoviePilot/pilot.html" target="classFrame">pilot</A></TD></TR></TABLE> <TABLE border=0 cellspacing=0 cellpadding=0><TR><TD valign='top' align='right'><img src="./child.gif" alt="-" width="11" height="17" border="0"></TD><TD class='TreeText' valign='middle' align='left'>&nbsp;<A HREF="MoviePilot/pilot_person.html" target="classFrame">pilot_person</A></TD></TR></TABLE> <TABLE border=0 cellspacing=0 cellpadding=0><TR><TD valign='top' align='right'><img src="./lastchild.gif" alt="-" width="11" height="17" border="0"></TD><TD class='TreeText' valign='middle' align='left'>&nbsp;<A HREF="MoviePilot/pilotsearch.html" target="classFrame">pilotsearch</A></TD></TR></TABLE> </DIV></P> <P><DIV id="treeid3off__" style="visibility : hidden; position: absolute"></DIV> <A HREF="javascript:swaptree('treeid3')"><img src="./minus.gif" alt="-" id="treeid3plusminus" width="11" height="11" border="0"></A>&nbsp;All Constants<BR> <div id="treeid3"><TABLE border=0 cellspacing=0 cellpadding=0><TR><TD valign='top' align='right'><img src="./child.gif" alt="-" width="11" height="17" border="0"></TD><TD class='TreeText' valign='middle' align='left'>&nbsp;<A HREF="MDBApi/package-constants.html#$basic_access" target="classFrame">BASIC_ACCESS</A></TD></TR></TABLE> <TABLE border=0 cellspacing=0 cellpadding=0><TR><TD valign='top' align='right'><img src="./child.gif" alt="-" width="11" height="17" border="0"></TD><TD class='TreeText' valign='middle' align='left'>&nbsp;<A HREF="MDBApi/package-constants.html#$full_access" target="classFrame">FULL_ACCESS</A></TD></TR></TABLE> <TABLE border=0 cellspacing=0 cellpadding=0><TR><TD valign='top' align='right'><img src="./child.gif" alt="-" 
width="11" height="17" border="0"></TD><TD class='TreeText' valign='middle' align='left'>&nbsp;<A HREF="MDBApi/package-constants.html#$medium_access" target="classFrame">MEDIUM_ACCESS</A></TD></TR></TABLE> <TABLE border=0 cellspacing=0 cellpadding=0><TR><TD valign='top' align='right'><img src="./child.gif" alt="-" width="11" height="17" border="0"></TD><TD class='TreeText' valign='middle' align='left'>&nbsp;<A HREF="MDBApi/package-constants.html#$no_access" target="classFrame">NO_ACCESS</A></TD></TR></TABLE> <TABLE border=0 cellspacing=0 cellpadding=0><TR><TD valign='top' align='right'><img src="./lastchild.gif" alt="-" width="11" height="17" border="0"></TD><TD class='TreeText' valign='middle' align='left'>&nbsp;<A HREF="MDBApi/package-constants.html#$pilot_imdbfallback" target="classFrame">PILOT_IMDBFALLBACK</A></TD></TR></TABLE> </DIV></P> </BODY> </HTML>
Bigjoos/U-232-V2
imdb/imdb/doc/apidoc/allelements-frame.html
HTML
gpl-2.0
8,871
using UnityEngine;
using System.Collections;

/// <summary>
/// Mixed-garbage game entity: level-scaled hit points, armor-mitigated
/// damage, periodic regeneration, and merging ("fusing") of two entities.
/// </summary>
public class LixoMisturado {

	// Static tuning values.
	static int multiplicadorVidaBase = 10;  // HP granted per level
	static int danoMinimo = 1;              // damage floor after armor

	// Public static tuning values.
	static public float tempoRegen = 3;     // interval between regen ticks (read by callers)
	static public int nivelMaximo = 10;     // level cap

	// Public read-only state.
	public bool estaNivelMaximo { get { return _nivel >= nivelMaximo; } }
	public int nivel { get { return _nivel; } }
	public int vida { get { return _vidaAtual; } }
	public bool podeRegenerar { get { return _regen > 0 && _vidaAtual < _vidaMaxima; } }

	// Private state.
	int _nivel = 1;
	int _vidaMaxima = 0;
	int _vidaAtual = 0;
	int _regen = 0;
	int _armadura = 0;

	// Constructor.  The requested level is clamped to the cap, but the
	// effective level is currently taken from the spawn manager.
	public LixoMisturado(int nivel = 1) {
		if (nivel > nivelMaximo) {
			nivel = nivelMaximo;
		}
		//this._nivel = Random.Range(1, nivel + 1);
		this._nivel = ObjGerenciadorLixo.ultimoNivelCriadoLixo;
		Iniciar();
	}

	// Recomputes max HP for the current level and heals to full.
	void Iniciar() {
		_vidaMaxima = CalcularVidaMaxima();
		_vidaAtual = _vidaMaxima;
	}

	// Max HP is linear in level (the exponential formula was abandoned).
	int CalcularVidaMaxima() {
		//int n = (int) Mathf.Pow(2, _nivel - 1);
		int n = _nivel;
		return n * multiplicadorVidaBase;
	}

	/// <summary>
	/// Applies damage after armor mitigation; at least danoMinimo always
	/// gets through.  Returns true when the entity dies (HP reaches 0).
	/// </summary>
	public bool ReceberDano(int dano, int redutorArmadura = 0) {
		int armaduraTotal = _armadura - redutorArmadura;
		if (armaduraTotal < 0) {
			armaduraTotal = 0;
		}
		dano -= armaduraTotal;
		if (dano < danoMinimo) {
			dano = danoMinimo;
		}
		_vidaAtual -= dano;
		if (_vidaAtual <= 0) {
			_vidaAtual = 0;
			return true;
		}
		return false;
	}

	/// <summary>
	/// One regeneration tick: heals by _regen (clamped to max HP) and
	/// returns true when a heal happened.
	/// BUGFIX: previously added _regen directly AND called Curar(_regen),
	/// healing twice per tick; now it heals exactly once through Curar.
	/// </summary>
	public bool Regenerar() {
		if (podeRegenerar) {
			Curar(_regen);
			return true;
		}
		return false;
	}

	/// <summary>Heals by vidaExtra, clamped to max HP.</summary>
	public void Curar(int vidaExtra) {
		_vidaAtual += vidaExtra;
		if (_vidaAtual > _vidaMaxima) {
			_vidaAtual = _vidaMaxima;
		}
	}

	/// <summary>Two entities can fuse only if neither is at the level cap.</summary>
	public bool PodeFundir(LixoMisturado outro) {
		return !(this.estaNivelMaximo || outro.estaNivelMaximo);
	}

	/// <summary>
	/// Fuses with another entity: levels add (clamped to the cap) and
	/// HP is reset to the new full value via Iniciar().
	/// </summary>
	public void Fundir(LixoMisturado outro) {
		int nivel = this.nivel + outro.nivel;
		if (nivel > nivelMaximo) {
			nivel = nivelMaximo;
		}
		_nivel = nivel;
		Iniciar();
	}
}
zugbahn/Reciclador
Unity Projetos/Reciclador_WindowsPhone/Assets/Scripts/Tipos/LixoMisturado.cs
C#
gpl-2.0
2,152
/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */ /* * Copyright (c) 2017 NITK Surathkal * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation; * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Authors: Shravya K.S. <shravya.ks0@gmail.com> * */ #include <iostream> #include "ns3/core-module.h" #include "ns3/network-module.h" #include "ns3/internet-module.h" #include "ns3/point-to-point-module.h" #include "ns3/point-to-point-layout-module.h" #include "ns3/netanim-module.h" #include "ns3/applications-module.h" #include "ns3/ipv4-nix-vector-helper.h" #include "ns3/ipv4-static-routing.h" using namespace ns3; NS_LOG_COMPONENT_DEFINE ("FatTreeAnimation"); int main (int argc, char *argv[]) { Config::SetDefault ("ns3::OnOffApplication::PacketSize", UintegerValue (512)); Config::SetDefault ("ns3::OnOffApplication::DataRate", StringValue ("500kb/s")); uint32_t nPods = 4; std::string animFile = "fat-tree-animation.xml"; // Name of file for animation output CommandLine cmd; cmd.AddValue ("nPods", "Number of pods", nPods); cmd.AddValue ("animFile", "File Name for Animation Output", animFile); cmd.Parse (argc,argv); InternetStackHelper internet; Ipv4NixVectorHelper nixRouting; Ipv4StaticRoutingHelper staticRouting; Ipv4ListRoutingHelper list; list.Add (staticRouting, 0); list.Add (nixRouting, 10); internet.SetRoutingHelper (list); // Create the point-to-point link helpers PointToPointHelper pointToPointRouter; 
pointToPointRouter.SetDeviceAttribute ("DataRate", StringValue ("10Mbps")); pointToPointRouter.SetChannelAttribute ("Delay", StringValue ("1ms")); PointToPointFatTreeHelper d (nPods, pointToPointRouter); // Install Stack d.InstallStack (internet); d.AssignIpv4Addresses (Ipv4Address ("10.0.0.0"),Ipv4Mask ("/16")); OnOffHelper clientHelper ("ns3::UdpSocketFactory", Address ()); clientHelper.SetAttribute ("OnTime", StringValue ("ns3::ConstantRandomVariable[Constant=1]")); clientHelper.SetAttribute ("OffTime", StringValue ("ns3::ConstantRandomVariable[Constant=0]")); ApplicationContainer clientApps; AddressValue remoteAddress (InetSocketAddress (d.GetServerIpv4Address (2), 5001)); clientHelper.SetAttribute ("Remote", remoteAddress); clientApps.Add (clientHelper.Install (d.GetServerNode (0))); uint16_t port = 50001; Address sinkLocalAddress (InetSocketAddress (Ipv4Address::GetAny (), port)); PacketSinkHelper sinkHelper ("ns3::UdpSocketFactory", sinkLocalAddress); ApplicationContainer sinkApp = sinkHelper.Install (d.GetServerNode (2)); clientApps.Start (Seconds (1.0)); clientApps.Stop (Seconds (10.0)); sinkApp.Start (Seconds (0.0)); sinkApp.Stop (Seconds (10.0)); // Set the bounding box for animation d.BoundingBox (1, 1, 100, 100); // Create the animation object and configure for specified output AnimationInterface anim (animFile); anim.EnablePacketMetadata (); // Optional anim.EnableIpv4L3ProtocolCounters (Seconds (0), Seconds (10)); // Optional // Set up the actual simulation Ipv4GlobalRoutingHelper::PopulateRoutingTables (); Simulator::Run (); std::cout << "Animation Trace file created:" << animFile.c_str () << std::endl; Simulator::Destroy (); return 0; }
shravya-ks/ns-3-tcp-prague
src/netanim/examples/fat-tree-animation.cc
C++
gpl-2.0
3,799
<?php
/**
 * YoutubeGallery Joomla! Native Component
 * @version 4.2.8
 * @author DesignCompass corp< <support@joomlaboat.com>
 * @link http://www.joomlaboat.com
 * @GNU General Public License
 **/

// No direct access to this file
defined('_JEXEC') or die('Restricted access');

// import Joomla controller library
jimport('joomla.application.component.controller');

/**
 * YoutubeGallery Component Controller
 *
 * Joomla 3.x renamed the controller base class to JControllerLegacy, so the
 * controller is declared conditionally against the detected release.
 */
jimport('joomla.version');
$version = new JVersion();
$JoomlaVersionRelease=$version->RELEASE;

if($JoomlaVersionRelease>=3.0)
{
	// Joomla 3.x and later.
	class YoutubeGalleryController extends JControllerLegacy
	{
	}
}
else
{
	// Joomla 2.5 and earlier.
	class YoutubeGalleryController extends JController
	{
	}
}
// BUGFIX: the trailing "?>" close tag was removed — in a pure-PHP file any
// whitespace after it is sent to the client and can break HTTP headers.
xmarlem/BM_stage
components/com_youtubegallery/controller.php
PHP
gpl-2.0
740
#ifndef _RAWC_CONSTANTS_H
#define _RAWC_CONSTANTS_H
//=============================================================================
// File: RAWCConstants.h
//
// COPYRIGHT 2010 Robotics Alliance of the West Coast(RAWC)
// All rights reserved.  RAWC proprietary and confidential.
//
// The party receiving this software directly from RAWC (the "Recipient")
// may use this software and make copies thereof as reasonably necessary solely
// for the purposes set forth in the agreement between the Recipient and
// RAWC(the "Agreement").  The software may be used in source code form
// solely by the Recipient's employees/volunteers.  The Recipient shall have
// no right to sublicense, assign, transfer or otherwise provide the source
// code to any third party.  Subject to the terms and conditions set forth in
// the Agreement, this software, in binary form only, may be distributed by
// the Recipient to its users.  RAWC retains all ownership rights in and to
// the software.
//
// This notice shall supercede any other notices contained within the software.
//=============================================================================
#include "WPILib.h"
//#include <string>
#include <map>

using namespace std;

// Default location of the persisted constants file.
#define RAWC_CONSTANTS_DEFAULT_FILE "/home/lvuser/constants.csv"

/**
 * Singleton key/value store of named tuning constants, backed by a CSV
 * file so values can be edited without recompiling robot code.
 */
class RAWCConstants
{
    // A RAWCConstant is contained in RAWCConstants
    typedef double RAWCConstant;

    // Value intended as the fallback when a key is absent — TODO confirm
    // against the .cpp; getValueForKey()'s failure behavior is noted as
    // unresolved below.
    static constexpr RAWCConstant RAWC_CONSTANTS_DEFAULT_RET_VAL = 0.0;

private:
    // Backing store: constant name -> value.
    map<string, RAWCConstant> data;

    // Shared instance handed out by getInstance(); presumably created
    // lazily — the definition is not visible in this header.
    static RAWCConstants * singletonInstance;

    // Private: construct only through getInstance().
    RAWCConstants();

    //TODO: Add functionality to let this map persist to a file
    // Save to another file
    void saveDataToFile(string fileName);
    // Restore from the given file
    void restoreDataFromFile(string fileName);

public:
    void restoreData(); // from default file
    void save(); // Save to the default file

    // Get the shared object
    static RAWCConstants * getInstance();

    // Main mechanism to input data
    void insertKeyAndValue(string key, RAWCConstant value);

    // Main mechanism to look up data
    // NOTE: I need to figure out how this can fail elegantly
    // instead of just checking to see if it exists first
    // Maybe pass in a pointer to a RAWCConstant and write to that?
    RAWCConstant getValueForKey(string key);
    bool doesKeyExist(string key);
};

#endif // _RAWC_CONSTANTS_H
FRC-Team-1716/Team-1716-Swerve
src/RAWCConstants.h
C
gpl-2.0
2,336
/* * #%L * Fork of JAI Image I/O Tools. * %% * Copyright (C) 2008 - 2017 Open Microscopy Environment: * - Board of Regents of the University of Wisconsin-Madison * - Glencoe Software, Inc. * - University of Dundee * %% * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * #L% */ /* * $RCSfile: J2KImageWriteParam.java,v $ * * * Copyright (c) 2005 Sun Microsystems, Inc. All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistribution of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* * - Redistribution in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Sun Microsystems, Inc. or the names of * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * This software is provided "AS IS," without a warranty of any * kind. ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY * EXCLUDED. SUN MIDROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL * NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF * USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS * DERIVATIVES. IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR * ANY LOST REVENUE, PROFIT OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, * CONSEQUENTIAL, INCIDENTAL OR PUNITIVE DAMAGES, HOWEVER CAUSED AND * REGARDLESS OF THE THEORY OF LIABILITY, ARISING OUT OF THE USE OF OR * INABILITY TO USE THIS SOFTWARE, EVEN IF SUN HAS BEEN ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * You acknowledge that this software is not designed or intended for * use in the design, construction, operation or maintenance of any * nuclear facility. * * $Revision: 1.2 $ * $Date: 2006/09/20 23:23:30 $ * $State: Exp $ */ package com.sun.media.imageio.plugins.jpeg2000; import java.awt.Rectangle; import java.awt.image.Raster; import java.awt.image.RenderedImage; import java.util.Collections; import java.util.Locale; import java.util.Iterator; import javax.imageio.ImageWriteParam; /** * A subclass of <code>ImageWriteParam</code> for writing images in * the JPEG 2000 format. * * <p>JPEG 2000 plugin supports to losslessly or lossy compress gray-scale, * RGB, and RGBA images with byte, unsigned short or short data type. 
It also * supports losslessly compress bilevel, and 8-bit color indexed images. The * result data is in the of JP2 format -- JPEG 2000 Part 1 or baseline format. * * <p>The parameters for encoding JPEG 2000 are listed in the following table: * * <p><table border=1> * <caption><b>JPEG 2000 Plugin Decoding Parameters</b></caption> * <tr><th>Parameter Name</th> <th>Description</th></tr> * <tr> * <td>numDecompositionLevels</td> * <td> The number of decomposition levels to generate. This value must * be in the range * <code>0&nbsp;&le;&nbsp;numDecompositionLevels&nbsp;&le;&nbsp;32 * </code>. The default value is <code>5</code>. Note that the number * of resolution levels is * <code>numDecompositionLevels&nbsp;+&nbsp;1</code>. * The number of decomposition levels is constant across * all components and all tiles. * </td> * </tr> * <tr> * <td>encodingRate</td> * <td> The bitrate in bits-per-pixel for encoding. Should be set when * lossy compression scheme is used. With the default value * <code>Double.MAX_VALUE</code>, a lossless compression will be done. * </td> * </tr> * <tr> * <td>lossless</td> * <td> Indicates using the lossless scheme or not. It is equivalent to * use reversible quantization and 5x3 integer wavelet filters. The * default is <code>true</code>. * </td> * </tr> * <tr> * <td>componentTransformation</td> * <td> Specifies to utilize the component transformation on some tiles. * If the wavelet transform is reversible (w5x3 filter), the Reversible * Component Transformation (RCT) is applied. If not reversible * (w9x7 filter), the Irreversible Component Transformation (ICT) is used. * </td> * </tr> * <tr> * <td>filters</td> * <td> Specifies which wavelet filters to use for the specified * tile-components. JPEG 2000 part I only supports w5x3 and w9x7 filters. * </td> * </tr> * <tr> * <td>codeBlockSize</td> * <td> Specifies the maximum code-block size to use for tile-component. * The maximum width and height is 1024, however the block size * (i.e. 
width x height) must not exceed 4096. The minimum width and * height is 4. The default values are (64, 64). * </td> * </tr> * <tr> * <td>progressionType</td> * <td> Specifies which type of progression should be used when generating * the codestream. * <p> The format is ont of the progression types defined below: * * <p> res : Resolution-Layer-Component-Position * <p> layer: Layer-Resolution-Component-Position * <p> res-pos: Resolution-Position-Component-Layer * <p> pos-comp: Position-Component-Resolution-Layer * <p> comp-pos: Component-Position-Resolution-Layer * </td> * </tr> * <tr> * <td>SOP</td> * <td>Specifies whether start of packet (SOP) markers should be used. * true enables, false disables it. The default value is false. * </td> * </tr> * <tr> * <td>EPH</td> * <td>Specifies whether end of packet header (EPH) markers should be used. * true enables, false disables it. The default value is false. * </td> * </tr> * <tr> * <td>writeCodeStreamOnly</td> * <td>Specifies whether write only the jpeg2000 code stream, i.e, no any * box is written. The default value is false. * </td> * </tr> * </table> */ public class J2KImageWriteParam extends ImageWriteParam { /** The filter for lossy compression. */ public static final String FILTER_97 = "w9x7"; /** The filter for lossless compression. */ public static final String FILTER_53 = "w5x3"; /** * The number of decomposition levels. */ private int numDecompositionLevels = 5; /** * The bitrate in bits-per-pixel for encoding. Should be set when lossy * compression scheme is used. The default is * <code>Double.MAX_VALUE</code>. */ private double encodingRate = Double.MAX_VALUE; /** * Indicates using the lossless scheme or not. It is equivalent to * use reversible quantization and 5x3 integer wavelet filters. */ private boolean lossless = true; /** Specifies to utilize the component transformation with some tiles. * If the wavelet transform is reversible (w5x3 filter), the * Reversible Component Transformation (RCT) is applied. 
If not reversible * (w9x7 filter), the Irreversible Component Transformation (ICT) * is used. */ private boolean componentTransformation = true; /** Specifies which filters to use for the specified tile-components. * JPEG 2000 part I only supports w5x3 and w9x7 filters. */ private String filter = FILTER_53; /** Specifies the maximum code-block size to use for tile-component. * The maximum width and height is 1024, however the image area * (i.e. width x height) must not exceed 4096. The minimum * width and height is 4. Default: 64 64. */ private int[] codeBlockSize = new int[]{64, 64}; /** See above. */ private String progressionType = "layer"; /** Specifies whether end of packet header (EPH) markers should be used. * true enables, false disables it. Default: false. */ private boolean EPH = false; /** Specifies whether start of packet (SOP) markers should be used. * true enables, false disables it. Default: false. */ private boolean SOP = false; /** Specifies whether write only the jpeg2000 code stream, i.e, no any * box is written. The default value is false. */ private boolean writeCodeStreamOnly = false; /** * Constructor which sets the <code>Locale</code>. * * @param locale a <code>Locale</code> to be used to localize * compression type names and quality descriptions, or * <code>null</code>. */ public J2KImageWriteParam(Locale locale) { super(locale); setDefaults(); } /** * Constructs a <code>J2KImageWriteParam</code> object with default * values for all parameters. */ public J2KImageWriteParam() { super(); setDefaults(); } /** Set source */ private void setDefaults() { // override the params in the super class canOffsetTiles = true; canWriteTiles = true; canOffsetTiles = true; compressionTypes = new String[] {"JPEG2000"}; canWriteCompressed = true; tilingMode = MODE_EXPLICIT; } /** * Sets <code>numDecompositionLevels</code>. * * @param numDecompositionLevels the number of decomposition levels. 
* @throws IllegalArgumentException if <code>numDecompositionLevels</code> * is negative or greater than 32. * @see #getNumDecompositionLevels */ public void setNumDecompositionLevels(int numDecompositionLevels) { if(numDecompositionLevels < 0 || numDecompositionLevels > 32) { throw new IllegalArgumentException ("numDecompositionLevels < 0 || numDecompositionLevels > 32"); } this.numDecompositionLevels = numDecompositionLevels; } /** * Gets <code>numDecompositionLevels</code>. * * @return the number of decomposition levels. * @see #setNumDecompositionLevels */ public int getNumDecompositionLevels() { return numDecompositionLevels; } /** * Sets <code>encodingRate</code>. * * @param rate the encoding rate in bits-per-pixel. * @see #getEncodingRate() */ public void setEncodingRate(double rate) { this.encodingRate = rate; if (encodingRate != Double.MAX_VALUE) { lossless = false; filter = FILTER_97; } else { lossless = true; filter = FILTER_53; } } /** * Gets <code>encodingRate</code>. * * @return the encoding rate in bits-per-pixel. * @see #setEncodingRate(double) */ public double getEncodingRate() { return encodingRate; } /** * Sets <code>lossless</code>. * * @param lossless whether the compression scheme is lossless. * @see #getLossless() */ public void setLossless(boolean lossless) { this.lossless = lossless; } /** * Gets <code>lossless</code>. * * @return whether the compression scheme is lossless. * @see #setLossless(boolean) */ public boolean getLossless() { return lossless; } /** * Sets <code>filter</code>. * * @param value which wavelet filters to use for the specified * tile-components. * @see #getFilter() */ public void setFilter(String value) { filter = value; } /** * Gets <code>filters</code>. * * @return which wavelet filters to use for the specified * tile-components. * @see #setFilter(String) */ public String getFilter() { return filter; } /** * Sets <code>componentTransformation</code>. * * @param value whether to utilize the component transformation. 
* @see #getComponentTransformation() */ public void setComponentTransformation(boolean value) { componentTransformation = value; } /** * Gets <code>componentTransformation</code>. * * @return whether to utilize the component transformation. * @see #setComponentTransformation(boolean) */ public boolean getComponentTransformation() { return componentTransformation; } /** * Sets <code>codeBlockSize</code>. * * @param value the maximum code-block size to use per tile-component. * @see #getCodeBlockSize() */ public void setCodeBlockSize(int[] value) { codeBlockSize = value; } /** * Gets <code>codeBlockSize</code>. * * @return the maximum code-block size to use per tile-component. * @see #setCodeBlockSize(int[]) */ public int[] getCodeBlockSize() { return codeBlockSize; } /** * Sets <code>SOP</code>. * * @param value whether start of packet (SOP) markers should be used. * @see #getSOP() */ public void setSOP(boolean value) { SOP = value; } /** * Gets <code>SOP</code>. * * @return whether start of packet (SOP) markers should be used. * @see #setSOP(boolean) */ public boolean getSOP() { return SOP; } /** * Sets <code>EPH</code>. * * @param value whether end of packet header (EPH) markers should be used. * @see #getEPH() */ public void setEPH(boolean value) { EPH = value; } /** * Gets <code>EPH</code>. * * @return whether end of packet header (EPH) markers should be used. * @see #setEPH(boolean) */ public boolean getEPH() { return EPH; } /** * Sets <code>progressionType</code>. * * @param value which type of progression should be used when generating * the codestream. * @see #getProgressionType() */ public void setProgressionType(String value) { progressionType = value; } /** * Gets <code>progressionType</code>. * * @return which type of progression should be used when generating * the codestream. * @see #setProgressionType(String) */ public String getProgressionType() { return progressionType; } /** Sets <code>writeCodeStreamOnly</code>. 
* * @param value Whether the jpeg2000 code stream only or the jp2 format * will be written into the output. * @see #getWriteCodeStreamOnly() */ public void setWriteCodeStreamOnly(boolean value) { writeCodeStreamOnly = value; } /** Gets <code>writeCodeStreamOnly</code>. * * @return whether the jpeg2000 code stream only or the jp2 format * will be written into the output. * @see #setWriteCodeStreamOnly(boolean) */ public boolean getWriteCodeStreamOnly() { return writeCodeStreamOnly; } }
stelfrich/bioformats
components/forks/jai/src/com/sun/media/imageio/plugins/jpeg2000/J2KImageWriteParam.java
Java
gpl-2.0
16,164
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/> <meta http-equiv="X-UA-Compatible" content="IE=9"/> <title>Aria: Member List</title> <link href="tabs.css" rel="stylesheet" type="text/css"/> <link href="doxygen.css" rel="stylesheet" type="text/css" /> <link href="navtree.css" rel="stylesheet" type="text/css"/> <script type="text/javascript" src="jquery.js"></script> <script type="text/javascript" src="resize.js"></script> <script type="text/javascript" src="navtree.js"></script> <script type="text/javascript"> $(document).ready(initResizable); </script> </head> <body> <div id="top"><!-- do not remove this div! --> <div id="titlearea"> <table cellspacing="0" cellpadding="0"> <tbody> <tr style="height: 56px;"> <td style="padding-left: 0.5em;"> <div id="projectname">Aria &#160;<span id="projectnumber">2.9.0</span> </div> </td> </tr> </tbody> </table> </div> <!-- Generated by Doxygen 1.7.6.1 --> <div id="navrow1" class="tabs"> <ul class="tablist"> <li><a href="index.html"><span>Main&#160;Page</span></a></li> <li><a href="pages.html"><span>Related&#160;Pages</span></a></li> <li><a href="modules.html"><span>Modules</span></a></li> <li class="current"><a href="annotated.html"><span>Classes</span></a></li> <li><a href="files.html"><span>Files</span></a></li> <li><a href="examples.html"><span>Examples</span></a></li> </ul> </div> <div id="navrow2" class="tabs2"> <ul class="tablist"> <li><a href="annotated.html"><span>Class&#160;List</span></a></li> <li><a href="classes.html"><span>Class&#160;Index</span></a></li> <li><a href="hierarchy.html"><span>Class&#160;Hierarchy</span></a></li> <li><a href="functions.html"><span>Class&#160;Members</span></a></li> </ul> </div> </div> <div id="side-nav" class="ui-resizable side-nav-resizable"> <div id="nav-tree"> <div id="nav-tree-contents"> 
</div> </div> <div id="splitbar" style="-moz-user-select:none;" class="ui-resizable-handle"> </div> </div> <script type="text/javascript"> initNavTree('classArTCM2.html',''); </script> <div id="doc-content"> <div class="header"> <div class="headertitle"> <div class="title">ArTCM2 Member List</div> </div> </div><!--header--> <div class="contents"> This is the complete list of members for <a class="el" href="classArTCM2.html">ArTCM2</a>, including all inherited members.<table> <tr bgcolor="#f0f0f0"><td><b>addHeadingDataCallback</b>(ArFunctor1&lt; double &gt; *f) (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>ArTCM2</b>() (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td></td></tr> <tr class="memlist"><td><a class="el" href="classArTCM2.html#a03d68f7ee1b1ee2b6cf876c0908f43af">blockingConnect</a>(unsigned long connectTimeout=5000)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [virtual]</code></td></tr> <tr class="memlist"><td><a class="el" href="classArTCM2.html#aaba360fbc82b2c25cd25a20f42e4c206">commandAutoCalibration</a>(void)=0</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [pure virtual]</code></td></tr> <tr class="memlist"><td><a class="el" href="classArTCM2.html#a8b57b5ac6e217d7494cc72cfae61f171">commandContinuousPackets</a>(void)=0</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [pure virtual]</code></td></tr> <tr class="memlist"><td><a class="el" href="classArTCM2.html#af1af3c0170ed4c7a63fa6d00ce4a8d6b">commandJustCompass</a>(void)=0</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [pure virtual]</code></td></tr> <tr class="memlist"><td><a class="el" href="classArTCM2.html#a533b040e10e90d12bd9daebb409a200a">commandOff</a>(void)=0</td><td><a 
class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [pure virtual]</code></td></tr> <tr class="memlist"><td><a class="el" href="classArTCM2.html#a7922b19e37dd55443f8ca4fd60c3709a">commandOnePacket</a>(void)=0</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [pure virtual]</code></td></tr> <tr class="memlist"><td><a class="el" href="classArTCM2.html#a932f31b8962d13d634e79b74337e817b">commandSoftReset</a>(void)=0</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [pure virtual]</code></td></tr> <tr class="memlist"><td><a class="el" href="classArTCM2.html#a43df9ef61d5f9b25ef56a268635711c9">commandStopCalibration</a>(void)=0</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [pure virtual]</code></td></tr> <tr class="memlist"><td><a class="el" href="classArTCM2.html#a60d1e9687c237dec7f5c4df552694743">commandUserCalibration</a>(void)=0</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [pure virtual]</code></td></tr> <tr class="memlist"><td><a class="el" href="classArTCM2.html#aadac4c6e5254827cc598dbfab7e671fe">connect</a>()</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [virtual]</code></td></tr> <tr class="memlist"><td><a class="el" href="classArTCM2.html#a1d354c63ca69759dcef14f7d12a5d1bd">getCalibrationH</a>(void) const </td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline]</code></td></tr> <tr class="memlist"><td><a class="el" href="classArTCM2.html#adffff5238710ed005d2b85ad632ddc58">getCalibrationM</a>(void) const </td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline]</code></td></tr> <tr class="memlist"><td><a class="el" href="classArTCM2.html#a3c147f489fefc9b0b2180e5121ebe3ca">getCalibrationV</a>(void) const </td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline]</code></td></tr> <tr class="memlist"><td><a class="el" 
href="classArTCM2.html#acf62aab4d19e4f1924794ea6532d794f">getCompass</a>(void) const </td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>getError</b>(void) const (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline]</code></td></tr> <tr class="memlist"><td><a class="el" href="classArTCM2.html#a02eaefd0ee678b5e01f5d94ba883cd3f">getHeading</a>(void) const </td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline]</code></td></tr> <tr class="memlist"><td><a class="el" href="classArTCM2.html#aef135653337700db560d061cb4722d46">getPacCount</a>(void)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline]</code></td></tr> <tr class="memlist"><td><a class="el" href="classArTCM2.html#a351381b91db9a6ecf47f68df800998f8">getPitch</a>(void) const </td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline]</code></td></tr> <tr class="memlist"><td><a class="el" href="classArTCM2.html#a01a4ae402073f91a9e79dced2181a301">getRoll</a>(void) const </td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline]</code></td></tr> <tr class="memlist"><td><a class="el" href="classArTCM2.html#a41ebde0196e3b1b7973bd8e7e7f1ed2d">getTemperature</a>(void) const </td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline]</code></td></tr> <tr class="memlist"><td><a class="el" href="classArTCM2.html#a65b7d3f541cef8f29557d4d43defc255">getXMagnetic</a>(void) const </td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline]</code></td></tr> <tr class="memlist"><td><a class="el" href="classArTCM2.html#a72a1aa71f7981d8d3f121b7295688c66">getYMagnetic</a>(void) const </td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline]</code></td></tr> <tr class="memlist"><td><a class="el" 
href="classArTCM2.html#a9d748cf260579ee8bdbdc8afd59c569b">getZMagnetic</a>(void) const </td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>haveCalibrationH</b>() const (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>haveCalibrationM</b>() const (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>haveCalibrationV</b>() const (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>haveHeading</b>() const (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>havePitch</b>() const (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>haveRoll</b>() const (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>haveTemperature</b>() const (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>haveXMagnetic</b>() const (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>haveYMagnetic</b>() const (defined in <a class="el" 
href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>haveZMagnetic</b>() const (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>incrementPacketCount</b>() (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline, protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>invokeHeadingDataCallbacks</b>(double heading) (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline, protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>myCalibrationH</b> (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>myCalibrationM</b> (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>myCalibrationV</b> (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>myError</b> (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>myHaveCalibrationH</b> (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>myHaveCalibrationM</b> (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a 
class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>myHaveCalibrationV</b> (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>myHaveHeading</b> (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>myHavePitch</b> (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>myHaveRoll</b> (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>myHaveTemperature</b> (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>myHaveXMag</b> (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>myHaveYMag</b> (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>myHaveZMag</b> (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>myHeading</b> (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [protected]</code></td></tr> <tr 
bgcolor="#f0f0f0"><td><b>myHeadingDataCallbacks</b> (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>myPacCount</b> (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>myPacCurrentCount</b> (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>myPitch</b> (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>myRoll</b> (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>myTemperature</b> (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>myTimeLastPacket</b> (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>myXMag</b> (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>myYMag</b> (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>myZMag</b> (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" 
href="classArTCM2.html">ArTCM2</a></td><td><code> [protected]</code></td></tr> <tr bgcolor="#f0f0f0"><td><b>~ArTCM2</b>() (defined in <a class="el" href="classArTCM2.html">ArTCM2</a>)</td><td><a class="el" href="classArTCM2.html">ArTCM2</a></td><td><code> [inline, virtual]</code></td></tr> </table></div><!-- contents --> </div> <div id="nav-path" class="navpath"> <ul> <li class="footer">Generated on Mon Nov 10 2014 07:58:54 for Aria by <a href="http://www.doxygen.org/index.html"> <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.7.6.1 </li> </ul> </div> </body> </html>
sfe1012/Robot
docs/classArTCM2-members.html
HTML
gpl-2.0
16,783
/* * CFQ, or complete fairness queueing, disk scheduler. * * Based on ideas from a previously unfinished io * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli. * * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk> */ #include <linux/module.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/elevator.h> #include <linux/jiffies.h> #include <linux/rbtree.h> #include <linux/ioprio.h> #include <linux/blktrace_api.h> #include "blk.h" #include "cfq.h" /* * tunables */ /* max queue in one round of service */ static const int cfq_quantum = 8; static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 }; /* maximum backwards seek, in KiB */ static const int cfq_back_max = 16 * 1024; /* penalty of a backwards seek */ static const int cfq_back_penalty = 2; static const int cfq_slice_sync = HZ / 10; static int cfq_slice_async = HZ / 25; static const int cfq_slice_async_rq = 2; static int cfq_slice_idle = HZ / 125; static int cfq_group_idle = HZ / 125; static const int cfq_target_latency = HZ * 3/10; /* 300 ms */ static const int cfq_hist_divisor = 4; /* * offset from end of service tree */ #define CFQ_IDLE_DELAY (HZ / 5) /* * below this threshold, we consider thinktime immediate */ #define CFQ_MIN_TT (2) #define CFQ_SLICE_SCALE (5) #define CFQ_HW_QUEUE_MIN (5) #define CFQ_SERVICE_SHIFT 12 #define CFQQ_SEEK_THR (sector_t)(8 * 100) #define CFQQ_CLOSE_THR (sector_t)(8 * 1024) #define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32) #define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8) #define RQ_CIC(rq) icq_to_cic((rq)->elv.icq) #define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elv.priv[0]) #define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elv.priv[1]) static struct kmem_cache *cfq_pool; #define CFQ_PRIO_LISTS IOPRIO_BE_NR #define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE) #define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT) #define sample_valid(samples) ((samples) > 80) #define rb_entry_cfqg(node) 
rb_entry((node), struct cfq_group, rb_node) struct cfq_ttime { unsigned long last_end_request; unsigned long ttime_total; unsigned long ttime_samples; unsigned long ttime_mean; }; /* * Most of our rbtree usage is for sorting with min extraction, so * if we cache the leftmost node we don't have to walk down the tree * to find it. Idea borrowed from Ingo Molnars CFS scheduler. We should * move this into the elevator for the rq sorting as well. */ struct cfq_rb_root { struct rb_root rb; struct rb_node *left; unsigned count; unsigned total_weight; u64 min_vdisktime; struct cfq_ttime ttime; }; #define CFQ_RB_ROOT (struct cfq_rb_root) { .rb = RB_ROOT, \ .ttime = {.last_end_request = jiffies,},} /* * Per process-grouping structure */ struct cfq_queue { /* reference count */ int ref; /* various state flags, see below */ unsigned int flags; /* parent cfq_data */ struct cfq_data *cfqd; /* service_tree member */ struct rb_node rb_node; /* service_tree key */ unsigned long rb_key; /* prio tree member */ struct rb_node p_node; /* prio tree root we belong to, if any */ struct rb_root *p_root; /* sorted list of pending requests */ struct rb_root sort_list; /* if fifo isn't expired, next request to serve */ struct request *next_rq; /* requests queued in sort_list */ int queued[2]; /* currently allocated requests */ int allocated[2]; /* fifo list of requests in sort_list */ struct list_head fifo; /* time when queue got scheduled in to dispatch first request. */ unsigned long dispatch_start; unsigned int allocated_slice; unsigned int slice_dispatch; /* time when first request from queue completed and slice started. 
*/ unsigned long slice_start; unsigned long slice_end; long slice_resid; /* pending priority requests */ int prio_pending; /* number of requests that are on the dispatch list or inside driver */ int dispatched; /* io prio of this group */ unsigned short ioprio, org_ioprio; unsigned short ioprio_class; pid_t pid; u32 seek_history; sector_t last_request_pos; struct cfq_rb_root *service_tree; struct cfq_queue *new_cfqq; struct cfq_group *cfqg; /* Number of sectors dispatched from queue in single dispatch round */ unsigned long nr_sectors; }; /* * First index in the service_trees. * IDLE is handled separately, so it has negative index */ enum wl_prio_t { BE_WORKLOAD = 0, RT_WORKLOAD = 1, IDLE_WORKLOAD = 2, CFQ_PRIO_NR, }; /* * Second index in the service_trees. */ enum wl_type_t { ASYNC_WORKLOAD = 0, SYNC_NOIDLE_WORKLOAD = 1, SYNC_WORKLOAD = 2 }; /* This is per cgroup per device grouping structure */ struct cfq_group { /* group service_tree member */ struct rb_node rb_node; /* group service_tree key */ u64 vdisktime; unsigned int weight; unsigned int new_weight; bool needs_update; /* number of cfqq currently on this group */ int nr_cfqq; /* * Per group busy queues average. Useful for workload slice calc. We * create the array for each prio class but at run time it is used * only for RT and BE class and slot for IDLE class remains unused. * This is primarily done to avoid confusion and a gcc warning. */ unsigned int busy_queues_avg[CFQ_PRIO_NR]; /* * rr lists of queues with requests. We maintain service trees for * RT and BE classes. These trees are subdivided in subclasses * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE * class there is no subclassification and all the cfq queues go on * a single tree service_tree_idle. 
* Counts are embedded in the cfq_rb_root */ struct cfq_rb_root service_trees[2][3]; struct cfq_rb_root service_tree_idle; unsigned long saved_workload_slice; enum wl_type_t saved_workload; enum wl_prio_t saved_serving_prio; struct blkio_group blkg; #ifdef CONFIG_CFQ_GROUP_IOSCHED struct hlist_node cfqd_node; int ref; #endif /* number of requests that are on the dispatch list or inside driver */ int dispatched; struct cfq_ttime ttime; }; struct cfq_io_cq { struct io_cq icq; /* must be the first member */ struct cfq_queue *cfqq[2]; struct cfq_ttime ttime; }; /* * Per block device queue structure */ struct cfq_data { struct request_queue *queue; /* Root service tree for cfq_groups */ struct cfq_rb_root grp_service_tree; struct cfq_group root_group; /* * The priority currently being served */ enum wl_prio_t serving_prio; enum wl_type_t serving_type; unsigned long workload_expires; struct cfq_group *serving_group; unsigned int nr_urgent_pending; unsigned int nr_urgent_in_flight; /* * Each priority tree is sorted by next_request position. These * trees are used when determining if two or more queues are * interleaving requests (see cfq_close_cooperator). 
*/ struct rb_root prio_trees[CFQ_PRIO_LISTS]; unsigned int busy_queues; unsigned int busy_sync_queues; int rq_in_driver; int rq_in_flight[2]; /* * queue-depth detection */ int rq_queued; int hw_tag; /* * hw_tag can be * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection) * 1 => NCQ is present (hw_tag_est_depth is the estimated max depth) * 0 => no NCQ */ int hw_tag_est_depth; unsigned int hw_tag_samples; /* * idle window management */ struct timer_list idle_slice_timer; struct work_struct unplug_work; struct cfq_queue *active_queue; struct cfq_io_cq *active_cic; /* * async queue for each priority case */ struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR]; struct cfq_queue *async_idle_cfqq; sector_t last_position; /* * tunables, see top of file */ unsigned int cfq_quantum; unsigned int cfq_fifo_expire[2]; unsigned int cfq_back_penalty; unsigned int cfq_back_max; unsigned int cfq_slice[2]; unsigned int cfq_slice_async_rq; unsigned int cfq_slice_idle; unsigned int cfq_group_idle; unsigned int cfq_latency; unsigned int cfq_target_latency; /* * Fallback dummy cfqq for extreme OOM conditions */ struct cfq_queue oom_cfqq; unsigned long last_delayed_sync; /* List of cfq groups being managed on this device*/ struct hlist_head cfqg_list; /* Number of groups which are on blkcg->blkg_list */ unsigned int nr_blkcg_linked_grps; }; static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd); static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg, enum wl_prio_t prio, enum wl_type_t type) { if (!cfqg) return NULL; if (prio == IDLE_WORKLOAD) return &cfqg->service_tree_idle; return &cfqg->service_trees[prio][type]; } enum cfqq_state_flags { CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */ CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */ CFQ_CFQQ_FLAG_must_dispatch, /* must be allowed a dispatch */ CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */ CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */ 
CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */ CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */ CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */ CFQ_CFQQ_FLAG_sync, /* synchronous queue */ CFQ_CFQQ_FLAG_coop, /* cfqq is shared */ CFQ_CFQQ_FLAG_split_coop, /* shared cfqq will be splitted */ CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */ CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */ }; #define CFQ_CFQQ_FNS(name) \ static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \ { \ (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \ } \ static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \ { \ (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \ } \ static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \ { \ return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \ } CFQ_CFQQ_FNS(on_rr); CFQ_CFQQ_FNS(wait_request); CFQ_CFQQ_FNS(must_dispatch); CFQ_CFQQ_FNS(must_alloc_slice); CFQ_CFQQ_FNS(fifo_expire); CFQ_CFQQ_FNS(idle_window); CFQ_CFQQ_FNS(prio_changed); CFQ_CFQQ_FNS(slice_new); CFQ_CFQQ_FNS(sync); CFQ_CFQQ_FNS(coop); CFQ_CFQQ_FNS(split_coop); CFQ_CFQQ_FNS(deep); CFQ_CFQQ_FNS(wait_busy); #undef CFQ_CFQQ_FNS #ifdef CONFIG_CFQ_GROUP_IOSCHED #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \ blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \ cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \ blkg_path(&(cfqq)->cfqg->blkg), ##args) #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) \ blk_add_trace_msg((cfqd)->queue, "%s " fmt, \ blkg_path(&(cfqg)->blkg), ##args) \ #else #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \ blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args) #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0) #endif #define cfq_log(cfqd, fmt, args...) 
\ blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args) /* Traverses through cfq group service trees */ #define for_each_cfqg_st(cfqg, i, j, st) \ for (i = 0; i <= IDLE_WORKLOAD; i++) \ for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\ : &cfqg->service_tree_idle; \ (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \ (i == IDLE_WORKLOAD && j == 0); \ j++, st = i < IDLE_WORKLOAD ? \ &cfqg->service_trees[i][j]: NULL) \ static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd, struct cfq_ttime *ttime, bool group_idle) { unsigned long slice; if (!sample_valid(ttime->ttime_samples)) return false; if (group_idle) slice = cfqd->cfq_group_idle; else slice = cfqd->cfq_slice_idle; return ttime->ttime_mean > slice; } static inline bool iops_mode(struct cfq_data *cfqd) { /* * If we are not idling on queues and it is a NCQ drive, parallel * execution of requests is on and measuring time is not possible * in most of the cases until and unless we drive shallower queue * depths and that becomes a performance bottleneck. In such cases * switch to start providing fairness in terms of number of IOs. 
*/ if (!cfqd->cfq_slice_idle && cfqd->hw_tag) return true; else return false; } static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq) { if (cfq_class_idle(cfqq)) return IDLE_WORKLOAD; if (cfq_class_rt(cfqq)) return RT_WORKLOAD; return BE_WORKLOAD; } static enum wl_type_t cfqq_type(struct cfq_queue *cfqq) { if (!cfq_cfqq_sync(cfqq)) return ASYNC_WORKLOAD; if (!cfq_cfqq_idle_window(cfqq)) return SYNC_NOIDLE_WORKLOAD; return SYNC_WORKLOAD; } static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl, struct cfq_data *cfqd, struct cfq_group *cfqg) { if (wl == IDLE_WORKLOAD) return cfqg->service_tree_idle.count; return cfqg->service_trees[wl][ASYNC_WORKLOAD].count + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count + cfqg->service_trees[wl][SYNC_WORKLOAD].count; } static inline int cfqg_busy_async_queues(struct cfq_data *cfqd, struct cfq_group *cfqg) { return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count; } static void cfq_dispatch_insert(struct request_queue *, struct request *); static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool, struct io_context *, gfp_t); static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq) { /* cic->icq is the first member, %NULL will convert to %NULL */ return container_of(icq, struct cfq_io_cq, icq); } static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc) { if (ioc) return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue)); return NULL; } static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync) { return cic->cfqq[is_sync]; } static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq, bool is_sync) { cic->cfqq[is_sync] = cfqq; } static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic) { return cic->icq.q->elevator->elevator_data; } /* * We regard a request as SYNC, if it's either a read or has the SYNC bit * set (in which case it could also be direct WRITE). 
*/ static inline bool cfq_bio_sync(struct bio *bio) { return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC); } /* * scheduler run of queue, if there are requests pending and no one in the * driver that will restart queueing */ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) { if (cfqd->busy_queues) { cfq_log(cfqd, "schedule dispatch"); kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work); } } /* * Scale schedule slice based on io priority. Use the sync time slice only * if a queue is marked sync and has sync io queued. A sync queue with async * io only, should not get full sync slice length. */ static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync, unsigned short prio) { const int base_slice = cfqd->cfq_slice[sync]; WARN_ON(prio >= IOPRIO_BE_NR); return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio)); } static inline int cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) { return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio); } static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg) { u64 d = delta << CFQ_SERVICE_SHIFT; d = d * BLKIO_WEIGHT_DEFAULT; do_div(d, cfqg->weight); return d; } static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime) { s64 delta = (s64)(vdisktime - min_vdisktime); if (delta > 0) min_vdisktime = vdisktime; return min_vdisktime; } static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime) { s64 delta = (s64)(vdisktime - min_vdisktime); if (delta < 0) min_vdisktime = vdisktime; return min_vdisktime; } static void update_min_vdisktime(struct cfq_rb_root *st) { struct cfq_group *cfqg; if (st->left) { cfqg = rb_entry_cfqg(st->left); st->min_vdisktime = max_vdisktime(st->min_vdisktime, cfqg->vdisktime); } } /* * get averaged number of queues of RT/BE priority. 
* average is updated, with a formula that gives more weight to higher numbers, * to quickly follows sudden increases and decrease slowly */ static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd, struct cfq_group *cfqg, bool rt) { unsigned min_q, max_q; unsigned mult = cfq_hist_divisor - 1; unsigned round = cfq_hist_divisor / 2; unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg); min_q = min(cfqg->busy_queues_avg[rt], busy); max_q = max(cfqg->busy_queues_avg[rt], busy); cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) / cfq_hist_divisor; return cfqg->busy_queues_avg[rt]; } static inline unsigned cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg) { struct cfq_rb_root *st = &cfqd->grp_service_tree; return cfqd->cfq_target_latency * cfqg->weight / st->total_weight; } static inline unsigned cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) { unsigned slice = cfq_prio_to_slice(cfqd, cfqq); if (cfqd->cfq_latency) { /* * interested queues (we consider only the ones with the same * priority class in the cfq group) */ unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg, cfq_class_rt(cfqq)); unsigned sync_slice = cfqd->cfq_slice[1]; unsigned expect_latency = sync_slice * iq; unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg); if (expect_latency > group_slice) { unsigned base_low_slice = 2 * cfqd->cfq_slice_idle; /* scale low_slice according to IO priority * and sync vs async */ unsigned low_slice = min(slice, base_low_slice * slice / sync_slice); /* the adapted slice value is scaled to fit all iqs * into the target latency */ slice = max(slice * group_slice / expect_latency, low_slice); } } return slice; } static inline void cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) { unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq); cfqq->slice_start = jiffies; cfqq->slice_end = jiffies + slice; cfqq->allocated_slice = slice; cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies); 
} /* * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end * isn't valid until the first request from the dispatch is activated * and the slice time set. */ static inline bool cfq_slice_used(struct cfq_queue *cfqq) { if (cfq_cfqq_slice_new(cfqq)) return false; if (time_before(jiffies, cfqq->slice_end)) return false; return true; } /* * Lifted from AS - choose which of rq1 and rq2 that is best served now. * We choose the request that is closest to the head right now. Distance * behind the head is penalized and only allowed to a certain extent. */ static struct request * cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last) { sector_t s1, s2, d1 = 0, d2 = 0; unsigned long back_max; #define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */ #define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */ unsigned wrap = 0; /* bit mask: requests behind the disk head? */ if (rq1 == NULL || rq1 == rq2) return rq2; if (rq2 == NULL) return rq1; if (rq_is_sync(rq1) != rq_is_sync(rq2)) return rq_is_sync(rq1) ? rq1 : rq2; if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO) return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2; s1 = blk_rq_pos(rq1); s2 = blk_rq_pos(rq2); /* * by definition, 1KiB is 2 sectors */ back_max = cfqd->cfq_back_max * 2; /* * Strict one way elevator _except_ in the case where we allow * short backward seeks which are biased as twice the cost of a * similar forward seek. */ if (s1 >= last) d1 = s1 - last; else if (s1 + back_max >= last) d1 = (last - s1) * cfqd->cfq_back_penalty; else wrap |= CFQ_RQ1_WRAP; if (s2 >= last) d2 = s2 - last; else if (s2 + back_max >= last) d2 = (last - s2) * cfqd->cfq_back_penalty; else wrap |= CFQ_RQ2_WRAP; /* Found required data */ /* * By doing switch() on the bit mask "wrap" we avoid having to * check two variables for all permutations: --> faster! 
*/ switch (wrap) { case 0: /* common case for CFQ: rq1 and rq2 not wrapped */ if (d1 < d2) return rq1; else if (d2 < d1) return rq2; else { if (s1 >= s2) return rq1; else return rq2; } case CFQ_RQ2_WRAP: return rq1; case CFQ_RQ1_WRAP: return rq2; case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */ default: /* * Since both rqs are wrapped, * start with the one that's further behind head * (--> only *one* back seek required), * since back seek takes more time than forward. */ if (s1 <= s2) return rq1; else return rq2; } } /* * The below is leftmost cache rbtree addon */ static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root) { /* Service tree is empty */ if (!root->count) return NULL; if (!root->left) root->left = rb_first(&root->rb); if (root->left) return rb_entry(root->left, struct cfq_queue, rb_node); return NULL; } static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root) { if (!root->left) root->left = rb_first(&root->rb); if (root->left) return rb_entry_cfqg(root->left); return NULL; } static void rb_erase_init(struct rb_node *n, struct rb_root *root) { rb_erase(n, root); RB_CLEAR_NODE(n); } static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root) { if (root->left == n) root->left = NULL; rb_erase_init(n, &root->rb); --root->count; } /* * would be nice to take fifo expire time into account as well */ static struct request * cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq, struct request *last) { struct rb_node *rbnext = rb_next(&last->rb_node); struct rb_node *rbprev = rb_prev(&last->rb_node); struct request *next = NULL, *prev = NULL; BUG_ON(RB_EMPTY_NODE(&last->rb_node)); if (rbprev) prev = rb_entry_rq(rbprev); if (rbnext) next = rb_entry_rq(rbnext); else { rbnext = rb_first(&cfqq->sort_list); if (rbnext && rbnext != &last->rb_node) next = rb_entry_rq(rbnext); } return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last)); } static unsigned long cfq_slice_offset(struct cfq_data *cfqd, struct cfq_queue *cfqq) 
{ /* * just an approximation, should be ok. */ return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) - cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio)); } static inline s64 cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg) { return cfqg->vdisktime - st->min_vdisktime; } static void __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg) { struct rb_node **node = &st->rb.rb_node; struct rb_node *parent = NULL; struct cfq_group *__cfqg; s64 key = cfqg_key(st, cfqg); int left = 1; while (*node != NULL) { parent = *node; __cfqg = rb_entry_cfqg(parent); if (key < cfqg_key(st, __cfqg)) node = &parent->rb_left; else { node = &parent->rb_right; left = 0; } } if (left) st->left = &cfqg->rb_node; rb_link_node(&cfqg->rb_node, parent, node); rb_insert_color(&cfqg->rb_node, &st->rb); } static void cfq_update_group_weight(struct cfq_group *cfqg) { BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node)); if (cfqg->needs_update) { cfqg->weight = cfqg->new_weight; cfqg->needs_update = false; } } static void cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg) { BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node)); cfq_update_group_weight(cfqg); __cfq_group_service_tree_add(st, cfqg); st->total_weight += cfqg->weight; } static void cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg) { struct cfq_rb_root *st = &cfqd->grp_service_tree; struct cfq_group *__cfqg; struct rb_node *n; cfqg->nr_cfqq++; if (!RB_EMPTY_NODE(&cfqg->rb_node)) return; /* * Currently put the group at the end. Later implement something * so that groups get lesser vtime based on their weights, so that * if group does not loose all if it was not continuously backlogged. 
*/ n = rb_last(&st->rb); if (n) { __cfqg = rb_entry_cfqg(n); cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY; } else cfqg->vdisktime = st->min_vdisktime; cfq_group_service_tree_add(st, cfqg); } static void cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg) { st->total_weight -= cfqg->weight; if (!RB_EMPTY_NODE(&cfqg->rb_node)) cfq_rb_erase(&cfqg->rb_node, st); } static void cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg) { struct cfq_rb_root *st = &cfqd->grp_service_tree; BUG_ON(cfqg->nr_cfqq < 1); cfqg->nr_cfqq--; /* If there are other cfq queues under this group, don't delete it */ if (cfqg->nr_cfqq) return; cfq_log_cfqg(cfqd, cfqg, "del_from_rr group"); cfq_group_service_tree_del(st, cfqg); cfqg->saved_workload_slice = 0; cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1); } static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq, unsigned int *unaccounted_time) { unsigned int slice_used; /* * Queue got expired before even a single request completed or * got expired immediately after first request completion. */ if (!cfqq->slice_start || cfqq->slice_start == jiffies) { /* * Also charge the seek time incurred to the group, otherwise * if there are mutiple queues in the group, each can dispatch * a single request on seeky media and cause lots of seek time * and group will never know it. 
*/ slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start), 1); } else { slice_used = jiffies - cfqq->slice_start; if (slice_used > cfqq->allocated_slice) { *unaccounted_time = slice_used - cfqq->allocated_slice; slice_used = cfqq->allocated_slice; } if (time_after(cfqq->slice_start, cfqq->dispatch_start)) *unaccounted_time += cfqq->slice_start - cfqq->dispatch_start; } return slice_used; } static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg, struct cfq_queue *cfqq) { struct cfq_rb_root *st = &cfqd->grp_service_tree; unsigned int used_sl, charge, unaccounted_sl = 0; int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg) - cfqg->service_tree_idle.count; BUG_ON(nr_sync < 0); used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl); if (iops_mode(cfqd)) charge = cfqq->slice_dispatch; else if (!cfq_cfqq_sync(cfqq) && !nr_sync) charge = cfqq->allocated_slice; /* Can't update vdisktime while group is on service tree */ cfq_group_service_tree_del(st, cfqg); cfqg->vdisktime += cfq_scale_slice(charge, cfqg); /* If a new weight was requested, update now, off tree */ cfq_group_service_tree_add(st, cfqg); /* This group is being expired. 
Save the context */ if (time_after(cfqd->workload_expires, jiffies)) { cfqg->saved_workload_slice = cfqd->workload_expires - jiffies; cfqg->saved_workload = cfqd->serving_type; cfqg->saved_serving_prio = cfqd->serving_prio; } else cfqg->saved_workload_slice = 0; cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, st->min_vdisktime); cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u sect=%lu", used_sl, cfqq->slice_dispatch, charge, iops_mode(cfqd), cfqq->nr_sectors); cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl, unaccounted_sl); cfq_blkiocg_set_start_empty_time(&cfqg->blkg); } #ifdef CONFIG_CFQ_GROUP_IOSCHED static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg) { if (blkg) return container_of(blkg, struct cfq_group, blkg); return NULL; } static void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg, unsigned int weight) { struct cfq_group *cfqg = cfqg_of_blkg(blkg); cfqg->new_weight = weight; cfqg->needs_update = true; } static void cfq_init_add_cfqg_lists(struct cfq_data *cfqd, struct cfq_group *cfqg, struct blkio_cgroup *blkcg) { struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info; unsigned int major, minor; /* * Add group onto cgroup list. It might happen that bdi->dev is * not initialized yet. Initialize this new group without major * and minor info and this info will be filled in once a new thread * comes for IO. */ if (bdi->dev) { sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor); cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd, MKDEV(major, minor)); } else cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd, 0); cfqd->nr_blkcg_linked_grps++; cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev); /* Add group on cfqd list */ hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list); } /* * Should be called from sleepable context. 
No request queue lock as per * cpu stats are allocated dynamically and alloc_percpu needs to be called * from sleepable context. */ static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd) { struct cfq_group *cfqg = NULL; int i, j, ret; struct cfq_rb_root *st; cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node); if (!cfqg) return NULL; for_each_cfqg_st(cfqg, i, j, st) *st = CFQ_RB_ROOT; RB_CLEAR_NODE(&cfqg->rb_node); cfqg->ttime.last_end_request = jiffies; /* * Take the initial reference that will be released on destroy * This can be thought of a joint reference by cgroup and * elevator which will be dropped by either elevator exit * or cgroup deletion path depending on who is exiting first. */ cfqg->ref = 1; ret = blkio_alloc_blkg_stats(&cfqg->blkg); if (ret) { kfree(cfqg); return NULL; } return cfqg; } static struct cfq_group * cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg) { struct cfq_group *cfqg = NULL; void *key = cfqd; struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info; unsigned int major, minor; /* * This is the common case when there are no blkio cgroups. * Avoid lookup in this case */ if (blkcg == &blkio_root_cgroup) cfqg = &cfqd->root_group; else cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key)); if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) { sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor); cfqg->blkg.dev = MKDEV(major, minor); } return cfqg; } /* * Search for the cfq group current task belongs to. request_queue lock must * be held. */ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd) { struct blkio_cgroup *blkcg; struct cfq_group *cfqg = NULL, *__cfqg = NULL; struct request_queue *q = cfqd->queue; rcu_read_lock(); blkcg = task_blkio_cgroup(current); cfqg = cfq_find_cfqg(cfqd, blkcg); if (cfqg) { rcu_read_unlock(); return cfqg; } /* * Need to allocate a group. Allocation of group also needs allocation * of per cpu stats which in-turn takes a mutex() and can block. 
Hence * we need to drop rcu lock and queue_lock before we call alloc. * * Not taking any queue reference here and assuming that queue is * around by the time we return. CFQ queue allocation code does * the same. It might be racy though. */ rcu_read_unlock(); spin_unlock_irq(q->queue_lock); cfqg = cfq_alloc_cfqg(cfqd); spin_lock_irq(q->queue_lock); rcu_read_lock(); blkcg = task_blkio_cgroup(current); /* * If some other thread already allocated the group while we were * not holding queue lock, free up the group */ __cfqg = cfq_find_cfqg(cfqd, blkcg); if (__cfqg) { kfree(cfqg); rcu_read_unlock(); return __cfqg; } if (!cfqg) cfqg = &cfqd->root_group; cfq_init_add_cfqg_lists(cfqd, cfqg, blkcg); rcu_read_unlock(); return cfqg; } static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg) { cfqg->ref++; return cfqg; } static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) { /* Currently, all async queues are mapped to root group */ if (!cfq_cfqq_sync(cfqq)) cfqg = &cfqq->cfqd->root_group; cfqq->cfqg = cfqg; /* cfqq reference on cfqg */ cfqq->cfqg->ref++; } static void cfq_put_cfqg(struct cfq_group *cfqg) { struct cfq_rb_root *st; int i, j; BUG_ON(cfqg->ref <= 0); cfqg->ref--; if (cfqg->ref) return; for_each_cfqg_st(cfqg, i, j, st) BUG_ON(!RB_EMPTY_ROOT(&st->rb)); free_percpu(cfqg->blkg.stats_cpu); kfree(cfqg); } static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg) { /* Something wrong if we are trying to remove same group twice */ BUG_ON(hlist_unhashed(&cfqg->cfqd_node)); hlist_del_init(&cfqg->cfqd_node); BUG_ON(cfqd->nr_blkcg_linked_grps <= 0); cfqd->nr_blkcg_linked_grps--; /* * Put the reference taken at the time of creation so that when all * queues are gone, group can be destroyed. 
*/ cfq_put_cfqg(cfqg); } static void cfq_release_cfq_groups(struct cfq_data *cfqd) { struct hlist_node *pos, *n; struct cfq_group *cfqg; hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) { /* * If cgroup removal path got to blk_group first and removed * it from cgroup list, then it will take care of destroying * cfqg also. */ if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg)) cfq_destroy_cfqg(cfqd, cfqg); } } /* * Blk cgroup controller notification saying that blkio_group object is being * delinked as associated cgroup object is going away. That also means that * no new IO will come in this group. So get rid of this group as soon as * any pending IO in the group is finished. * * This function is called under rcu_read_lock(). key is the rcu protected * pointer. That means "key" is a valid cfq_data pointer as long as we are rcu * read lock. * * "key" was fetched from blkio_group under blkio_cgroup->lock. That means * it should not be NULL as even if elevator was exiting, cgroup deltion * path got to it first. */ static void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg) { unsigned long flags; struct cfq_data *cfqd = key; spin_lock_irqsave(cfqd->queue->queue_lock, flags); cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg)); spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); } #else /* GROUP_IOSCHED */ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd) { return &cfqd->root_group; } static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg) { return cfqg; } static inline void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) { cfqq->cfqg = cfqg; } static void cfq_release_cfq_groups(struct cfq_data *cfqd) {} static inline void cfq_put_cfqg(struct cfq_group *cfqg) {} #endif /* GROUP_IOSCHED */ /* * The cfqd->service_trees holds all pending cfq_queue's that have * requests waiting to be processed. It is sorted in the order that * we will service the queues. 
 */
/*
 * (Re)insert @cfqq into its service tree at a position determined by its
 * computed service-time key (rb_key).  @add_front queues it ahead of the
 * current leftmost entry; idle-class queues always go to the back.
 */
static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                                 bool add_front)
{
    struct rb_node **p, *parent;
    struct cfq_queue *__cfqq;
    unsigned long rb_key;
    struct cfq_rb_root *service_tree;
    int left;
    int new_cfqq = 1;

    service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
                                    cfqq_type(cfqq));
    if (cfq_class_idle(cfqq)) {
        /* Idle class: always sort behind the current last entry. */
        rb_key = CFQ_IDLE_DELAY;
        parent = rb_last(&service_tree->rb);
        if (parent && parent != &cfqq->rb_node) {
            __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
            rb_key += __cfqq->rb_key;
        } else
            rb_key += jiffies;
    } else if (!add_front) {
        /*
         * Get our rb key offset. Subtract any residual slice
         * value carried from last service. A negative resid
         * count indicates slice overrun, and this should position
         * the next service time further away in the tree.
         */
        rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
        rb_key -= cfqq->slice_resid;
        cfqq->slice_resid = 0;
    } else {
        /* Front insertion: key just below the current leftmost entry. */
        rb_key = -HZ;
        __cfqq = cfq_rb_first(service_tree);
        rb_key += __cfqq ? __cfqq->rb_key : jiffies;
    }

    if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
        new_cfqq = 0;
        /*
         * same position, nothing more to do
         */
        if (rb_key == cfqq->rb_key &&
            cfqq->service_tree == service_tree)
            return;

        cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
        cfqq->service_tree = NULL;
    }

    left = 1;
    parent = NULL;
    cfqq->service_tree = service_tree;
    p = &service_tree->rb.rb_node;
    while (*p) {
        struct rb_node **n;

        parent = *p;
        __cfqq = rb_entry(parent, struct cfq_queue, rb_node);

        /*
         * sort by key, that represents service time.
         */
        if (time_before(rb_key, __cfqq->rb_key))
            n = &(*p)->rb_left;
        else {
            n = &(*p)->rb_right;
            left = 0;
        }

        p = n;
    }

    /* Cache the leftmost (next-to-serve) node for O(1) retrieval. */
    if (left)
        service_tree->left = &cfqq->rb_node;

    cfqq->rb_key = rb_key;
    rb_link_node(&cfqq->rb_node, parent, p);
    rb_insert_color(&cfqq->rb_node, &service_tree->rb);
    service_tree->count++;
    if (add_front || !new_cfqq)
        return;
    cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
}

/*
 * Look up, in the per-priority "prio tree", the queue whose next request
 * starts at @sector.  On a miss, return NULL and report the last visited
 * parent (and optionally the link slot) so the caller can insert there.
 */
static struct cfq_queue *
cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
                     sector_t sector, struct rb_node **ret_parent,
                     struct rb_node ***rb_link)
{
    struct rb_node **p, *parent;
    struct cfq_queue *cfqq = NULL;

    parent = NULL;
    p = &root->rb_node;
    while (*p) {
        struct rb_node **n;

        parent = *p;
        cfqq = rb_entry(parent, struct cfq_queue, p_node);

        /*
         * Sort strictly based on sector. Smallest to the left,
         * largest to the right.
         */
        if (sector > blk_rq_pos(cfqq->next_rq))
            n = &(*p)->rb_right;
        else if (sector < blk_rq_pos(cfqq->next_rq))
            n = &(*p)->rb_left;
        else
            break;
        p = n;
        cfqq = NULL;
    }

    *ret_parent = parent;
    if (rb_link)
        *rb_link = p;
    return cfqq;
}

/*
 * Re-key @cfqq in its priority tree by the sector of its next request,
 * so close-cooperator lookups can find queues working near each other.
 * Idle-class queues and queues with no pending request are left out.
 */
static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
    struct rb_node **p, *parent;
    struct cfq_queue *__cfqq;

    if (cfqq->p_root) {
        rb_erase(&cfqq->p_node, cfqq->p_root);
        cfqq->p_root = NULL;
    }

    if (cfq_class_idle(cfqq))
        return;
    if (!cfqq->next_rq)
        return;

    cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
    __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
                                  blk_rq_pos(cfqq->next_rq), &parent, &p);
    if (!__cfqq) {
        rb_link_node(&cfqq->p_node, parent, p);
        rb_insert_color(&cfqq->p_node, cfqq->p_root);
    } else
        /* Another queue already occupies this sector slot; stay out. */
        cfqq->p_root = NULL;
}

/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
    /*
     * Resorting requires the cfqq to be on the RR list already.
     */
    if (cfq_cfqq_on_rr(cfqq)) {
        cfq_service_tree_add(cfqd, cfqq, 0);
        cfq_prio_tree_add(cfqd, cfqq);
    }
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
    cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
    BUG_ON(cfq_cfqq_on_rr(cfqq));
    cfq_mark_cfqq_on_rr(cfqq);
    cfqd->busy_queues++;
    if (cfq_cfqq_sync(cfqq))
        cfqd->busy_sync_queues++;

    cfq_resort_rr_list(cfqd, cfqq);
}

/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
    cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
    BUG_ON(!cfq_cfqq_on_rr(cfqq));
    cfq_clear_cfqq_on_rr(cfqq);

    if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
        cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
        cfqq->service_tree = NULL;
    }
    if (cfqq->p_root) {
        rb_erase(&cfqq->p_node, cfqq->p_root);
        cfqq->p_root = NULL;
    }

    cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
    BUG_ON(!cfqd->busy_queues);
    cfqd->busy_queues--;
    if (cfq_cfqq_sync(cfqq))
        cfqd->busy_sync_queues--;
}

/*
 * rb tree support functions
 */
/* Remove @rq from its queue's sort_list and drop the queued[] count. */
static void cfq_del_rq_rb(struct request *rq)
{
    struct cfq_queue *cfqq = RQ_CFQQ(rq);
    const int sync = rq_is_sync(rq);

    BUG_ON(!cfqq->queued[sync]);
    cfqq->queued[sync]--;

    elv_rb_del(&cfqq->sort_list, rq);

    if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
        /*
         * Queue will be deleted from service tree when we actually
         * expire it later. Right now just remove it from prio tree
         * as it is empty.
         */
        if (cfqq->p_root) {
            rb_erase(&cfqq->p_node, cfqq->p_root);
            cfqq->p_root = NULL;
        }
    }
}

/*
 * Insert @rq into its queue's sort_list, put the queue on the RR list if
 * needed, and re-evaluate the queue's next-to-serve request.
 */
static void cfq_add_rq_rb(struct request *rq)
{
    struct cfq_queue *cfqq = RQ_CFQQ(rq);
    struct cfq_data *cfqd = cfqq->cfqd;
    struct request *prev;

    cfqq->queued[rq_is_sync(rq)]++;
    elv_rb_add(&cfqq->sort_list, rq);

    if (!cfq_cfqq_on_rr(cfqq))
        cfq_add_cfqq_rr(cfqd, cfqq);

    /*
     * check if this request is a better next-serve candidate
     */
    prev = cfqq->next_rq;
    cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);

    /*
     * adjust priority tree position, if ->next_rq changes
     */
    if (prev != cfqq->next_rq)
        cfq_prio_tree_add(cfqd, cfqq);

    BUG_ON(!cfqq->next_rq);
}

/* Re-sort @rq after a front merge changed its position, fixing up stats. */
static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
    elv_rb_del(&cfqq->sort_list, rq);
    cfqq->queued[rq_is_sync(rq)]--;
    cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
                                       rq_data_dir(rq), rq_is_sync(rq));
    cfq_add_rq_rb(rq);
    cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
                    &cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
                    rq_is_sync(rq));
}

/*
 * Find a queued request that @bio could be front-merged into, i.e. one
 * starting exactly where the bio ends, in the current task's cfqq.
 */
static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
    struct task_struct *tsk = current;
    struct cfq_io_cq *cic;
    struct cfq_queue *cfqq;

    cic = cfq_cic_lookup(cfqd, tsk->io_context);
    if (!cic)
        return NULL;

    cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
    if (cfqq) {
        sector_t sector = bio->bi_sector + bio_sectors(bio);

        return elv_rb_find(&cfqq->sort_list, sector);
    }

    return NULL;
}

/* Elevator hook: @rq was handed to the driver; track depth and disk head. */
static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
    struct cfq_data *cfqd = q->elevator->elevator_data;

    cfqd->rq_in_driver++;
    cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
                 cfqd->rq_in_driver);

    /* Remember where the disk head will end up after this request. */
    cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
}

/* Elevator hook: @rq was requeued from the driver; undo the depth count. */
static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
    struct cfq_data *cfqd = q->elevator->elevator_data;

    WARN_ON(!cfqd->rq_in_driver);
    cfqd->rq_in_driver--;
    cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
                 cfqd->rq_in_driver);
}

/* Take @rq fully out of CFQ's bookkeeping (fifo, sort tree, stats). */
static void cfq_remove_request(struct request *rq)
{
    struct cfq_queue *cfqq = RQ_CFQQ(rq);

    if (cfqq->next_rq == rq)
        cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

    list_del_init(&rq->queuelist);
    cfq_del_rq_rb(rq);

    cfqq->cfqd->rq_queued--;
    cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
                                       rq_data_dir(rq), rq_is_sync(rq));
    if (rq->cmd_flags & REQ_PRIO) {
        WARN_ON(!cfqq->prio_pending);
        cfqq->prio_pending--;
    }
}

/*
 * Elevator merge hook: report a request that @bio may be front-merged
 * into, or ELEVATOR_NO_MERGE if none qualifies.
 */
static int cfq_merge(struct request_queue *q, struct request **req,
                     struct bio *bio)
{
    struct cfq_data *cfqd = q->elevator->elevator_data;
    struct request *__rq;

    __rq = cfq_find_rq_fmerge(cfqd, bio);
    if (__rq && elv_rq_merge_ok(__rq, bio)) {
        *req = __rq;
        return ELEVATOR_FRONT_MERGE;
    }

    return ELEVATOR_NO_MERGE;
}

/* After a front merge the request's start sector changed; re-sort it. */
static void cfq_merged_request(struct request_queue *q, struct request *req,
                               int type)
{
    if (type == ELEVATOR_FRONT_MERGE) {
        struct cfq_queue *cfqq = RQ_CFQQ(req);

        cfq_reposition_rq_rb(cfqq, req);
    }
}

/* Stats-only hook: a bio was merged into @req. */
static void cfq_bio_merged(struct request_queue *q, struct request *req,
                           struct bio *bio)
{
    cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
                                       bio_data_dir(bio), cfq_bio_sync(bio));
}

/* Two requests were merged; @next disappears into @rq. */
static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
                    struct request *next)
{
    struct cfq_queue *cfqq = RQ_CFQQ(rq);
    struct cfq_data *cfqd = q->elevator->elevator_data;

    /*
     * reposition in fifo if next is older than rq
     */
    if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
        time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
        list_move(&rq->queuelist, &next->queuelist);
        rq_set_fifo_time(rq, rq_fifo_time(next));
    }

    if (cfqq->next_rq == next)
        cfqq->next_rq = rq;
    cfq_remove_request(next);
    cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
                                       rq_data_dir(next), rq_is_sync(next));

    cfqq = RQ_CFQQ(next);
    /*
     * all requests of this queue are merged to other queues, delete it
     * from the service tree. If it's the active_queue,
     * cfq_dispatch_requests() will choose to expire it or do idle
     */
    if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
        cfqq != cfqd->active_queue)
        cfq_del_cfqq_rr(cfqd, cfqq);
}

/* May @bio be merged into @rq?  Only if both map to the same cfqq. */
static int cfq_allow_merge(struct request_queue *q, struct request *rq,
                           struct bio *bio)
{
    struct cfq_data *cfqd = q->elevator->elevator_data;
    struct cfq_io_cq *cic;
    struct cfq_queue *cfqq;

    /*
     * Disallow merge of a sync bio into an async request.
     */
    if (cfq_bio_sync(bio) && !rq_is_sync(rq))
        return false;

    /*
     * Lookup the cfqq that this bio will be queued with and allow
     * merge only if rq is queued there.
     */
    cic = cfq_cic_lookup(cfqd, current->io_context);
    if (!cic)
        return false;

    cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
    return cfqq == RQ_CFQQ(rq);
}

/* Cancel the idle-slice timer and account the idle time to @cfqq's group. */
static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
    del_timer(&cfqd->idle_slice_timer);
    cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
}

/* Install @cfqq (may be NULL) as the active queue with a fresh slice state. */
static void __cfq_set_active_queue(struct cfq_data *cfqd,
                                   struct cfq_queue *cfqq)
{
    if (cfqq) {
        cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
                     cfqd->serving_prio, cfqd->serving_type);
        cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
        cfqq->slice_start = 0;
        cfqq->dispatch_start = jiffies;
        cfqq->allocated_slice = 0;
        cfqq->slice_end = 0;
        cfqq->slice_dispatch = 0;
        cfqq->nr_sectors = 0;

        cfq_clear_cfqq_wait_request(cfqq);
        cfq_clear_cfqq_must_dispatch(cfqq);
        cfq_clear_cfqq_must_alloc_slice(cfqq);
        cfq_clear_cfqq_fifo_expire(cfqq);
        cfq_mark_cfqq_slice_new(cfqq);

        cfq_del_timer(cfqd, cfqq);
    }

    cfqd->active_queue = cfqq;
}

/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                    bool timed_out)
{
    cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);

    if (cfq_cfqq_wait_request(cfqq))
        cfq_del_timer(cfqd, cfqq);

    cfq_clear_cfqq_wait_request(cfqq);
    cfq_clear_cfqq_wait_busy(cfqq);

    /*
     * If this cfqq is shared between multiple processes,
     * check to
     * make sure that those processes are still issuing I/Os within
     * the mean seek distance. If not, it may be time to break the
     * queues apart again.
     */
    if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
        cfq_mark_cfqq_split_coop(cfqq);

    /*
     * store what was left of this slice, if the queue idled/timed out
     */
    if (timed_out) {
        if (cfq_cfqq_slice_new(cfqq))
            cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
        else
            cfqq->slice_resid = cfqq->slice_end - jiffies;
        cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
    }

    cfq_group_served(cfqd, cfqq->cfqg, cfqq);

    if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
        cfq_del_cfqq_rr(cfqd, cfqq);

    cfq_resort_rr_list(cfqd, cfqq);

    if (cfqq == cfqd->active_queue)
        cfqd->active_queue = NULL;

    if (cfqd->active_cic) {
        put_io_context(cfqd->active_cic->icq.ioc);
        cfqd->active_cic = NULL;
    }
}

/* Expire whatever queue is currently active, if any. */
static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
{
    struct cfq_queue *cfqq = cfqd->active_queue;

    if (cfqq)
        __cfq_slice_expired(cfqd, cfqq, timed_out);
}

/*
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
    struct cfq_rb_root *service_tree =
        service_tree_for(cfqd->serving_group, cfqd->serving_prio,
                         cfqd->serving_type);

    if (!cfqd->rq_queued)
        return NULL;

    /* There is nothing to dispatch */
    if (!service_tree)
        return NULL;
    if (RB_EMPTY_ROOT(&service_tree->rb))
        return NULL;
    return cfq_rb_first(service_tree);
}

/*
 * Forced variant: scan every service tree of the next group and return
 * the first queue found, ignoring the normal workload selection.
 */
static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
{
    struct cfq_group *cfqg;
    struct cfq_queue *cfqq;
    int i, j;
    struct cfq_rb_root *st;

    if (!cfqd->rq_queued)
        return NULL;

    cfqg = cfq_get_next_cfqg(cfqd);
    if (!cfqg)
        return NULL;

    for_each_cfqg_st(cfqg, i, j, st)
        if ((cfqq = cfq_rb_first(st)) != NULL)
            return cfqq;
    return NULL;
}

/*
 * Get and set a new active queue for service.
 */
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
                                              struct cfq_queue *cfqq)
{
    if (!cfqq)
        cfqq = cfq_get_next_queue(cfqd);

    __cfq_set_active_queue(cfqd, cfqq);
    return cfqq;
}

/* Absolute sector distance between @rq and the last dispatched position. */
static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
                                          struct request *rq)
{
    if (blk_rq_pos(rq) >= cfqd->last_position)
        return blk_rq_pos(rq) - cfqd->last_position;
    else
        return cfqd->last_position - blk_rq_pos(rq);
}

/* Is @rq within the "close" threshold of the current head position? */
static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                               struct request *rq)
{
    return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
}

/*
 * Find another queue (same org_ioprio) whose next request is close to the
 * current disk position — a candidate cooperator for @cur_cfqq.
 */
static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
                                    struct cfq_queue *cur_cfqq)
{
    struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
    struct rb_node *parent, *node;
    struct cfq_queue *__cfqq;
    sector_t sector = cfqd->last_position;

    if (RB_EMPTY_ROOT(root))
        return NULL;

    /*
     * First, if we find a request starting at the end of the last
     * request, choose it.
     */
    __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
    if (__cfqq)
        return __cfqq;

    /*
     * If the exact sector wasn't found, the parent of the NULL leaf
     * will contain the closest sector.
     */
    __cfqq = rb_entry(parent, struct cfq_queue, p_node);
    if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
        return __cfqq;

    /* Try the neighbor on the other side of the lookup point too. */
    if (blk_rq_pos(__cfqq->next_rq) < sector)
        node = rb_next(&__cfqq->p_node);
    else
        node = rb_prev(&__cfqq->p_node);
    if (!node)
        return NULL;

    __cfqq = rb_entry(node, struct cfq_queue, p_node);
    if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
        return __cfqq;

    return NULL;
}

/*
 * cfqd - obvious
 * cur_cfqq - passed in so that we don't decide that the current queue is
 *            closely cooperating with itself.
 *
 * So, basically we're assuming that cur_cfqq has dispatched at least
 * one request, and that cfqd->last_position reflects a position on the disk
 * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
 * assumption.
 */
static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
                                              struct cfq_queue *cur_cfqq)
{
    struct cfq_queue *cfqq;

    /* Only sync, non-seeky, non-idle-class queues are worth merging. */
    if (cfq_class_idle(cur_cfqq))
        return NULL;
    if (!cfq_cfqq_sync(cur_cfqq))
        return NULL;
    if (CFQQ_SEEKY(cur_cfqq))
        return NULL;

    /*
     * Don't search priority tree if it's the only queue in the group.
     */
    if (cur_cfqq->cfqg->nr_cfqq == 1)
        return NULL;

    /*
     * We should notice if some of the queues are cooperating, eg
     * working closely on the same area of the disk. In that case,
     * we can group them together and don't waste time idling.
     */
    cfqq = cfqq_close(cfqd, cur_cfqq);
    if (!cfqq)
        return NULL;

    /* If new queue belongs to different cfq_group, don't choose it */
    if (cur_cfqq->cfqg != cfqq->cfqg)
        return NULL;

    /*
     * It only makes sense to merge sync queues.
     */
    if (!cfq_cfqq_sync(cfqq))
        return NULL;
    if (CFQQ_SEEKY(cfqq))
        return NULL;

    /*
     * Do not merge queues of different priority classes
     */
    if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
        return NULL;

    return cfqq;
}

/*
 * Determine whether we should enforce idle window for this queue.
 */
static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
    enum wl_prio_t prio = cfqq_prio(cfqq);
    struct cfq_rb_root *service_tree = cfqq->service_tree;

    BUG_ON(!service_tree);
    BUG_ON(!service_tree->count);

    if (!cfqd->cfq_slice_idle)
        return false;

    /* We never do for idle class queues. */
    if (prio == IDLE_WORKLOAD)
        return false;

    /* We do for queues that were marked with idle window flag. */
    if (cfq_cfqq_idle_window(cfqq) &&
       !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
        return true;

    /*
     * Otherwise, we do only if they are the last ones
     * in their service tree.
     */
    if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
       !cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))
        return true;
    cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
                 service_tree->count);
    return false;
}

/*
 * Arm the idle-slice timer for the active queue (or for group idling),
 * unless device characteristics or process history make idling pointless.
 */
static void cfq_arm_slice_timer(struct cfq_data *cfqd)
{
    struct cfq_queue *cfqq = cfqd->active_queue;
    struct cfq_io_cq *cic;
    unsigned long sl, group_idle = 0;

    /*
     * SSD device without seek penalty, disable idling. But only do so
     * for devices that support queuing, otherwise we still have a problem
     * with sync vs async workloads.
     */
    if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
        return;

    WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
    WARN_ON(cfq_cfqq_slice_new(cfqq));

    /*
     * idle is disabled, either manually or by past process history
     */
    if (!cfq_should_idle(cfqd, cfqq)) {
        /* no queue idling. Check for group idling */
        if (cfqd->cfq_group_idle)
            group_idle = cfqd->cfq_group_idle;
        else
            return;
    }

    /*
     * still active requests from this queue, don't idle
     */
    if (cfqq->dispatched)
        return;

    /*
     * task has exited, don't wait
     */
    cic = cfqd->active_cic;
    if (!cic || !atomic_read(&cic->icq.ioc->nr_tasks))
        return;

    /*
     * If our average think time is larger than the remaining time
     * slice, then don't idle. This avoids overrunning the allotted
     * time slice.
     */
    if (sample_valid(cic->ttime.ttime_samples) &&
        (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
        cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
                     cic->ttime.ttime_mean);
        return;
    }

    /* There are other queues in the group, don't do group idle */
    if (group_idle && cfqq->cfqg->nr_cfqq > 1)
        return;

    cfq_mark_cfqq_wait_request(cfqq);

    if (group_idle)
        sl = cfqd->cfq_group_idle;
    else
        sl = cfqd->cfq_slice_idle;

    mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
    cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
    cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
                 group_idle ? 1 : 0);
}

/*
 * Move request from internal lists to the request queue dispatch list.
 */
static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
{
    struct cfq_data *cfqd = q->elevator->elevator_data;
    struct cfq_queue *cfqq = RQ_CFQQ(rq);

    cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");

    cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
    cfq_remove_request(rq);
    cfqq->dispatched++;
    (RQ_CFQG(rq))->dispatched++;
    elv_dispatch_sort(q, rq);

    /* Track urgent requests moving from pending to in-flight. */
    if (rq->cmd_flags & REQ_URGENT) {
        if (!cfqd->nr_urgent_pending)
            WARN_ON(1);
        else
            cfqd->nr_urgent_pending--;
        cfqd->nr_urgent_in_flight++;
    }

    cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
    cfqq->nr_sectors += blk_rq_sectors(rq);
    cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
                                      rq_data_dir(rq), rq_is_sync(rq));
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
    struct request *rq = NULL;

    /* Only one fifo-expired request is honored per slice. */
    if (cfq_cfqq_fifo_expire(cfqq))
        return NULL;

    cfq_mark_cfqq_fifo_expire(cfqq);

    if (list_empty(&cfqq->fifo))
        return NULL;

    rq = rq_entry_fifo(cfqq->fifo.next);
    if (time_before(jiffies, rq_fifo_time(rq)))
        rq = NULL;

    cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
    return rq;
}

/* Max requests an async queue may dispatch per slice, scaled by priority. */
static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
    const int base_rq = cfqd->cfq_slice_async_rq;

    WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

    return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
}

/*
 * Must be called with the queue_lock held.
 */
static int cfqq_process_refs(struct cfq_queue *cfqq)
{
    int process_refs, io_refs;

    /* References not attributable to allocated requests belong to processes. */
    io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
    process_refs = cfqq->ref - io_refs;
    BUG_ON(process_refs < 0);
    return process_refs;
}

/*
 * Schedule @cfqq and @new_cfqq for merging: link one to the other via
 * ->new_cfqq in the direction of the lesser amount of work, transferring
 * process references accordingly.
 */
static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
{
    int process_refs, new_process_refs;
    struct cfq_queue *__cfqq;

    /*
     * If there are no process references on the new_cfqq, then it is
     * unsafe to follow the ->new_cfqq chain as other cfqq's in the
     * chain may have dropped their last reference (not just their
     * last process reference).
     */
    if (!cfqq_process_refs(new_cfqq))
        return;

    /* Avoid a circular list and skip interim queue merges */
    while ((__cfqq = new_cfqq->new_cfqq)) {
        if (__cfqq == cfqq)
            return;
        new_cfqq = __cfqq;
    }

    process_refs = cfqq_process_refs(cfqq);
    new_process_refs = cfqq_process_refs(new_cfqq);
    /*
     * If the process for the cfqq has gone away, there is no
     * sense in merging the queues.
     */
    if (process_refs == 0 || new_process_refs == 0)
        return;

    /*
     * Merge in the direction of the lesser amount of work.
     */
    if (new_process_refs >= process_refs) {
        cfqq->new_cfqq = new_cfqq;
        new_cfqq->ref += process_refs;
    } else {
        new_cfqq->new_cfqq = cfqq;
        cfqq->ref += new_process_refs;
    }
}

/*
 * Pick the workload type (SYNC, SYNC_NOIDLE, ASYNC) within @prio that has
 * the queue with the lowest service-time key, i.e. the one due soonest.
 */
static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
                                    struct cfq_group *cfqg, enum wl_prio_t prio)
{
    struct cfq_queue *queue;
    int i;
    bool key_valid = false;
    unsigned long lowest_key = 0;
    enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;

    for (i = 0; i <= SYNC_WORKLOAD; ++i) {
        /* select the one with lowest rb_key */
        queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
        if (queue &&
            (!key_valid || time_before(queue->rb_key, lowest_key))) {
            lowest_key = queue->rb_key;
            cur_best = i;
            key_valid = true;
        }
    }

    return cur_best;
}

/*
 * Select the serving priority class and workload type for @cfqg, and
 * compute how long that workload may run before it expires.
 */
static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
    unsigned slice;
    unsigned count;
    struct cfq_rb_root *st;
    unsigned group_slice;
    enum wl_prio_t original_prio = cfqd->serving_prio;

    /* Choose next priority.
RT > BE > IDLE */
    if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
        cfqd->serving_prio = RT_WORKLOAD;
    else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
        cfqd->serving_prio = BE_WORKLOAD;
    else {
        cfqd->serving_prio = IDLE_WORKLOAD;
        cfqd->workload_expires = jiffies + 1;
        return;
    }

    if (original_prio != cfqd->serving_prio)
        goto new_workload;

    /*
     * For RT and BE, we have to choose also the type
     * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
     * expiration time
     */
    st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
    count = st->count;

    /*
     * check workload expiration, and that we still have other queues ready
     */
    if (count && !time_after(jiffies, cfqd->workload_expires))
        return;

new_workload:
    /* otherwise select new workload type */
    cfqd->serving_type =
        cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
    st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
    count = st->count;

    /*
     * the workload slice is computed as a fraction of target latency
     * proportional to the number of queues in that workload, over
     * all the queues in the same priority class
     */
    group_slice = cfq_group_slice(cfqd, cfqg);

    slice = group_slice * count /
        max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
              cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));

    if (cfqd->serving_type == ASYNC_WORKLOAD) {
        unsigned int tmp;

        /*
         * Async queues are currently system wide. Just taking
         * proportion of queues with-in same group will lead to higher
         * async ratio system wide as generally root group is going
         * to have higher weight. A more accurate thing would be to
         * calculate system wide async/sync ratio.
         */
        tmp = cfqd->cfq_target_latency *
            cfqg_busy_async_queues(cfqd, cfqg);
        tmp = tmp/cfqd->busy_queues;
        slice = min_t(unsigned, slice, tmp);

        /* async workload slice is scaled down according to
         * the sync/async slice ratio. */
        slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
    } else
        /* sync workload slice is at least 2 * cfq_slice_idle */
        slice = max(slice, 2 * cfqd->cfq_slice_idle);

    slice = max_t(unsigned, slice, CFQ_MIN_TT);
    cfq_log(cfqd, "workload slice:%d", slice);
    cfqd->workload_expires = jiffies + slice;
}

/* Return the group due for service next and refresh the min vdisktime. */
static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
{
    struct cfq_rb_root *st = &cfqd->grp_service_tree;
    struct cfq_group *cfqg;

    if (RB_EMPTY_ROOT(&st->rb))
        return NULL;
    cfqg = cfq_rb_first_group(st);
    update_min_vdisktime(st);
    return cfqg;
}

/*
 * Make the next due group the serving group, restoring any workload state
 * it saved when it was last descheduled, then pick its service tree.
 */
static void cfq_choose_cfqg(struct cfq_data *cfqd)
{
    struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);

    if (!cfqg)
        return;

    cfqd->serving_group = cfqg;

    /* Restore the workload type data */
    if (cfqg->saved_workload_slice) {
        cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
        cfqd->serving_type = cfqg->saved_workload;
        cfqd->serving_prio = cfqg->saved_serving_prio;
    } else
        cfqd->workload_expires = jiffies - 1;

    choose_service_tree(cfqd, cfqg);
}

/*
 * Select a queue for service. If we have a current active queue,
 * check whether to continue servicing it, or retrieve and set a new one.
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
    struct cfq_queue *cfqq, *new_cfqq = NULL;

    cfqq = cfqd->active_queue;
    if (!cfqq)
        goto new_queue;

    if (!cfqd->rq_queued)
        return NULL;

    /*
     * We were waiting for group to get backlogged. Expire the queue
     */
    if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
        goto expire;

    /*
     * The active queue has run out of time, expire it and select new.
     */
    if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
        /*
         * If slice had not expired at the completion of last request
         * we might not have turned on wait_busy flag. Don't expire
         * the queue yet. Allow the group to get backlogged.
         *
         * The very fact that we have used the slice, that means we
         * have been idling all along on this queue and it should be
         * ok to wait for this request to complete.
         */
        if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
            && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
            cfqq = NULL;
            goto keep_queue;
        } else
            goto check_group_idle;
    }

    /*
     * The active queue has requests and isn't expired, allow it to
     * dispatch.
     */
    if (!RB_EMPTY_ROOT(&cfqq->sort_list))
        goto keep_queue;

    /*
     * If another queue has a request waiting within our mean seek
     * distance, let it run. The expire code will check for close
     * cooperators and put the close queue at the front of the service
     * tree. If possible, merge the expiring queue with the new cfqq.
     */
    new_cfqq = cfq_close_cooperator(cfqd, cfqq);
    if (new_cfqq) {
        if (!cfqq->new_cfqq)
            cfq_setup_merge(cfqq, new_cfqq);
        goto expire;
    }

    /*
     * No requests pending. If the active queue still has requests in
     * flight or is idling for a new request, allow either of these
     * conditions to happen (or time out) before selecting a new queue.
     */
    if (timer_pending(&cfqd->idle_slice_timer)) {
        cfqq = NULL;
        goto keep_queue;
    }

    /*
     * This is a deep seek queue, but the device is much faster than
     * the queue can deliver, don't idle
     */
    if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
        (cfq_cfqq_slice_new(cfqq) ||
        (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
        cfq_clear_cfqq_deep(cfqq);
        cfq_clear_cfqq_idle_window(cfqq);
    }

    if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
        cfqq = NULL;
        goto keep_queue;
    }

    /*
     * If group idle is enabled and there are requests dispatched from
     * this group, wait for requests to complete.
     */
check_group_idle:
    if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
        cfqq->cfqg->dispatched &&
        !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
        cfqq = NULL;
        goto keep_queue;
    }

expire:
    cfq_slice_expired(cfqd, 0);
new_queue:
    /*
     * Current queue expired. Check if we have to switch to a new
     * service tree
     */
    if (!new_cfqq)
        cfq_choose_cfqg(cfqd);

    cfqq = cfq_set_active_queue(cfqd, new_cfqq);
keep_queue:
    return cfqq;
}

/* Dispatch every remaining request of @cfqq, then expire it explicitly. */
static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
    int dispatched = 0;

    while (cfqq->next_rq) {
        cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
        dispatched++;
    }

    BUG_ON(!list_empty(&cfqq->fifo));

    /* By default cfqq is not expired if it is empty. Do it explicitly */
    __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
    return dispatched;
}

/*
 * Drain our current requests. Used for barriers and when switching
 * io schedulers on-the-fly.
 */
static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
    struct cfq_queue *cfqq;
    int dispatched = 0;

    /* Expire the timeslice of the current active queue first */
    cfq_slice_expired(cfqd, 0);
    while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
        __cfq_set_active_queue(cfqd, cfqq);
        dispatched += __cfq_forced_dispatch_cfqq(cfqq);
    }

    BUG_ON(cfqd->busy_queues);

    cfq_log(cfqd, "forced_dispatch=%d", dispatched);
    return dispatched;
}

/* Estimate whether @cfqq's remaining slice will be eaten by in-flight IO. */
static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
    struct cfq_queue *cfqq)
{
    /* the queue hasn't finished any request, can't estimate */
    if (cfq_cfqq_slice_new(cfqq))
        return true;
    if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
        cfqq->slice_end))
        return true;

    return false;
}

/* Decide whether @cfqq may dispatch another request right now. */
static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
    unsigned int max_dispatch;

    /*
     * Drain async requests before we start sync IO
     */
    if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
        return false;

    /*
     * If this is an async queue and we have sync IO in flight, let it wait
     */
    if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
        return false;

    max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
    if (cfq_class_idle(cfqq))
        max_dispatch = 1;

    /*
     * Does this cfqq already have too much IO in flight?
     */
    if (cfqq->dispatched >= max_dispatch) {
        bool promote_sync = false;
        /*
         * idle queue must always only have a single IO in flight
         */
        if (cfq_class_idle(cfqq))
            return false;

        /*
         * If there is only one sync queue
         * we can ignore async queue here and give the sync
         * queue no dispatch limit. The reason is a sync queue can
         * preempt async queue, limiting the sync queue doesn't make
         * sense. This is useful for aiostress test.
         */
        if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
            promote_sync = true;

        /*
         * We have other queues, don't allow more IO from this one
         */
        if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
                !promote_sync)
            return false;

        /*
         * Sole queue user, no limit
         */
        if (cfqd->busy_queues == 1 || promote_sync)
            max_dispatch = -1;
        else
            /*
             * Normally we start throttling cfqq when cfq_quantum/2
             * requests have been dispatched. But we can drive
             * deeper queue depths at the beginning of slice
             * subjected to upper limit of cfq_quantum.
             */
            max_dispatch = cfqd->cfq_quantum;
    }

    /*
     * Async queues must wait a bit before being allowed dispatch.
     * We also ramp up the dispatch depth gradually for async IO,
     * based on the last sync IO we serviced
     */
    if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
        unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
        unsigned int depth;

        depth = last_sync / cfqd->cfq_slice[1];
        if (!depth && !cfqq->dispatched)
            depth = 1;
        if (depth < max_dispatch)
            max_dispatch = depth;
    }

    /*
     * If we're below the current max, allow a dispatch
     */
    return cfqq->dispatched < max_dispatch;
}

/*
 * Dispatch a request from cfqq, moving them to the request queue
 * dispatch list.
 */
static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
    struct request *rq;

    BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

    if (!cfq_may_dispatch(cfqd, cfqq))
        return false;

    /*
     * follow expired path, else get first next available
     */
    rq = cfq_check_fifo(cfqq);
    if (!rq)
        rq = cfqq->next_rq;

    /*
     * insert request into driver dispatch list
     */
    cfq_dispatch_insert(cfqd->queue, rq);

    if (!cfqd->active_cic) {
        struct cfq_io_cq *cic = RQ_CIC(rq);

        /* Pin the io_context of the task we are serving. */
        atomic_long_inc(&cic->icq.ioc->refcount);
        cfqd->active_cic = cic;
    }

    return true;
}

/*
 * Find the cfqq that we need to service and move a request from that to the
 * dispatch list
 */
static int cfq_dispatch_requests(struct request_queue *q, int force)
{
    struct cfq_data *cfqd = q->elevator->elevator_data;
    struct cfq_queue *cfqq;

    if (!cfqd->busy_queues)
        return 0;

    if (unlikely(force))
        return cfq_forced_dispatch(cfqd);

    cfqq = cfq_select_queue(cfqd);
    if (!cfqq)
        return 0;

    /*
     * Dispatch a request from this cfqq, if it is allowed
     */
    if (!cfq_dispatch_request(cfqd, cfqq))
        return 0;

    cfqq->slice_dispatch++;
    cfq_clear_cfqq_must_dispatch(cfqq);

    /*
     * expire an async queue immediately if it has used up its slice. idle
     * queue always expire after 1 dispatch round.
     */
    if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
        cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
        cfq_class_idle(cfqq))) {
        cfqq->slice_end = jiffies + 1;
        cfq_slice_expired(cfqd, 0);
    }

    cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
    return 1;
}

/*
 * task holds one reference to the queue, dropped when task exits. each rq
 * in-flight on this queue also holds a reference, dropped when rq is freed.
 *
 * Each cfq queue took a reference on the parent group. Drop it now.
 * queue lock must be held here.
*/
static void cfq_put_queue(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct cfq_group *cfqg;

	BUG_ON(cfqq->ref <= 0);

	cfqq->ref--;
	if (cfqq->ref)
		return;

	/* last reference gone: the queue must be idle and off the rr list */
	cfq_log_cfqq(cfqd, cfqq, "put_queue");
	BUG_ON(rb_first(&cfqq->sort_list));
	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
	cfqg = cfqq->cfqg;

	if (unlikely(cfqd->active_queue == cfqq)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	BUG_ON(cfq_cfqq_on_rr(cfqq));
	kmem_cache_free(cfq_pool, cfqq);
	/* drop the group reference taken when the queue was linked */
	cfq_put_cfqg(cfqg);
}

/*
 * Drop the references this queue holds on its merge chain (new_cfqq links).
 */
static void cfq_put_cooperator(struct cfq_queue *cfqq)
{
	struct cfq_queue *__cfqq, *next;

	/*
	 * If this queue was scheduled to merge with another queue, be
	 * sure to drop the reference taken on that queue (and others in
	 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
	 */
	__cfqq = cfqq->new_cfqq;
	while (__cfqq) {
		if (__cfqq == cfqq) {
			/* defensive: a cycle in the chain would loop forever */
			WARN(1, "cfqq->new_cfqq loop detected\n");
			break;
		}
		next = __cfqq->new_cfqq;
		cfq_put_queue(__cfqq);
		__cfqq = next;
	}
}

/*
 * Tear down a cfqq on io_context exit: expire it if active, then drop its
 * cooperator references and the caller's own reference.
 */
static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (unlikely(cfqq == cfqd->active_queue)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	cfq_put_cooperator(cfqq);

	cfq_put_queue(cfqq);
}

/* icq constructor: seed the think-time tracking with the current time */
static void cfq_init_icq(struct io_cq *icq)
{
	struct cfq_io_cq *cic = icq_to_cic(icq);

	cic->ttime.last_end_request = jiffies;
}

/* icq destructor: release both the async and sync queues of this context */
static void cfq_exit_icq(struct io_cq *icq)
{
	struct cfq_io_cq *cic = icq_to_cic(icq);
	struct cfq_data *cfqd = cic_to_cfqd(cic);

	if (cic->cfqq[BLK_RW_ASYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
		cic->cfqq[BLK_RW_ASYNC] = NULL;
	}

	if (cic->cfqq[BLK_RW_SYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
		cic->cfqq[BLK_RW_SYNC] = NULL;
	}
}

/*
 * Refresh cfqq's io priority/class from its io_context, if it was flagged
 * as changed.
 */
static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
{
	struct task_struct *tsk = current;
	int ioprio_class;

	if (!cfq_cfqq_prio_changed(cfqq))
		return;

	ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
	switch (ioprio_class) {
	default:
		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
		/* fall through: treat an unknown class like CLASS_NONE */
	case IOPRIO_CLASS_NONE:
		/*
		 * no prio set, inherit CPU scheduling settings
		 */
		cfqq->ioprio = task_nice_ioprio(tsk);
		cfqq->ioprio_class = task_nice_ioclass(tsk);
		break;
	case IOPRIO_CLASS_RT:
		cfqq->ioprio = task_ioprio(ioc);
		cfqq->ioprio_class = IOPRIO_CLASS_RT;
		break;
	case IOPRIO_CLASS_BE:
		cfqq->ioprio = task_ioprio(ioc);
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_IDLE:
		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
		cfqq->ioprio = 7;
		cfq_clear_cfqq_idle_window(cfqq);
		break;
	}

	/*
	 * keep track of original prio settings in case we have to temporarily
	 * elevate the priority of this queue
	 */
	cfqq->org_ioprio = cfqq->ioprio;
	cfq_clear_cfqq_prio_changed(cfqq);
}

/*
 * The io priority of this context changed: swap in a fresh async queue and
 * mark the sync queue so its prio data is re-read on next use.
 */
static void changed_ioprio(struct cfq_io_cq *cic)
{
	struct cfq_data *cfqd = cic_to_cfqd(cic);
	struct cfq_queue *cfqq;

	if (unlikely(!cfqd))
		return;

	cfqq = cic->cfqq[BLK_RW_ASYNC];
	if (cfqq) {
		struct cfq_queue *new_cfqq;
		new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->icq.ioc,
						GFP_ATOMIC);
		if (new_cfqq) {
			cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
			cfq_put_queue(cfqq);
		}
	}

	cfqq = cic->cfqq[BLK_RW_SYNC];
	if (cfqq)
		cfq_mark_cfqq_prio_changed(cfqq);
}

/*
 * Basic initialisation of a newly allocated cfqq. The caller still has to
 * take references and link the queue to a group.
 */
static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			  pid_t pid, bool is_sync)
{
	RB_CLEAR_NODE(&cfqq->rb_node);
	RB_CLEAR_NODE(&cfqq->p_node);
	INIT_LIST_HEAD(&cfqq->fifo);

	cfqq->ref = 0;
	cfqq->cfqd = cfqd;

	cfq_mark_cfqq_prio_changed(cfqq);

	if (is_sync) {
		if (!cfq_class_idle(cfqq))
			cfq_mark_cfqq_idle_window(cfqq);
		cfq_mark_cfqq_sync(cfqq);
	}
	cfqq->pid = pid;
}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
/*
 * The task moved to a different cgroup: detach the sync queue so a new one
 * is created in the new group on the next request.
 */
static void changed_cgroup(struct cfq_io_cq *cic)
{
	struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
	struct cfq_data *cfqd = cic_to_cfqd(cic);
	struct request_queue *q;

	if (unlikely(!cfqd))
		return;

	/* NOTE(review): q is assigned but never used below — confirm leftover */
	q = cfqd->queue;

	if (sync_cfqq) {
		/*
		 * Drop reference to sync queue. A new sync queue will be
		 * assigned in new group upon arrival of a fresh request.
		 */
		cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
		cic_set_cfqq(cic, NULL, 1);
		cfq_put_queue(sync_cfqq);
	}
}
#endif  /* CONFIG_CFQ_GROUP_IOSCHED */

/*
 * Look up (or allocate) the cfqq for this context/direction. May drop and
 * re-take the queue lock when a blocking allocation is permitted; falls back
 * to the embedded oom_cfqq when allocation fails.
 */
static struct cfq_queue *
cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
		     struct io_context *ioc, gfp_t gfp_mask)
{
	struct cfq_queue *cfqq, *new_cfqq = NULL;
	struct cfq_io_cq *cic;
	struct cfq_group *cfqg;

retry:
	cfqg = cfq_get_cfqg(cfqd);
	cic = cfq_cic_lookup(cfqd, ioc);
	/* cic always exists here */
	cfqq = cic_to_cfqq(cic, is_sync);

	/*
	 * Always try a new alloc if we fell back to the OOM cfqq
	 * originally, since it should just be a temporary situation.
	 */
	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
		cfqq = NULL;
		if (new_cfqq) {
			/* second pass after a sleeping alloc: use the result */
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else if (gfp_mask & __GFP_WAIT) {
			/* allocation may sleep, so drop the queue lock */
			spin_unlock_irq(cfqd->queue->queue_lock);
			new_cfqq = kmem_cache_alloc_node(cfq_pool,
					gfp_mask | __GFP_ZERO,
					cfqd->queue->node);
			spin_lock_irq(cfqd->queue->queue_lock);
			if (new_cfqq)
				goto retry;
		} else {
			cfqq = kmem_cache_alloc_node(cfq_pool,
					gfp_mask | __GFP_ZERO,
					cfqd->queue->node);
		}

		if (cfqq) {
			cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
			cfq_init_prio_data(cfqq, ioc);
			cfq_link_cfqq_cfqg(cfqq, cfqg);
			cfq_log_cfqq(cfqd, cfqq, "alloced");
		} else
			cfqq = &cfqd->oom_cfqq;
	}

	/* lost the race against another allocator: free the spare */
	if (new_cfqq)
		kmem_cache_free(cfq_pool, new_cfqq);

	return cfqq;
}

/*
 * Map (class, prio) to the slot holding the shared per-device async queue.
 */
static struct cfq_queue **
cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
{
	switch (ioprio_class) {
	case IOPRIO_CLASS_RT:
		return &cfqd->async_cfqq[0][ioprio];
	case IOPRIO_CLASS_BE:
		return &cfqd->async_cfqq[1][ioprio];
	case IOPRIO_CLASS_IDLE:
		return &cfqd->async_idle_cfqq;
	default:
		BUG();
	}
}

/*
 * Get a referenced cfqq for this context. Async queues are shared per
 * (class, prio) and cached in cfqd; sync queues are per io_context.
 */
static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
	      gfp_t gfp_mask)
{
	const int ioprio = task_ioprio(ioc);
	const int ioprio_class = task_ioprio_class(ioc);
	struct cfq_queue **async_cfqq = NULL;
	struct cfq_queue *cfqq = NULL;

	if (!is_sync) {
		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
		cfqq = *async_cfqq;
	}

	if (!cfqq)
		cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);

	/*
	 * pin the queue now that it's allocated, scheduler exit will prune it
	 */
	if (!is_sync && !(*async_cfqq)) {
		cfqq->ref++;
		*async_cfqq = cfqq;
	}

	/* reference for the caller */
	cfqq->ref++;
	return cfqq;
}

/*
 * Fold the time since the last request completion into the exponentially
 * weighted think-time average (capped at twice the idle slice).
 */
static void
__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
{
	unsigned long elapsed = jiffies - ttime->last_end_request;
	elapsed = min(elapsed, 2UL * slice_idle);

	ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
	ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
	ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
}

/* Update think-time stats on the cic, service tree and (if enabled) group. */
static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			struct cfq_io_cq *cic)
{
	if (cfq_cfqq_sync(cfqq)) {
		__cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
		__cfq_update_io_thinktime(&cfqq->service_tree->ttime,
			cfqd->cfq_slice_idle);
	}
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	__cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
#endif
}

/*
 * Shift the seek distance of this request into cfqq's seek history bitmap.
 * On non-rotational devices a "seek" is judged by request size instead.
 */
static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct request *rq)
{
	sector_t sdist = 0;
	sector_t n_sec = blk_rq_sectors(rq);
	if (cfqq->last_request_pos) {
		if (cfqq->last_request_pos < blk_rq_pos(rq))
			sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
		else
			sdist = cfqq->last_request_pos - blk_rq_pos(rq);
	}

	cfqq->seek_history <<= 1;
	if (blk_queue_nonrot(cfqd->queue))
		cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
	else
		cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
}

/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_cq *cic)
{
	int old_idle, enable_idle;

	/*
	 * Don't idle for async or idle io prio class
	 */
	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
		return;

	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);

	/* a queue with several requests pending is "deep" */
	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
		cfq_mark_cfqq_deep(cfqq);

	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
		enable_idle = 0;
	else if (!atomic_read(&cic->icq.ioc->nr_tasks) ||
	    !cfqd->cfq_slice_idle ||
	    (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
		enable_idle = 0;
	else if (sample_valid(cic->ttime.ttime_samples)) {
		/* idling pays off only if think time is below the idle slice */
		if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
			enable_idle = 0;
		else
			enable_idle = 1;
	}

	if (old_idle != enable_idle) {
		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
		if (enable_idle)
			cfq_mark_cfqq_idle_window(cfqq);
		else
			cfq_clear_cfqq_idle_window(cfqq);
	}
}

/*
 * Check if new_cfqq should preempt the currently active queue. Return 0 for
 * no or if we aren't sure, a 1 will cause a preempt.
 */
static bool
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
		   struct request *rq)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		return false;

	if (cfq_class_idle(new_cfqq))
		return false;

	if (cfq_class_idle(cfqq))
		return true;

	/*
	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
	 */
	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
		return false;

	/*
	 * if the new request is sync, but the currently running queue is
	 * not, let the sync request have priority.
	 */
	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
		return true;

	/* preemption never crosses group boundaries */
	if (new_cfqq->cfqg != cfqq->cfqg)
		return false;

	if (cfq_slice_used(cfqq))
		return true;

	/* Allow preemption only if we are idling on sync-noidle tree */
	if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
	    cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
	    new_cfqq->service_tree->count == 2 &&
	    RB_EMPTY_ROOT(&cfqq->sort_list))
		return true;

	/*
	 * So both queues are sync. Let the new request get disk time if
	 * it's a metadata request and the current queue is doing regular IO.
	 */
	if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
		return true;

	/*
	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
	 */
	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
		return true;

	/* An idle queue should not be idle now for some reason */
	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
		return true;

	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
		return false;

	/*
	 * if this request is as-good as one we would expect from the
	 * current cfqq, let it preempt
	 */
	if (cfq_rq_close(cfqd, cfqq, rq))
		return true;

	return false;
}

/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	enum wl_type_t old_type = cfqq_type(cfqd->active_queue);

	cfq_log_cfqq(cfqd, cfqq, "preempt");
	cfq_slice_expired(cfqd, 1);

	/*
	 * workload type is changed, don't save slice, otherwise preempt
	 * doesn't happen
	 */
	if (old_type != cfqq_type(cfqq))
		cfqq->cfqg->saved_workload_slice = 0;

	/*
	 * Put the new queue at the front of the of the current list,
	 * so we know that it will be selected next.
	 */
	BUG_ON(!cfq_cfqq_on_rr(cfqq));

	cfq_service_tree_add(cfqd, cfqq, 1);

	cfqq->slice_end = 0;
	cfq_mark_cfqq_slice_new(cfqq);
}

/*
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct request *rq)
{
	struct cfq_io_cq *cic = RQ_CIC(rq);

	cfqd->rq_queued++;
	if (rq->cmd_flags & REQ_PRIO)
		cfqq->prio_pending++;

	cfq_update_io_thinktime(cfqd, cfqq, cic);
	cfq_update_io_seektime(cfqd, cfqq, rq);
	cfq_update_idle_window(cfqd, cfqq, cic);

	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);

	if (cfqq == cfqd->active_queue) {
		/*
		 * Remember that we saw a request from this process, but
		 * don't start queuing just yet. Otherwise we risk seeing lots
		 * of tiny requests, because we disrupt the normal plugging
		 * and merging. If the request is already larger than a single
		 * page, let it rip immediately. For that case we assume that
		 * merging is already done. Ditto for a busy system that
		 * has other work pending, don't risk delaying until the
		 * idle timer unplug to continue working.
		 */
		if (cfq_cfqq_wait_request(cfqq)) {
			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
			    cfqd->busy_queues > 1) {
				cfq_del_timer(cfqd, cfqq);
				cfq_clear_cfqq_wait_request(cfqq);
				__blk_run_queue(cfqd->queue);
			} else {
				cfq_blkiocg_update_idle_time_stats(
						&cfqq->cfqg->blkg);
				cfq_mark_cfqq_must_dispatch(cfqq);
			}
		}
	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
		/*
		 * not the active queue - expire current slice if it is
		 * idle and has expired it's mean thinktime or this new queue
		 * has some old slice time left and is of higher priority or
		 * this new queue is RT and the current one is BE
		 */
		cfq_preempt_queue(cfqd, cfqq);
		__blk_run_queue(cfqd->queue);
	}
}

/*
 * Called when a request (rq) is reinserted (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_rq_requeued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct request *rq)
{
	struct cfq_io_cq *cic = RQ_CIC(rq);

	cfqd->rq_queued++;
	if (rq->cmd_flags & REQ_PRIO)
		cfqq->prio_pending++;

	/* the request leaves the in-flight accounting again */
	cfqq->dispatched--;
	(RQ_CFQG(rq))->dispatched--;
	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;

	cfq_update_io_thinktime(cfqd, cfqq, cic);
	cfq_update_io_seektime(cfqd, cfqq, rq);
	cfq_update_idle_window(cfqd, cfqq, cic);

	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);

	if (cfqq == cfqd->active_queue) {
		if (cfq_cfqq_wait_request(cfqq)) {
			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
			    cfqd->busy_queues > 1) {
				cfq_del_timer(cfqd, cfqq);
				cfq_clear_cfqq_wait_request(cfqq);
			} else {
				cfq_blkiocg_update_idle_time_stats(
						&cfqq->cfqg->blkg);
				cfq_mark_cfqq_must_dispatch(cfqq);
			}
		}
	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
		cfq_preempt_queue(cfqd, cfqq);
	}
}

/*
 * Put a previously dispatched request back on the scheduler. Also moves the
 * urgent accounting from in-flight back to pending.
 */
static int cfq_reinsert_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (!cfqq || cfqq->cfqd != cfqd)
		return -EIO;

	cfq_log_cfqq(cfqd, cfqq, "re-insert_request");
	list_add(&rq->queuelist, &cfqq->fifo);
	cfq_add_rq_rb(rq);

	cfq_rq_requeued(cfqd, cfqq, rq);
	if (rq->cmd_flags & REQ_URGENT) {
		if (cfqd->nr_urgent_in_flight)
			cfqd->nr_urgent_in_flight--;
		cfqd->nr_urgent_pending++;
	}
	return 0;
}

/*
 * Add a new request to the scheduler, possibly tagging it as URGENT (see
 * the criteria in the comment below).
 */
static void cfq_insert_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "insert_request");
	cfq_init_prio_data(cfqq, RQ_CIC(rq)->icq.ioc);

	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
	list_add_tail(&rq->queuelist, &cfqq->fifo);
	cfq_add_rq_rb(rq);
	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
			&cfqd->serving_group->blkg, rq_data_dir(rq),
			rq_is_sync(rq));
	cfq_rq_enqueued(cfqd, cfqq, rq);

	/* a freshly inserted request must not already carry the URGENT flag */
	if (rq->cmd_flags & REQ_URGENT) {
		WARN_ON(1);
		blk_dump_rq_flags(rq, "");
		rq->cmd_flags &= ~REQ_URGENT;
	}

	/* Request is considered URGENT if:
	 * 1. The queue being served is of a lower IO priority then the new
	 *	request
	 * OR:
	 * 2. The workload being performed is ASYNC
	 * Only READ requests may be considered as URGENT
	 */
	if ((cfqd->active_queue &&
		 cfqq->ioprio_class < cfqd->active_queue->ioprio_class) ||
		(cfqd->serving_type == ASYNC_WORKLOAD &&
			rq_data_dir(rq) == READ)) {
		rq->cmd_flags |= REQ_URGENT;
		cfqd->nr_urgent_pending++;
	}
}

/**
 * cfq_urgent_pending() - Return TRUE if there is an urgent
 *			  request on scheduler
 * @q:	requests queue
 */
static bool cfq_urgent_pending(struct request_queue *q)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	/* urgent work exists only while none of it is already in flight */
	if (cfqd->nr_urgent_pending && !cfqd->nr_urgent_in_flight)
		return true;

	return false;
}

/*
 * Update hw_tag based on peak queue depth over 50 samples under
 * sufficient load.
*/
static void cfq_update_hw_tag(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	/* track the highest observed driver queue depth */
	if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
		cfqd->hw_tag_est_depth = cfqd->rq_in_driver;

	if (cfqd->hw_tag == 1)
		return;

	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
		return;

	/*
	 * If active queue hasn't enough requests and can idle, cfq might not
	 * dispatch sufficient requests to hardware. Don't zero hw_tag in this
	 * case
	 */
	if (cfqq && cfq_cfqq_idle_window(cfqq) &&
	    cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
	    CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
		return;

	if (cfqd->hw_tag_samples++ < 50)
		return;

	if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
		cfqd->hw_tag = 1;
	else
		cfqd->hw_tag = 0;
}

/*
 * Decide whether to keep the (empty) queue "busy waiting" for the next
 * request instead of expiring its slice immediately.
 */
static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct cfq_io_cq *cic = cfqd->active_cic;

	/* If the queue already has requests, don't wait */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		return false;

	/* If there are other queues in the group, don't wait */
	if (cfqq->cfqg->nr_cfqq > 1)
		return false;

	/* the only queue in the group, but think time is big */
	if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
		return false;

	if (cfq_slice_used(cfqq))
		return true;

	/* if slice left is less than think time, wait busy */
	if (cic && sample_valid(cic->ttime.ttime_samples)
	    && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
		return true;

	/*
	 * If think times is less than a jiffy than ttime_mean=0 and above
	 * will not be true. It might happen that slice has not expired yet
	 * but will expire soon (4-5 ns) during select_queue(). To cover the
	 * case where think time is less than a jiffy, mark the queue wait
	 * busy if only 1 jiffy is left in the slice.
	 */
	if (cfqq->slice_end - jiffies == 1)
		return true;

	return false;
}

/*
 * Completion handler: undo in-flight accounting, update think-time stats and
 * decide whether to expire the active queue or arm the idle timer.
 */
static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);
	unsigned long now;

	/* an URGENT request completing must have been counted in flight */
	if (rq->cmd_flags & REQ_URGENT) {
		if (!cfqd->nr_urgent_in_flight)
			WARN_ON(1);
		else
			cfqd->nr_urgent_in_flight--;
		rq->cmd_flags &= ~REQ_URGENT;
	}

	now = jiffies;
	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
		     !!(rq->cmd_flags & REQ_NOIDLE));

	cfq_update_hw_tag(cfqd);

	WARN_ON(!cfqd->rq_in_driver);
	WARN_ON(!cfqq->dispatched);
	cfqd->rq_in_driver--;
	cfqq->dispatched--;
	(RQ_CFQG(rq))->dispatched--;
	cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
			rq_start_time_ns(rq), rq_io_start_time_ns(rq),
			rq_data_dir(rq), rq_is_sync(rq));

	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;

	if (sync) {
		struct cfq_rb_root *service_tree;

		RQ_CIC(rq)->ttime.last_end_request = now;

		if (cfq_cfqq_on_rr(cfqq))
			service_tree = cfqq->service_tree;
		else
			service_tree = service_tree_for(cfqq->cfqg,
				cfqq_prio(cfqq), cfqq_type(cfqq));
		service_tree->ttime.last_end_request = now;
		if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
			cfqd->last_delayed_sync = now;
	}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	cfqq->cfqg->ttime.last_end_request = now;
#endif

	/*
	 * If this is the active queue, check if it needs to be expired,
	 * or if we want to idle in case it has no pending requests.
	 */
	if (cfqd->active_queue == cfqq) {
		const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);

		if (cfq_cfqq_slice_new(cfqq)) {
			cfq_set_prio_slice(cfqd, cfqq);
			cfq_clear_cfqq_slice_new(cfqq);
		}

		/*
		 * Should we wait for next request to come in before we expire
		 * the queue.
		 */
		if (cfq_should_wait_busy(cfqd, cfqq)) {
			unsigned long extend_sl = cfqd->cfq_slice_idle;
			if (!cfqd->cfq_slice_idle)
				extend_sl = cfqd->cfq_group_idle;
			cfqq->slice_end = jiffies + extend_sl;
			cfq_mark_cfqq_wait_busy(cfqq);
			cfq_log_cfqq(cfqd, cfqq, "will busy wait");
		}

		/*
		 * Idling is not enabled on:
		 * - expired queues
		 * - idle-priority queues
		 * - async queues
		 * - queues with still some requests queued
		 * - when there is a close cooperator
		 */
		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
			cfq_slice_expired(cfqd, 1);
		else if (sync && cfqq_empty &&
			 !cfq_close_cooperator(cfqd, cfqq)) {
			cfq_arm_slice_timer(cfqd);
		}
	}

	if (!cfqd->rq_in_driver)
		cfq_schedule_dispatch(cfqd);
}

/*
 * A queue that is waiting for a request and has not yet been granted an
 * allocation slice MUST be allowed to queue, otherwise MAY.
 */
static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
		cfq_mark_cfqq_must_alloc_slice(cfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
}

static int cfq_may_queue(struct request_queue *q, int rw)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_cq *cic;
	struct cfq_queue *cfqq;

	/*
	 * don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * so just lookup a possibly existing queue, or return 'may queue'
	 * if that fails
	 */
	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return ELV_MQUEUE_MAY;

	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
	if (cfqq) {
		cfq_init_prio_data(cfqq, cic->icq.ioc);

		return __cfq_may_queue(cfqq);
	}

	return ELV_MQUEUE_MAY;
}

/*
 * queue lock held here
 */
static void cfq_put_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq) {
		const int rw = rq_data_dir(rq);

		BUG_ON(!cfqq->allocated[rw]);
		cfqq->allocated[rw]--;

		/* Put down rq reference on cfqg */
		cfq_put_cfqg(RQ_CFQG(rq));
		rq->elv.priv[0] = NULL;
		rq->elv.priv[1] = NULL;

		cfq_put_queue(cfqq);
	}
}

/*
 * Redirect the cic to cfqq's merge target and drop this queue; returns the
 * queue the cic should use from now on.
 */
static struct cfq_queue *
cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
		struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
	cic_set_cfqq(cic, cfqq->new_cfqq, 1);
	cfq_mark_cfqq_coop(cfqq->new_cfqq);
	cfq_put_queue(cfqq);
	return cic_to_cfqq(cic, 1);
}

/*
 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
 * was the last process referring to said cfqq.
 */
static struct cfq_queue *
split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
{
	if (cfqq_process_refs(cfqq) == 1) {
		/* sole user: just reclaim the queue for this process */
		cfqq->pid = current->pid;
		cfq_clear_cfqq_coop(cfqq);
		cfq_clear_cfqq_split_coop(cfqq);
		return cfqq;
	}

	cic_set_cfqq(cic, NULL, 1);

	cfq_put_cooperator(cfqq);

	cfq_put_queue(cfqq);
	return NULL;
}

/*
 * Allocate cfq data structures associated with this request.
*/
static int
cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
	const int rw = rq_data_dir(rq);
	const bool is_sync = rq_is_sync(rq);
	struct cfq_queue *cfqq;
	unsigned int changed;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	spin_lock_irq(q->queue_lock);

	/* handle changed notifications */
	changed = icq_get_changed(&cic->icq);
	if (unlikely(changed & ICQ_IOPRIO_CHANGED))
		changed_ioprio(cic);
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	if (unlikely(changed & ICQ_CGROUP_CHANGED))
		changed_cgroup(cic);
#endif

new_queue:
	cfqq = cic_to_cfqq(cic, is_sync);
	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
		cfqq = cfq_get_queue(cfqd, is_sync, cic->icq.ioc, gfp_mask);
		cic_set_cfqq(cic, cfqq, is_sync);
	} else {
		/*
		 * If the queue was seeky for too long, break it apart.
		 */
		if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
			cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
			cfqq = split_cfqq(cic, cfqq);
			if (!cfqq)
				goto new_queue;
		}

		/*
		 * Check to see if this queue is scheduled to merge with
		 * another, closely cooperating queue.  The merging of
		 * queues happens here as it must be done in process context.
		 * The reference on new_cfqq was taken in merge_cfqqs.
		 */
		if (cfqq->new_cfqq)
			cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
	}

	cfqq->allocated[rw]++;

	/* rq takes a reference on both the queue and its group */
	cfqq->ref++;
	rq->elv.priv[0] = cfqq;
	rq->elv.priv[1] = cfq_ref_get_cfqg(cfqq->cfqg);
	spin_unlock_irq(q->queue_lock);
	return 0;
}

/* Workqueue callback: re-run the request queue under the queue lock. */
static void cfq_kick_queue(struct work_struct *work)
{
	struct cfq_data *cfqd =
		container_of(work, struct cfq_data, unplug_work);
	struct request_queue *q = cfqd->queue;

	spin_lock_irq(q->queue_lock);
	__blk_run_queue(cfqd->queue);
	spin_unlock_irq(q->queue_lock);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;
	int timed_out = 1;

	cfq_log(cfqd, "idle timer fired");

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cfqd->active_queue;
	if (cfqq) {
		timed_out = 0;

		/*
		 * We saw a request before the queue expired, let it through
		 */
		if (cfq_cfqq_must_dispatch(cfqq))
			goto out_kick;

		/*
		 * expired
		 */
		if (cfq_slice_used(cfqq))
			goto expire;

		/*
		 * only expire and reinvoke request handler, if there are
		 * other queues with pending requests
		 */
		if (!cfqd->busy_queues)
			goto out_cont;

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
			goto out_kick;

		/*
		 * Queue depth flag is reset only when the idle didn't succeed
		 */
		cfq_clear_cfqq_deep(cfqq);
	}
expire:
	cfq_slice_expired(cfqd, timed_out);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

/* Stop the idle timer and flush any pending unplug work. */
static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	cancel_work_sync(&cfqd->unplug_work);
}

/* Drop the cached per-device async queues (and their references). */
static void cfq_put_async_queues(struct cfq_data *cfqd)
{
	int i;

	for (i = 0; i < IOPRIO_BE_NR; i++) {
		if (cfqd->async_cfqq[0][i])
			cfq_put_queue(cfqd->async_cfqq[0][i]);
		if (cfqd->async_cfqq[1][i])
			cfq_put_queue(cfqd->async_cfqq[1][i]);
	}

	if (cfqd->async_idle_cfqq)
		cfq_put_queue(cfqd->async_idle_cfqq);
}
static void cfq_exit_queue(struct elevator_queue *e) { struct cfq_data *cfqd = e->elevator_data; struct request_queue *q = cfqd->queue; bool wait = false; cfq_shutdown_timer_wq(cfqd); spin_lock_irq(q->queue_lock); if (cfqd->active_queue) __cfq_slice_expired(cfqd, cfqd->active_queue, 0); cfq_put_async_queues(cfqd); cfq_release_cfq_groups(cfqd); /* * If there are groups which we could not unlink from blkcg list, * wait for a rcu period for them to be freed. */ if (cfqd->nr_blkcg_linked_grps) wait = true; spin_unlock_irq(q->queue_lock); cfq_shutdown_timer_wq(cfqd); /* * Wait for cfqg->blkg->key accessors to exit their grace periods. * Do this wait only if there are other unlinked groups out * there. This can happen if cgroup deletion path claimed the * responsibility of cleaning up a group before queue cleanup code * get to the group. * * Do not call synchronize_rcu() unconditionally as there are drivers * which create/delete request queue hundreds of times during scan/boot * and synchronize_rcu() can take significant time and slow down boot. */ if (wait) synchronize_rcu(); #ifdef CONFIG_CFQ_GROUP_IOSCHED /* Free up per cpu stats for root group */ free_percpu(cfqd->root_group.blkg.stats_cpu); #endif kfree(cfqd); } static void *cfq_init_queue(struct request_queue *q) { struct cfq_data *cfqd; int i, j; struct cfq_group *cfqg; struct cfq_rb_root *st; cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node); if (!cfqd) return NULL; /* Init root service tree */ cfqd->grp_service_tree = CFQ_RB_ROOT; /* Init root group */ cfqg = &cfqd->root_group; for_each_cfqg_st(cfqg, i, j, st) *st = CFQ_RB_ROOT; RB_CLEAR_NODE(&cfqg->rb_node); /* Give preference to root group over other groups */ cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT; #ifdef CONFIG_CFQ_GROUP_IOSCHED /* * Set root group reference to 2. One reference will be dropped when * all groups on cfqd->cfqg_list are being deleted during queue exit. 
* Other reference will remain there as we don't want to delete this * group as it is statically allocated and gets destroyed when * throtl_data goes away. */ cfqg->ref = 2; if (blkio_alloc_blkg_stats(&cfqg->blkg)) { kfree(cfqg); kfree(cfqd); return NULL; } rcu_read_lock(); cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, (void *)cfqd, 0); rcu_read_unlock(); cfqd->nr_blkcg_linked_grps++; /* Add group on cfqd->cfqg_list */ hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list); #endif /* * Not strictly needed (since RB_ROOT just clears the node and we * zeroed cfqd on alloc), but better be safe in case someone decides * to add magic to the rb code */ for (i = 0; i < CFQ_PRIO_LISTS; i++) cfqd->prio_trees[i] = RB_ROOT; /* * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues. * Grab a permanent reference to it, so that the normal code flow * will not attempt to free it. */ cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0); cfqd->oom_cfqq.ref++; cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group); cfqd->queue = q; init_timer(&cfqd->idle_slice_timer); cfqd->idle_slice_timer.function = cfq_idle_slice_timer; cfqd->idle_slice_timer.data = (unsigned long) cfqd; INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); cfqd->cfq_quantum = cfq_quantum; cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1]; cfqd->cfq_back_max = cfq_back_max; cfqd->cfq_back_penalty = cfq_back_penalty; cfqd->cfq_slice[0] = cfq_slice_async; cfqd->cfq_slice[1] = cfq_slice_sync; cfqd->cfq_target_latency = cfq_target_latency; cfqd->cfq_slice_async_rq = cfq_slice_async_rq; cfqd->cfq_slice_idle = cfq_slice_idle; cfqd->cfq_group_idle = cfq_group_idle; cfqd->cfq_latency = 1; cfqd->hw_tag = -1; /* * we optimistically start assuming sync ops weren't delayed in last * second, in order to have larger depth for async operations. 
*/ cfqd->last_delayed_sync = jiffies - HZ; return cfqd; } /* * sysfs parts below --> */ static ssize_t cfq_var_show(unsigned int var, char *page) { return sprintf(page, "%d\n", var); } static ssize_t cfq_var_store(unsigned int *var, const char *page, size_t count) { char *p = (char *) page; *var = simple_strtoul(p, &p, 10); return count; } #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ static ssize_t __FUNC(struct elevator_queue *e, char *page) \ { \ struct cfq_data *cfqd = e->elevator_data; \ unsigned int __data = __VAR; \ if (__CONV) \ __data = jiffies_to_msecs(__data); \ return cfq_var_show(__data, (page)); \ } SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0); SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1); SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1); SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0); SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0); SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1); SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0); SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1); #undef SHOW_FUNCTION #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ { \ struct cfq_data *cfqd = e->elevator_data; \ unsigned int __data; \ int ret = cfq_var_store(&__data, (page), count); \ if (__data < (MIN)) \ __data = (MIN); \ else if (__data > (MAX)) \ __data = (MAX); \ if (__CONV) \ *(__PTR) = msecs_to_jiffies(__data); \ else \ *(__PTR) = __data; \ return ret; \ } STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0); STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, 
UINT_MAX, 1); STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1); STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0); STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1); STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0); STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0); STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1); #undef STORE_FUNCTION #define CFQ_ATTR(name) \ __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store) static struct elv_fs_entry cfq_attrs[] = { CFQ_ATTR(quantum), CFQ_ATTR(fifo_expire_sync), CFQ_ATTR(fifo_expire_async), CFQ_ATTR(back_seek_max), CFQ_ATTR(back_seek_penalty), CFQ_ATTR(slice_sync), CFQ_ATTR(slice_async), CFQ_ATTR(slice_async_rq), CFQ_ATTR(slice_idle), CFQ_ATTR(group_idle), CFQ_ATTR(low_latency), CFQ_ATTR(target_latency), __ATTR_NULL }; static struct elevator_type iosched_cfq = { .ops = { .elevator_merge_fn = cfq_merge, .elevator_merged_fn = cfq_merged_request, .elevator_merge_req_fn = cfq_merged_requests, .elevator_allow_merge_fn = cfq_allow_merge, .elevator_bio_merged_fn = cfq_bio_merged, .elevator_dispatch_fn = cfq_dispatch_requests, .elevator_add_req_fn = cfq_insert_request, .elevator_reinsert_req_fn = cfq_reinsert_request, .elevator_is_urgent_fn = cfq_urgent_pending, .elevator_activate_req_fn = cfq_activate_request, .elevator_deactivate_req_fn = cfq_deactivate_request, .elevator_completed_req_fn = cfq_completed_request, .elevator_former_req_fn = elv_rb_former_request, .elevator_latter_req_fn = elv_rb_latter_request, .elevator_init_icq_fn = 
cfq_init_icq, .elevator_exit_icq_fn = cfq_exit_icq, .elevator_set_req_fn = cfq_set_request, .elevator_put_req_fn = cfq_put_request, .elevator_may_queue_fn = cfq_may_queue, .elevator_init_fn = cfq_init_queue, .elevator_exit_fn = cfq_exit_queue, }, .icq_size = sizeof(struct cfq_io_cq), .icq_align = __alignof__(struct cfq_io_cq), .elevator_attrs = cfq_attrs, .elevator_name = "cfq", .elevator_owner = THIS_MODULE, }; #ifdef CONFIG_CFQ_GROUP_IOSCHED static struct blkio_policy_type blkio_policy_cfq = { .ops = { .blkio_unlink_group_fn = cfq_unlink_blkio_group, .blkio_update_group_weight_fn = cfq_update_blkio_group_weight, }, .plid = BLKIO_POLICY_PROP, }; #else static struct blkio_policy_type blkio_policy_cfq; #endif static int __init cfq_init(void) { int ret; /* * could be 0 on HZ < 1000 setups */ if (!cfq_slice_async) cfq_slice_async = 1; if (!cfq_slice_idle) cfq_slice_idle = 1; #ifdef CONFIG_CFQ_GROUP_IOSCHED if (!cfq_group_idle) cfq_group_idle = 1; #else cfq_group_idle = 0; #endif cfq_pool = KMEM_CACHE(cfq_queue, 0); if (!cfq_pool) return -ENOMEM; ret = elv_register(&iosched_cfq); if (ret) { kmem_cache_destroy(cfq_pool); return ret; } blkio_policy_register(&blkio_policy_cfq); return 0; } static void __exit cfq_exit(void) { blkio_policy_unregister(&blkio_policy_cfq); elv_unregister(&iosched_cfq); kmem_cache_destroy(cfq_pool); } module_init(cfq_init); module_exit(cfq_exit); MODULE_AUTHOR("Jens Axboe"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
showliu/android_kernel_xiaomi_aries-1
block/cfq-iosched.c
C
gpl-2.0
105,039
// // CommandLineInterface.cs // // Author: // Fin Christensen <christensen.fin@gmail.com> // // Copyright (c) 2015 Fin Christensen // // This program is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA // using System; using System.Collections; using System.Collections.Generic; using CommandLine; using CommandLine.Text; using FreezingArcher.Core; using FreezingArcher.Reflection; using Attribute = FreezingArcher.Reflection.Attribute; namespace FreezingArcher.Configuration { /// <summary> /// Command line interface class. /// </summary> public class CommandLineInterface { /// <summary> /// The global instance. /// </summary> public static CommandLineInterface Instance; static CommandLineInterface () { Instance = new CommandLineInterface (); } /// <summary> /// Initializes a new instance of the <see cref="FreezingArcher.Configuration.CommandLineInterface"/> class. /// </summary> public CommandLineInterface () { DynamicClassBuilder = new DynamicClassBuilder ("Options"); Handlers = new Dictionary<string, Pair<Action<object>, Type>> (); } /// <summary> /// The dynamic class builder. /// </summary> protected DynamicClassBuilder DynamicClassBuilder; /// <summary> /// The handlers. /// </summary> protected Dictionary<string, Pair<Action<object>, Type>> Handlers; // <summary> // Parses the arguments from the command line. 
// </summary> /// <returns><c>true</c>, if arguments were parsed, <c>false</c> otherwise.</returns> /// <param name="args">The arguments.</param> public bool ParseArguments (string[] args) { Type t = DynamicClassBuilder.CreateType (); object options = Activator.CreateInstance (t); if (Parser.Default.ParseArguments (args, options)) { foreach (var handler in Handlers) { var read = typeof (Property).GetMethod ("Read"); var readT = read.MakeGenericMethod (handler.Value.B); handler.Value.A.Invoke (readT.Invoke (null, new object[] {options, handler.Key})); } return true; } return false; } /// <summary> /// Adds an option to the command line interface. (-e, --example VALUE) /// </summary> /// <param name="handler">The handler which handles the parsed value from the command line.</param> /// <param name="shortName">The short name of the option.</param> /// <param name="longName">The long name of the option.</param> /// <param name="helpText">The help text displayed in the help message.</param> /// <param name="metaValue">The meta value string displayed in the help message.</param> /// <param name="required">If set to <c>true</c> this option is required.</param> /// <param name="defaultValue">The default value of this option.</param> /// <typeparam name="T">This type specifies of which type the parsed value will be.</typeparam> public void AddOption<T> (Action<T> handler, char shortName, string longName = "", string helpText = "", string metaValue = "", bool required = false, T defaultValue = default (T)) { Attribute attr = new Attribute (typeof (OptionAttribute)); attr.CallConstructor (shortName, longName); attr.AddNamedParameters (new Pair<string, object> ("HelpText", helpText), new Pair<string, object> ("Required", required), new Pair<string, object> ("DefaultValue", defaultValue), new Pair<string, object> ("MetaValue", metaValue)); DynamicClassBuilder.AddProperty (new Property (longName, typeof (T), attr)); Handlers.Add (longName, new Pair<Action<object>, Type> (j => 
handler ((T) j), typeof (T))); } /// <summary> /// The value list property. /// </summary> protected Property ValueListProperty; /// <summary> /// Sets a value list to the command line interface. (program val1 val2 val3 ...) /// </summary> /// <param name="handler">The handler which handles the parsed values from the command line.</param> /// <param name="metaValue">The meta value string displayed in the help message.</param> /// <param name="maximumElements">Maximum number of elements in the value list. /// A number of -1 allows an unlimited number of values.</param> /// <typeparam name="T">This type specifies of which type the parsed values will be.</typeparam> public void SetValueList<T> (Action<T> handler, string metaValue = "", int maximumElements = -1) where T : IList { Attribute attr = new Attribute (typeof (ValueListAttribute)); attr.CallConstructor (typeof (List<T>)); attr.AddNamedParameters (new Pair<string, object> ("MaximumElements", maximumElements), new Pair<string, object> ("MetaValue", metaValue)); if (ValueListProperty != null) DynamicClassBuilder.RemoveProperty (ValueListProperty); ValueListProperty = new Property ("ValueList", typeof (T), attr); Handlers.Add ("ValueList", new Pair<Action<object>, Type> (j => handler ((T) j), typeof (T))); } /// <summary> /// Adds an option list to the command line interface. (-e, --example val1,val2,val3...) 
/// </summary> /// <param name="handler">The handler which handles the parsed values from the command line.</param> /// <param name="shortName">The short name of the option.</param> /// <param name="longName">The long name of the option.</param> /// <param name="separator">The separator of the given values.</param> /// <param name="helpText">The help text displayed in the help message.</param> /// <param name="metaValue">The meta value string displayed in the help message.</param> /// <param name="required">If set to <c>true</c> this option is required.</param> /// <typeparam name="T">This type specifies of which type the parsed values will be.</typeparam> public void AddOptionList<T> (Action<T> handler, char shortName, string longName = "", char separator = ',', string helpText = "", string metaValue = "", bool required = false) where T : IList { Attribute attr = new Attribute (typeof (OptionListAttribute)); attr.CallConstructor (shortName, longName, separator); attr.AddNamedParameters (new Pair<string, object> ("HelpText", helpText), new Pair<string, object> ("Required", required), new Pair<string, object> ("MetaValue", metaValue)); DynamicClassBuilder.AddProperty (new Property (longName, typeof (T), attr)); Handlers.Add (longName, new Pair<Action<object>, Type> (j => handler ((T) j), typeof (T))); } /// <summary> /// Adds an option array to the command line interface. (-e, --example val1 val2 val3 ...) 
/// </summary> /// <param name="handler">The handler which handles the parsed values from the command line.</param> /// <param name="shortName">The short name of the option.</param> /// <param name="longName">The long name of the option.</param> /// <param name="helpText">The help text displayed in the help message.</param> /// <param name="metaValue">The meta value string displayed in the help message.</param> /// <param name="required">If set to <c>true</c> this option is required.</param> /// <param name="defaultValue">The default values of this option.</param> /// <typeparam name="T">This type specifies of which type the parsed values will be.</typeparam> public void AddOptionArray<T> (Action<T[]> handler, char shortName, string longName = "", string helpText = "", string metaValue = "", bool required = false, T[] defaultValue = default (T[])) { Attribute attr = new Attribute (typeof (OptionArrayAttribute)); attr.CallConstructor (shortName, longName); attr.AddNamedParameters (new Pair<string, object> ("HelpText", helpText), new Pair<string, object> ("Required", required), new Pair<string, object> ("DefaultValue", defaultValue), new Pair<string, object> ("MetaValue", metaValue)); DynamicClassBuilder.AddProperty (new Property (longName, typeof (T[]), attr)); Handlers.Add (longName, new Pair<Action<object>, Type> (j => handler ((T[]) j), typeof (T[]))); } /// <summary> /// Configure help message. 
/// </summary> /// <param name="programName">Program name.</param> /// <param name="version">Version.</param> /// <param name="author">Author.</param> /// <param name="year">Year.</param> /// <param name="shortName">Short name of the help option.</param> /// <param name="longName">Long name of the help option.</param> /// <param name="additionalNewLineAfterOption">If set to <c>true</c> insert additional new line after options /// section.</param> /// <param name="addDashesToOptions">If set to <c>true</c> add dashes to options.</param> /// <param name="preOptionsLines">Lines to appear before options section.</param> /// <param name="postOptionsLines">Lines to appear after options section.</param> public void SetHelp (string programName, string version, string author, int year, char shortName = 'h', string longName = "help", bool additionalNewLineAfterOption = true, bool addDashesToOptions = true, IEnumerable<string> preOptionsLines = null, IEnumerable<string> postOptionsLines = null) { Attribute attr = new Attribute (typeof (HelpOptionAttribute)); attr.CallConstructor (shortName, longName); var function = new Func<Object, string> ((Object instance) => { var help = new HelpText { Heading = new HeadingInfo (programName, version), Copyright = new CopyrightInfo (author, year), AdditionalNewLineAfterOption = additionalNewLineAfterOption, AddDashesToOption = addDashesToOptions }; if (preOptionsLines != null) foreach (string s in preOptionsLines) help.AddPreOptionsLine (s); if (postOptionsLines != null) foreach (string s in postOptionsLines) help.AddPostOptionsLine (s); help.AddOptions (instance); return help; }); Method m = new Method ("GetUsage", function, attr); DynamicClassBuilder.AddMethod (m); } } }
AreonDev/NoWayOut
FreezingArcher/Engine/Configuration/CommandLineInterface.cs
C#
gpl-2.0
11,890
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.commons.net.ftp; import java.io.BufferedReader; import java.io.IOException; import java.util.List; /** * FTPFileEntryParser defines the interface for parsing a single FTP file * listing and converting that information into an * {@link org.apache.commons.net.ftp.FTPFile} instance. * Sometimes you will want to parse unusual listing formats, in which * case you would create your own implementation of FTPFileEntryParser and * if necessary, subclass FTPFile. * <p> * Here are some examples showing how to use one of the classes that * implement this interface. * <p> * The first example shows how to get an <b>iterable</b> list of files in which the * more expensive <code>FTPFile</code> objects are not created until needed. This * is suitable for paged displays. It requires that a parser object be created * beforehand: <code>parser</code> is an object (in the package * <code>org.apache.commons.net.ftp.parser</code>) * implementing this inteface. 
* * <pre> * FTPClient f=FTPClient(); * f.connect(server); * f.login(username, password); * FTPFileList list = f.createFileList(directory, parser); * FTPFileIterator iter = list.iterator(); * * while (iter.hasNext()) { * FTPFile[] files = iter.getNext(25); // "page size" you want * //do whatever you want with these files, display them, etc. * //expensive FTPFile objects not created until needed. * } * </pre> * * The second example uses the revised <code>FTPClient.listFiles()</code> * API to pull the whole list from the subfolder <code>subfolder</code> in * one call, attempting to automatically detect the parser type. This * method, without a parserKey parameter, indicates that autodection should * be used. * * <pre> * FTPClient f=FTPClient(); * f.connect(server); * f.login(username, password); * FTPFile[] files = f.listFiles("subfolder"); * </pre> * * The third example uses the revised <code>FTPClient.listFiles()</code>> * API to pull the whole list from the current working directory in one call, * but specifying by classname the parser to be used. For this particular * parser class, this approach is necessary since there is no way to * autodetect this server type. * * <pre> * FTPClient f=FTPClient(); * f.connect(server); * f.login(username, password); * FTPFile[] files = f.listFiles( * "org.apache.commons.net.ftp.parser.EnterpriseUnixFTPFileEntryParser", * "."); * </pre> * * The fourth example uses the revised <code>FTPClient.listFiles()</code> * API to pull a single file listing in an arbitrary directory in one call, * specifying by KEY the parser to be used, in this case, VMS. 
* * <pre> * FTPClient f=FTPClient(); * f.connect(server); * f.login(username, password); * FTPFile[] files = f.listFiles("VMS", "subfolder/foo.java"); * </pre> * * @author <a href="mailto:scohen@apache.org">Steve Cohen</a> * @version $Id: FTPFileEntryParser.java 636854 2008-03-13 19:55:01Z sebb $ * @see org.apache.commons.net.ftp.FTPFile * @see org.apache.commons.net.ftp.FTPClient#createFileList */ public interface FTPFileEntryParser { /** * Parses a line of an FTP server file listing and converts it into a usable * format in the form of an <code> FTPFile </code> instance. If the * file listing line doesn't describe a file, <code> null </code> should be * returned, otherwise a <code> FTPFile </code> instance representing the * files in the directory is returned. * <p> * @param listEntry A line of text from the file listing * @return An FTPFile instance corresponding to the supplied entry */ FTPFile parseFTPEntry(String listEntry); /** * Reads the next entry using the supplied BufferedReader object up to * whatever delemits one entry from the next. Implementors must define * this for the particular ftp system being parsed. In many but not all * cases, this can be defined simply by calling BufferedReader.readLine(). * * @param reader The BufferedReader object from which entries are to be * read. * * @return A string representing the next ftp entry or null if none found. * @exception IOException thrown on any IO Error reading from the reader. */ String readNextEntry(BufferedReader reader) throws IOException; /** * This method is a hook for those implementors (such as * VMSVersioningFTPEntryParser, and possibly others) which need to * perform some action upon the FTPFileList after it has been created * from the server stream, but before any clients see the list. * * The default implementation can be a no-op. * * @param original Original list after it has been created from the server stream * * @return Original list as processed by this method. 
*/ List<String> preParse(List<String> original); } /* Emacs configuration * Local variables: ** * mode: java ** * c-basic-offset: 4 ** * indent-tabs-mode: nil ** * End: ** */
bdaum/zoraPD
com.bdaum.zoom.net.core/src/org/apache/commons/net/ftp/FTPFileEntryParser.java
Java
gpl-2.0
6,090
/** * vCard parser testing. * * First parameter is location of vCard, second location of Gammu backup * how it should be parsed. * * Optional third parameter can be used to generate template backup * file. */ #include <gammu.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include "../helper/memory-display.h" #include "common.h" char buffer[65536000]; char vcard_buffer[65536000]; int main(int argc, char **argv) { size_t pos = 0; GSM_MemoryEntry pbk; GSM_Error error; FILE *f; size_t len; gboolean generate = FALSE; GSM_Backup backup; int i; GSM_Debug_Info *debug_info; /* Configure debugging */ debug_info = GSM_GetGlobalDebug(); GSM_SetDebugFileDescriptor(stderr, FALSE, debug_info); GSM_SetDebugLevel("textall", debug_info); /* Check parameters */ if (argc != 3 && argc != 4) { printf("Not enough parameters!\nUsage: vcard-read file.vcf file.backup\n"); return 1; } /* Check for generating option */ if (argc == 4 && strcmp(argv[3], "generate") == 0) { generate = TRUE; } /* Open file */ f = fopen(argv[1], "r"); test_result(f != NULL); /* Read data */ len = fread(buffer, 1, sizeof(buffer) - 1, f); test_result(feof(f)); /* Zero terminate string */ buffer[len] = 0; /* We don't need file any more */ fclose(f); /* Parse vCard */ error = GSM_DecodeVCARD(NULL, buffer, &pos, &pbk, SonyEricsson_VCard21); gammu_test_result(error, "GSM_DecodeVCARD"); /* Encode vCard back */ pos = 0; error = GSM_EncodeVCARD(NULL, vcard_buffer, sizeof(vcard_buffer), &pos, &pbk, TRUE, SonyEricsson_VCard21); gammu_test_result(error, "GSM_EncodeVCARD"); /* * Best would be to compare here, but we never can get * absolutely same as original. 
*/ printf("ORIGINAL:\n%s\n----\nENCODED:\n%s\n", buffer, vcard_buffer); /* Generate file if we should */ if (generate) { GSM_ClearBackup(&backup); strcpy(backup.Creator, "vCard tester"); pbk.Location = 0; backup.PhonePhonebook[0] = &pbk; backup.PhonePhonebook[1] = NULL; error = GSM_SaveBackupFile(argv[2], &backup, TRUE); gammu_test_result(error, "GSM_SaveBackupFile"); } /* Read file content */ GSM_ClearBackup(&backup); error = GSM_ReadBackupFile(argv[2], &backup, GSM_Backup_GammuUCS2); gammu_test_result(error, "GSM_ReadBackupFile"); /* Compare size */ test_result(pbk.EntriesNum == backup.PhonePhonebook[0]->EntriesNum); /* Compare content */ for (i = 0; i < pbk.EntriesNum; i++) { test_result(pbk.Entries[i].EntryType == backup.PhonePhonebook[0]->Entries[i].EntryType); printf("Entry type: %d\n", pbk.Entries[i].EntryType); switch (pbk.Entries[i].EntryType) { case PBK_Number_General: case PBK_Number_Mobile: case PBK_Number_Fax: case PBK_Number_Pager: case PBK_Number_Other: case PBK_Number_Messaging: case PBK_Number_Video: case PBK_Text_Note: case PBK_Text_Postal: case PBK_Text_Email: case PBK_Text_Email2: case PBK_Text_URL: case PBK_Text_LUID: case PBK_Text_Name: case PBK_Text_LastName: case PBK_Text_FirstName: case PBK_Text_SecondName: case PBK_Text_FormalName: case PBK_Text_NamePrefix: case PBK_Text_NameSuffix: case PBK_Text_NickName: case PBK_Text_Company: case PBK_Text_JobTitle: case PBK_Text_StreetAddress: case PBK_Text_City: case PBK_Text_State: case PBK_Text_Zip: case PBK_Text_Country: case PBK_Text_Custom1: case PBK_Text_Custom2: case PBK_Text_Custom3: case PBK_Text_Custom4: case PBK_Text_UserID: case PBK_Text_PictureName: case PBK_PushToTalkID: case PBK_Text_VOIP: case PBK_Text_SWIS: case PBK_Text_WVID: case PBK_Text_SIP: case PBK_Text_DTMF: test_result(mywstrncmp(pbk.Entries[i].Text, backup.PhonePhonebook[0]->Entries[i].Text, 0) == TRUE); break; case PBK_Photo: test_result((pbk.Entries[i].Picture.Length == backup.PhonePhonebook[0]->Entries[i].Picture.Length) 
&& memcmp(pbk.Entries[i].Picture.Buffer, backup.PhonePhonebook[0]->Entries[i].Picture.Buffer, pbk.Entries[i].Picture.Length) == 0); free(pbk.Entries[i].Picture.Buffer); break; case PBK_Date: case PBK_LastModified: break; case PBK_Category: case PBK_Private: case PBK_RingtoneID: case PBK_PictureID: case PBK_CallLength: case PBK_Caller_Group: test_result(pbk.Entries[i].Number == backup.PhonePhonebook[0]->Entries[i].Number); break; } } error = PrintMemoryEntry(&pbk, NULL); gammu_test_result(error, "PrintMemoryEntry"); /* Free data */ GSM_FreeBackup(&backup); /* We're done */ return 0; } /* Editor configuration * vim: noexpandtab sw=8 ts=8 sts=8 tw=72: */
gammu/gammu
tests/vcard-read.c
C
gpl-2.0
4,619
/* */ /* */ #include <linux/module.h> #include <linux/kernel.h> #include "felica_cal.h" /* */ #define FELICA_I2C_SLAVE_ADDRESS 0x56 #define FELICA_I2C_REG_ADDRSS_01 0x01 #define FELICA_I2C_REG_ADDRSS_02 0x02 /* */ /* */ static int felica_cal_open (struct inode *inode, struct file *fp) { FELICA_DEBUG_MSG_LOW("[FELICA_CAL] felica_cal_open\n"); return 0; } /* */ static int felica_cal_release (struct inode *inode, struct file *fp) { FELICA_DEBUG_MSG_LOW("[FELICA_CAL] felica_cal_release \n"); return 0; } /* */ static ssize_t felica_cal_read(struct file *fp, char *buf, size_t count, loff_t *pos) { unsigned char read_buf = 0x00; int rc = -1; FELICA_DEBUG_MSG_LOW("[FELICA_CAL] felica_cal_read - start \n"); /* */ if(NULL == fp) { FELICA_DEBUG_MSG_HIGH("[FELICA_CAL] ERROR fp \n"); return -1; } if(NULL == buf) { FELICA_DEBUG_MSG_HIGH("[FELICA_CAL] ERROR buf \n"); return -1; } if(1 != count) { FELICA_DEBUG_MSG_HIGH("[FELICA_CAL] ERROR count \n"); return -1; } if(NULL == pos) { FELICA_DEBUG_MSG_HIGH("[FELICA_CAL] ERROR file \n"); return -1; } rc = felica_i2c_read(FELICA_I2C_REG_ADDRSS_01, &read_buf, 1); if(rc) { FELICA_DEBUG_MSG_HIGH("[FELICA_CAL] felica_i2c_read : %d \n",rc); return -1; } FELICA_DEBUG_MSG_LOW("[FELICA_CAL] felica_cal : 0x%02x \n",read_buf); rc = copy_to_user(buf, &read_buf, count); if(rc) { FELICA_DEBUG_MSG_HIGH("[FELICA_CAL] ERROR - copy_from_user \n"); return -1; } FELICA_DEBUG_MSG_LOW("[FELICA_CAL] felica_cal_read - end \n"); return 1; } /* */ static ssize_t felica_cal_write(struct file *fp, const char *buf, size_t count, loff_t *pos) { unsigned char write_buf = 0x00, read_buf = 0x00; int rc = -1; FELICA_DEBUG_MSG_LOW("[FELICA_CAL] felica_cal_write - start \n"); /* */ if(NULL == fp) { FELICA_DEBUG_MSG_HIGH("[FELICA_CAL] ERROR file \n"); return -1; } if(NULL == buf) { FELICA_DEBUG_MSG_HIGH("[FELICA_CAL] ERROR buf \n"); return -1; } if(1 != count) { FELICA_DEBUG_MSG_HIGH("[FELICA_CAL]ERROR count \n"); return -1; } if(NULL == pos) { 
FELICA_DEBUG_MSG_HIGH("[FELICA_CAL] ERROR file \n"); return -1; } /* */ rc = copy_from_user(&write_buf, buf, count); if(rc) { FELICA_DEBUG_MSG_HIGH("[FELICA_CAL] ERROR - copy_from_user \n"); return -1; } FELICA_DEBUG_MSG_LOW("[FELICA_CAL] write_buf : 0x%02x \n",write_buf); /* */ rc = felica_i2c_read(FELICA_I2C_REG_ADDRSS_01, &read_buf, 1); udelay(50); /* */ write_buf = write_buf | 0x80; rc = felica_i2c_write(FELICA_I2C_REG_ADDRSS_01, &write_buf, 1); mdelay(2); /* */ rc = felica_i2c_read(FELICA_I2C_REG_ADDRSS_01, &read_buf, 1); udelay(50); FELICA_DEBUG_MSG_LOW("[FELICA_CAL] felica_cal_write - end \n"); return 1; } /* */ static struct file_operations felica_cal_fops = { .owner = THIS_MODULE, .open = felica_cal_open, .read = felica_cal_read, .write = felica_cal_write, .release = felica_cal_release, }; static struct miscdevice felica_cal_device = { .minor = 242, .name = FELICA_CAL_NAME, .fops = &felica_cal_fops }; /* */ static int felica_cal_init(void) { int rc = -1; FELICA_DEBUG_MSG_LOW("[FELICA_CAL] felica_cal_init - start \n"); /* */ rc = misc_register(&felica_cal_device); if (rc < 0) { FELICA_DEBUG_MSG_HIGH("[FELICA_CAL] ERROR - can not register felica_cal \n"); return rc; } FELICA_DEBUG_MSG_LOW("[FELICA_CAL] felica_cal_init - end \n"); return 0; } /* */ static void felica_cal_exit(void) { FELICA_DEBUG_MSG_LOW("[FELICA_CAL] felica_cal_exit - start \n"); /* */ misc_deregister(&felica_cal_device); FELICA_DEBUG_MSG_LOW("[FELICA_CAL] felica_cal_exit - end \n"); } module_init(felica_cal_init); module_exit(felica_cal_exit); MODULE_LICENSE("Dual BSD/GPL");
curbthepain/revkernel_us990
drivers/felica/felica_cal.c
C
gpl-2.0
4,674
#!/bin/sh
# Native AmigaOS 4 build script for VICE: configures the tree, builds
# the fixdeps helper, runs it over src/, then builds the binaries and
# the binary distribution.
#
# Fixed: any failing step now aborts the script instead of letting
# later steps (fixdeps, make) run against a broken tree.
set -e

# see if we are in the top of the tree
if [ ! -f configure.proto ]; then
  cd ../..
  if [ ! -f configure.proto ]; then
    echo "please run this script from the base of the VICE directory"
    echo "or from the appropriate build directory"
    exit 1
  fi
fi

# kept from the original script; not referenced below
curdir=`pwd`

./configure

# build the dependency fixer and run it from within src/
gcc src/arch/amigaos/fixdeps.c -o ./fixdeps
cd src
../fixdeps
cd ..

make
make bindist
AreaScout/vice-gles2
build/amigaos/build-os4-native.sh
Shell
gpl-2.0
384
# original code is from openmips gb Team: [OMaClockLcd] Renderer
#
# Thx to arn354
#
import math

from Components.Renderer.Renderer import Renderer
from skin import parseColor
from enigma import eCanvas, eSize, gRGB, eRect


class AnalogClockLCD(Renderer):
	"""Renderer that draws one analog-clock hand onto an eCanvas.

	The source text is split as "<value>[,<hand>]": <value> is the
	hand position on a 60-unit dial and <hand> selects the colour
	('sec', 'min', anything else = hour) -- assumption based on the
	parsing in changed(); confirm against the skins that use it.
	"""

	def __init__(self):
		Renderer.__init__(self)
		# default colours: seconds/minutes red, hour hand white,
		# background opaque black
		self.fColor = gRGB(255, 255, 255, 0)
		self.fColors = gRGB(255, 0, 0, 0)
		self.fColorm = gRGB(255, 0, 0, 0)
		self.fColorh = gRGB(255, 255, 255, 0)
		self.bColor = gRGB(0, 0, 0, 255)
		self.forend = -1  # last value drawn; -1 forces the first draw
		self.linewidth = 1
		self.positionheight = 1
		self.positionwidth = 1
		self.linesize = 1

	GUI_WIDGET = eCanvas

	def applySkin(self, desktop, parent):
		"""Consume the renderer-specific skin attributes; all other
		attributes are passed through to the base Renderer."""
		attribs = []
		for (attrib, what,) in self.skinAttributes:
			if (attrib == 'hColor'):
				self.fColorh = parseColor(what)
			elif (attrib == 'mColor'):
				self.fColorm = parseColor(what)
			elif (attrib == 'sColor'):
				self.fColors = parseColor(what)
			elif (attrib == 'linewidth'):
				self.linewidth = int(what)
			elif (attrib == 'positionheight'):
				self.positionheight = int(what)
			elif (attrib == 'positionwidth'):
				self.positionwidth = int(what)
			elif (attrib == 'linesize'):
				self.linesize = int(what)
			else:
				attribs.append((attrib, what))
		self.skinAttributes = attribs
		return Renderer.applySkin(self, desktop, parent)

	def calc(self, w, r, m, m1):
		"""Map dial value w (0-59) and radius r to the hand's end
		point, relative to the centre (m, m1)."""
		a = (w * 6)  # 60 dial units -> 6 degrees per unit
		z = (math.pi / 180)  # degrees to radians
		x = int(round((r * math.sin((a * z)))))
		y = int(round((r * math.cos((a * z)))))
		return ((m + x), (m1 - y))

	def hand(self, opt):
		"""Draw the hand selected by opt from the widget centre."""
		width = self.positionwidth
		height = self.positionheight
		r = (width / 2)
		r1 = (height / 2)
		if opt == 'sec':
			self.fColor = self.fColors
		elif opt == 'min':
			self.fColor = self.fColorm
		else:
			self.fColor = self.fColorh
		(endX, endY,) = self.calc(self.forend, self.linesize, r, r1)
		self.line_draw(r, r1, endX, endY)

	def line_draw(self, x0, y0, x1, y1):
		"""Bresenham-style line from (x0, y0) to (x1, y1), drawn as
		linewidth-sized filled rectangles in the current colour."""
		steep = (abs((y1 - y0)) > abs((x1 - x0)))
		# iterate along the major axis: swap so x is always major
		if steep:
			x0, y0 = y0, x0
			x1, y1 = y1, x1
		if (x0 > x1):
			x0, x1 = x1, x0
			y0, y1 = y1, y0
		if (y0 < y1):
			ystep = 1
		else:
			ystep = -1
		deltax = (x1 - x0)
		deltay = abs((y1 - y0))
		error = (-deltax / 2)
		y = int(y0)
		for x in range(int(x0), (int(x1) + 1)):
			if steep:
				self.instance.fillRect(eRect(y, x, self.linewidth, self.linewidth), self.fColor)
			else:
				self.instance.fillRect(eRect(x, y, self.linewidth, self.linewidth), self.fColor)
			error = (error + deltay)
			if (error > 0):
				y = (y + ystep)
				error = (error - deltax)

	def changed(self, what):
		"""Source changed: redraw the hand when the value differs
		from the one last drawn."""
		opt = (self.source.text).split(',')
		try:
			sopt = int(opt[0])
			if len(opt) < 2:
				opt.append('')
		except Exception as e:
			# source text not (yet) parseable -- nothing to draw
			return
		if (what[0] == self.CHANGED_CLEAR):
			pass
		elif self.instance:
			self.instance.show()
			if (self.forend != sopt):
				self.forend = sopt
				self.instance.clear(self.bColor)
				self.hand(opt[1])

	def parseSize(self, str):
		"""Parse a skin size string "w,h" into an eSize."""
		(x, y,) = str.split(',')
		return eSize(int(x), int(y))

	def postWidgetCreate(self, instance):
		"""Apply the skin 'size' attribute to the canvas, then clear
		it to the background colour."""
		for (attrib, value,) in self.skinAttributes:
			if ((attrib == 'size') and self.instance.setSize(self.parseSize(value))):
				pass
		self.instance.clear(self.bColor)
openatv/enigma2
lib/python/Components/Renderer/AnalogClockLCD.py
Python
gpl-2.0
3,177
/* APPLE LOCAL file mainline */ /* Darwin pragma for __attribute__ ((ms_struct)). */ /* { dg-do compile { target *-*-darwin* } } */ /* { dg-options "-Wall" } */ #pragma ms_struct on #pragma ms_struct off #pragma ms_struct reset #pragma ms_struct /* { dg-warning "malformed" } */ #pragma ms_struct on top of spaghetti /* { dg-warning "junk" } */ struct foo { int a; int b; char c; };
unofficial-opensource-apple/gcc_40
gcc/testsuite/gcc.dg/pragma-ms_struct.c
C
gpl-2.0
397
/*******************************************************************************
 *
 * File archive.h
 *
 * Copyright (C) 2011 Martin Luescher
 *
 * This software is distributed under the terms of the GNU General Public
 * License (GPL)
 *
 *******************************************************************************/

#ifndef ARCHIVE_H
#define ARCHIVE_H

#ifndef SU3_H
#include "su3.h"
#endif

/* ARCHIVE_C: gauge-field configuration I/O.  The write/read pair and
   the export/import pair presumably use different on-disk formats
   (per-process vs. portable) -- confirm against archive.c. */
extern void write_cnfg(char *out);
extern void read_cnfg(char *in);
extern void export_cnfg(char *out);
extern void import_cnfg(char *in);

/* MARCHIVE_C: momentum-field I/O (same write/read vs. export/import
   split as above). */
extern void write_mfld(char *out);
extern void read_mfld(char *in);
extern void export_mfld(char *out);
extern void import_mfld(char *in);

/* SARCHIVE_C: spinor-field I/O; the field to save or load is passed
   explicitly as sd. */
extern void write_sfld(char *out,spinor_dble *sd);
extern void read_sfld(char *in,spinor_dble *sd);
extern void export_sfld(char *out,spinor_dble *sd);
extern void import_sfld(char *in,spinor_dble *sd);

#endif
bjoern-leder/mrw
include/archive.h
C
gpl-2.0
932
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html> <head> <% orbi_flow_head() %> <% hijack_language() %> </head> <body> <script> function is_read() { if($$('#have_read').is(':checked')) $$('#next_btn').attr("disabled",false); else $$('#next_btn').attr("disabled",""); } </script> <% tos_form_submit() %> <input type="hidden" name="hid_save_time" value="1"> <div id="container"> <div class="dialog-content"> <!-- Welcome --> <div id="card-tos" class="install-card clearfix"> <img class="netgear-logo" src="images/netgear.png"> <div class="desc-title">$tos_title</div> <div class="tos-area"> <h1 >NETGEAR, INC</h1> <p>Orbiアプリケーション、OrbiソフトウェアおよびOrbiデバイス マスターサービス利用規約</p> <p>本マスターサービス利用規約(「本契約」)ならびに本契約内で言及されるプライバシーポリシー(https://www.netgear.com/about/privacy-policy/)は、本契約を承諾する個人または企業(「お客様」)に対してNETGEAR, INC.(「NETGEAR」)が本ソフトウェア(以下で定義します)および本サービス(本Orbiデバイスまたは本Orbiアプリケーションの使用を通じて利用可能となるサービスおよび情報をいいます)を提供する条件としてNETGEARが合意した規約を記載したものです。本契約は次の各号のいずれにも該当しない場合に適用されます。(i) お客様が本Orbiアプリケーション、本ソフトウェアまたはOrbiデバイスを対象とする別途の購入契約をNETGEARと締結している場合(その場合は当該別途の契約が優先します)。(ii) NETGEARの他の規約が適用される場合。</p> <p>本ソフトウェアをダウンロード、インストールまたはその他の形で使用することにより、お客様は本契約の条項を承諾し法的に拘束されることになります。お客様が本契約の条項に同意しない場合には、本ソフトウェアのダウンロード、インストールまたはその他の形での使用を行わないようお願いいたします。</p> <p>お客様が第三者のために本ソフトウェアを取得する場合、お客様はNETGEARに対して、お客様は当該第三者を代理して当該第三者のために本契約の条項を承諾する法的権限を有していることを表明します。本契約を以下のとおり締結することにより、NETGEARと当該第三者との間に法的拘束力のある契約が成立し、その場合本契約で使用する「お客様」の語は当該第三者を意味するものとします。</p><p>お客様の居住地が、インターネットベースのアプリケーションの使用が年齢により制限されている法域または本契約等の契約を締結する能力が年齢により制限されている法域内にあり、かつお客様が当該法域の管轄下にあるとともにそれらの年齢制限を下回る場合には、お客様は本契約の締結およびNETGEARソフトウェアのダウンロード、インストールまたは使用を行うことができません。お客様は、本契約を締結することにより、お客様がお客様の法域において本ソフトウェアの使用を許可されていることを確認済である旨を明示的に表明するものとします。</p> <p>1.ソフトウェアの定義:本契約は以下の品目の入手および使用に対して適用されます。(i) 本契約が添付されたNETGEARの本Orbiアプリケーションで、お客様が購入したNETGEARの製品もしくはサービスまたはNETGEARがNETGEARのウェブサイト、NETGEARの販売代理店のウェブサイト、Google PlayストアもしくはApple App Storeでダウンロードにより提供する製品もしくはサービスに含まれるもの。(ii) 
それらのソフトウェアのインストールマニュアル、ユーザーガイドまたはその他の文書で、本Orbiアプリケーションに関連してNETGEARが提供するもの(「本文書」と総称します。)、および (iii) それらのソフトウェアおよび本文書の新規のアップグレードまたはアップデートで、NETGEARから取得したもの(上記のすべてを「本ソフトウェア」と総称します)。</p> <p>2.ライセンスの許諾:ライセンスについては以下のとおりとします。ただし、お客様による本契約の条項の遵守を前提とします。</p> <p>2.1 非排他的ライセンス:NETGEARおよびそのサプライヤーは、お客様に対して、本OrbiデバイスおよびNETGEAR製品と共に本ソフトウェアのコピーを使用する非排他的ライセンスを付与します。それ以外の権利は付与されません。</p> <p>2.2 複製:お客様は、バックアップの目的で、または本Orbiデバイスおよびその他のNETGEAR製品を使用するために必要な範囲で、本ソフトウェアを複製することができます。ただし、お客様は、本ソフトウェアの元のコピーに表示されている著作権表示およびその他の権利の表示をすべて複製するものとします。</p> <p>2.3 サードパーティソフトウェア:(a) NETGEARは本ソフトウェアの一部として、または本ソフトウェアに関係して、以下のソフトウェアを提供することができます。(i) 別途のサードパーティのライセンス条項(「サードパーティライセンス条項」)に基づいて提供するサードパーティソフトウェア、または (ii) 公開されたライセンスの条項(「オープンソースライセンス条項」)に基づいて提供する「オープンソース」ソフトウェア。</p> <p> (b) お客様は次の点につき確認し合意するものとします。(i) 上記のサードパーティソフトウェアおよびオープンソースソフトウェアは、該当するサードパーティライセンス条項およびオープンソースライセンス条項のみが適用されること、および (ii) 第11条で別途規定する場合を除き、それらの条項は該当するライセンサーとお客様との間でのみ有効であり、NETGEARはそれらのソフトウェアに関して本契約上の義務または責任を負わないこと。</p> <p>2.4 アップデート:NETGEARは、本サービス、本ソフトウェアまたは本Orbiデバイスのパフォーマンスを改善するパッチ、不具合の修正、アップデート、アップグレードおよびその他の変更(「本アップデート」)を随時開発しています。<b>本サービス、本ソフトウェアまたは本Orbiデバイスを使用した場合、お客様は本アップデートの自動的なダウンロードおよびインストールに同意したものとみなされます。この場合、本アップデートはお客様のモバイルデバイスおよび本Orbiデバイス(のうち該当するもの)に自動的にダウンロードおよびインストールされ、お客様に別途の通知を行うことや別途の同意を求めることはありません。</b>お客様がそのような本アップデートを希望しない場合、お客様が利用できる回避手段は次の各号のいずれかに限られます。(i) アプリケーションの本アップデートについては、お客様のモバイルデバイスの設定により自動更新機能をオフにするか、またはNETGEARもしくはOrbiアプリケーションをお客様のモバイルデバイスから削除すること。(ii) 本Orbiデバイスまたは本ソフトウェアについては、お客様のOrbiアカウントを廃止するか、またはお客様の本Orbiデバイスの使用を中止すること。お客様が上記のいずれかを行った後にその取消しを行った場合(たとえば、お客様のモバイルデバイスの自動更新機能をオンにする、またはお客様が本Orbiデバイスを再び使い始める等)、本アップデートの自動的なダウンロードおよびインストールが再開されることをお客様は理解しているものとし、また、自動的な本アップデートに同意したものとします。いずれかの本アップデートが自動的にインストールされない場合、お客様が本サービス、本ソフトウェアおよび本Orbiデバイスの全機能について使用とアクセスを継続するためには本アップデートを手動でインストールする必要が生じることがあり、そのときお客様は当該本アップデートを直ちにインストールするものとします。</p> <p>3.使用の制限:</p> <p>3.1 一般的制限:お客様は、第2条で明示的に許可された場合以外は(かつ、明示的に許可された範囲以外については)、または適用法下で当該権利を排除できない場合を除き、以下の事項を行うこと(および、第三者にそれを許可することまたはそれを扇動すること)はできません。(i) 
バックアップの目的またはお客様の本サービス、本ソフトウェアまたは本Orbiデバイスを使用する目的で必要となる場合以外に、本ソフトウェアのコピーその他の複製を何らかの形で行うこと、(ii) 本ソフトウェア(またはそのコピーもしくはコンポーネント)またはそれに対する権利の、第三者への配布、配信、サブライセンス、リース、レンタル、貸与、譲渡、またはその他の形での移転を行うこと、(iii) 本ソフトウェア上または本ソフトウェア内にある製品識別、商標、著作権、機密性、専有性その他に関する表示または注意書きを除去、改変または隠蔽すること、(iv) タイムシェアリングもしくはサービス窓口で使用する目的、または本契約上明示的に許可されていないその他の方法で本ソフトウェアを使用すること、または (v) 本ソフトウェア(またはそのコピーもしくはその一部)をお客様の製品に組み込むこと。</p> <p>3.2 変更およびリバースエンジニアリング:お客様は、第2条で明示的に許可された場合以外は(かつ、明示的に許可された範囲以外については)、または適用法下で当該権利を排除できない場合を除き、以下の事項を行うこと(および、第三者にそれを許可することまたはそれを扇動すること)はできません。(i) 本ソフトウェアまたはその一部をリバースエンジニアリング、逆アセンブルまたは逆コンパイルすること、もしくはソースコードを解明しようとすること、または (ii) 本ソフトウェアの改変版、適合版、変更版、変換版、改良版または派生版(「二次著作物」)を作成すること。</p> <p>3.3 協力:お客様は、本契約に違反する本ソフトウェアの使用またはアクセスを阻止および特定するためにNETGEARに協力し、NETGEARから求められた合理的な援助をすべて行うものとします。</p> <p>3.4.高リスク用途:本ソフトウェア、本サービスおよび本Orbiデバイスは、確実な動作が求められる用途または環境(核関連施設の運転、航空ナビゲーションもしくは航空通信、航空管制、生命維持装置、外科用埋め込み機器、兵器システム、または本ソフトウェア、本サービスおよび本Orbiデバイス(もしくはそれらの一部)に不具合が生じた場合に死亡、負傷もしくは重大な物理的もしくは環境的損害を直接発生させる恐れのあるその他のアプリケーション、デバイスもしくはシステム等(「高リスク行為」))の下で使用することを想定または保証していません。お客様は、本契約の他の規定にかかわらず、高リスク行為に関係して本ソフトウェア、本サービスおよび本Orbiデバイス(もしくはそれらの一部)を使用することまたは第三者がそれらを使用することを許可することができません。お客様が高リスク行為に関係して本ソフトウェア、本サービスおよび本Orbiデバイス(またはそれらの一部)を使用した場合(またはお客様が使用を許可した第三者がそれらを使用した場合)、それに伴う危険はすべてお客様が負担するとともに、お客様はそのような使用の結果としてNETGEAR側に生じた、お客様に起因するすべての請求、費用および責任に対してNETGEARを補償、防御および免責するものとします。</p> <p>4.料金およびサポート:</p> <p>4.1 <b>全般:</b>お客様は、すべての手数料および料金について、その支払期限が到来した時点で有効な手数料、料金および請求に関する規定に従って支払を行うことに同意します。お客様が指定したクレジットカードを利用する場合にはお客様の決済業者との契約が適用され、お客様によるクレジットカードの使用に関連してお客様の権利義務を確認するためには本契約でなく決済業者との当該契約を参照する必要があります。お客様は、NETGEARにクレジットカード番号および関連する支払情報を提供した場合、支払期限が到来したすべての手数料および料金についてNETGEARがお客様のアカウントに対して直ちに請求を行う権限があること、およびNETGEARが別途の通知や同意の確認を行わないことに同意したものとみなされます。お客様は、お客様の請求先住所またはお客様が本契約上利用するクレジットカードに変更があった場合直ちにNETGEARに通知することに同意するものとします。NETGEARは、ウェブサイトへの掲載またはお客様への電子メールの送信により、将来、価格および請求方法を随時変更することができます。</p> <p>a. 
<b>税金:</b>お客様が本契約に基づいて行った本Orbiデバイスもしくは本サービスの購入または本Orbiデバイスもしくは本サービスに関する支払が売上税の賦課対象となり、かつお客様が当該売上税額をNETGEARに未送金である場合、お客様は当該売上税額および関係する罰金または利子を該当の税務当局に支払う責任を負います。本条にいう「売上税」とは、すべての売上税または使用税および売上金額を基準とするその他すべての税金でNETGEARが顧客に移転することを許可されたものであり、該当の課税法域で売上税または使用税が賦課されない場合は売上税と同等の効果を有するものをいいます。</p> <p>b. <b>源泉徴収:</b>お客様は、NETGEARに手数料を支払う場合には必ず、源泉徴収不要な形で、かつ源泉徴収を行わずに支払を行うことに同意します。NETGEARに対する手数料の支払に対して源泉徴収課税が行われた場合にはお客様はそれを負担する責任を負うとともに、お客様は当該税額の支払を証明するために該当の課税当局が発行した公式の領収証または当社が正当に請求するその他の証拠書類をNETGEARに提出するものとします。</p> <p>c. <b>紛争:</b>当社が請求した料金についてお客様が係争を提起する場合、当該料金を記載したクレジットカード明細書の受領後7日以内にNETGEARに書面で通知してください。請求に関する係争は次の電子メールアドレスまで通知するものとします。support@NETGEAR.com</p> <p>4.2<b> サポート:</b>NETGEARは、本サービス、本ソフトウェアまたは本Orbiデバイスのアップデート、サポート、アップグレードまたは改良を、理由によらず通知なしに随時停止することができます。</p> <p>5. <b>知的財産権:</b></p> <p>5.1 ソフトウェアに対する権利:本サービス、本ソフトウェアおよび本Orbiデバイスの知的財産権は、米国およびその他の関連法域の著作権法(国際条約を含みます)、ならびに米国、諸外国および国際協定における知的財産権または産業財産権の保護に関する法律、法規および条約によって保護されています。NETGEARおよびそのライセンサーは、本ソフトウェア(あらゆるコピーおよびその一部を含む)に対して唯一かつ排他的な権利、資格および所有権、ならびにそこに含まれるすべての知的財産権を有し保持するものとします。NETGEARおよびそのライセンサーは、本契約で明示的に与えられていないすべての権利を有します。お客様は、本ソフトウェア(お客様に提供されたすべてのコピーまたはお客様が作成したすべてのコピーまたはお客様のために作成されたすべてのコピーを含む)がお客様にライセンスされたものであり、販売されたものではないことを確認し合意するものとします。</p> <p>5.2 協力情報および二次著作物に対する権利:本サービス、本ソフトウェアまたは本Orbiデバイスに関してお客様またはお客様の代理人から提供された意見、バグ報告、操作性に関するフィードバック、その他の提案およびフィードバック(「協力情報」)、ならびにお客様によってまたはお客様のために作成された本ソフトウェアの二次著作物(本契約で許可されていないものも含む)およびそこに含まれるすべての知的財産権は、NETGEARまたはそのライセンサーの唯一かつ排他的な財産であるものとします。</p> <p>5.3 権利の譲渡:お客様は、本契約にて、協力情報または二次著作物およびそこに含まれるすべての知的財産権に対する世界規模のすべての権利、資格、および所有権をNETGEARに譲渡することに取消不能の形で合意するものとします。また、NETGEARからの要求に応じて、上記の権利の完成、記録および施行に関してNETGEARの負担でNETGEARを支援することに合意するものとします。関係法令によって上記の譲渡が無効である場合のみ、お客様は、本契約にて、上記の協力情報および二次著作物を既知または将来考えられるいかなる方法でも使用、複製、その他の形または営利目的で利用できるようにするための排他的、譲渡可能、サブライセンス可能、無制限、および恒久的な世界規模のライセンスをNETGEARに付与することに取消不能の形で合意するものとします。</p> <p>5.4 
<b>技術およびデータの収集:</b>お客様は、本ソフトウェア、本サービスおよび本Orbiデバイスを使用するために必要なすべての通信回線、モニター、PC、モデム、ルーター、モバイルデバイス、お客様のインターネットサービスプロバイダー(「ISP」)およびお客様のモバイルデバイスキャリア(「キャリア」)等、ならびにアクセスをお客様の負担で用意および保守する必要があります。</p> <p><b>本ソフトウェアの通常のインストールおよび機能の一部として、NETGEARは本ソフトウェアまたはOrbiネットワークのパフォーマンスを最適化するためのデータを収集します。収集される情報には、設定完了までに費やされたクリック回数および時間、設定の開始から終了までの経路、インターネットサービスプロバイダーからのWiFi通信速度、Orbiネットワークとお客様のモバイルデバイス間のLAN通信速度、およびお客様のネットワークを設定するために使用されたモバイルデバイスのオペレーティングシステムとモデルが含まれることがあります(ただしそれらに限りません)。本ソフトウェアの機能の一部として、Orbiネットワークのトポロジが変更されたとき、またはOrbiデバイスおよびサービスに関する技術更新があるときに、NETGEARからお客様に通知が送られることがあります。</b>Orbiには、本ソフトウェアが実装する自動ファームウェア更新および自動ネットワークトポロジ最適化の日次実行機能が実装されることがあります。 </p> <p>本ソフトウェアを使用することによって、お客様は、NETGEARが上記の情報を収集する場合があることに同意するものとします。</p> <p>NETGEARは将来的に、本サービス経由でサードパーティ製品およびサービス(「本サードパーティ製品およびサービス」)の一つ以上と本Orbiデバイス、本ソフトウェアおよび本サービスとを接続する機会をお客様に提供することがあります。お客様は本サードパーティ製品およびサービスを接続するかどうか、および接続したい本サードパーティ製品およびサービスを決定します。</p> <p>お客様が特定の本サードパーティ製品およびサービスについて承諾した場合、その接続を有効にするために、それ以後お客様はNETGEARがお客様ならびにお客様の本サービス、本ソフトウェアおよび本Orbiデバイスに関する情報および制御データ(個人情報を含みます)を、当社のプライバシーポリシーに従って交換することに同意します。この情報が特定の本サードパーティ製品およびサービスとの間で共有された場合、その後の当該情報の利用についてはNETGEARのプライバシーポリシーではなく当該サードパーティのプライバシーポリシーが適用されます。お客様は、NETGEARが本サードパーティ製品およびサービスの安全性について一切の表明または保証を行わないことを認め、それに同意します。したがってNETGEARは、第11条で別途規定する場合を除き、お客様による本サードパーティ製品およびサービスの利用、または本サードパーティ製品およびサービスを原因とする(もしくはそれらに関係する)負傷、死亡、資産の損失その他の損害もしくは損失に対して責任を負いません。本サードパーティ製品およびサービスについて不明の点があれば当該サードパーティまでお問い合わせください。お客様およびお客様による本Orbiデバイス、本ソフトウェアおよび本サービスの利用に対して、お客様が居住する地域のデータ保護、プライバシーその他に関する法令が適用される場合があります。お客様は、お客様が本Orbiデバイス、本ソフトウェアおよび本サービスを利用する際に適用法令を遵守していることを確認する責任は(NETGEARではなく)お客様自身が負うことに同意します。</p> <p>本ソフトウェアのインストール中およびインストール後にNETGEARが収集するお客様の情報に対しては当社のプライバシーポリシーが適用されます。当社のプライバシーポリシーは次のURLで閲覧できます。</p><p>http://www.netgear.com/about/privacypolicy/</p> <p>6. 
<b>登録およびアカウントデータ:</b></p> <p>6.1.本Orbiデバイス、本ソフトウェアおよび本サービス上で(またはそれらを通じて)提供される特定の機能を利用するためには、お客様がアカウントの登録と開設(ユーザーIDとパスワード(「アカウントデータ」)の設定を含みます)を行うことが必要となる場合があります。お客様は、登録時に正確、完全かつ最新の情報(「登録データ」)を提供することに同意します。お客様は、NETGEARが、お客様のアカウントの維持管理および料金の請求を目的としてお客様の提供した登録データを保存および使用する場合があることに同意します。お客様は自身のアカウントデータの機密保持およびお客様のアカウントで生じた行為について全面的に責任を負います。お客様は、お客様のアカウントまたはアカウントデータが不正に使用された場合直ちにNETGEARに通知することに同意します。</p> <p>6.2.NETGEARはシングルサインオン処理(「シングルサインオン」)を用いた登録ソリューションの実装および統合を行いました。シングルサインオンとは、お客様がNETGEARにアカウントを開設した場合、お客様はその後NETGEARの他のサービスでお客様のユーザーアカウントおよびユーザーIDを使用でき、サービス毎に別個のユーザーアカウントおよびユーザーIDを維持管理する必要がなくなることをいいます。また、お客様はシングルサインオンにより、異なるデバイスで同時にサービスへのサインインおよびサービスからのサインオフを行うことが可能となります。</p> <p>6.3 お客様はシングルサインオンにより、アプリケーションプログラミングインターフェイス(API)その他のソフトウェアを通じて、Facebook、TwitterおよびGoogle+等のソーシャルメディアサイトがお客様に提供するサービスおよびお客様のアカウントをシングルサインオンと接続することが可能となります。 </p> <p>お客様がシングルサインオンによりお客様のアカウントを当該サードパーティのサイトと接続した場合、お客様は当社がそれらのアカウントの情報(個人を特定できる情報を含む場合があります)にアクセスすることに同意したものとみなされます(その場合でもNETGEARプライバシーポリシーにより行われる開示は制限または排除されません)。さらに、お客様は、お客様のソーシャルメディアサイトその他のサードパーティのアカウント経由でシングルサインオンにアクセスすることにより、シングルサインオンを通じてお客様が行った行為が当該サードパーティのサイト上で公開される場合があることを理解します。それらのサードパーティサイトを利用するかどうかについてはお客様の選択によります。シングルサインオンによりお客様のアカウントを当該サードパーティのサイトと接続する前に、またはお客様がそれらのサイトを通じてページまたは情報をお客様の友人と共有する前に、各サードパーティのサイトのプライバシーポリシーを確認するようお薦めします。</p> <p>お客様はシングルサインオンユーザー名の所有権を持たないことに同意します。NETGEARはお客様のシングルサインオンユーザー名を回収または使用することができます。</p> <p>6.4.お客様はお客様のアカウントデータのセキュリティを管理し(具体的には、推測されやすいユーザーIDもしくはパスワードを避けること、お客様のパスワードを定期的に変更すること、およびお客様のパスワードを公開しないことまたは他のユーザーまたは第三者にお客様のアカウントデータもしくはサービスにアクセスさせないことなどの手段によります)、お客様のアカウントデータおよびお客様のアカウントに対する第三者の不正なアクセスを防止します。</p> <p>6.5.NETGEARは、お客様のアカウントデータを使用する(またはお客様のアカウントデータを提供する)ユーザーまたは第三者から受領した(またはそれらの者が行った)指示を、お客様による指示とみなすことができます。</p> <p>6.6.お客様はお客様以外のユーザーまたは個人のアカウントデータまたはサービスを、当該各サービスの保有者であるユーザーまたは個人の許可なく使用しないことに同意します。</p> <p>6.7.お客様は、お客様の登録データもしくはアカウントデータの不正使用またはその他のセキュリティの侵害を発見した場合直ちにNETGEARに通知するものとします。</p> <p>6.8.お客様はNETGEARの各サービスにアクセスすることによりお客様の登録データまたはアカウントデータへのアクセスおよびその変更を随時行うことができます。</p> <p>7.終了:</p> 
<p>7.1 契約期間:本契約は、お客様が本契約に同意した日から有効になり、本契約に従って終了するまで無期限に継続されます。</p> <p>7.2 終了:NETGEARが有する他の権利および救済手段に影響を与えることなく、お客様が本契約の条項に違反した場合、お客様への即時通告とともに本契約を終了する資格をNETGEARは有します。そのような場合、NETGEARはそれぞれの案分された料金をお客様に払い戻します。また、お客様は、お客様が所有または管理する本ソフトウェアのすべてのコピーを破壊することによって、いつでも本契約を終了できます。NETGEARが公示、お客様へのメール通知、Webサイトへの投稿またはプレスリリースを通して本契約の終了を要請した場合、その公示、Webサイトへの投稿、またはプレスリリースで指定された日付(「終了日」)をもって本契約は終了します。お客様は、終了日またはそれ以前に本ソフトウェアのコピーの使用を中止することに同意するものとします。本契約または本ソフトウェアに関するライセンスがいかなる理由で終了した場合でも、第1条、第3条、第5条、第6条、第7.2条、第8条、第9条、第10条、第11条、第12条、第13条および第14条は存続するものとします。</p> <p>お客様が本Orbiデバイスを新しい所有者に譲渡した場合、お客様が当該本Orbiデバイスに関して本サービスを利用する権利は自動的に消滅します。当該新しい所有者はお客様のNETGEARアカウントで当該本Orbiデバイスまたは本サービスを使用する権利を有しないため、別個のアカウントをNETGEARに登録する必要があります。</p> <p>8.商標:</p> <p>8.1 商標ライセンスの非認可:お客様は、本契約によって、企業名、商標、サービスマーク、認定マーク、商品名、標語、ロゴ、その他NETGEARまたはその子会社あるいは関連会社の名称(「NETGEARマーク」と総称)を使用する権利がお客様に付与されないことを確認するものとします(ただし、本契約に明示した場合を除きます)。これらのマークが本ソフトウェアに記載されている場合、または本契約に関連してお客様に提供された資料に含まれている場合でも、お客様に上記の権利は付与されません。</p> <p>8.2 採用または使用の非認可:お客様は、企業名、商標、サービスマーク、認定マーク、商品名、標語、ロゴ、その他同様の名称を含む一切のNETGEARマークを、その一部であっても、採用、使用または登録しないことに同意するものとします。お客様は、本契約の期間中または終了後を問わず、お客様の企業名、商品名、製品名またはセカンドレベルドメイン名の一部として「NETGEAR」または「ORBI」を使用しないことに同意するものとします(ただし、それにより上記規定は影響を受けません)。</p> <p>9.機密保持:お客様は、本ソフトウェアにNETGEARの重要な企業秘密および機密情報(「機密情報」)が含まれることを確認するものとします。お客様は、同様の非開示義務を持つ第三者を除き、この機密情報をいかなる第三者にも提供または開示しないことに同意するものとします。</p> <p>10.保証および免責:</p> <p>本Orbiデバイスについてのハードウェア保証は当該デバイスに付属する限定保証書に記載しており、また次のサイトで閲覧することができます。https://www.netgear.com/about/warranty</p> <p>10.1 本ソフトウェアに関して、ドイツ民法第437条、第536条、および第536a条に定められた強制力のある保証の権利が適用されます。</p> <p>10.2 
第10条1項の規定および第11条で規定された責任のいずれかまたは両方に従い、NETGEAR、そのライセンサー、サプライヤー、販売業者、代理業者または従業員による口頭あるいは書面での情報または助言によって本保証の範囲が変更または拡大されることは一切ないものとします。NETGEARは、ライセンシー、ディストリビューター、エンドユーザーまたはその他第三者の要求を本ソフトウェアおよび本サービスが満たすこと、または本ソフトウェアおよび本サービスの動作に中断またはエラーがないことは保証しません。NETGEARは、ライセンシーによる本ソフトウェアおよび本サービスの使用、誤用、インストールミス、取り扱いミス、事故または悪用によって生じた欠陥状態または不適合、ライセンシーの製品に本ソフトウェアおよび本サービスを組み込むことで生じたエラー、あるいはNETGEARが提供した変更、指示、更新または修正の適用にライセンシーが失敗したことに起因するエラーに関して、一切保証しません。</p> <p>11.責任の限定:</p> <p>11.1 以下の事項に対するNETGEARの責任は無制限とします:</p> <p>(i) NETGEAR、その取締役または役員、またはその他代理人の故意または重大な過失によって発生した損失または損害。および、</p> <p>(ii) 死亡、身体障害または疾病を生じさせる行為による、保証または調達リスクの想定に由来する、またはドイツの製造物責任法に基づく損失または損害。</p> <p>11.2 重要な契約上の義務の不履行により損害が発生した場合、NETGEAR、その取締役、役員、またはその他代理人の重大な過失以外の過失に対するNETGEARの責任は、いかなる場合にも、典型的かつ予見可能な損害額、すなわち、損害の原因となった本ソフトウェアおよび本サービス(または当該本ソフトウェアおよび本サービスを利用するための製品)に対してNETGEARが提示する定価またはお客様が支払った金額の下限を超えないものとします。</p> <p>11.3 上記第11条1項ならびに第11条2項の規定に従い、NETGEARのその他一切の責任は排除されます。</p> <p>11.4 第11条2項の規定に従い責任が制限される場合、利益損失またはその他一切の間接的損害に対してNETGEARは責任を負いません。</p> <p>12.一般条項:</p> <p>12.1 <b>税金:</b>お客様は、本ソフトウェアおよび本サービスまたはその使用あるいは本契約の履行に対して課せられる税金、関税、その他の料金について全責任を負います。お客様は、NETGEARの純利益または給与支払に適用されるものを除き、NETGEARに課せられるこれらの税金、関税、その他の料金を支払う義務を負うものとします。</p> <p>12.2 <b>お客様が受け取る対価:</b>本契約で付与される権利およびライセンスは、お客様が本契約の条項を遵守することを前提としています。お客様は、本契約の履行によってお客様が受け取る唯一かつ排他的な対価は本契約で付与される権利およびライセンスであり、NETGEARまたはそのサプライヤーから報酬、返済、その他の対価を受け取る資格は一切ないことを確認し合意するものとします。</p> <p>12.3 <b>差し止めによる救済:</b>お客様は、本契約の下で提供される本ソフトウェアまたは一切の情報、資料またはサービスを本契約の条項に反する方法で複製、開示または使用した場合、NETGEARに回復不能な損害が生じ、それに対してNETGEARは適切な法的救済措置を持たないことを確認し合意するものとします。したがって、NETGEARは、妥当な場合、暫定的な保護命令、仮差し止めおよび恒久的差し止め命令を含め、またこれらに限定されず、法廷で衡平法上の救済を得る資格があるものとします。</p> <p>12.4 <b>提携の否認:</b>当事者は、本契約の下で独立した契約者としてそれぞれの義務を負います。本契約は、NETGEARとお客様の間に雇用、代理契約、販売契約、フランチャイズ、共同事業、法的提携、その他同様の法的関係を生じさせるものではなく、その意図もありません。いずれの当事者も、相手の当事者を代理または拘束する権利または権限はなく、これらの権利または権限を持つことを第三者に表明することもできません。</p> <p>13.5 
譲渡の禁止:お客様は、任意、合併の結果、法の運用、その他理由を問わず、NETGEARからの書面による事前の承諾なく、本契約あるいは本契約で規定される権利または義務をその一部であっても譲渡または委譲することはできません。NETGEARは、独自の裁量により、その承諾を保留することがあります。譲渡または委譲の試みは一切無効であり、本契約の重大な違反とみなされます。</p> <p>12.6 <b>完全合意および修正:</b>本契約は、本契約の主題に関する完全かつ最終的、排他的な合意となります。お客様の事前の通知がある場合を除き、本契約は全体的にも部分的にも修正されません。</p> <p>12.7 <b>権利放棄:</b>本契約の不履行または条項に対する権利放棄は、継続的な権利放棄あるいはその他の不履行または条項の権利放棄とはみなされず、その権利放棄が指示された事例にのみ適用されるものとします。</p> <p>12.8 <b>適用法と言語:</b>本契約は、抵触法および米国法を除き、カリフォルニア州法に準拠します。この法の選択は、消費者保護法などの適用法に基づく合意によって無効化できない条項によってお客様に提供される保護を制限するものではありません。国際物品売買契約に関する国際連合条約の適用は明示的に否認されます。本契約書の正式文書の言語は英語であり、本契約の解釈または説明は英語版に基づきます。本契約あるいはその他の関連文書または関連通知が他の言語に翻訳される場合でも、お客様に適用される法令で規定されている場合を除き、英語版が優先されます。</p> <p>12.9 <b>輸出:</b>お客様は、本ソフトウェアおよび本Orbiデバイスの輸出または再輸出が輸出入規制の対象となることを確認するものとします。具体的には、本ソフトウェアおよび本Orbiデバイスについて次の各号の行為は禁止されています(ただしそれらに限りません)。(a) 米国が禁輸対象としている国に輸出または再輸出すること。(b) 米国財務省の特定国籍業者リストまたは米国商務省の禁止顧客リストもしくは禁止法人リストに掲載された者に対して輸出または再輸出すること。お客様が本ソフトウェアおよび本Orbiデバイスを使用した場合、お客様は、(i) お客様が米国政府の禁輸措置の適用を受ける国に居住していないこと、または米国政府によって「テロリスト支援」国家として指定された国に居住していないこと、および (ii) お客様が米国政府の禁止または制限を受けた当事者のリストに記載されていないことを表明し保証したものとみなします。通常、お客様は、これらすべての米国輸出規制に厳格に従い、米国商務省によって明示的に許可されない限り、米国輸出管理法および規制で禁止または制限されている場所、最終用途またはエンドユーザーに本ソフトウェアおよび本Orbiデバイスまたはその直接製品の一切を輸出、再輸出、移送または転送しないものとします。お客様は、本条項に違反することによってNETGEARおよびそのサプライヤーに直接的または間接的に申し立て、損失、責任または損害が生じた場合には、その一切を補償し、NETGEARおよびそのサプライヤーを無害に保つことに同意するものとします。</p> <p>本サービス、本ソフトウェアおよび本Orbiデバイスはすべての者が(またはすべての国で)利用できるとは限りません。お客様が、本サービス、本ソフトウェアおよび本OrbiデバイスのサポートがNETGEARによって行われている国の外部から本サービス、本ソフトウェアまたは本Orbiデバイスにアクセスすることを選択した場合、お客様は任意によりそれを行うのであり、お客様はお客様の国における適用法令を遵守する責任を全面的に負います。お客様は、本サービス、本ソフトウェアまたは本Orbiデバイスの機能の一部または全部がそれらの国で動作しない場合があること、またはそれらの国で使用することが不適当である場合があることを理解しており、かつそれを承諾します。NETGEARは、NETGEARがサポートを行っている国の外部でお客様が本サービス、本ソフトウェアもしくは本Orbiデバイスにアクセスしたことまたはそれらを利用したことにより生じた損害または損失について、何ら責任を負いません(ただし、法令により認められる範囲に限ります)。お客様が本サイトへのアクセスもしくは本サイトの利用または本サービスの利用を行った場合、お客様は本利用規約に拘束されます。</p> <p>12.10 
<b>可分性:</b>管轄裁判所が関係法令に基づいて本契約の条項のすべてまたは一部を違法、無効または強制不能と判断した場合は、その裁判所の管轄区域においてその違法性、無効性または強制不能性が適用される範囲でのみ、該当条項または該当部分が無効になるものとします。また、これらは、当事者の目的に最大限の効果を与えるために関係法令の遵守が必要な範囲で修正されるものとします。その管轄区域での該当条項の違法性、無効性または強制不能性は、他の管轄区域での本契約の該当条項またはその他の条項の合法性、有効性または強制性には一切影響しないものとします。</p> <p>12.11 <b>不可抗力:</b>いずれの当事者も、偶発的出来事、遅延、障害に直接的または間接的に全面的または一部起因する、あるいは当事者の合理的な支配の及ばない性質に起因する、本契約の不履行または履行遅延について、一切責任を負わないものとします。不可抗力となる要因には、火災、爆発、地震、嵐、洪水、その他の気象災害、必要なインフラや原材料の利用不能、停電、ストライキ、ロックアウト、構成要素の利用不能、労働組合活動またはその他の労働争議、戦争、テロ、暴動、反乱、天災または人災、法律、法令、命令、輸出管理規制、布告、規制、条例、政府または国家機関からの命令、管轄裁判所の判決(本契約の当事者による違反に起因する場合を除く)などが含まれますが、上記の一般的な内容に則する限りこれらに限定されません。</p> <p>12.12 <b>通知:</b>NETGEARがお客様に電子メールアドレスの提供を求める場合、お客様はNETGEARに最新の電子メールアドレスを提供する責任を負います。お客様がNETGEARに提供した最新の電子メールアドレスが無効であった場合または本利用規約上義務づけられる(または認められる)お客様への通知が何らかの理由により送達できなかった場合には、NETGEARが当該通知を含む電子メールを送信したことを以て、通知が有効に行われたものとみなします。NETGEARは、お客様またはお客様のネットワークプロバイダーが電子メールによる通知に自動フィルタリングを適用した場合、それについて責任を負いません。NETGEARへの通知は次の電子メールアドレスに宛てて送信する必要があります。support@NETGEAR.com</p> <p>12.13 <b>契約の完全性:</b>本利用規約(参照の形式により本利用規約に含まれるその他の各種規約を含みます)は、お客様とNETGEARとの間で合意した事項のすべてであり、本利用規約の主題に関してお客様とNETGEARとの間で本利用規約以前に行った合意、交渉その他の意思疎通のすべてに優先します。</p> <p>12.15. <b>法令遵守:</b>お客様は本サービス、本ソフトウェアおよび本Orbiデバイスの利用に際してすべての適用法令を遵守することに同意します。</p> <p>12.16. 
<b>電子的通信への同意:</b>お客様が本サービス、本ソフトウェアおよび本Orbiデバイスを利用した場合、お客様は、本Orbiデバイス、本ソフトウェアおよび本サービスに関する電子的通信を当社から受領することに同意したものとみなします。該当する通信には以下のものが含まれますが、これらに限定されません。(a) お客様による本製品、本ソフトウェア、および本サービスの使用に関する通知。使用上の違反に関する通知を含みます。(b) アップデート。(c) 適用法に基づいてお客様が同意する場合に、NETGEARの製品、ソフトウェア、およびサービスに関する販売促進のための情報および資料を提供する電子メール。お客様の個人情報を当社が利用する方法、お客様の設定を変更する方法、および販売促進のための資料の受信を拒否する方法の詳細につきましては、当社のプライバシーポリシーをご覧ください。</p> <p>お客様は、当社がお客様に対して電子的に送付する通知、同意、開示その他の通信が通信に関する法的要件をすべて充足していることに同意します。</p> <p><b>NETGEAR, Inc.</b></p> <p><b>350 East Plumeria Drive</b></p> <p><b>San Jose, CA 95134</b></p> <p><b>コーポレートオフィスの電話番号</b></p> <p><b>408-907-8000</b></p> <p><b>コーポレートオフィスのファックス番号</b></p> <p><b>408-907-8097</b></p> <p><b>または</b></p> <p><b>NETGEAR International Limited</b></p> <p><b>First Floor Building 3 University Technology Centre Curraheen Road</b></p> <p><b>Cork, Ireland</b></p> <p><b>コーポレートオフィスの電話番号</b></p> <p><b>353-21-2333-200</b></p> <p><b>コーポレートオフィスのファックス番号</b></p> <p><b>353-21-2333-299</b></p> <p>改訂第1版、発効日付:<b>2016年9月14日</b></p> </div> <% tos_agree() %> </div> </div> </div> </form> </body> </html>
dan82840/Netgear-RBR50
git_home/net-cgi.git/www/orbi/BRS_tos_eujp.html
HTML
gpl-2.0
50,310
# Cucumber step definitions for the Collection features: creating and
# deleting collections, collection preferences (moderated, anonymous,
# unrevealed, closed), adding works to collections, and checking the
# visibility of works/authors in anonymous or unrevealed collections.

### GIVEN

Given /^I have no collections$/ do
  Collection.delete_all
end

Given /^the collection "([^\"]*)" is deleted$/ do |collection_title|
  step %{I am logged in as the owner of "#{collection_title}"}
  visit edit_collection_path(Collection.find_by(title: collection_title))
  click_link "Delete Collection"
  click_button "Yes, Delete Collection"
  page.should have_content("Collection was successfully deleted.")
end

When /^I am logged in as the owner of "([^\"]*)"$/ do |collection|
  c = Collection.find_by(title: collection)
  step %{I am logged in as "#{c.owners.first.user.login}"}
end

When /^I view the collection "([^\"]*)"$/ do |collection|
  visit collection_path(Collection.find_by(title: collection))
end

When /^I add my work to the collection$/ do
  step %{I follow "Add To Collection"}
  fill_in("collection_names", with: "Various_Penguins")
  click_button("Add")
end

When /^I add the work "([^\"]*)" to the collection "([^\"]*)"$/ do |work_title, collection_title|
  w = Work.find_by(title: work_title)
  c = Collection.find_by(title: collection_title)
  visit work_path(w)
  click_link "Add To Collection"
  fill_in("collection_names", with: c.name)
  click_button("Add")
end

# Views the collection items page, optionally filtered by item status
# (approved / rejected / invited).
When(/^I view the(?: ([^"]*)) collection items page for "(.*?)"$/) do |item_status, collection|
  c = Collection.find_by(title: collection)
  if item_status == "approved"
    visit collection_items_path(c, approved: true)
  elsif item_status == "rejected"
    visit collection_items_path(c, rejected: true)
  elsif item_status == "invited"
    visit collection_items_path(c, invited: true)
  else
    visit collection_items_path(c)
  end
end

Given /^mod1 lives in Alaska$/ do
  step %{I am logged in as "mod1"}
  step %{I go to mod1 preferences page}
  step %{I select "(GMT-09:00) Alaska" from "preference_time_zone"}
  step %{I press "Update"}
end

# Creates a collection as "moderator", ticking the preference boxes
# matched by the optional (hidden|anonymous|moderated|closed) words.
Given /^(?:I have )?(?:a|an|the) (hidden)?(?: )?(anonymous)?(?: )?(moderated)?(?: )?(closed)?(?: )?collection "([^\"]*)"(?: with name "([^\"]*)")?$/ do |hidden, anon, moderated, closed, title, name|
  step %{I am logged in as "moderator"}
  step %{I set up the collection "#{title}" with name "#{name}"}
  check("This collection is unrevealed") unless hidden.blank?
  check("This collection is anonymous") unless anon.blank?
  check("This collection is moderated") unless moderated.blank?
  check("This collection is closed") unless closed.blank?
  step %{I submit}
  step %{I am logged out}
end

Given /^I open the collection with the title "([^\"]*)"$/ do |title|
  step %{I am logged in as "moderator"}
  visit collection_path(Collection.find_by(title: title))
  step %{I follow "Collection Settings"}
  step %{I uncheck "This collection is closed"}
  step %{I submit}
  step %{I am logged out}
end

Given /^I close the collection with the title "([^\"]*)"$/ do |title|
  step %{I am logged in as "moderator"}
  visit collection_path(Collection.find_by(title: title))
  step %{I follow "Collection Settings"}
  step %{I check "This collection is closed"}
  step %{I submit}
  step %{I am logged out}
end

Given /^I have added (?:a|the) co\-moderator "([^\"]*)" to collection "([^\"]*)"$/ do |name, title|
  # create the user
  step %{I am logged in as "#{name}"}
  step %{I am logged in as "mod1"}
  visit collection_path(Collection.find_by(title: title))
  click_link("Membership")
  step %{I fill in "participants_to_invite" with "#{name}"}
  step %{I press "Submit"}
  step %{I select "Moderator" from "#{name}_role"}
  # TODO: fix the form, it is malformed right now
  click_button("#{name}_submit")
  step %{I should see "Updated #{name}"}
end

### WHEN

# Fills in the new-collection form without submitting it; the name
# defaults to the title with non-word characters replaced by '_'.
When /^I set up (?:a|the) collection "([^"]*)"(?: with name "([^"]*)")?$/ do |title, name|
  visit new_collection_path
  fill_in("collection_name", with: (name.blank? ? title.gsub(/[^\w]/, '_') : name))
  fill_in("collection_title", with: title)
end

When /^I create (?:a|the) collection "([^"]*)"(?: with name "([^"]*)")?$/ do |title, name|
  name = title.gsub(/[^\w]/, '_') if name.blank?
  step %{I set up the collection "#{title}" with name "#{name}"}
  step %{I submit}
end

# Creates the parent collection first if it does not exist yet.
When /^I add (?:a|the) subcollection "([^"]*)"(?: with name "([^"]*)")? to (?:a|the) parent collection named "([^"]*)"$/ do |title, name, parent_name|
  if Collection.find_by_name(parent_name).nil?
    step %{I create the collection "#{parent_name}" with name "#{parent_name}"}
  end
  name = title.gsub(/[^\w]/, '_') if name.blank?
  step %{I set up the collection "#{title}" with name "#{name}"}
  fill_in("collection_parent_name", with: parent_name)
  step %{I submit}
end

When /^I sort by fandom$/ do
  within(:xpath, "//li[a[contains(@title,'Sort')]]") do
    step %{I follow "Fandom 1"}
  end
end

When /^I reveal works for "([^\"]*)"$/ do |title|
  step %{I am logged in as the owner of "#{title}"}
  visit collection_path(Collection.find_by(title: title))
  step %{I follow "Collection Settings"}
  uncheck "This collection is unrevealed"
  click_button "Update"
  page.should have_content("Collection was successfully updated")
end

When /^I reveal authors for "([^\"]*)"$/ do |title|
  step %{I am logged in as the owner of "#{title}"}
  visit collection_path(Collection.find_by(title: title))
  step %{I follow "Collection Settings"}
  uncheck "This collection is anonymous"
  click_button "Update"
  page.should have_content("Collection was successfully updated")
end

When /^I check all the collection settings checkboxes$/ do
  check("collection_collection_preference_attributes_moderated")
  check("collection_collection_preference_attributes_closed")
  check("collection_collection_preference_attributes_unrevealed")
  check("collection_collection_preference_attributes_anonymous")
  check("collection_collection_preference_attributes_show_random")
  check("collection_collection_preference_attributes_email_notify")
end

When /^I accept the invitation for my work in the collection "([^\"]*)"$/ do |collection|
  the_collection = Collection.find_by(title: collection)
  collection_item_id = the_collection.collection_items.first.id
  visit user_collection_items_path(User.current_user)
  step %{I select "Approved" from "collection_items_#{collection_item_id}_user_approval_status"}
end

### THEN

Then /^"([^"]*)" collection exists$/ do |title|
  assert Collection.where(title: title).exists?
end

Then /^the name of the collection "([^"]*)" should be "([^"]*)"$/ do |title, name|
  assert Collection.find_by(title: title).name == name
end

Then /^I should see a collection not found message for "([^\"]+)"$/ do |collection_name|
  step %{I should see /We couldn't find the collection(?:.+and)? #{collection_name}/}
end

Then /^the collection "(.*)" should be deleted/ do |collection|
  assert Collection.where(title: collection).first.nil?
end

# An unrevealed work must show as "Mystery Work" everywhere: on its
# own page, in its collection, and on its author's page.
Then /^the work "([^\"]*)" should be hidden from me$/ do |title|
  work = Work.find_by(title: title)
  visit work_path(work)
  page.should have_content("Mystery Work")
  page.should_not have_content(title)
  page.should have_content("This work is part of an ongoing challenge and will be revealed soon!")
  page.should_not have_content(Sanitize.clean(work.chapters.first.content))
  if work.collections.first
    visit collection_path(work.collections.first)
    page.should_not have_content(title)
    page.should have_content("Mystery Work")
  end
  visit user_path(work.users.first)
  page.should_not have_content(title)
end

Then /^the work "([^\"]*)" should be visible to me$/ do |title|
  work = Work.find_by(title: title)
  visit work_path(work)
  page.should have_content(title)
  page.should have_content(Sanitize.clean(work.chapters.first.content))
end

# "Visible to me" on an anonymous work: byline reads "Anonymous [names]".
Then /^the author of "([^\"]*)" should be visible to me on the work page$/ do |title|
  work = Work.find_by(title: title)
  visit work_path(work)
  authors = work.pseuds.uniq.sort.collect(&:byline).join(", ")
  page.should have_content("Anonymous [#{authors}]")
end

Then /^the author of "([^\"]*)" should be publicly visible$/ do |title|
  work = Work.find_by(title: title)
  byline = work.users.first.pseuds.first.byline
  visit work_path(work)
  step %{I should see "#{byline}" within "title"}
  step %{I should see "#{byline}" within ".byline"}
  if work.collections.first
    visit collection_path(work.collections.first)
    page.should have_content("#{title} by #{byline}")
  end
end

# An anonymous work must hide the byline on the work page, in the
# collection listing, and must not be listed on the author's page.
Then /^the author of "([^\"]*)" should be hidden from me$/ do |title|
  work = Work.find_by(title: title)
  visit work_path(work)
  page.should_not have_content(work.users.first.pseuds.first.byline)
  step %{I should see "Anonymous" within "title"}
  step %{I should see "Anonymous" within ".byline"}
  visit collection_path(work.collections.first)
  page.should_not have_content("#{title} by #{work.users.first.pseuds.first.byline}")
  page.should have_content("#{title} by Anonymous")
  visit user_path(work.users.first)
  page.should_not have_content(title)
end
ariana-paris/otwarchive
features/step_definitions/collection_steps.rb
Ruby
gpl-2.0
8,888
/* * * Copyright (c) 2011 Realtek Semiconductor Corp. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/config.h> #include <linux/irq.h> #include <asm/irq_cpu.h> #include <prom.h> #include <platform.h> #include <linux/hardirq.h> #if defined(CONFIG_RTL_819X) #include <common/rtl_types.h> #endif spinlock_t irq_lock = SPIN_LOCK_UNLOCKED; static void rtl8196_enable_irq(unsigned int irq) { unsigned long flags; spin_lock_irqsave(&irq_lock, flags); REG32(GIMR) = REG32(GIMR) | (1 << irq); spin_unlock_irqrestore(&irq_lock, flags); } static unsigned int rtl8196_startup_irq(unsigned int irq) { rtl8196_enable_irq(irq); return 0; } static void rtl8196_disable_irq(unsigned int irq) { unsigned long flags; spin_lock_irqsave(&irq_lock, flags); REG32(GIMR) = REG32(GIMR) & (~(1 << irq)); spin_unlock_irqrestore(&irq_lock, flags); } static void rtl8196_end_irq(unsigned int irq) { unsigned long flags; spin_lock_irqsave(&irq_lock, flags); REG32(GIMR) = REG32(GIMR) | (1 << irq); spin_unlock_irqrestore(&irq_lock, flags); } #define rtl8196_shutdown_irq rtl8196_disable_irq #define rtl8196_mask_and_ack_irq rtl8196_disable_irq static struct irq_chip irq_type = { .typename = "RTL8196", .startup = rtl8196_startup_irq, .shutdown = rtl8196_shutdown_irq, .enable = rtl8196_enable_irq, .disable = rtl8196_disable_irq, .ack = rtl8196_mask_and_ack_irq, .end = rtl8196_end_irq, .mask = rtl8196_disable_irq, .mask_ack = rtl8196_mask_and_ack_irq, .unmask = rtl8196_enable_irq, }; /* * RTL8196b Interrupt Scheme (Subject to change) * * Source EXT_INT IRQ CPU INT * -------- ------- ------ ------- * PCIB0TO 0 0 2 * PCIB1TO 1 1 2 * LBCTMOm0 2 2 2 * LBCTMOm1 3 3 2 * LBCTMOs 4 4 2 * TIMER0 8 8 7 * TIMER1 9 9 2 * USB 10 10 4 * UART0 12 12 3 * UART1 13 13 2 * PCI 14 14 5 * SWCORE 15 15 6 * GPIO_ABCD 16 16 2 * GPIO_EFGH 17 17 2 * HCI 18 18 2 * PCM 19 19 2 * CRYPTO 20 
20 2 * GDMA 21 21 2 */ void __init arch_init_irq(void) { int i; /* Initialize for IRQ: 0~31 */ for (i = 0; i < 32; i++) { //irq_desc[i].chip = &irq_type; set_irq_chip_and_handler(i, &irq_type, handle_level_irq); } /* Enable all interrupt mask of CPU */ write_c0_status(read_c0_status() | ST0_IM); /* Set GIMR, IRR */ REG32(GIMR) = TC0_IE | UART0_IE |SW_IE ; REG32(IRR0) = IRR0_SETTING; REG32(IRR1) = IRR1_SETTING; REG32(IRR2) = IRR2_SETTING; REG32(IRR3) = IRR3_SETTING; } #define ST0_USED_IM (_ULCAST_(0xfc00)) /* interrupt 2/3/4/5/6/7 */ #define ST0_REENTRY_IM (_ULCAST_(0xe000)) /* interrupt 5/6/7 */ __IRAM_GEN asmlinkage void plat_irq_dispatch(void) { #if 0 unsigned int cpuint_ip; unsigned int cpuint_mask; unsigned int extint_ip; cpuint_mask = read_c0_status() & ST0_IM; cpuint_ip = read_c0_cause() & read_c0_status() & ST0_IM; #if 1 write_c0_status(read_c0_status()&(~ST0_IM)); #else write_c0_status((read_c0_status()&(~ST0_IM))|(cpuint_mask&(~(STATUSF_IP6|STATUSF_IP5)))); #endif do { if (cpuint_ip & CAUSEF_IP7) { /* Timer 0 */ do_IRQ(TC0_IRQ); } else if (cpuint_ip & CAUSEF_IP5) { /* PCIE */ do_IRQ(PCIE_IRQ); } if (cpuint_ip & CAUSEF_IP6) { /* switch core*/ do_IRQ(SWCORE_IRQ); } else if (cpuint_ip & CAUSEF_IP3) { /* pci */ do_IRQ(PCI_IRQ); } else if (cpuint_ip & CAUSEF_IP4) { /*USB*/ do_IRQ(USB_IRQ); } else if (cpuint_ip & CAUSEF_IP2) { /* For shared interrupts */ unsigned int extint_ip = REG32(GIMR) & REG32(GISR); if (extint_ip & UART0_IP) { /* UART 0 */ do_IRQ(UART0_IRQ); } else if (extint_ip & TC1_IP) { do_IRQ(TC1_IRQ); } #if 0 /* currently we do not use uart1 */ else if (extint_ip & UART1_IP) { do_IRQ(UART1_IRQ); } #endif else { prom_printf("Unknown Interrupt2:%x\n",extint_ip); REG32(GISR) = extint_ip; } } cpuint_ip = read_c0_cause() & (STATUSF_IP6|STATUSF_IP5|STATUSF_IP7); } while(cpuint_ip); #if 0 /* patch for wds+wep hang up issue */ write_c0_status((read_c0_status()&(~ST0_IM))|(cpuint_mask)); #else write_c0_status((read_c0_status()|(ST0_IM))); #endif #else 
unsigned int cpuint_ip; unsigned int extint_ip; cpuint_ip = read_c0_cause() & ST0_USED_IM; write_c0_status(read_c0_status()&(~ST0_IM)); #if 0 if ( ST0_REENTRY_IM & cpuint_ip ) { do { if (cpuint_ip & CAUSEF_IP7) { /* Timer 0 */ do_IRQ(TC0_IRQ); } if (cpuint_ip & CAUSEF_IP6) { /* switch core*/ do_IRQ(SWCORE_IRQ); } if (cpuint_ip & CAUSEF_IP5) { /* PCIE */ do_IRQ(PCIE_IRQ); } cpuint_ip = read_c0_cause() & ST0_REENTRY_IM; } while(cpuint_ip); } #else if (cpuint_ip & CAUSEF_IP6) { /* switch core*/ do_IRQ(SWCORE_IRQ); } else if (cpuint_ip & CAUSEF_IP5) { /* PCIE */ do_IRQ(PCIE_IRQ); } else if (cpuint_ip & CAUSEF_IP7) { /* Timer 0 */ do_IRQ(TC0_IRQ); } #endif else if(cpuint_ip & CAUSEF_IP3) { /* pci */ do_IRQ(PCI_IRQ); } else if (cpuint_ip & CAUSEF_IP4) { /*USB*/ do_IRQ(USB_IRQ); } else if (cpuint_ip & CAUSEF_IP2) { /* For shared interrupts */ extint_ip = REG32(GIMR) & REG32(GISR); if (extint_ip & UART0_IP) { /* UART 0 */ do_IRQ(UART0_IRQ); } #if 0 else if (extint_ip & TC1_IP) { do_IRQ(TC1_IRQ); } /* currently we do not use uart1 */ else if (extint_ip & UART1_IP) { do_IRQ(UART1_IRQ); } else { prom_printf("Unknown Interrupt2:%x\n",extint_ip); REG32(GISR) = extint_ip; } #endif } write_c0_status((read_c0_status()|(ST0_USED_IM))); #endif }
utessel/edimax
target/linux/realtek/files/arch/mips/rtl8196b/int.c
C
gpl-2.0
6,532
/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "[BATT][BMS] %s: " fmt, __func__ #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/power_supply.h> #include <linux/spmi.h> #include <linux/rtc.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/qpnp/qpnp-adc.h> #include <linux/qpnp/qpnp-charger.h> #include <linux/qpnp/qpnp-bms.h> #include <linux/qpnp/power-on.h> #include <linux/of_batterydata.h> #include <mach/devices_dtb.h> #include <mach/htc_gauge.h> #include <mach/htc_footprint.h> #include <linux/interrupt.h> #ifdef CONFIG_HTC_BATT_8960 #include "mach/htc_battery_cell.h" #endif #ifdef pr_debug #undef pr_debug #endif #define pr_debug(fmt, args...) 
do { \ if (flag_enable_bms_charger_log) \ printk(KERN_INFO pr_fmt(fmt), ## args); \ } while (0) #define REVISION1 0x0 #define REVISION2 0x1 #define BMS1_STATUS1 0x8 #define BMS1_INT_RT_STS 0x10 #define BMS1_MODE_CTL 0X40 #define BMS1_CC_DATA_CTL 0x42 #define BMS1_CC_CLEAR_CTL 0x43 #define BMS1_TOL_CTL 0X44 #define BMS1_OCV_USE_LOW_LIMIT_THR0 0x48 #define BMS1_OCV_USE_LOW_LIMIT_THR1 0x49 #define BMS1_OCV_USE_HIGH_LIMIT_THR0 0x4A #define BMS1_OCV_USE_HIGH_LIMIT_THR1 0x4B #define BMS1_OCV_USE_LIMIT_CTL 0x4C #define BMS1_S1_DELAY_CTL 0x5A #define BMS1_OCV_THR0 0x50 #define BMS1_S2_SAMP_AVG_CTL 0x61 #define BMS1_SW_CC_THR0 0xA0 #define BMS1_S3_VSENSE_THR_CTL 0x77 #define BMS1_OCV_FOR_R_DATA0 0x80 #define BMS1_VSENSE_FOR_R_DATA0 0x82 #define BMS1_CC_DATA0 0x8A #define BMS1_SW_CC_DATA0 0xA8 #define BMS1_OCV_FOR_SOC_DATA0 0x90 #define BMS1_VSENSE_PON_DATA0 0x94 #define BMS1_VSENSE_AVG_DATA0 0x98 #define BMS1_VBAT_AVG_DATA0 0x9E #define SOC_STORAGE_REG 0xB0 #define IAVG_STORAGE_REG 0xB1 #define BMS_FCC_COUNT 0xB2 #define BMS_FCC_BASE_REG 0xB3 #define BMS_CHGCYL_BASE_REG 0xB8 #define CHARGE_INCREASE_STORAGE 0xBD #define CHARGE_CYCLE_STORAGE_LSB 0xBE #define IADC1_BMS_REVISION2 0x01 #define IADC1_BMS_ADC_CH_SEL_CTL 0x48 #define IADC1_BMS_ADC_INT_RSNSN_CTL 0x49 #define IADC1_BMS_FAST_AVG_EN 0x5B #define IGNORE_SOC_TEMP_DECIDEG 50 #define IAVG_STEP_SIZE_MA 10 #define IAVG_INVALID 0xFF #define SOC_INVALID 0x7E #define IAVG_SAMPLES 16 #define MAX_FCC_CYCLES 5 #define DELTA_FCC_PERCENT 5 #define VALID_FCC_CHGCYL_RANGE 50 #define CHGCYL_RESOLUTION 20 #define FCC_DEFAULT_TEMP 250 #define BMS_STORE_MAGIC_NUM 0xDDAACC00 #define BMS_STORE_MAGIC_OFFSET 1056 #define BMS_STORE_SOC_OFFSET 1060 #define BMS_STORE_OCV_OFFSET 1064 #define BMS_STORE_CC_OFFSET 1068 #define BMS_STORE_CURRTIME_OFFSET 1072 #define QPNP_BMS_DEV_NAME "qcom,qpnp-bms" #define FIRST_SW_EST_OCV_THR_MS (21600000) #define DEFAULT_SW_EST_OCV_THR_MS (79200000) #define DISABLE_SW_OCV_LEVEL_THRESHOLD 30 enum { SHDW_CC, CC }; 
enum { NORESET, RESET }; struct soc_params { int fcc_uah; int cc_uah; int rbatt_mohm; int iavg_ua; int uuc_uah; int ocv_charge_uah; int delta_time_s; }; struct raw_soc_params { uint16_t last_good_ocv_raw; int64_t cc; int64_t shdw_cc; int last_good_ocv_uv; }; struct fcc_sample { int fcc_new; int chargecycles; }; struct bms_irq { unsigned int irq; unsigned long disabled; }; struct bms_wakeup_source { struct wakeup_source source; unsigned long disabled; }; struct qpnp_bms_chip { struct device *dev; struct power_supply bms_psy; bool bms_psy_registered; struct power_supply *batt_psy; struct spmi_device *spmi; wait_queue_head_t bms_wait_queue; u16 base; u16 iadc_base; u16 batt_pres_addr; u16 soc_storage_addr; u8 revision1; u8 revision2; u8 iadc_bms_revision1; u8 iadc_bms_revision2; int battery_present; int battery_status; bool batfet_closed; bool new_battery; bool done_charging; bool last_soc_invalid; int store_batt_data_soc_thre; int batt_stored_magic_num; int batt_stored_soc; int batt_stored_ocv_uv; int batt_stored_cc_uah; int cc_backup_uah; int ocv_backup_uv; unsigned int batt_stored_update_time; int r_sense_uohm; unsigned int v_cutoff_uv; unsigned int shutdown_vol_criteria; int max_voltage_uv; int r_conn_mohm; int shutdown_soc_valid_limit; int adjust_soc_low_threshold; int chg_term_ua; enum battery_type batt_type; unsigned int fcc_mah; struct single_row_lut *fcc_temp_lut; struct single_row_lut *fcc_sf_lut; struct pc_temp_ocv_lut *pc_temp_ocv_lut; struct sf_lut *pc_sf_lut; struct sf_lut *rbatt_sf_lut; struct sf_lut *rbatt_est_ocv_lut; int default_rbatt_mohm; int rbatt_capacitive_mohm; int rbatt_mohm; struct delayed_work calculate_soc_delayed_work; struct work_struct recalc_work; struct work_struct batfet_open_work; struct mutex bms_output_lock; struct mutex last_ocv_uv_mutex; struct mutex vbat_monitor_mutex; struct mutex soc_invalidation_mutex; struct mutex last_soc_mutex; struct mutex status_lock; bool use_external_rsense; bool use_ocv_thresholds; bool 
ignore_shutdown_soc; bool shutdown_soc_invalid; int shutdown_soc; int shutdown_iavg_ma; struct wake_lock low_voltage_wake_lock; int low_voltage_threshold; int low_soc_calc_threshold; int low_soc_calculate_soc_ms; int low_voltage_calculate_soc_ms; int calculate_soc_ms; struct bms_wakeup_source soc_wake_source; struct wake_lock cv_wake_lock; uint16_t ocv_reading_at_100; uint16_t prev_last_good_ocv_raw; int insertion_ocv_uv; int last_ocv_uv; int charging_adjusted_ocv; int last_ocv_temp; int last_cc_uah; unsigned long last_soc_change_sec; unsigned long tm_sec; unsigned long report_tm_sec; bool first_time_calc_soc; bool first_time_calc_uuc; int64_t software_cc_uah; int64_t software_shdw_cc_uah; int iavg_samples_ma[IAVG_SAMPLES]; int iavg_index; int iavg_num_samples; struct timespec t_soc_queried; int last_soc; int last_soc_est; int last_soc_unbound; bool was_charging_at_sleep; int charge_start_tm_sec; int catch_up_time_sec; struct single_row_lut *adjusted_fcc_temp_lut; struct qpnp_adc_tm_btm_param vbat_monitor_params; struct qpnp_adc_tm_btm_param die_temp_monitor_params; int temperature_margin; unsigned int vadc_v0625; unsigned int vadc_v1250; unsigned int criteria_sw_est_ocv; unsigned int rconn_mohm_sw_est_ocv; int system_load_count; int prev_uuc_iavg_ma; int prev_pc_unusable; int ibat_at_cv_ua; int soc_at_cv; int prev_chg_soc; int calculated_soc; int prev_voltage_based_soc; bool use_voltage_soc; bool in_cv_range; int prev_batt_terminal_uv; int high_ocv_correction_limit_uv; int low_ocv_correction_limit_uv; int flat_ocv_threshold_uv; int hold_soc_est; int ocv_high_threshold_uv; int ocv_low_threshold_uv; unsigned long last_recalc_time; struct fcc_sample *fcc_learning_samples; u8 fcc_sample_count; int enable_fcc_learning; int min_fcc_learning_soc; int min_fcc_ocv_pc; int min_fcc_learning_samples; int start_soc; int end_soc; int start_pc; int start_cc_uah; int start_real_soc; int end_cc_uah; uint16_t fcc_new_mah; int fcc_new_batt_temp; uint16_t charge_cycles; u8 
charge_increase; int fcc_resolution; bool battery_removed; struct bms_irq cc_thr_irq; struct bms_irq ocv_for_r_irq; struct bms_irq good_ocv_irq; struct bms_irq charge_begin_irq; struct bms_irq sw_cc_thr_irq; struct bms_irq ocv_thr_irq; struct bms_irq vsense_avg_irq; struct bms_irq vsense_for_r_irq; struct qpnp_vadc_chip *vadc_dev; struct qpnp_iadc_chip *iadc_dev; struct qpnp_adc_tm_chip *adc_tm_dev; bool batt_full_fake_ocv; int enable_batt_full_fake_ocv; }; struct pm8941_bms_debug { int fcc_uah; int uuc_uah; int rc_uah; int ruc_uah; int cc_uah; int ori_cc_uah; int raw_soc; int adjusted_soc; int ori_soc_change; int rbatt; int rbatt_sf; int uuc_iavg_ma; int uuc_rbatt_mohm; int ori_uuc_uah; int unusable_uv; int pc_unusable; int rc_pc; int shutdown_soc; int soc_rbatt; int pon_est_ocv; int ibat_for_est_ocv; int vbat_for_est_ocv; int rbat_for_est_ocv; int time_last_change_s; int last_ocv_raw_uv; }; static struct pm8941_bms_debug bms_dbg; struct pm8941_battery_data_store { int store_soc; int store_ocv_uv; int store_cc_uah; unsigned long store_currtime_ms; }; static struct pm8941_battery_data_store store_emmc; static struct of_device_id qpnp_bms_match_table[] = { { .compatible = QPNP_BMS_DEV_NAME }, {} }; #if !(defined(CONFIG_HTC_BATT_8960)) static char *qpnp_bms_supplicants[] = { "battery" }; static enum power_supply_property msm_bms_power_props[] = { POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_RESISTANCE, POWER_SUPPLY_PROP_CHARGE_COUNTER, POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW, POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, POWER_SUPPLY_PROP_CHARGE_FULL, POWER_SUPPLY_PROP_CYCLE_COUNT, }; #endif struct htc_bms_timer { unsigned long batt_system_jiffies; unsigned long batt_suspend_ms; unsigned long no_ocv_update_period_ms; }; static struct htc_bms_timer htc_batt_bms_timer; static int ocv_update_stop_active_mask = OCV_UPDATE_STOP_BIT_CABLE_IN | OCV_UPDATE_STOP_BIT_ATTR_FILE | OCV_UPDATE_STOP_BIT_BOOT_UP; static int 
ocv_update_stop_reason; static int is_ocv_update_start = 0; struct mutex ocv_update_lock; static int batt_level = 0; static void disable_ocv_update_with_reason(bool disable, int reason); static int discard_backup_fcc_data(struct qpnp_bms_chip *chip); static void backup_charge_cycle(struct qpnp_bms_chip *chip); static int64_t read_battery_id(struct qpnp_bms_chip *chip); static int get_rbatt(struct qpnp_bms_chip *chip, int soc_rbatt_mohm, int batt_temp); int emmc_misc_write(int val, int offset); static int calculate_cc(struct qpnp_bms_chip *chip, int64_t cc, int cc_type, int clear_cc); static bool bms_reset; static struct qpnp_bms_chip *the_chip; static bool flag_enable_bms_charger_log; #define BATT_LOG_BUF_LEN (512) static char batt_log_buf[BATT_LOG_BUF_LEN]; static unsigned long allow_ocv_time = 0; static int new_boot_soc = 0; static int consistent_flag = false;; static int store_soc_ui = -1; static int qpnp_read_wrapper(struct qpnp_bms_chip *chip, u8 *val, u16 base, int count) { int rc; struct spmi_device *spmi = chip->spmi; rc = spmi_ext_register_readl(spmi->ctrl, spmi->sid, base, val, count); if (rc) { pr_err("SPMI read failed rc=%d\n", rc); return rc; } return 0; } static int qpnp_write_wrapper(struct qpnp_bms_chip *chip, u8 *val, u16 base, int count) { int rc; struct spmi_device *spmi = chip->spmi; rc = spmi_ext_register_writel(spmi->ctrl, spmi->sid, base, val, count); if (rc) { pr_err("SPMI write failed rc=%d\n", rc); return rc; } return 0; } static int qpnp_masked_write_base(struct qpnp_bms_chip *chip, u16 addr, u8 mask, u8 val) { int rc; u8 reg; rc = qpnp_read_wrapper(chip, &reg, addr, 1); if (rc) { pr_err("read failed addr = %03X, rc = %d\n", addr, rc); return rc; } reg &= ~mask; reg |= val & mask; rc = qpnp_write_wrapper(chip, &reg, addr, 1); if (rc) { pr_err("write failed addr = %03X, val = %02x, mask = %02x, reg = %02x, rc = %d\n", addr, val, mask, reg, rc); return rc; } return 0; } static int qpnp_masked_write_iadc(struct qpnp_bms_chip *chip, u16 addr, 
u8 mask, u8 val) { return qpnp_masked_write_base(chip, chip->iadc_base + addr, mask, val); } static int qpnp_masked_write(struct qpnp_bms_chip *chip, u16 addr, u8 mask, u8 val) { return qpnp_masked_write_base(chip, chip->base + addr, mask, val); } static void bms_stay_awake(struct bms_wakeup_source *source) { if (__test_and_clear_bit(0, &source->disabled)) { __pm_stay_awake(&source->source); pr_debug("enabled source %s\n", source->source.name); } } static void bms_relax(struct bms_wakeup_source *source) { if (!__test_and_set_bit(0, &source->disabled)) { __pm_relax(&source->source); pr_debug("disabled source %s\n", source->source.name); } } static void enable_bms_irq(struct bms_irq *irq) { if (__test_and_clear_bit(0, &irq->disabled)) { enable_irq(irq->irq); pr_debug("enabled irq %d\n", irq->irq); } } static void disable_bms_irq(struct bms_irq *irq) { if (!__test_and_set_bit(0, &irq->disabled)) { disable_irq(irq->irq); pr_debug("disabled irq %d\n", irq->irq); } } #define HOLD_OREG_DATA BIT(0) static int lock_output_data(struct qpnp_bms_chip *chip) { int rc; rc = qpnp_masked_write(chip, BMS1_CC_DATA_CTL, HOLD_OREG_DATA, HOLD_OREG_DATA); if (rc) { pr_err("couldnt lock bms output rc = %d\n", rc); return rc; } return 0; } static int unlock_output_data(struct qpnp_bms_chip *chip) { int rc; rc = qpnp_masked_write(chip, BMS1_CC_DATA_CTL, HOLD_OREG_DATA, 0); if (rc) { pr_err("fail to unlock BMS_CONTROL rc = %d\n", rc); return rc; } return 0; } #define V_PER_BIT_MUL_FACTOR 97656 #define V_PER_BIT_DIV_FACTOR 1000 #define VADC_INTRINSIC_OFFSET 0x6000 static int vadc_reading_to_uv(int reading) { if (reading <= VADC_INTRINSIC_OFFSET) return 0; return (reading - VADC_INTRINSIC_OFFSET) * V_PER_BIT_MUL_FACTOR / V_PER_BIT_DIV_FACTOR; } #define VADC_CALIB_UV 625000 #define VBATT_MUL_FACTOR 3 static int adjust_vbatt_reading(struct qpnp_bms_chip *chip, int reading_uv) { s64 numerator, denominator; if (reading_uv == 0) return 0; if (chip->vadc_v0625 == 0 || chip->vadc_v1250 == 0) { 
pr_debug("No cal yet return %d\n", VBATT_MUL_FACTOR * reading_uv); return VBATT_MUL_FACTOR * reading_uv; } numerator = ((s64)reading_uv - chip->vadc_v0625) * VADC_CALIB_UV; denominator = (s64)chip->vadc_v1250 - chip->vadc_v0625; if (denominator == 0) return reading_uv * VBATT_MUL_FACTOR; return (VADC_CALIB_UV + div_s64(numerator, denominator)) * VBATT_MUL_FACTOR; } static int convert_vbatt_uv_to_raw(struct qpnp_bms_chip *chip, int unadjusted_vbatt) { int scaled_vbatt = unadjusted_vbatt / VBATT_MUL_FACTOR; if (scaled_vbatt <= 0) return VADC_INTRINSIC_OFFSET; return ((scaled_vbatt * V_PER_BIT_DIV_FACTOR) / V_PER_BIT_MUL_FACTOR) + VADC_INTRINSIC_OFFSET; } static inline int convert_vbatt_raw_to_uv(struct qpnp_bms_chip *chip, uint16_t reading) { int64_t uv; int rc; uv = vadc_reading_to_uv(reading); pr_debug("%u raw converted into %lld uv\n", reading, uv); uv = adjust_vbatt_reading(chip, uv); pr_debug("adjusted into %lld uv\n", uv); rc = qpnp_vbat_sns_comp_result(chip->vadc_dev, &uv); if (rc) pr_debug("could not compensate vbatt\n"); pr_debug("compensated into %lld uv\n", uv); return uv; } #define CC_READING_RESOLUTION_N 542535 #define CC_READING_RESOLUTION_D 100000 static s64 cc_reading_to_uv(s64 reading) { return div_s64(reading * CC_READING_RESOLUTION_N, CC_READING_RESOLUTION_D); } #define QPNP_ADC_GAIN_IDEAL 3291LL static s64 cc_adjust_for_gain(s64 uv, uint16_t gain) { s64 result_uv; pr_debug("adjusting_uv = %lld\n", uv); if (gain == 0) { pr_debug("gain is %d, not adjusting\n", gain); return uv; } pr_debug("adjusting by factor: %lld/%hu = %lld%%\n", QPNP_ADC_GAIN_IDEAL, gain, div_s64(QPNP_ADC_GAIN_IDEAL * 100LL, (s64)gain)); result_uv = div_s64(uv * QPNP_ADC_GAIN_IDEAL, (s64)gain); pr_debug("result_uv = %lld\n", result_uv); return result_uv; } static s64 cc_reverse_adjust_for_gain(struct qpnp_bms_chip *chip, s64 uv) { struct qpnp_iadc_calib calibration; int gain; s64 result_uv; qpnp_iadc_get_gain_and_offset(chip->iadc_dev, &calibration); gain = 
(int)calibration.gain_raw - (int)calibration.offset_raw; pr_debug("reverse adjusting_uv = %lld\n", uv); if (gain == 0) { pr_debug("gain is %d, not adjusting\n", gain); return uv; } pr_debug("adjusting by factor: %hu/%lld = %lld%%\n", gain, QPNP_ADC_GAIN_IDEAL, div64_s64((s64)gain * 100LL, (s64)QPNP_ADC_GAIN_IDEAL)); result_uv = div64_s64(uv * (s64)gain, QPNP_ADC_GAIN_IDEAL); pr_debug("result_uv = %lld\n", result_uv); return result_uv; } static int convert_vsense_to_uv(struct qpnp_bms_chip *chip, int16_t reading) { struct qpnp_iadc_calib calibration; qpnp_iadc_get_gain_and_offset(chip->iadc_dev, &calibration); return cc_adjust_for_gain(cc_reading_to_uv(reading), calibration.gain_raw - calibration.offset_raw); } static int read_vsense_avg(struct qpnp_bms_chip *chip, int *result_uv) { int rc; int16_t reading; rc = qpnp_read_wrapper(chip, (u8 *)&reading, chip->base + BMS1_VSENSE_AVG_DATA0, 2); if (rc) { pr_err("fail to read VSENSE_AVG rc = %d\n", rc); return rc; } *result_uv = convert_vsense_to_uv(chip, reading); return 0; } static int get_battery_current(struct qpnp_bms_chip *chip, int *result_ua) { int rc, vsense_uv = 0; int64_t temp_current; if (chip->r_sense_uohm == 0) { pr_err("r_sense is zero\n"); return -EINVAL; } mutex_lock(&chip->bms_output_lock); lock_output_data(chip); read_vsense_avg(chip, &vsense_uv); unlock_output_data(chip); mutex_unlock(&chip->bms_output_lock); pr_debug("vsense_uv=%duV\n", vsense_uv); temp_current = div_s64((vsense_uv * 1000000LL), (int)chip->r_sense_uohm); rc = qpnp_iadc_comp_result(chip->iadc_dev, &temp_current); if (rc) pr_debug("error compensation failed: %d\n", rc); *result_ua = temp_current; pr_debug("err compensated ibat=%duA\n", *result_ua); return 0; } static int get_battery_voltage(struct qpnp_bms_chip *chip, int *result_uv) { int rc; struct qpnp_vadc_result adc_result; rc = qpnp_vadc_read(chip->vadc_dev, VBAT_SNS, &adc_result); if (rc) { pr_err("error reading adc channel = %d, rc = %d\n", VBAT_SNS, rc); return rc; } 
pr_debug("mvolts phy = %lld meas = 0x%llx\n", adc_result.physical, adc_result.measurement); *result_uv = (int)adc_result.physical; return 0; } #define CC_36_BIT_MASK 0xFFFFFFFFFLL static uint64_t convert_s64_to_s36(int64_t raw64) { return (uint64_t) raw64 & CC_36_BIT_MASK; } #define SIGN_EXTEND_36_TO_64_MASK (-1LL ^ CC_36_BIT_MASK) static int64_t convert_s36_to_s64(uint64_t raw36) { raw36 = raw36 & CC_36_BIT_MASK; return (raw36 >> 35) == 0LL ? raw36 : (SIGN_EXTEND_36_TO_64_MASK | raw36); } static int read_cc_raw(struct qpnp_bms_chip *chip, int64_t *reading, int cc_type) { int64_t raw_reading; int rc; if (cc_type == SHDW_CC) rc = qpnp_read_wrapper(chip, (u8 *)&raw_reading, chip->base + BMS1_SW_CC_DATA0, 5); else rc = qpnp_read_wrapper(chip, (u8 *)&raw_reading, chip->base + BMS1_CC_DATA0, 5); if (rc) { pr_err("Error reading cc: rc = %d\n", rc); return -ENXIO; } *reading = convert_s36_to_s64(raw_reading); return 0; } static int calib_vadc(struct qpnp_bms_chip *chip) { int rc, raw_0625, raw_1250; struct qpnp_vadc_result result; rc = qpnp_vadc_read(chip->vadc_dev, REF_625MV, &result); if (rc) { pr_debug("vadc read failed with rc = %d\n", rc); return rc; } raw_0625 = result.adc_code; rc = qpnp_vadc_read(chip->vadc_dev, REF_125V, &result); if (rc) { pr_debug("vadc read failed with rc = %d\n", rc); return rc; } raw_1250 = result.adc_code; chip->vadc_v0625 = vadc_reading_to_uv(raw_0625); chip->vadc_v1250 = vadc_reading_to_uv(raw_1250); pr_debug("vadc calib: 0625 = %d raw (%d uv), 1250 = %d raw (%d uv)\n", raw_0625, chip->vadc_v0625, raw_1250, chip->vadc_v1250); return 0; } static void convert_and_store_ocv(struct qpnp_bms_chip *chip, struct raw_soc_params *raw, int batt_temp) { int rc; pr_info("prev_last_good_ocv_raw = %d, last_good_ocv_raw = %d\n", chip->prev_last_good_ocv_raw, raw->last_good_ocv_raw); rc = calib_vadc(chip); if (rc) pr_err("Vadc reference voltage read failed, rc = %d\n", rc); chip->prev_last_good_ocv_raw = raw->last_good_ocv_raw; raw->last_good_ocv_uv = 
convert_vbatt_raw_to_uv(chip, raw->last_good_ocv_raw); chip->last_ocv_uv = raw->last_good_ocv_uv; chip->last_ocv_temp = batt_temp; chip->software_cc_uah = 0; pr_debug("last_good_ocv_uv = %d\n", raw->last_good_ocv_uv); } #define CLEAR_CC BIT(7) #define CLEAR_SHDW_CC BIT(6) static void reset_cc(struct qpnp_bms_chip *chip, u8 flags) { int rc; pr_info("resetting cc manually with flags %hhu\n", flags); mutex_lock(&chip->bms_output_lock); rc = qpnp_masked_write(chip, BMS1_CC_CLEAR_CTL, flags, flags); if (rc) pr_err("cc reset failed: %d\n", rc); udelay(100); rc = qpnp_masked_write(chip, BMS1_CC_CLEAR_CTL, flags, 0); if (rc) pr_err("cc reenable failed: %d\n", rc); mutex_unlock(&chip->bms_output_lock); } static int get_battery_status(struct qpnp_bms_chip *chip) { #if !(defined(CONFIG_HTC_BATT_8960)) union power_supply_propval ret = {0,}; if (chip->batt_psy == NULL) chip->batt_psy = power_supply_get_by_name("battery"); if (chip->batt_psy) { chip->batt_psy->get_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS, &ret); return ret.intval; } pr_debug("battery power supply is not registered\n"); return POWER_SUPPLY_STATUS_UNKNOWN; #else return pm8941_get_battery_status(); #endif } static bool is_battery_charging(struct qpnp_bms_chip *chip) { return get_battery_status(chip) == POWER_SUPPLY_STATUS_CHARGING; } static bool is_battery_full(struct qpnp_bms_chip *chip) { return get_battery_status(chip) == POWER_SUPPLY_STATUS_FULL; } static bool is_battery_present(struct qpnp_bms_chip *chip) { #if !(defined(CONFIG_HTC_BATT_8960)) union power_supply_propval ret = {0,}; if (chip->batt_psy == NULL) chip->batt_psy = power_supply_get_by_name("battery"); if (chip->batt_psy) { chip->batt_psy->get_property(chip->batt_psy, POWER_SUPPLY_PROP_PRESENT, &ret); return ret.intval; } pr_debug("battery power supply is not registered\n"); return false; #else return pm8941_get_batt_present(); #endif } static int get_battery_insertion_ocv_uv(struct qpnp_bms_chip *chip) { union power_supply_propval ret = 
{0,}; int rc, vbat; if (chip->batt_psy == NULL) chip->batt_psy = power_supply_get_by_name("battery"); if (chip->batt_psy) { rc = chip->batt_psy->get_property(chip->batt_psy, POWER_SUPPLY_PROP_VOLTAGE_OCV, &ret); if (rc) { pr_debug("Battery psy does not have voltage ocv\n"); rc = get_battery_voltage(chip, &vbat); if (rc) return -EINVAL; return vbat; } return ret.intval; } pr_debug("battery power supply is not registered\n"); return -EINVAL; } static bool is_batfet_closed(struct qpnp_bms_chip *chip) { union power_supply_propval ret = {0,}; if (chip->batt_psy == NULL) chip->batt_psy = power_supply_get_by_name("battery"); if (chip->batt_psy) { chip->batt_psy->get_property(chip->batt_psy, POWER_SUPPLY_PROP_ONLINE, &ret); return !!ret.intval; } pr_debug("battery power supply is not registered\n"); return true; } static int get_simultaneous_batt_v_and_i(struct qpnp_bms_chip *chip, int *ibat_ua, int *vbat_uv) { struct qpnp_iadc_result i_result; struct qpnp_vadc_result v_result; enum qpnp_iadc_channels iadc_channel; int rc; iadc_channel = chip->use_external_rsense ? 
EXTERNAL_RSENSE : INTERNAL_RSENSE; if (is_battery_full(chip)) { rc = get_battery_current(chip, ibat_ua); if (rc) { pr_err("bms current read failed with rc: %d\n", rc); return rc; } rc = qpnp_vadc_read(chip->vadc_dev, VBAT_SNS, &v_result); if (rc) { pr_err("vadc read failed with rc: %d\n", rc); return rc; } *vbat_uv = (int)v_result.physical; } else { rc = qpnp_iadc_vadc_sync_read(chip->iadc_dev, iadc_channel, &i_result, VBAT_SNS, &v_result); if (rc) { pr_err("adc sync read failed with rc: %d\n", rc); return rc; } *ibat_ua = -1 * (int)i_result.result_ua; *vbat_uv = (int)v_result.physical; } return 0; } static int estimate_ocv(struct qpnp_bms_chip *chip, int batt_temp) { int ibat_ua, vbat_uv, ocv_est_uv; int rc; int rbatt_mohm; rbatt_mohm = get_rbatt(chip, bms_dbg.soc_rbatt, batt_temp); rc = get_simultaneous_batt_v_and_i(chip, &ibat_ua, &vbat_uv); if (rc) { pr_err("simultaneous failed rc = %d\n", rc); return rc; } ocv_est_uv = vbat_uv + (ibat_ua * rbatt_mohm) / 1000; bms_dbg.ibat_for_est_ocv = ibat_ua; bms_dbg.vbat_for_est_ocv = vbat_uv; bms_dbg.rbat_for_est_ocv = rbatt_mohm; pr_info("estimated pon ocv = %d, vbat_uv = %d, ibat_ua = %d, " "rbatt_mohm = %d\n", ocv_est_uv, vbat_uv, ibat_ua, rbatt_mohm); return ocv_est_uv; } static void reset_for_new_battery(struct qpnp_bms_chip *chip, int batt_temp) { chip->last_ocv_uv = estimate_ocv(chip, batt_temp); mutex_lock(&chip->last_soc_mutex); chip->last_soc = -EINVAL; chip->last_soc_invalid = true; mutex_unlock(&chip->last_soc_mutex); chip->soc_at_cv = -EINVAL; chip->shutdown_soc_invalid = true; chip->shutdown_soc = 0; chip->shutdown_iavg_ma = 0; chip->prev_pc_unusable = -EINVAL; reset_cc(chip, CLEAR_CC | CLEAR_SHDW_CC); chip->software_cc_uah = 0; chip->software_shdw_cc_uah = 0; chip->last_cc_uah = INT_MIN; chip->last_ocv_temp = batt_temp; chip->prev_batt_terminal_uv = 0; if (chip->enable_fcc_learning) { chip->adjusted_fcc_temp_lut = NULL; chip->fcc_new_mah = -EINVAL; chip->charge_increase = 0; chip->charge_cycles = 0; 
backup_charge_cycle(chip); discard_backup_fcc_data(chip); memset(chip->fcc_learning_samples, 0, chip->min_fcc_learning_samples * sizeof(struct fcc_sample)); } } int pm8941_bms_batt_full_fake_ocv(void) { if (!the_chip) { pr_info("called before initialization\n"); return -EINVAL; } if (the_chip->enable_batt_full_fake_ocv && store_soc_ui == 100) the_chip->batt_full_fake_ocv = true; return 0; } #define OCV_RAW_UNINITIALIZED 0xFFFF #define MIN_OCV_UV 2000000 static int read_soc_params_raw(struct qpnp_bms_chip *chip, struct raw_soc_params *raw, int batt_temp) { int warm_reset; int rc; mutex_lock(&chip->bms_output_lock); lock_output_data(chip); rc = qpnp_read_wrapper(chip, (u8 *)&raw->last_good_ocv_raw, chip->base + BMS1_OCV_FOR_SOC_DATA0, 2); if (rc) { pr_err("Error reading ocv: rc = %d\n", rc); rc = -ENXIO; goto error_handle; } rc = read_cc_raw(chip, &raw->cc, CC); if (rc) { pr_err("Failed to read raw cc data, rc = %d\n", rc); goto error_handle; } rc = read_cc_raw(chip, &raw->shdw_cc, SHDW_CC); if (rc) { pr_err("Failed to read raw shdw_cc data, rc = %d\n", rc); goto error_handle; } unlock_output_data(chip); mutex_unlock(&chip->bms_output_lock); if (chip->prev_last_good_ocv_raw == OCV_RAW_UNINITIALIZED) { convert_and_store_ocv(chip, raw, batt_temp); warm_reset = qpnp_pon_is_warm_reset(); pr_info("PON_OCV_UV=%d, cc=%llx, warm_reset=%d, ocv_backup=%d, " "cc_backup=%d\n", chip->last_ocv_uv, raw->cc, warm_reset, chip->ocv_backup_uv, chip->cc_backup_uah); bms_dbg.last_ocv_raw_uv = raw->last_good_ocv_uv; if (read_backup_ocv_uv() > 0 && warm_reset > 0) { chip->cc_backup_uah = read_backup_cc_uah(); raw->last_good_ocv_uv = chip->last_ocv_uv = read_backup_ocv_uv(); } if (chip->ocv_backup_uv) { if (chip->ocv_reading_at_100 > 0) chip->last_ocv_uv = chip->max_voltage_uv; else raw->last_good_ocv_uv = chip->last_ocv_uv = chip->ocv_backup_uv; } pr_info("last_good_ocv_raw=0x%x,last_good_ocv_uv/ori=%duV/%duV," "ocv_reading_at_100=%x,cc_backup=%d,ocv_backup=%d,last_ocv_uv=%d\n", 
raw->last_good_ocv_raw, raw->last_good_ocv_uv, bms_dbg.last_ocv_raw_uv, chip->ocv_reading_at_100, chip->cc_backup_uah, chip->ocv_backup_uv, chip->last_ocv_uv); #if 0 if (raw->last_good_ocv_uv < MIN_OCV_UV || warm_reset > 0) { pr_debug("OCV is stale or bad, estimating new OCV.\n"); chip->last_ocv_uv = estimate_ocv(chip, batt_temp); bms_dbg.pon_est_ocv = chip->last_ocv_uv; raw->last_good_ocv_uv = chip->last_ocv_uv; reset_cc(chip, CLEAR_CC | CLEAR_SHDW_CC); pr_info("New PON_OCV_UV = %d, cc = %llx\n", chip->last_ocv_uv, raw->cc); } #endif } else if (chip->new_battery) { reset_for_new_battery(chip, batt_temp); raw->cc = 0; raw->shdw_cc = 0; raw->last_good_ocv_uv = chip->last_ocv_uv; chip->new_battery = false; } else if (chip->done_charging) { chip->done_charging = false; chip->ocv_reading_at_100 = raw->last_good_ocv_raw; chip->last_ocv_uv = chip->max_voltage_uv; raw->last_good_ocv_uv = chip->max_voltage_uv; raw->cc = 0; raw->shdw_cc = 0; reset_cc(chip, CLEAR_CC | CLEAR_SHDW_CC); chip->last_ocv_temp = batt_temp; chip->software_cc_uah = 0; chip->software_shdw_cc_uah = 0; chip->last_cc_uah = INT_MIN; pr_info("EOC Battery full ocv_reading = 0x%x\n", chip->ocv_reading_at_100); } else if (chip->prev_last_good_ocv_raw != raw->last_good_ocv_raw) { convert_and_store_ocv(chip, raw, batt_temp); htc_batt_bms_timer.no_ocv_update_period_ms = 0; if(chip->criteria_sw_est_ocv == FIRST_SW_EST_OCV_THR_MS) { rc = of_property_read_u32(chip->spmi->dev.of_node, "qcom,criteria-sw-est-ocv", &chip->criteria_sw_est_ocv); if (rc) { pr_err("err:%d, criteria-sw-est-ocv missing in dt, set default value\n", rc); chip->criteria_sw_est_ocv = DEFAULT_SW_EST_OCV_THR_MS; } } chip->last_cc_uah = INT_MIN; store_emmc.store_ocv_uv = bms_dbg.last_ocv_raw_uv = raw->last_good_ocv_uv; chip->ocv_backup_uv = 0; chip->cc_backup_uah = 0; write_backup_cc_uah(0); write_backup_ocv_uv(0); chip->batt_full_fake_ocv = false; } else if (chip->batt_full_fake_ocv) { chip->batt_full_fake_ocv = false; chip->cc_backup_uah = 
/* (tail of read_soc_params_raw(), continuing the assignment of
 * chip->cc_backup_uah started on the previous line)
 * "fake full" path: snapshot the coulomb counter as the backup offset and
 * pin the last OCV to the full-charge voltage so SoC reads 100%. */
	bms_dbg.ori_cc_uah = calculate_cc(chip, raw->cc, CC, NORESET);
		chip->ocv_reading_at_100 = raw->last_good_ocv_raw;
		raw->last_good_ocv_uv = chip->last_ocv_uv = chip->max_voltage_uv;
		pr_info("Fake full ocv_reading_at_100=0x%x, last_ocv_uv=%d, cc_backup_uah=%d\n",
				chip->ocv_reading_at_100, chip->last_ocv_uv,
				chip->cc_backup_uah);
		/* persist the fake-full state so it survives a reboot */
		store_emmc.store_ocv_uv = chip->last_ocv_uv;
		store_emmc.store_cc_uah = 0;
		write_backup_cc_uah(chip->cc_backup_uah);
		write_backup_ocv_uv(chip->last_ocv_uv);
	} else {
		/* no new HW OCV this cycle: keep reporting the cached OCV */
		store_emmc.store_ocv_uv = raw->last_good_ocv_uv = chip->last_ocv_uv;
	}

	/* invalidate the at-100% marker as soon as HW produces a new OCV */
	if (chip->ocv_reading_at_100 != raw->last_good_ocv_raw)
		chip->ocv_reading_at_100 = OCV_RAW_UNINITIALIZED;

	pr_debug("last_good_ocv_raw= 0x%x, last_good_ocv_uv= %duV\n",
			raw->last_good_ocv_raw, raw->last_good_ocv_uv);
	pr_debug("cc_raw= 0x%llx\n", raw->cc);
	return 0;

error_handle:
	/* NOTE(review): this path releases bms_output_lock but does not call
	 * unlock_output_data() even though lock_output_data() ran before the
	 * failing read -- confirm the output latch is recovered elsewhere. */
	mutex_unlock(&chip->bms_output_lock);
	return rc;
}

/* Convert an OCV (uV) to percent-of-capacity via the PC(temp, OCV) lookup
 * table; batt_temp arrives in deci-degC, the table wants degC and mV. */
static int calculate_pc(struct qpnp_bms_chip *chip, int ocv_uv,
							int batt_temp)
{
	int pc;

	pc = interpolate_pc(chip->pc_temp_ocv_lut,
			batt_temp / 10, ocv_uv / 1000);
	pr_debug("pc = %u %% for ocv = %d uv batt_temp = %d\n",
					pc, ocv_uv, batt_temp);
	return pc;
}

/* Full-charge capacity (uAh) at batt_temp; prefers the learned (adjusted)
 * FCC table when one has been built by FCC learning. */
static int calculate_fcc(struct qpnp_bms_chip *chip, int batt_temp)
{
	int fcc_uah;

	if (chip->adjusted_fcc_temp_lut == NULL) {
		/* interpolate_fcc() yields mAh -> scale to uAh */
		fcc_uah = interpolate_fcc(chip->fcc_temp_lut, batt_temp) * 1000;
		pr_debug("fcc = %d uAh\n", fcc_uah);
		return fcc_uah;
	} else {
		return 1000 * interpolate_fcc(chip->adjusted_fcc_temp_lut,
				batt_temp);
	}
}

/* Remaining charge (uAh) implied by the last good OCV: FCC scaled by the
 * percent-of-capacity at that OCV and the temperature it was taken at. */
static int calculate_ocv_charge(struct qpnp_bms_chip *chip,
						struct raw_soc_params *raw,
						int fcc_uah)
{
	int ocv_uv, pc;

	ocv_uv = raw->last_good_ocv_uv;
	pc = calculate_pc(chip, ocv_uv, chip->last_ocv_temp);
	bms_dbg.rc_pc = pc;
	pr_debug("ocv_uv = %d pc = %d\n", ocv_uv, pc);
	return (fcc_uah * pc) / 100;
}

#define CC_READING_TICKS	56
#define SLEEP_CLK_HZ		32764
#define SECONDS_PER_HOUR	3600

/* Convert an accumulated coulomb-counter voltage (uV) to pico-volt-hours.
 * Each CC reading integrates CC_READING_TICKS sleep-clock periods; the
 * scaling is done as *100000 then *10 (instead of *1000000 at once) to
 * keep the intermediate 64-bit product from overflowing. */
static s64 cc_uv_to_pvh(s64 cc_uv)
{
	return div_s64(cc_uv * CC_READING_TICKS * 100000,
			SLEEP_CLK_HZ * SECONDS_PER_HOUR) * 10;
}
static int calculate_cc(struct qpnp_bms_chip *chip, int64_t cc, int cc_type, int clear_cc) { struct qpnp_iadc_calib calibration; struct qpnp_vadc_result result; int64_t cc_voltage_uv, cc_pvh, cc_uah, *software_counter; int rc; software_counter = cc_type == SHDW_CC ? &chip->software_shdw_cc_uah : &chip->software_cc_uah; rc = qpnp_vadc_read(chip->vadc_dev, DIE_TEMP, &result); if (rc) { pr_err("could not read pmic die temperature: %d\n", rc); return *software_counter; } qpnp_iadc_get_gain_and_offset(chip->iadc_dev, &calibration); pr_debug("%scc = %lld, die_temp = %lld\n", cc_type == SHDW_CC ? "shdw_" : "", cc, result.physical); cc_voltage_uv = cc_reading_to_uv(cc); cc_voltage_uv = cc_adjust_for_gain(cc_voltage_uv, calibration.gain_raw - calibration.offset_raw); cc_pvh = cc_uv_to_pvh(cc_voltage_uv); cc_uah = div_s64(cc_pvh, chip->r_sense_uohm); rc = qpnp_iadc_comp_result(chip->iadc_dev, &cc_uah); if (rc) pr_debug("error compensation failed: %d\n", rc); if (clear_cc == RESET) { pr_info("software_%scc = %lld, added cc_uah = %lld\n", cc_type == SHDW_CC ? "sw_" : "", *software_counter, cc_uah); *software_counter += cc_uah; reset_cc(chip, cc_type == SHDW_CC ? CLEAR_SHDW_CC : CLEAR_CC); return (int)*software_counter; } else { pr_debug("software_%scc = %lld, cc_uah = %lld, total = %lld\n", cc_type == SHDW_CC ? 
"shdw_" : "", *software_counter, cc_uah, *software_counter + cc_uah); return *software_counter + cc_uah; } } static int get_rbatt(struct qpnp_bms_chip *chip, int soc_rbatt_mohm, int batt_temp) { int rbatt_mohm, scalefactor; rbatt_mohm = chip->default_rbatt_mohm; if (chip->rbatt_sf_lut == NULL) { pr_debug("RBATT = %d\n", rbatt_mohm); return rbatt_mohm; } batt_temp = batt_temp / 10; #ifdef CONFIG_ARCH_MSM8226 if (soc_rbatt_mohm > 100) scalefactor = interpolate_scalingfactor(chip->rbatt_sf_lut, batt_temp, 100); else #endif scalefactor = interpolate_scalingfactor(chip->rbatt_sf_lut, batt_temp, soc_rbatt_mohm); bms_dbg.rbatt_sf = scalefactor; rbatt_mohm = (rbatt_mohm * scalefactor) / 100; rbatt_mohm += chip->r_conn_mohm; rbatt_mohm += chip->rbatt_capacitive_mohm; return rbatt_mohm; } #define IAVG_MINIMAL_TIME 2 static void calculate_iavg(struct qpnp_bms_chip *chip, int cc_uah, int *iavg_ua, int delta_time_s) { int delta_cc_uah = 0; if (delta_time_s < IAVG_MINIMAL_TIME || chip->last_cc_uah == INT_MIN) { get_battery_current(chip, iavg_ua); goto out; } delta_cc_uah = cc_uah - chip->last_cc_uah; *iavg_ua = div_s64((s64)delta_cc_uah * 3600, delta_time_s); out: pr_debug("delta_cc = %d iavg_ua = %d\n", delta_cc_uah, (int)*iavg_ua); chip->last_cc_uah = cc_uah; } #if !(defined(CONFIG_HTC_BATT_8960)) static int calculate_termination_uuc(struct qpnp_bms_chip *chip, struct soc_params *params, int batt_temp, int uuc_iavg_ma, int *ret_pc_unusable) { int unusable_uv, pc_unusable, uuc_uah; int i = 0; int ocv_mv; int batt_temp_degc = batt_temp / 10; int rbatt_mohm; int delta_uv; int prev_delta_uv = 0; int prev_rbatt_mohm = 0; int uuc_rbatt_mohm; for (i = 0; i <= 100; i++) { ocv_mv = interpolate_ocv(chip->pc_temp_ocv_lut, batt_temp_degc, i); rbatt_mohm = get_rbatt(chip, i, batt_temp); unusable_uv = (rbatt_mohm * uuc_iavg_ma) + (chip->v_cutoff_uv); delta_uv = ocv_mv * 1000 - unusable_uv; if (delta_uv > 0) break; prev_delta_uv = delta_uv; prev_rbatt_mohm = rbatt_mohm; } uuc_rbatt_mohm = 
linear_interpolate(rbatt_mohm, delta_uv, prev_rbatt_mohm, prev_delta_uv, 0); unusable_uv = (uuc_rbatt_mohm * uuc_iavg_ma) + (chip->v_cutoff_uv); bms_dbg.uuc_rbatt_mohm = uuc_rbatt_mohm; bms_dbg.unusable_uv = unusable_uv; pc_unusable = calculate_pc(chip, unusable_uv, batt_temp); uuc_uah = (params->fcc_uah * pc_unusable) / 100; pr_debug("For uuc_iavg_ma = %d, unusable_rbatt = %d unusable_uv = %d unusable_pc = %d rbatt_pc = %d uuc = %d\n", uuc_iavg_ma, uuc_rbatt_mohm, unusable_uv, pc_unusable, i, uuc_uah); *ret_pc_unusable = pc_unusable; return uuc_uah; } #define TIME_PER_PERCENT_UUC 60 static int adjust_uuc(struct qpnp_bms_chip *chip, struct soc_params *params, int new_pc_unusable, int new_uuc_uah, int batt_temp) { int new_unusable_mv, new_iavg_ma; int batt_temp_degc = batt_temp / 10; int max_percent_change; max_percent_change = max(params->delta_time_s / TIME_PER_PERCENT_UUC, 1); if (chip->prev_pc_unusable == -EINVAL || abs(chip->prev_pc_unusable - new_pc_unusable) <= max_percent_change) { chip->prev_pc_unusable = new_pc_unusable; return new_uuc_uah; } if (new_pc_unusable > chip->prev_pc_unusable) chip->prev_pc_unusable += max_percent_change; else chip->prev_pc_unusable -= max_percent_change; new_uuc_uah = (params->fcc_uah * chip->prev_pc_unusable) / 100; new_unusable_mv = interpolate_ocv(chip->pc_temp_ocv_lut, batt_temp_degc, chip->prev_pc_unusable); if (new_unusable_mv < chip->v_cutoff_uv/1000) new_unusable_mv = chip->v_cutoff_uv/1000; new_iavg_ma = (new_unusable_mv * 1000 - chip->v_cutoff_uv) / params->rbatt_mohm; if (new_iavg_ma == 0) new_iavg_ma = 1; chip->prev_uuc_iavg_ma = new_iavg_ma; pr_debug("Restricting UUC to %d (%d%%) unusable_mv = %d iavg_ma = %d\n", new_uuc_uah, chip->prev_pc_unusable, new_unusable_mv, new_iavg_ma); return new_uuc_uah; } #endif #define MIN_IAVG_MA 250 #if !(defined(CONFIG_HTC_BATT_8960)) static int calculate_unusable_charge_uah(struct qpnp_bms_chip *chip, struct soc_params *params, int batt_temp) { int uuc_uah_iavg; int i; int 
uuc_iavg_ma = params->iavg_ua / 1000; int pc_unusable; if (chip->first_time_calc_uuc && chip->shutdown_iavg_ma != 0) { pr_debug("Using shutdown_iavg_ma = %d in all samples\n", chip->shutdown_iavg_ma); for (i = 0; i < IAVG_SAMPLES; i++) chip->iavg_samples_ma[i] = chip->shutdown_iavg_ma; chip->iavg_index = 0; chip->iavg_num_samples = IAVG_SAMPLES; } if (params->delta_time_s >= IAVG_MINIMAL_TIME) { if (uuc_iavg_ma < MIN_IAVG_MA) uuc_iavg_ma = MIN_IAVG_MA; chip->iavg_samples_ma[chip->iavg_index] = uuc_iavg_ma; chip->iavg_index = (chip->iavg_index + 1) % IAVG_SAMPLES; chip->iavg_num_samples++; if (chip->iavg_num_samples >= IAVG_SAMPLES) chip->iavg_num_samples = IAVG_SAMPLES; } uuc_iavg_ma = 0; if (chip->iavg_num_samples != 0) { for (i = 0; i < chip->iavg_num_samples; i++) { pr_debug("iavg_samples_ma[%d] = %d\n", i, chip->iavg_samples_ma[i]); uuc_iavg_ma += chip->iavg_samples_ma[i]; } uuc_iavg_ma = DIV_ROUND_CLOSEST(uuc_iavg_ma, chip->iavg_num_samples); } if (bms_reset) return (params->fcc_uah * 3) / 100; uuc_uah_iavg = calculate_termination_uuc(chip, params, batt_temp, uuc_iavg_ma, &pc_unusable); bms_dbg.pc_unusable = pc_unusable; bms_dbg.uuc_iavg_ma = uuc_iavg_ma; bms_dbg.ori_uuc_uah = uuc_uah_iavg; pr_debug("uuc_iavg_ma = %d uuc with iavg = %d\n", uuc_iavg_ma, uuc_uah_iavg); chip->prev_uuc_iavg_ma = uuc_iavg_ma; uuc_uah_iavg = adjust_uuc(chip, params, pc_unusable, uuc_uah_iavg, batt_temp); return uuc_uah_iavg; } #else #define IBAT_FOR_UC_MA 2000 static int calculate_unusable_charge_uah(struct qpnp_bms_chip *chip, struct soc_params *params, int batt_temp) { int voltage_unusable_uv, pc_unusable; voltage_unusable_uv = (params->rbatt_mohm * IBAT_FOR_UC_MA) + chip->v_cutoff_uv; pc_unusable = calculate_pc(chip, voltage_unusable_uv, batt_temp); pr_debug("rbatt = %umilliOhms unusable_v =%d unusable_pc = %d\n", params->rbatt_mohm, voltage_unusable_uv, pc_unusable); bms_dbg.unusable_uv = voltage_unusable_uv; bms_dbg.pc_unusable = pc_unusable; return (params->fcc_uah* 
/* (continuation of the CONFIG_HTC_BATT_8960 variant of
 * calculate_unusable_charge_uah(): UUC is the FCC fraction at/below the
 * unusable percent-of-charge) */
	pc_unusable) / 100;
}
#endif

/* Charge (uAh) corresponding to `soc` percent of the usable window,
 * re-offset by the coulomb count and the unusable charge. */
static s64 find_ocv_charge_for_soc(struct qpnp_bms_chip *chip,
				struct soc_params *params, int soc)
{
	return div_s64((s64)soc * (params->fcc_uah - params->uuc_uah),
			100) + params->cc_uah + params->uuc_uah;
}

/* Invert soc -> percent-of-capacity (clamped to 0..100) using the charge
 * implied by that soc against the full-charge capacity. */
static int find_pc_for_soc(struct qpnp_bms_chip *chip,
			struct soc_params *params, int soc)
{
	int ocv_charge_uah = find_ocv_charge_for_soc(chip, params, soc);
	int pc;

	pc = DIV_ROUND_CLOSEST((int)ocv_charge_uah * 100, params->fcc_uah);
	pc = clamp(pc, 0, 100);
	pr_debug("soc = %d, fcc = %d uuc = %d rc = %d pc = %d\n",
			soc, params->fcc_uah, params->uuc_uah,
			ocv_charge_uah, pc);
	return pc;
}

#define SIGN(x) ((x) < 0 ? -1 : 1)
#define UV_PER_SPIN 50000

/* Reverse-lookup the OCV (uV) mapping to `pc` by walking the PC(temp, OCV)
 * table: step ocv_mv by delta_mv, dropping to 1 mV steps once the search
 * overshoots (sign flip); iteration count bounded by max_spin_count. */
static int find_ocv_for_pc(struct qpnp_bms_chip *chip, int batt_temp, int pc)
{
	int new_pc;
	int batt_temp_degc = batt_temp / 10;
	int ocv_mv;
	int delta_mv = 5;
	int max_spin_count;
	int count = 0;
	int sign, new_sign;

	ocv_mv = interpolate_ocv(chip->pc_temp_ocv_lut, batt_temp_degc, pc);
	new_pc = interpolate_pc(chip->pc_temp_ocv_lut, batt_temp_degc, ocv_mv);
	pr_debug("test revlookup pc = %d for ocv = %d\n", new_pc, ocv_mv);
	max_spin_count = 1 + (chip->max_voltage_uv - chip->v_cutoff_uv)
						/ UV_PER_SPIN;
	sign = SIGN(pc - new_pc);

	while (abs(new_pc - pc) != 0 && count < max_spin_count) {
		/* a sign flip means we stepped past the target; refine */
		new_sign = SIGN(pc - new_pc);
		if (new_sign != sign) {
			if (delta_mv > 1)
				delta_mv = 1;
			else
				break;
		}
		sign = new_sign;

		ocv_mv = ocv_mv + delta_mv * sign;
		new_pc = interpolate_pc(chip->pc_temp_ocv_lut,
				batt_temp_degc, ocv_mv);
		pr_debug("test revlookup pc = %d for ocv = %d\n",
				new_pc, ocv_mv);
		count++;
	}

	return ocv_mv * 1000;
}

/* Pick an rbatt scale factor for the SW OCV estimate from the lut.
 * NOTE(review): `temp` is compared against row_entries[y] where y is the
 * COLUMN index, and the outer loop over x can only ever return with
 * x == 0 (the inner scan is identical for every x).  This looks like a
 * row/column mix-up -- confirm against the sf_lut layout used by
 * interpolate_scalingfactor(). */
static int get_rbatt_for_estimate_ocv(struct sf_lut *rbatt_lut, int temp)
{
	int x, y, rows, cols;

	rows = rbatt_lut->rows;
	cols = rbatt_lut->cols;
	for (x = 0; x < rows; x++) {
		for (y = 0; y < cols; y++) {
			if (temp < rbatt_lut->row_entries[y])
				return rbatt_lut->sf[x][y];
		}
	}
	/* temp beyond every table entry: fall back to the last cell */
	return rbatt_lut->sf[rows-1][cols-1];
}

/* Estimate OCV in software from a simultaneous V/I sample plus a
 * table-derived rbatt (function body continues past this chunk). */
static int estimate_sw_ocv(struct qpnp_bms_chip *chip, int ibatt_ua, int vbat_uv)
{
	int
ocv_est_uv, batt_temp, rc; int rbatt_mohm, rbatt_for_estimated_ocv; struct qpnp_vadc_result result; if (!chip) { pr_info("[EST]called before initialization\n"); return -EINVAL; } rc = qpnp_vadc_read(chip->vadc_dev, LR_MUX1_BATT_THERM, &result); if (rc) { pr_info("[EST]error reading adc channel = %d, rc = %d\n", LR_MUX1_BATT_THERM, rc); return rc; } pr_debug("batt_temp phy = %lld, meas = 0x%llx\n", result.physical, result.measurement); batt_temp = (int)result.physical; if (batt_temp <= 0) { pr_info("[EST]batt_temp=%d, return!\n", batt_temp); return 0; } if (chip->rbatt_est_ocv_lut == NULL) { pr_info("[EST]rbatt_est_ocv_lut is NULL\n"); return 0; } rbatt_for_estimated_ocv = get_rbatt_for_estimate_ocv(chip->rbatt_est_ocv_lut, batt_temp/10); rbatt_mohm = rbatt_for_estimated_ocv + chip->rconn_mohm_sw_est_ocv; ocv_est_uv = vbat_uv + (ibatt_ua * rbatt_mohm) / 1000; pr_info("[EST]estimated ocv=%d, rbatt=%d, rconn=%d, ibatt_ua=%d, vbat_uv=%d, " "ori last_ocv_uv=%d, no_hw_ocv_ms=%lu\n", ocv_est_uv, rbatt_for_estimated_ocv, chip->rconn_mohm_sw_est_ocv, ibatt_ua, vbat_uv, chip->last_ocv_uv, htc_batt_bms_timer.no_ocv_update_period_ms); return ocv_est_uv; } static int pm8941_bms_estimate_ocv(void) { int rc; int estimated_ocv_uv = 0; int ibatt_ua, vbat_uv; struct raw_soc_params raw; if (!the_chip) { pr_info("[EST] called before initialization\n"); return -EINVAL; } rc = get_simultaneous_batt_v_and_i(the_chip, &ibatt_ua, &vbat_uv); if (rc) { pr_err("[EST]simultaneous failed rc = %d\n", rc); return rc; } if (ibatt_ua > 60000) { pr_info("[EST]ibatt_ua=%d uA exceed 60mA, " "no_hw_ocv_ms=%lu, return!\n", ibatt_ua, htc_batt_bms_timer.no_ocv_update_period_ms); return 0; } mutex_lock(&the_chip->bms_output_lock); lock_output_data(the_chip); rc = qpnp_read_wrapper(the_chip, (u8 *)&raw.last_good_ocv_raw, the_chip->base + BMS1_OCV_FOR_SOC_DATA0, 2); if (rc) { pr_info("[EST]error reading ocv: rc = %d, return!\n", rc); return 0; } rc = read_cc_raw(the_chip, &raw.cc, CC); if (rc) { 
pr_info("[EST]failed to read raw cc data, rc = %d\n", rc); return 0; } rc = read_cc_raw(the_chip, &raw.shdw_cc, SHDW_CC); if (rc) { pr_info("[EST]failed to read raw shdw_cc data, rc = %d\n", rc); return 0; } unlock_output_data(the_chip); mutex_unlock(&the_chip->bms_output_lock); if (the_chip->prev_last_good_ocv_raw != raw.last_good_ocv_raw) { pr_info("[EST]ocv is updated by hw, pre_ocv_raw=%x, last_ocv_raw=%x, " "no_hw_ocv_ms=%lu\n", the_chip->prev_last_good_ocv_raw, raw.last_good_ocv_raw, htc_batt_bms_timer.no_ocv_update_period_ms); return 0; } estimated_ocv_uv = estimate_sw_ocv(the_chip, ibatt_ua, vbat_uv); if (estimated_ocv_uv > 0) { store_emmc.store_ocv_uv = the_chip->last_ocv_uv = estimated_ocv_uv; the_chip->cc_backup_uah = bms_dbg.ori_cc_uah = calculate_cc(the_chip, raw.cc, CC, NORESET); store_emmc.store_cc_uah = 0; write_backup_cc_uah(the_chip->cc_backup_uah); write_backup_ocv_uv(the_chip->last_ocv_uv); htc_batt_bms_timer.no_ocv_update_period_ms = 0; if(the_chip->criteria_sw_est_ocv == FIRST_SW_EST_OCV_THR_MS) { rc = of_property_read_u32(the_chip->spmi->dev.of_node, "qcom,criteria-sw-est-ocv", &the_chip->criteria_sw_est_ocv); if (rc) { pr_err("err:%d, criteria-sw-est-ocv missing in dt, set default value\n", rc); the_chip->criteria_sw_est_ocv = DEFAULT_SW_EST_OCV_THR_MS; } } pr_info("[EST]last_ocv=%d, ori_cc_uah=%d, backup_cc=%d, " "no_hw_ocv_ms=%ld, criteria_sw_est_ocv=%d\n", the_chip->last_ocv_uv, bms_dbg.ori_cc_uah, the_chip->cc_backup_uah, htc_batt_bms_timer.no_ocv_update_period_ms, the_chip->criteria_sw_est_ocv); } return estimated_ocv_uv; } static int get_current_time(unsigned long *now_tm_sec) { struct rtc_time tm; struct rtc_device *rtc; int rc; rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE); if (rtc == NULL) { pr_err("%s: unable to open rtc device (%s)\n", __FILE__, CONFIG_RTC_HCTOSYS_DEVICE); return -EINVAL; } rc = rtc_read_time(rtc, &tm); if (rc) { pr_err("Error reading rtc device (%s) : %d\n", CONFIG_RTC_HCTOSYS_DEVICE, rc); goto close_time; } rc 
/* (tail of get_current_time(): validate the RTC reading and convert it
 * to seconds-since-epoch in *now_tm_sec) */
	= rtc_valid_tm(&tm);
	if (rc) {
		pr_err("Invalid RTC time (%s): %d\n",
			CONFIG_RTC_HCTOSYS_DEVICE, rc);
		goto close_time;
	}
	rtc_tm_to_time(&tm, now_tm_sec);

close_time:
	rtc_class_close(rtc);
	return rc;
}

#if !(defined(CONFIG_HTC_BATT_8960))
/* battery resistance in uOhm (rbatt_mohm scaled) for the psy property */
static int get_prop_bms_batt_resistance(struct qpnp_bms_chip *chip)
{
	return chip->rbatt_mohm * 1000;
}
#endif

/* instantaneous battery current in uA, or a negative errno */
static int get_prop_bms_current_now(struct qpnp_bms_chip *chip)
{
	int rc, result_ua;

	rc = get_battery_current(chip, &result_ua);
	if (rc) {
		pr_err("failed to get current: %d\n", rc);
		return rc;
	}
	return result_ua;
}

/* accumulated charge (uAh) from the main coulomb counter */
static int get_prop_bms_charge_counter(struct qpnp_bms_chip *chip)
{
	int64_t cc_raw;

	mutex_lock(&chip->bms_output_lock);
	lock_output_data(chip);
	/* NOTE(review): third argument is the cc_type passed elsewhere as
	 * the CC/SHDW_CC enum; `false` presumably equals CC -- confirm the
	 * enum ordering matches. */
	read_cc_raw(chip, &cc_raw, false);
	unlock_output_data(chip);
	mutex_unlock(&chip->bms_output_lock);

	return calculate_cc(chip, cc_raw, CC, NORESET);
}

/* accumulated charge (uAh) from the shadow coulomb counter */
static int get_prop_bms_charge_counter_shadow(struct qpnp_bms_chip *chip)
{
	int64_t cc_raw;

	mutex_lock(&chip->bms_output_lock);
	lock_output_data(chip);
	/* NOTE(review): `true` presumably equals SHDW_CC -- see above */
	read_cc_raw(chip, &cc_raw, true);
	unlock_output_data(chip);
	mutex_unlock(&chip->bms_output_lock);

	return calculate_cc(chip, cc_raw, SHDW_CC, NORESET);
}

#if !(defined(CONFIG_HTC_BATT_8960))
/* design full-charge capacity in uAh */
static int get_prop_bms_charge_full_design(struct qpnp_bms_chip *chip)
{
	return chip->fcc_mah * 1000;
}
#endif

/* temperature-compensated full-charge capacity (uAh), or a negative
 * errno when the battery thermistor read fails */
static int get_prop_bms_charge_full(struct qpnp_bms_chip *chip)
{
	int rc;
	struct qpnp_vadc_result result;

	rc = qpnp_vadc_read(chip->vadc_dev, LR_MUX1_BATT_THERM, &result);
	if (rc) {
		pr_err("Unable to read battery temperature\n");
		return rc;
	}
	return calculate_fcc(chip, (int)result.physical);
}

/* Seconds elapsed since *time_stamp per the RTC; reports delta 0 (and
 * returns 0) when the RTC read fails.  (*time_stamp is refreshed in the
 * remainder of the function beyond this chunk.) */
static int calculate_delta_time(unsigned long *time_stamp, int *delta_time_s)
{
	unsigned long now_tm_sec = 0;

	*delta_time_s = 0;

	if (get_current_time(&now_tm_sec)) {
		pr_err("RTC read failed, delta_s = %d\n", *delta_time_s);
		return 0;
	}

	*delta_time_s = (now_tm_sec - *time_stamp);

	pr_debug("time_stamp = %ld, now_tm_sec = %ld, delta_s = %d\n",
		*time_stamp, now_tm_sec, *delta_time_s);
*time_stamp = now_tm_sec; return 0; } static void calculate_soc_params(struct qpnp_bms_chip *chip, struct raw_soc_params *raw, struct soc_params *params, int batt_temp) { int soc_rbatt, shdw_cc_uah; calculate_delta_time(&chip->tm_sec, &params->delta_time_s); pr_debug("tm_sec = %ld, delta_s = %d\n", chip->tm_sec, params->delta_time_s); params->fcc_uah = calculate_fcc(chip, batt_temp); pr_debug("FCC = %uuAh batt_temp = %d\n", params->fcc_uah, batt_temp); params->ocv_charge_uah = calculate_ocv_charge( chip, raw, params->fcc_uah); pr_debug("ocv_charge_uah = %uuAh\n", params->ocv_charge_uah); params->cc_uah = bms_dbg.ori_cc_uah = calculate_cc(chip, raw->cc, CC, NORESET); store_emmc.store_cc_uah = params->cc_uah -= chip->cc_backup_uah; pr_debug("cc_uah = %d. after subtracting %d cc_uah = %d\n", bms_dbg.ori_cc_uah, chip->cc_backup_uah, params->cc_uah); shdw_cc_uah = calculate_cc(chip, raw->shdw_cc, SHDW_CC, NORESET); pr_debug("cc_uah = %duAh raw->cc = %llx, shdw_cc_uah = %duAh raw->shdw_cc = %llx\n", params->cc_uah, raw->cc, shdw_cc_uah, raw->shdw_cc); soc_rbatt = ((params->ocv_charge_uah - params->cc_uah) * 100) / params->fcc_uah; bms_dbg.soc_rbatt = soc_rbatt; if (soc_rbatt < 0) soc_rbatt = 0; params->rbatt_mohm = get_rbatt(chip, soc_rbatt, batt_temp); bms_dbg.rbatt = params->rbatt_mohm; pr_debug("rbatt_mohm = %d\n", params->rbatt_mohm); if (params->rbatt_mohm != chip->rbatt_mohm) { chip->rbatt_mohm = params->rbatt_mohm; if (chip->bms_psy_registered) power_supply_changed(&chip->bms_psy); } calculate_iavg(chip, params->cc_uah, &params->iavg_ua, params->delta_time_s); params->uuc_uah = calculate_unusable_charge_uah(chip, params, batt_temp); pr_debug("UUC = %uuAh\n", params->uuc_uah); } static int bound_soc(int soc) { soc = max(0, soc); soc = min(100, soc); return soc; } #define IBAT_TOL_MASK 0x0F #define OCV_TOL_MASK 0xF0 #define IBAT_TOL_DEFAULT 0x03 #define IBAT_TOL_NOCHG 0x0F #define OCV_TOL_DEFAULT 0x20 #define OCV_TOL_NO_OCV 0x00 static int stop_ocv_updates(struct 
qpnp_bms_chip *chip) { pr_debug("stopping ocv updates\n"); return qpnp_masked_write(chip, BMS1_TOL_CTL, OCV_TOL_MASK, OCV_TOL_NO_OCV); } static int reset_bms_for_test(struct qpnp_bms_chip *chip) { int ibat_ua = 0, vbat_uv = 0, rc; int ocv_est_uv; if (!chip) { pr_err("BMS driver has not been initialized yet!\n"); return -EINVAL; } rc = get_simultaneous_batt_v_and_i(chip, &ibat_ua, &vbat_uv); ocv_est_uv = vbat_uv + (ibat_ua * chip->r_conn_mohm) / 1000; pr_info("forcing ocv to be %d due to bms reset mode\n", ocv_est_uv); chip->last_ocv_uv = ocv_est_uv; mutex_lock(&chip->last_soc_mutex); chip->last_soc = -EINVAL; chip->last_soc_invalid = true; mutex_unlock(&chip->last_soc_mutex); reset_cc(chip, CLEAR_CC | CLEAR_SHDW_CC); chip->software_cc_uah = 0; chip->software_shdw_cc_uah = 0; chip->last_cc_uah = INT_MIN; stop_ocv_updates(chip); pr_debug("bms reset to ocv = %duv vbat_ua = %d ibat_ua = %d\n", chip->last_ocv_uv, vbat_uv, ibat_ua); return rc; } static int bms_reset_set(const char *val, const struct kernel_param *kp) { int rc; rc = param_set_bool(val, kp); if (rc) { pr_err("Unable to set bms_reset: %d\n", rc); return rc; } if (*(bool *)kp->arg) { #if !(defined(CONFIG_HTC_BATT_8960)) struct power_supply *bms_psy = power_supply_get_by_name("bms"); struct qpnp_bms_chip *chip = container_of(bms_psy, struct qpnp_bms_chip, bms_psy); rc = reset_bms_for_test(chip); #else rc = reset_bms_for_test(the_chip); #endif if (rc) { pr_err("Unable to modify bms_reset: %d\n", rc); return rc; } } return 0; } static struct kernel_param_ops bms_reset_ops = { .set = bms_reset_set, .get = param_get_bool, }; module_param_cb(bms_reset, &bms_reset_ops, &bms_reset, 0644); #define SOC_STORAGE_MASK 0xFE static void backup_soc_and_iavg(struct qpnp_bms_chip *chip, int batt_temp, int soc) { u8 temp; int rc; int iavg_ma = chip->prev_uuc_iavg_ma; if (iavg_ma > MIN_IAVG_MA) temp = (iavg_ma - MIN_IAVG_MA) / IAVG_STEP_SIZE_MA; else temp = 0; rc = qpnp_write_wrapper(chip, &temp, chip->base + IAVG_STORAGE_REG, 
1); if (batt_temp > IGNORE_SOC_TEMP_DECIDEG) qpnp_masked_write_base(chip, chip->soc_storage_addr, SOC_STORAGE_MASK, (soc + 1) << 1); } static int scale_soc_while_chg(struct qpnp_bms_chip *chip, int chg_time_sec, int catch_up_sec, int new_soc, int prev_soc) { int scaled_soc; int numerator; pr_debug("cts = %d catch_up_sec = %d\n", chg_time_sec, catch_up_sec); if (catch_up_sec == 0) return new_soc; if (chg_time_sec > catch_up_sec) return new_soc; numerator = (catch_up_sec - chg_time_sec) * prev_soc + chg_time_sec * new_soc; scaled_soc = numerator / catch_up_sec; pr_debug("cts = %d new_soc = %d prev_soc = %d scaled_soc = %d\n", chg_time_sec, new_soc, prev_soc, scaled_soc); return scaled_soc; } static int bms_fake_battery = -EINVAL; module_param(bms_fake_battery, int, 0644); static int report_voltage_based_soc(struct qpnp_bms_chip *chip) { pr_debug("Reported voltage based soc = %d\n", chip->prev_voltage_based_soc); return chip->prev_voltage_based_soc; } #define SOC_CATCHUP_SEC_MAX 600 #define SOC_CATCHUP_SEC_PER_PERCENT 60 #define MAX_CATCHUP_SOC (SOC_CATCHUP_SEC_MAX / SOC_CATCHUP_SEC_PER_PERCENT) #define SOC_CHANGE_PER_SEC 5 #define REPORT_SOC_WAIT_MS 10000 static int report_cc_based_soc(struct qpnp_bms_chip *chip) { int soc, soc_change; int time_since_last_change_sec, charge_time_sec = 0; unsigned long last_change_sec; struct timespec now; struct qpnp_vadc_result result; int batt_temp; int rc; bool charging, charging_since_last_report; rc = wait_event_interruptible_timeout(chip->bms_wait_queue, chip->calculated_soc != -EINVAL, round_jiffies_relative(msecs_to_jiffies (REPORT_SOC_WAIT_MS))); if (rc == 0 && chip->calculated_soc == -EINVAL) { pr_debug("calculate soc timed out\n"); } else if (rc == -ERESTARTSYS) { pr_err("Wait for SoC interrupted.\n"); return rc; } rc = qpnp_vadc_read(chip->vadc_dev, LR_MUX1_BATT_THERM, &result); if (rc) { pr_err("error reading adc channel = %d, rc = %d\n", LR_MUX1_BATT_THERM, rc); return rc; } pr_debug("batt_temp phy = %lld meas = 
0x%llx\n", result.physical, result.measurement); batt_temp = (int)result.physical; mutex_lock(&chip->last_soc_mutex); soc = chip->calculated_soc; last_change_sec = chip->last_soc_change_sec; calculate_delta_time(&last_change_sec, &time_since_last_change_sec); bms_dbg.time_last_change_s = time_since_last_change_sec; charging = is_battery_charging(chip); charging_since_last_report = charging || (chip->last_soc_unbound && chip->was_charging_at_sleep); if (charging) { if (chip->charge_start_tm_sec == 0) { if (abs(soc - chip->last_soc) < MAX_CATCHUP_SOC) chip->catch_up_time_sec = (soc - chip->last_soc) * SOC_CATCHUP_SEC_PER_PERCENT; else chip->catch_up_time_sec = SOC_CATCHUP_SEC_MAX; if (chip->catch_up_time_sec < 0) chip->catch_up_time_sec = 0; chip->charge_start_tm_sec = last_change_sec; } charge_time_sec = min(SOC_CATCHUP_SEC_MAX, (int)last_change_sec - chip->charge_start_tm_sec); if (chip->last_soc == soc) chip->catch_up_time_sec = 0; } if (chip->last_soc != -EINVAL) { if (chip->last_soc < soc && !charging_since_last_report) soc = chip->last_soc; else if (chip->last_soc < soc && soc != 100) soc = scale_soc_while_chg(chip, charge_time_sec, chip->catch_up_time_sec, soc, chip->last_soc); soc_change = min((int)abs(chip->last_soc - soc), time_since_last_change_sec / SOC_CHANGE_PER_SEC); bms_dbg.ori_soc_change = soc_change; if (chip->last_soc_unbound) { chip->last_soc_unbound = false; } else { soc_change = min(1, soc_change); } if (soc < chip->last_soc && soc != 0) soc = chip->last_soc - soc_change; if (soc > chip->last_soc && soc != 100) soc = chip->last_soc + soc_change; } if (chip->last_soc != soc && !chip->last_soc_unbound) chip->last_soc_change_sec = last_change_sec; pr_info("last_soc = %d, calculated_soc = %d, soc = %d, time since last change = %d," "ori_soc_change = %d, soc_change = %d\n", chip->last_soc, chip->calculated_soc, soc, time_since_last_change_sec, bms_dbg.ori_soc_change, soc_change); chip->last_soc = bound_soc(soc); backup_soc_and_iavg(chip, batt_temp, 
chip->last_soc); pr_debug("Reported SOC = %d\n", chip->last_soc); chip->t_soc_queried = now; mutex_unlock(&chip->last_soc_mutex); return soc; } static int report_state_of_charge(struct qpnp_bms_chip *chip) { if (bms_fake_battery != -EINVAL) { pr_debug("Returning Fake SOC = %d%%\n", bms_fake_battery); return bms_fake_battery; } else if (chip->use_voltage_soc) return report_voltage_based_soc(chip); else return report_cc_based_soc(chip); } #if !(defined(CONFIG_HTC_BATT_8960)) #define VDD_MAX_ERR 5000 #define VDD_STEP_SIZE 10000 #define MAX_COUNT_BEFORE_RESET_TO_CC 3 static int charging_adjustments(struct qpnp_bms_chip *chip, struct soc_params *params, int soc, int vbat_uv, int ibat_ua, int batt_temp) { int chg_soc, soc_ibat, batt_terminal_uv, weight_ibat, weight_cc; batt_terminal_uv = vbat_uv + (ibat_ua * chip->r_conn_mohm) / 1000; if (chip->soc_at_cv == -EINVAL) { if (batt_terminal_uv >= chip->max_voltage_uv - VDD_MAX_ERR) { chip->soc_at_cv = soc; chip->prev_chg_soc = soc; chip->ibat_at_cv_ua = params->iavg_ua; pr_debug("CC_TO_CV ibat_ua = %d CHG SOC %d\n", ibat_ua, soc); } else { pr_debug("CC CHG SOC %d\n", soc); } chip->prev_batt_terminal_uv = batt_terminal_uv; chip->system_load_count = 0; return soc; } else if (ibat_ua > 0 && batt_terminal_uv < chip->max_voltage_uv - (VDD_MAX_ERR * 2)) { if (chip->system_load_count > MAX_COUNT_BEFORE_RESET_TO_CC) { chip->soc_at_cv = -EINVAL; pr_debug("Vbat below CV threshold, resetting CC_TO_CV\n"); chip->system_load_count = 0; } else { chip->system_load_count += 1; pr_debug("Vbat below CV threshold, count: %d\n", chip->system_load_count); } return soc; } else if (ibat_ua > 0) { pr_debug("NOT CHARGING SOC %d\n", soc); chip->system_load_count = 0; chip->prev_chg_soc = soc; return soc; } chip->system_load_count = 0; if (batt_terminal_uv <= chip->prev_batt_terminal_uv - VDD_STEP_SIZE) { pr_debug("batt_terminal_uv %d < (max = %d - 10000); CC CHG SOC %d\n", batt_terminal_uv, chip->prev_batt_terminal_uv, chip->prev_chg_soc); 
chip->prev_batt_terminal_uv = batt_terminal_uv;
		return chip->prev_chg_soc;
	}

	/*
	 * Tail of charging_adjustments() — the head of this function is
	 * outside this view.  Blend the coulomb-counter SOC with an
	 * ibat-derived SOC while in constant-voltage charging: the ibat
	 * weight grows as prev_chg_soc approaches 100.
	 */
	soc_ibat = bound_soc(linear_interpolate(chip->soc_at_cv,
					chip->ibat_at_cv_ua,
					100, -1 * chip->chg_term_ua,
					params->iavg_ua));
	weight_ibat = bound_soc(linear_interpolate(1, chip->soc_at_cv,
					100, 100, chip->prev_chg_soc));
	weight_cc = 100 - weight_ibat;
	chg_soc = bound_soc(DIV_ROUND_CLOSEST(soc_ibat * weight_ibat
			+ weight_cc * soc, 100));
	pr_debug("weight_ibat = %d, weight_cc = %d, soc_ibat = %d, soc_cc = %d\n",
			weight_ibat, weight_cc, soc_ibat, soc);

	/* SOC is only allowed to rise while charging (monotonic display) */
	if (chg_soc > chip->prev_chg_soc) {
		chip->prev_chg_soc = chg_soc;
		/* track an OCV consistent with the reported SOC */
		chip->charging_adjusted_ocv = find_ocv_for_pc(chip, batt_temp,
				find_pc_for_soc(chip, params, chg_soc));
		pr_debug("CC CHG ADJ OCV = %d CHG SOC %d\n",
				chip->charging_adjusted_ocv,
				chip->prev_chg_soc);
	}

	pr_debug("Reporting CHG SOC %d\n", chip->prev_chg_soc);
	chip->prev_batt_terminal_uv = batt_terminal_uv;
	return chip->prev_chg_soc;
}

/*
 * Hold a wakelock while the battery voltage is at or below the low-voltage
 * threshold so SOC keeps being recalculated; release it once vbat recovers.
 */
static void very_low_voltage_check(struct qpnp_bms_chip *chip, int vbat_uv)
{
	if (vbat_uv <= chip->low_voltage_threshold
			&& !wake_lock_active(&chip->low_voltage_wake_lock)) {
		pr_debug("voltage = %d low holding wakelock\n", vbat_uv);
		wake_lock(&chip->low_voltage_wake_lock);
	} else if (vbat_uv > chip->low_voltage_threshold
			&& wake_lock_active(&chip->low_voltage_wake_lock)) {
		pr_debug("voltage = %d releasing wakelock\n", vbat_uv);
		wake_unlock(&chip->low_voltage_wake_lock);
	}
}

/*
 * Manage the constant-voltage wakelock: grab it when vbat nears
 * max_voltage_uv while charging and CV entry has not been latched
 * (soc_at_cv == -EINVAL); drop it once CV is entered or charging stops.
 */
static void cv_voltage_check(struct qpnp_bms_chip *chip, int vbat_uv)
{
	if (wake_lock_active(&chip->cv_wake_lock)) {
		if (chip->soc_at_cv != -EINVAL) {
			pr_debug("hit CV, releasing cv wakelock\n");
			wake_unlock(&chip->cv_wake_lock);
		} else if (!is_battery_charging(chip)) {
			pr_debug("charging stopped, releasing cv wakelock\n");
			wake_unlock(&chip->cv_wake_lock);
		}
	} else if (vbat_uv > chip->max_voltage_uv - VBATT_ERROR_MARGIN
			&& chip->soc_at_cv == -EINVAL
			&& is_battery_charging(chip)
			&& !wake_lock_active(&chip->cv_wake_lock)) {
		pr_debug("voltage = %d holding cv wakelock\n", vbat_uv);
		wake_lock(&chip->cv_wake_lock);
	}
}

#define NO_ADJUST_HIGH_SOC_THRESHOLD	90
/*
 * Correct the coulomb-counter SOC against a voltage-estimated SOC.
 * An OCV estimate (vbat + ibat*rbatt) gives soc_est; the difference
 * soc - soc_est is converted into a bounded last_ocv_uv correction so
 * the reported SOC converges without jumping.  Returns the (possibly
 * adjusted) SOC; on measurement failure the input soc is returned.
 */
static int adjust_soc(struct qpnp_bms_chip *chip, struct soc_params *params,
		int soc, int batt_temp)
{
	int ibat_ua = 0, vbat_uv = 0;
	int ocv_est_uv = 0, soc_est = 0, pc_est = 0, pc = 0;
	int delta_ocv_uv = 0;
	int n = 0;
	int rc_new_uah = 0;
	int pc_new = 0;
	int soc_new = 0;
	int slope = 0;
	int rc = 0;
	int delta_ocv_uv_limit = 0;
	int correction_limit_uv = 0;

	rc = get_simultaneous_batt_v_and_i(chip, &ibat_ua, &vbat_uv);
	if (rc < 0) {
		pr_err("simultaneous vbat ibat failed err = %d\n", rc);
		goto out;
	}

	very_low_voltage_check(chip, vbat_uv);
	cv_voltage_check(chip, vbat_uv);

	/* per-cycle correction cap scales with the measured current */
	delta_ocv_uv_limit = DIV_ROUND_CLOSEST(ibat_ua, 1000);

	/* OCV estimate from terminal voltage and IR drop across rbatt */
	ocv_est_uv = vbat_uv + (ibat_ua * params->rbatt_mohm)/1000;

	pc_est = calculate_pc(chip, ocv_est_uv, batt_temp);
	soc_est = div_s64((s64)params->fcc_uah * pc_est - params->uuc_uah*100,
				(s64)params->fcc_uah - params->uuc_uah);
	soc_est = bound_soc(soc_est);

	if (bms_reset) {
		pr_debug("bms reset mode, SOC adjustment skipped\n");
		goto out;
	}

	if (is_battery_charging(chip)) {
		soc = charging_adjustments(chip, params, soc, vbat_uv, ibat_ua,
				batt_temp);
		/* in CV phase, or while actually charging, skip correction */
		if (chip->soc_at_cv != -EINVAL || ibat_ua < 0)
			goto out;
	}

	/* only correct when the estimate is low and disagrees with soc */
	if (soc_est == soc
		|| soc_est > chip->adjust_soc_low_threshold
		|| soc >= NO_ADJUST_HIGH_SOC_THRESHOLD)
		goto out;

	if (chip->last_soc_est == -EINVAL)
		chip->last_soc_est = soc;

	n = min(200, max(1 , soc + soc_est + chip->last_soc_est));
	chip->last_soc_est = soc_est;

	/* find a local slope of the pc(ocv) curve around last_ocv_uv */
	pc = calculate_pc(chip, chip->last_ocv_uv, chip->last_ocv_temp);
	if (pc > 0) {
		pc_new = calculate_pc(chip,
				chip->last_ocv_uv - (++slope * 1000),
				chip->last_ocv_temp);
		while (pc_new == pc) {
			/* flat region: widen the step until pc changes */
			slope = slope + 10;
			pc_new = calculate_pc(chip,
				chip->last_ocv_uv - (slope * 1000),
				chip->last_ocv_temp);
		}
	} else {
		pc = 1;
		pc_new = 0;
		slope = 1;
	}

	delta_ocv_uv = div_s64((soc - soc_est) * (s64)slope * 1000,
			n * (pc - pc_new));

	if (abs(delta_ocv_uv) > delta_ocv_uv_limit) {
		pr_debug("limiting delta ocv %d limit = %d\n", delta_ocv_uv,
				delta_ocv_uv_limit);
		if (delta_ocv_uv > 0)
			delta_ocv_uv = delta_ocv_uv_limit;
		else
			delta_ocv_uv = -1 * delta_ocv_uv_limit;
		pr_debug("new delta ocv = %d\n", delta_ocv_uv);
	}

#if !(defined(CONFIG_HTC_BATT_8960))
	/* at critically low voltage apply the full correction uncapped */
	if (wake_lock_active(&chip->low_voltage_wake_lock))
		goto skip_limits;
#endif

	if (chip->last_ocv_uv > chip->flat_ocv_threshold_uv)
		correction_limit_uv = chip->high_ocv_correction_limit_uv;
	else
		correction_limit_uv = chip->low_ocv_correction_limit_uv;

	if (abs(delta_ocv_uv) > correction_limit_uv) {
		pr_debug("limiting delta ocv %d limit = %d\n", delta_ocv_uv,
				correction_limit_uv);
		if (delta_ocv_uv > 0)
			delta_ocv_uv = correction_limit_uv;
		else
			delta_ocv_uv = -correction_limit_uv;
		pr_debug("new delta ocv = %d\n", delta_ocv_uv);
	}

skip_limits:

	chip->last_ocv_uv -= delta_ocv_uv;

	if (chip->last_ocv_uv >= chip->max_voltage_uv)
		chip->last_ocv_uv = chip->max_voltage_uv;

	/* recompute SOC from the corrected OCV */
	pc_new = calculate_pc(chip, chip->last_ocv_uv, chip->last_ocv_temp);
	rc_new_uah = (params->fcc_uah * pc_new) / 100;
	soc_new = (rc_new_uah - params->cc_uah - params->uuc_uah)*100
					/ (params->fcc_uah - params->uuc_uah);
	soc_new = bound_soc(soc_new);

	/* hold at 1% instead of 0 while the estimate says charge remains */
	if (soc_new == 0 && soc_est >= chip->hold_soc_est)
		soc_new = 1;

	soc = soc_new;

out:
	pr_info("ibat_ua=%d,vbat_uv=%d,ocv_est_uv=%d,pc_est=%d,"
		"soc_est=%d,n=%d,delta_ocv_uv=%d,last_ocv_uv=%d,"
		"pc_new=%d,soc_new=%d,rbatt=%d,slope=%d\n",
		ibat_ua, vbat_uv, ocv_est_uv, pc_est,
		soc_est, n, delta_ocv_uv, chip->last_ocv_uv,
		pc_new, soc_new, params->rbatt_mohm, slope);

	return soc;
}
#endif

/*
 * Keep the reported SOC at 1% instead of 0 while the measured battery
 * voltage is still above shutdown_vol_criteria (and temperature is
 * positive), so the device does not shut down with usable charge left.
 * On any measurement error the input soc is passed through unchanged.
 */
static int clamp_soc_based_on_voltage(struct qpnp_bms_chip *chip, int soc)
{
	int rc, vbat_uv, batt_temp;

	rc = get_battery_voltage(chip, &vbat_uv);
	if (rc < 0) {
		pr_err("adc vbat failed err = %d\n", rc);
		return soc;
	}

	rc = pm8941_get_batt_temperature(&batt_temp);
	if (rc) {
		pr_err("get temperature failed err = %d\n", rc);
		return soc;
	}

	if (soc == 0)
		pr_info("batt_vol = %d, batt_temp = %d\n", vbat_uv, batt_temp);

	if (chip->shutdown_vol_criteria && soc == 0 && vbat_uv >
chip->shutdown_vol_criteria && batt_temp > 0) { pr_debug("clamping soc to 1, temp = %d, vbat (%d) > cutoff (%d)\n", vbat_uv, batt_temp, chip->shutdown_vol_criteria); return 1; } else { pr_debug("not clamping, using soc = %d, vbat = %d and cutoff = %d\n", soc, vbat_uv, chip->shutdown_vol_criteria); return soc; } } static int64_t convert_cc_uah_to_raw(struct qpnp_bms_chip *chip, int64_t cc_uah) { int64_t cc_uv, cc_pvh, cc_raw; cc_pvh = cc_uah * chip->r_sense_uohm; cc_uv = div_s64(cc_pvh * SLEEP_CLK_HZ * SECONDS_PER_HOUR, CC_READING_TICKS * 1000000LL); cc_raw = div_s64(cc_uv * CC_READING_RESOLUTION_D, CC_READING_RESOLUTION_N); return cc_raw; } #define CC_STEP_INCREMENT_UAH 1500 #define OCV_STEP_INCREMENT 0x10 static void configure_soc_wakeup(struct qpnp_bms_chip *chip, struct soc_params *params, int batt_temp, int target_soc) { int target_ocv_uv; int64_t target_cc_uah, cc_raw_64, current_shdw_cc_raw_64; int64_t current_shdw_cc_uah, iadc_comp_factor; uint64_t cc_raw, current_shdw_cc_raw; int16_t ocv_raw, current_ocv_raw; current_shdw_cc_raw = 0; mutex_lock(&chip->bms_output_lock); lock_output_data(chip); qpnp_read_wrapper(chip, (u8 *)&current_ocv_raw, chip->base + BMS1_OCV_FOR_SOC_DATA0, 2); unlock_output_data(chip); mutex_unlock(&chip->bms_output_lock); current_shdw_cc_uah = get_prop_bms_charge_counter_shadow(chip); current_shdw_cc_raw_64 = convert_cc_uah_to_raw(chip, current_shdw_cc_uah); target_cc_uah = (100 - target_soc) * (params->fcc_uah - params->uuc_uah) / 100 - current_shdw_cc_uah; if (target_cc_uah < 0) { target_cc_uah = CC_STEP_INCREMENT_UAH; } iadc_comp_factor = 100000; qpnp_iadc_comp_result(chip->iadc_dev, &iadc_comp_factor); target_cc_uah = div64_s64(target_cc_uah * 100000, iadc_comp_factor); target_cc_uah = cc_reverse_adjust_for_gain(chip, target_cc_uah); cc_raw_64 = convert_cc_uah_to_raw(chip, target_cc_uah); cc_raw = convert_s64_to_s36(cc_raw_64); target_ocv_uv = find_ocv_for_pc(chip, batt_temp, find_pc_for_soc(chip, params, target_soc)); ocv_raw = 
convert_vbatt_uv_to_raw(chip, target_ocv_uv); if (current_ocv_raw != chip->ocv_reading_at_100 && current_ocv_raw < ocv_raw) ocv_raw = current_ocv_raw - OCV_STEP_INCREMENT; qpnp_write_wrapper(chip, (u8 *)&cc_raw, chip->base + BMS1_SW_CC_THR0, 5); qpnp_write_wrapper(chip, (u8 *)&ocv_raw, chip->base + BMS1_OCV_THR0, 2); pr_debug("current sw_cc_raw = 0x%llx, current ocv = 0x%hx\n", current_shdw_cc_raw, (uint16_t)current_ocv_raw); pr_debug("target_cc_uah = %lld, raw64 = 0x%llx, raw 36 = 0x%llx, ocv_raw = 0x%hx\n", target_cc_uah, (uint64_t)cc_raw_64, cc_raw, (uint16_t)ocv_raw); } static int calculate_raw_soc(struct qpnp_bms_chip *chip, struct raw_soc_params *raw, struct soc_params *params, int batt_temp) { int soc, remaining_usable_charge_uah; remaining_usable_charge_uah = params->ocv_charge_uah - params->cc_uah - params->uuc_uah; pr_debug("RUC = %duAh\n", remaining_usable_charge_uah); soc = DIV_ROUND_CLOSEST((remaining_usable_charge_uah * 100), (params->fcc_uah - params->uuc_uah)); bms_dbg.fcc_uah = params->fcc_uah; bms_dbg.uuc_uah = params->uuc_uah; bms_dbg.rc_uah = params->ocv_charge_uah; bms_dbg.ruc_uah = remaining_usable_charge_uah; bms_dbg.cc_uah = params->cc_uah; bms_dbg.raw_soc = soc; if (chip->first_time_calc_soc && soc < 0) { pr_info("soc is %d, adjusting pon ocv to make it 0\n", soc); chip->last_ocv_uv = find_ocv_for_pc(chip, batt_temp, find_pc_for_soc(chip, params, 0)); params->ocv_charge_uah = find_ocv_charge_for_soc(chip, params, 0); remaining_usable_charge_uah = params->ocv_charge_uah - params->cc_uah - params->uuc_uah; soc = DIV_ROUND_CLOSEST((remaining_usable_charge_uah * 100), (params->fcc_uah - params->uuc_uah)); pr_info("DONE for O soc is %d, pon ocv adjusted to %duV, " "ocv_charge_uah=%d\n", soc, chip->last_ocv_uv, params->ocv_charge_uah); } if (soc > 100) soc = 100; if (soc < 0) { pr_debug("bad rem_usb_chg = %d rem_chg %d, cc_uah %d, unusb_chg %d\n", remaining_usable_charge_uah, params->ocv_charge_uah, params->cc_uah, params->uuc_uah); pr_debug("for 
bad rem_usb_chg last_ocv_uv = %d batt_temp = %d fcc = %d soc =%d\n", chip->last_ocv_uv, batt_temp, params->fcc_uah, soc); soc = 0; } return soc; } #define SLEEP_RECALC_INTERVAL 3 static int calculate_state_of_charge(struct qpnp_bms_chip *chip, struct raw_soc_params *raw, int batt_temp) { struct soc_params params; int soc, previous_soc, shutdown_soc, new_calculated_soc; int remaining_usable_charge_uah; calculate_soc_params(chip, raw, &params, batt_temp); if (!is_battery_present(chip)) { pr_debug("battery gone, reporting 100\n"); new_calculated_soc = 100; goto done_calculating; } if (params.fcc_uah - params.uuc_uah <= 0) { pr_debug("FCC = %duAh, UUC = %duAh forcing soc = 0\n", params.fcc_uah, params.uuc_uah); new_calculated_soc = 0; goto done_calculating; } soc = calculate_raw_soc(chip, raw, &params, batt_temp); mutex_lock(&chip->soc_invalidation_mutex); shutdown_soc = chip->shutdown_soc; if (chip->first_time_calc_soc && soc != shutdown_soc && !chip->shutdown_soc_invalid) { pr_info("soc = %d before forcing shutdown_soc = %d\n", soc, shutdown_soc); chip->last_ocv_uv = find_ocv_for_pc(chip, batt_temp, find_pc_for_soc(chip, &params, shutdown_soc)); params.ocv_charge_uah = find_ocv_charge_for_soc(chip, &params, shutdown_soc); remaining_usable_charge_uah = params.ocv_charge_uah - params.cc_uah - params.uuc_uah; soc = DIV_ROUND_CLOSEST((remaining_usable_charge_uah * 100), (params.fcc_uah - params.uuc_uah)); pr_info("DONE for shutdown_soc = %d soc is %d, adjusted ocv to %duV\n", shutdown_soc, soc, chip->last_ocv_uv); } mutex_unlock(&chip->soc_invalidation_mutex); pr_debug("SOC before adjustment = %d\n", soc); #if !(defined(CONFIG_HTC_BATT_8960)) new_calculated_soc = adjust_soc(chip, &params, soc, batt_temp); #else new_calculated_soc = soc; #endif bms_dbg.adjusted_soc = new_calculated_soc; new_calculated_soc = clamp_soc_based_on_voltage(chip, new_calculated_soc); if (is_battery_full(chip)) configure_soc_wakeup(chip, &params, batt_temp, bound_soc(new_calculated_soc - 1)); 
done_calculating:
	/* publish the new SOC and related bookkeeping under last_soc_mutex */
	mutex_lock(&chip->last_soc_mutex);
	previous_soc = chip->calculated_soc;
	chip->calculated_soc = new_calculated_soc;
	pr_debug("CC based calculated SOC = %d\n", chip->calculated_soc);
	if (chip->last_soc_invalid) {
		chip->last_soc_invalid = false;
		chip->last_soc = -EINVAL;
	}
	/*
	 * If far more time elapsed than the normal recalculation period
	 * (e.g. after a long suspend), unbind last_soc so the reported
	 * value may jump to the freshly calculated one.
	 */
	if (params.delta_time_s * 1000 >
			chip->calculate_soc_ms * SLEEP_RECALC_INTERVAL
			&& !chip->first_time_calc_soc) {
		chip->last_soc_unbound = true;
		chip->last_soc_change_sec = chip->last_recalc_time;
		pr_debug("last_soc unbound because elapsed time = %d\n",
				params.delta_time_s);
	}
	mutex_unlock(&chip->last_soc_mutex);
	wake_up_interruptible(&chip->bms_wait_queue);

	if (new_calculated_soc != previous_soc && chip->bms_psy_registered) {
		power_supply_changed(&chip->bms_psy);
		pr_debug("power supply changed\n");
	} else {
		/* still refresh the reported value even if unchanged */
		report_state_of_charge(chip);
	}

	/* one-line diagnostic dump of every intermediate of this pass */
	pr_info("FCC=%d,UC=%d,RC=%d,CC_uAh/ori=%d/%d,RUC=%d,SOC=%d,raw_soc=%d,"
		"start_pc=%d,end_pc=%d,OCV_uV/ori=%d/%d,OCV_raw=%x,"
		"rbatt=%d,rbatt_sf=%d,batt_temp=%d,soc_rbatt=%d,"
		"ori_uuc_uah=%d,uuc_rbatt=%d,uuc_iavg_ma=%d,"
		"unusable_uv=%d,pc_unusable=%d,rc_pc=%d,sh_soc=%d,"
		"t_last_change_s=%d,adj_soc=%d,cal_soc=%d,cc_raw=%lld,"
		"shdw_cc_raw=%lld,ocv_at_100=%x,"
		"cc_backup=%d,ocv_backup=%d,consistent_flag=%d,is_ocv_update_start=%d,"
		"no_hw_ocv_ms=%ld\n",
		bms_dbg.fcc_uah, bms_dbg.uuc_uah, bms_dbg.rc_uah,
		bms_dbg.cc_uah, bms_dbg.ori_cc_uah, bms_dbg.ruc_uah, soc,
		bms_dbg.raw_soc, chip->start_soc, chip->end_soc,
		raw->last_good_ocv_uv, bms_dbg.last_ocv_raw_uv,
		raw->last_good_ocv_raw, bms_dbg.rbatt, bms_dbg.rbatt_sf,
		batt_temp, bms_dbg.soc_rbatt, bms_dbg.ori_uuc_uah,
		bms_dbg.uuc_rbatt_mohm, bms_dbg.uuc_iavg_ma,
		bms_dbg.unusable_uv, bms_dbg.pc_unusable, bms_dbg.rc_pc,
		bms_dbg.shutdown_soc, bms_dbg.time_last_change_s,
		bms_dbg.adjusted_soc, chip->calculated_soc, raw->cc,
		raw->shdw_cc, chip->ocv_reading_at_100,
		chip->cc_backup_uah, chip->ocv_backup_uv, consistent_flag,
		is_ocv_update_start,
		htc_batt_bms_timer.no_ocv_update_period_ms);
	get_current_time(&chip->last_recalc_time);
	chip->first_time_calc_soc = 0;
	chip->first_time_calc_uuc = 0;
	return chip->calculated_soc;
}

/*
 * Fallback SOC: linear interpolation of vbat between v_cutoff_uv (0%)
 * and max_voltage_uv (100%), clamped to [0, 100].  Returns a negative
 * errno if the voltage read fails.
 */
static int calculate_soc_from_voltage(struct qpnp_bms_chip *chip)
{
	int voltage_range_uv, voltage_remaining_uv, voltage_based_soc;
	int rc, vbat_uv;

	rc = get_battery_voltage(chip, &vbat_uv);
	if (rc < 0) {
		pr_err("adc vbat failed err = %d\n", rc);
		return rc;
	}
	voltage_range_uv = chip->max_voltage_uv - chip->v_cutoff_uv;
	voltage_remaining_uv = vbat_uv - chip->v_cutoff_uv;
	voltage_based_soc = voltage_remaining_uv * 100 / voltage_range_uv;

	voltage_based_soc = clamp(voltage_based_soc, 0, 100);

	if (chip->prev_voltage_based_soc != voltage_based_soc
				&& chip->bms_psy_registered) {
		power_supply_changed(&chip->bms_psy);
		pr_debug("power supply changed\n");
	}
	chip->prev_voltage_based_soc = voltage_based_soc;

	pr_debug("vbat used = %duv\n", vbat_uv);
	pr_debug("Calculated voltage based soc = %d\n", voltage_based_soc);
	return voltage_based_soc;
}

/*
 * Recompute the *raw* SOC (no shutdown forcing / adjustment / publishing).
 * Holds a wake source for the duration; reads battery temperature and the
 * raw BMS parameters under last_ocv_uv_mutex.
 */
static int recalculate_raw_soc(struct qpnp_bms_chip *chip)
{
	int batt_temp, rc, soc;
	struct qpnp_vadc_result result;
	struct raw_soc_params raw;
	struct soc_params params;

	bms_stay_awake(&chip->soc_wake_source);
	if (chip->use_voltage_soc) {
		soc = calculate_soc_from_voltage(chip);
	} else {
		/* recalibrate IADC only while the batfet is open */
		if (!chip->batfet_closed)
			qpnp_iadc_calibrate_for_trim(chip->iadc_dev, true);
		rc = qpnp_vadc_read(chip->vadc_dev, LR_MUX1_BATT_THERM,
								&result);
		if (rc) {
			pr_err("error reading vadc LR_MUX1_BATT_THERM = %d, rc = %d\n",
					LR_MUX1_BATT_THERM, rc);
			/* keep the last known SOC on ADC failure */
			soc = chip->calculated_soc;
		} else {
			pr_debug("batt_temp phy = %lld meas = 0x%llx\n",
					result.physical, result.measurement);
			batt_temp = (int)result.physical;

			mutex_lock(&chip->last_ocv_uv_mutex);
			read_soc_params_raw(chip, &raw, batt_temp);
			calculate_soc_params(chip, &raw, &params, batt_temp);
			if (!is_battery_present(chip)) {
				pr_debug("battery gone\n");
				soc = 0;
			} else if (params.fcc_uah - params.uuc_uah <= 0) {
				pr_debug("FCC = %duAh, UUC = %duAh forcing soc = 0\n",
							params.fcc_uah,
							params.uuc_uah);
				soc = 0;
			} else {
				soc = calculate_raw_soc(chip, &raw,
							&params, batt_temp);
			}
			mutex_unlock(&chip->last_ocv_uv_mutex);
		}
	}
	bms_relax(&chip->soc_wake_source);
	return soc;
}

/*
 * Full SOC recalculation and publication; also re-arms the vbat ADC-TM
 * monitor if it is enabled.  Wake source held for the duration.
 */
static int recalculate_soc(struct qpnp_bms_chip *chip)
{
	int batt_temp, rc, soc;
	struct qpnp_vadc_result result;
	struct raw_soc_params raw;

	bms_stay_awake(&chip->soc_wake_source);
	mutex_lock(&chip->vbat_monitor_mutex);
	if (chip->vbat_monitor_params.state_request
			!= ADC_TM_HIGH_LOW_THR_DISABLE)
		qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
				&chip->vbat_monitor_params);
	mutex_unlock(&chip->vbat_monitor_mutex);
	if (chip->use_voltage_soc) {
		soc = calculate_soc_from_voltage(chip);
	} else {
		if (!chip->batfet_closed)
			qpnp_iadc_calibrate_for_trim(chip->iadc_dev, true);
		rc = qpnp_vadc_read(chip->vadc_dev, LR_MUX1_BATT_THERM,
								&result);
		if (rc) {
			pr_err("error reading vadc LR_MUX1_BATT_THERM = %d, rc = %d\n",
					LR_MUX1_BATT_THERM, rc);
			soc = chip->calculated_soc;
		} else {
			pr_debug("batt_temp phy = %lld meas = 0x%llx\n",
					result.physical, result.measurement);
			batt_temp = (int)result.physical;

			mutex_lock(&chip->last_ocv_uv_mutex);
			read_soc_params_raw(chip, &raw, batt_temp);
			soc = calculate_state_of_charge(chip, &raw,
					batt_temp);
			mutex_unlock(&chip->last_ocv_uv_mutex);
		}
	}
	bms_relax(&chip->soc_wake_source);
	return soc;
}

/* workqueue entry: one-shot SOC recalculation */
static void recalculate_work(struct work_struct *work)
{
	struct qpnp_bms_chip *chip = container_of(work,
				struct qpnp_bms_chip,
				recalc_work);

	recalculate_soc(chip);
}

/*
 * Period for the next periodic SOC calculation; non-HTC builds shorten
 * it at low voltage or low SOC.
 */
static int get_calculation_delay_ms(struct qpnp_bms_chip *chip)
{
#if !(defined(CONFIG_HTC_BATT_8960))
	if (wake_lock_active(&chip->low_voltage_wake_lock))
		return chip->low_voltage_calculate_soc_ms;
	else if (chip->calculated_soc < chip->low_soc_calc_threshold)
		return chip->low_soc_calculate_soc_ms;
	else
#endif
		return chip->calculate_soc_ms;
}

/* delayed-work entry: recalculate SOC, then reschedule itself */
static void calculate_soc_work(struct work_struct *work)
{
	struct qpnp_bms_chip *chip = container_of(work,
				struct qpnp_bms_chip,
				calculate_soc_delayed_work.work);
recalculate_soc(chip);
	/* self-rescheduling periodic SOC calculation */
	schedule_delayed_work(&chip->calculate_soc_delayed_work,
		round_jiffies_relative(msecs_to_jiffies
		(get_calculation_delay_ms(chip))));
}

#define VBATT_ERROR_MARGIN	20000
#if !(defined(CONFIG_HTC_BATT_8960))
/*
 * ADC-TM state transition when vbat crossed the LOW threshold: either the
 * battery entered the cutoff range (arm only the recovery high threshold)
 * or it returned to the normal window from the CV range.  Re-arms the
 * channel in all cases.
 */
static void configure_vbat_monitor_low(struct qpnp_bms_chip *chip)
{
	mutex_lock(&chip->vbat_monitor_mutex);
	if (chip->vbat_monitor_params.state_request
			== ADC_TM_HIGH_LOW_THR_ENABLE) {
		pr_debug("battery entered cutoff range\n");
#if !(defined(CONFIG_HTC_BATT_8960))
		if (!wake_lock_active(&chip->low_voltage_wake_lock)) {
			pr_debug("voltage low, holding wakelock\n");
			wake_lock(&chip->low_voltage_wake_lock);
			/* restart the SOC worker immediately */
			cancel_delayed_work_sync(
					&chip->calculate_soc_delayed_work);
			schedule_delayed_work(
					&chip->calculate_soc_delayed_work, 0);
		}
#endif
		chip->vbat_monitor_params.state_request =
					ADC_TM_HIGH_THR_ENABLE;
		chip->vbat_monitor_params.high_thr =
			(chip->low_voltage_threshold + VBATT_ERROR_MARGIN);
		/*
		 * NOTE(review): this debug print runs before low_thr is
		 * zeroed below, so it shows the previous low_thr — confirm
		 * whether the ordering is intentional.
		 */
		pr_debug("set low thr to %d and high to %d\n",
				chip->vbat_monitor_params.low_thr,
				chip->vbat_monitor_params.high_thr);
		chip->vbat_monitor_params.low_thr = 0;
	} else if (chip->vbat_monitor_params.state_request
			== ADC_TM_LOW_THR_ENABLE) {
#if !(defined(CONFIG_HTC_BATT_8960))
		pr_debug("battery entered normal range\n");
		if (wake_lock_active(&chip->cv_wake_lock)) {
			wake_unlock(&chip->cv_wake_lock);
			pr_debug("releasing cv wake lock\n");
		}
#endif
		chip->in_cv_range = false;
		chip->vbat_monitor_params.state_request =
					ADC_TM_HIGH_LOW_THR_ENABLE;
		chip->vbat_monitor_params.high_thr = chip->max_voltage_uv
				- VBATT_ERROR_MARGIN;
		chip->vbat_monitor_params.low_thr =
				chip->low_voltage_threshold;
		pr_debug("set low thr to %d and high to %d\n",
				chip->vbat_monitor_params.low_thr,
				chip->vbat_monitor_params.high_thr);
	}
	qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
					&chip->vbat_monitor_params);
	mutex_unlock(&chip->vbat_monitor_mutex);
}

#define CV_LOW_THRESHOLD_HYST_UV	100000
/*
 * ADC-TM state transition when vbat crossed the HIGH threshold: either
 * the battery reached the vddmax (CV) range, or it dropped back to the
 * normal window from the cutoff range.  Re-arms the channel.
 */
static void configure_vbat_monitor_high(struct qpnp_bms_chip *chip)
{
	mutex_lock(&chip->vbat_monitor_mutex);
	if (chip->vbat_monitor_params.state_request
			== ADC_TM_HIGH_LOW_THR_ENABLE) {
		pr_debug("battery entered vddmax range\n");
		chip->in_cv_range = true;
#if !(defined(CONFIG_HTC_BATT_8960))
		if (!wake_lock_active(&chip->cv_wake_lock)) {
			wake_lock(&chip->cv_wake_lock);
			pr_debug("holding cv wake lock\n");
		}
#endif
		schedule_work(&chip->recalc_work);
		chip->vbat_monitor_params.state_request =
					ADC_TM_LOW_THR_ENABLE;
		chip->vbat_monitor_params.low_thr =
			(chip->max_voltage_uv - CV_LOW_THRESHOLD_HYST_UV);
		/* high threshold parked out of reach while in CV */
		chip->vbat_monitor_params.high_thr = chip->max_voltage_uv * 2;
		pr_debug("set low thr to %d and high to %d\n",
				chip->vbat_monitor_params.low_thr,
				chip->vbat_monitor_params.high_thr);
	} else if (chip->vbat_monitor_params.state_request
			== ADC_TM_HIGH_THR_ENABLE) {
#if !(defined(CONFIG_HTC_BATT_8960))
		pr_debug("battery entered normal range\n");
		if (wake_lock_active(&chip->low_voltage_wake_lock)) {
			pr_debug("voltage high, releasing wakelock\n");
			wake_unlock(&chip->low_voltage_wake_lock);
		}
#endif
		chip->vbat_monitor_params.state_request =
					ADC_TM_HIGH_LOW_THR_ENABLE;
		chip->vbat_monitor_params.high_thr = chip->max_voltage_uv
				- VBATT_ERROR_MARGIN;
		chip->vbat_monitor_params.low_thr =
				chip->low_voltage_threshold;
		pr_debug("set low thr to %d and high to %d\n",
				chip->vbat_monitor_params.low_thr,
				chip->vbat_monitor_params.high_thr);
	}
	qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
					&chip->vbat_monitor_params);
	mutex_unlock(&chip->vbat_monitor_mutex);
}

/*
 * ADC-TM threshold-notification callback.  Cross-checks the reported
 * state against a fresh vbat reading (with VBATT_ERROR_MARGIN slack) to
 * filter spurious triggers before reconfiguring the monitor.
 */
static void btm_notify_vbat(enum qpnp_tm_state state, void *ctx)
{
	struct qpnp_bms_chip *chip = ctx;
	int vbat_uv;
	struct qpnp_vadc_result result;
	int rc;

	rc = qpnp_vadc_read(chip->vadc_dev, VBAT_SNS, &result);
	pr_debug("vbat = %lld, raw = 0x%x\n",
			result.physical, result.adc_code);

	get_battery_voltage(chip, &vbat_uv);
	pr_debug("vbat is at %d, state is at %d\n", vbat_uv, state);

	if (state == ADC_TM_LOW_STATE) {
		pr_debug("low voltage btm notification triggered\n");
		if (vbat_uv - VBATT_ERROR_MARGIN
				< chip->vbat_monitor_params.low_thr) {
			configure_vbat_monitor_low(chip);
		} else {
			pr_debug("faulty btm trigger, discarding\n");
			qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
					&chip->vbat_monitor_params);
		}
	} else if (state == ADC_TM_HIGH_STATE) {
		pr_debug("high voltage btm notification triggered\n");
		if (vbat_uv + VBATT_ERROR_MARGIN
				> chip->vbat_monitor_params.high_thr) {
			configure_vbat_monitor_high(chip);
		} else {
			pr_debug("faulty btm trigger, discarding\n");
			qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
					&chip->vbat_monitor_params);
		}
	} else {
		pr_debug("unknown voltage notification state: %d\n", state);
	}
	if (chip->bms_psy_registered)
		power_supply_changed(&chip->bms_psy);
}
#endif

/*
 * Disable vbat monitoring (battery removed): request THR_DISABLE, release
 * the low-voltage wakelock and clear the CV-range flag.
 */
static int reset_vbat_monitoring(struct qpnp_bms_chip *chip)
{
	int rc;

	chip->vbat_monitor_params.state_request = ADC_TM_HIGH_LOW_THR_DISABLE;
	rc = qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
			&chip->vbat_monitor_params);
	if (rc) {
		pr_err("tm disable failed: %d\n", rc);
		return rc;
	}
#if !(defined(CONFIG_HTC_BATT_8960))
	if (wake_lock_active(&chip->low_voltage_wake_lock)) {
		pr_debug("battery removed, releasing wakelock\n");
		wake_unlock(&chip->low_voltage_wake_lock);
	}
#endif
	if (chip->in_cv_range) {
		pr_debug("battery removed, removing in_cv_range state\n");
		chip->in_cv_range = false;
	}
	return 0;
}

#if !(defined(CONFIG_HTC_BATT_8960))
/*
 * Initial configuration of the VBAT_SNS ADC-TM channel: normal-window
 * thresholds, 1s measurement interval, btm_notify_vbat() callback.
 * Monitoring is left disabled if no battery is present.
 */
static int setup_vbat_monitoring(struct qpnp_bms_chip *chip)
{
	int rc;

	chip->vbat_monitor_params.low_thr = chip->low_voltage_threshold;
	chip->vbat_monitor_params.high_thr = chip->max_voltage_uv
			- VBATT_ERROR_MARGIN;
	chip->vbat_monitor_params.state_request = ADC_TM_HIGH_LOW_THR_ENABLE;
	chip->vbat_monitor_params.channel = VBAT_SNS;
	chip->vbat_monitor_params.btm_ctx = (void *)chip;
	chip->vbat_monitor_params.timer_interval = ADC_MEAS1_INTERVAL_1S;
	chip->vbat_monitor_params.threshold_notification = &btm_notify_vbat;
	pr_debug("set low thr to %d and high to %d\n",
			chip->vbat_monitor_params.low_thr,
			chip->vbat_monitor_params.high_thr);

	if (!is_battery_present(chip)) {
		pr_debug("no battery inserted, do not enable vbat monitoring\n");
		chip->vbat_monitor_params.state_request =
			ADC_TM_HIGH_LOW_THR_DISABLE;
	} else {
		rc = qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
				&chip->vbat_monitor_params);
		if (rc) {
			pr_err("tm setup failed: %d\n", rc);
			return rc;
		}
	}

	pr_debug("setup complete\n");
	return 0;
}
#endif

/*
 * Rebuild the adjusted FCC-vs-temperature LUT by scaling the static LUT
 * so that its value at fcc_new_batt_temp equals the learned fcc_new_mah.
 * The old adjusted table is freed after the swap.
 */
static void readjust_fcc_table(struct qpnp_bms_chip *chip)
{
	struct single_row_lut *temp, *old;
	int i, fcc, ratio;

	if (!chip->enable_fcc_learning)
		return;

	if (!chip->fcc_temp_lut) {
		pr_err("The static fcc lut table is NULL\n");
		return;
	}

	temp = devm_kzalloc(chip->dev, sizeof(struct single_row_lut),
			GFP_KERNEL);
	if (!temp) {
		pr_err("Cannot allocate memory for adjusted fcc table\n");
		return;
	}

	fcc = interpolate_fcc(chip->fcc_temp_lut, chip->fcc_new_batt_temp);

	temp->cols = chip->fcc_temp_lut->cols;
	for (i = 0; i < chip->fcc_temp_lut->cols; i++) {
		temp->x[i] = chip->fcc_temp_lut->x[i];
		/* per-mille ratio keeps integer math precise */
		ratio = div_u64(chip->fcc_temp_lut->y[i] * 1000, fcc);
		temp->y[i] = (ratio * chip->fcc_new_mah);
		temp->y[i] /= 1000;
	}

	old = chip->adjusted_fcc_temp_lut;
	chip->adjusted_fcc_temp_lut = temp;
	devm_kfree(chip->dev, old);
}

/*
 * Load learned FCC samples (and matching charge-cycle counts) from the
 * PMIC backup registers.  0x00 or an all-0xFF pair marks an empty slot;
 * valid slots increment fcc_sample_count.
 */
static int read_fcc_data_from_backup(struct qpnp_bms_chip *chip)
{
	int rc, i;
	u8 fcc = 0, chgcyl = 0;

	for (i = 0; i < chip->min_fcc_learning_samples; i++) {
		rc = qpnp_read_wrapper(chip, &fcc,
			chip->base + BMS_FCC_BASE_REG + i, 1);
		rc |= qpnp_read_wrapper(chip, &chgcyl,
			chip->base + BMS_CHGCYL_BASE_REG + i, 1);
		if (rc) {
			pr_err("Unable to read FCC data\n");
			return rc;
		}
		if (fcc == 0 || (fcc == 0xFF && chgcyl == 0xFF)) {
			chip->fcc_learning_samples[i].fcc_new = 0;
			chip->fcc_learning_samples[i].chargecycles = 0;
		} else {
			chip->fcc_sample_count++;
			chip->fcc_learning_samples[i].fcc_new =
						fcc * chip->fcc_resolution;
			chip->fcc_learning_samples[i].chargecycles =
						chgcyl * CHGCYL_RESOLUTION;
		}
	}

	return 0;
}

/*
 * Zero every backed-up FCC/charge-cycle slot in the PMIC and reset the
 * in-memory sample count.
 */
static int discard_backup_fcc_data(struct qpnp_bms_chip *chip)
{
	int rc = 0, i;
	u8 temp_u8 = 0;

	chip->fcc_sample_count = 0;
	for (i = 0; i < chip->min_fcc_learning_samples; i++) {
		rc =
qpnp_write_wrapper(chip, &temp_u8,
			chip->base + BMS_FCC_BASE_REG + i, 1);
		rc |= qpnp_write_wrapper(chip, &temp_u8,
			chip->base + BMS_CHGCYL_BASE_REG + i, 1);
		if (rc) {
			pr_err("Unable to clear FCC data\n");
			return rc;
		}
	}
	return 0;
}

/*
 * Average the collected FCC samples, replacing outliers (further than
 * DELTA_FCC_PERCENT from the mean) by the mean itself, then commit the
 * result as the new learned FCC and rebuild the adjusted FCC table.
 */
static void
average_fcc_samples_and_readjust_fcc_table(struct qpnp_bms_chip *chip)
{
	int i, temp_fcc_avg = 0, temp_fcc_delta = 0, new_fcc_avg = 0;
	struct fcc_sample *ft;

	for (i = 0; i < chip->min_fcc_learning_samples; i++)
		temp_fcc_avg += chip->fcc_learning_samples[i].fcc_new;

	temp_fcc_avg /= chip->min_fcc_learning_samples;
	temp_fcc_delta = div_u64(temp_fcc_avg * DELTA_FCC_PERCENT, 100);

	for (i = 0; i < chip->min_fcc_learning_samples; i++) {
		ft = &chip->fcc_learning_samples[i];
		if (abs(ft->fcc_new - temp_fcc_avg) > temp_fcc_delta)
			new_fcc_avg += temp_fcc_avg;
		else
			new_fcc_avg += ft->fcc_new;
	}
	new_fcc_avg /= chip->min_fcc_learning_samples;

	chip->fcc_new_mah = new_fcc_avg;
	chip->fcc_new_batt_temp = FCC_DEFAULT_TEMP;
	pr_info("FCC update: New fcc_mah=%d, fcc_batt_temp=%d\n",
					new_fcc_avg, FCC_DEFAULT_TEMP);
	readjust_fcc_table(chip);
}

/*
 * Persist the charge-increase and charge-cycle counters to their PMIC
 * backup registers; errors are logged but not propagated.
 */
static void backup_charge_cycle(struct qpnp_bms_chip *chip)
{
	int rc = 0;

	if (chip->charge_increase >= 0) {
		rc = qpnp_write_wrapper(chip, &chip->charge_increase,
			chip->base + CHARGE_INCREASE_STORAGE, 1);
		if (rc)
			pr_err("Unable to backup charge_increase\n");
	}

	if (chip->charge_cycles >= 0) {
		rc = qpnp_write_wrapper(chip, (u8 *)&chip->charge_cycles,
				chip->base + CHARGE_CYCLE_STORAGE_LSB, 2);
		if (rc)
			pr_err("Unable to backup charge_cycles\n");
	}
}

/*
 * True if the min/max charge-cycle counts across all FCC samples fall
 * within VALID_FCC_CHGCYL_RANGE (rounded up to a CHGCYL_RESOLUTION
 * multiple) — i.e. the samples are recent enough to average together.
 */
static bool chargecycles_in_range(struct qpnp_bms_chip *chip)
{
	int i, min_cycle, max_cycle, valid_range;

	max_cycle = min_cycle = chip->fcc_learning_samples[0].chargecycles;
	for (i = 1; i < chip->min_fcc_learning_samples; i++) {
		if (min_cycle > chip->fcc_learning_samples[i].chargecycles)
			min_cycle = chip->fcc_learning_samples[i].chargecycles;
		if (max_cycle < chip->fcc_learning_samples[i].chargecycles)
			max_cycle = chip->fcc_learning_samples[i].chargecycles;
	}

	valid_range = DIV_ROUND_UP(VALID_FCC_CHGCYL_RANGE,
					CHGCYL_RESOLUTION) * CHGCYL_RESOLUTION;
	if (abs(max_cycle - min_cycle) > valid_range)
		return false;

	return true;
}

/*
 * Restore charge_increase / charge_cycles from the PMIC backup registers;
 * all-ones values mean "never written" and leave the defaults in place.
 */
static int read_chgcycle_data_from_backup(struct qpnp_bms_chip *chip)
{
	int rc;
	uint16_t temp_u16 = 0;
	u8 temp_u8 = 0;

	rc = qpnp_read_wrapper(chip, &temp_u8,
				chip->base + CHARGE_INCREASE_STORAGE, 1);
	if (!rc && temp_u8 != 0xFF)
		chip->charge_increase = temp_u8;

	rc = qpnp_read_wrapper(chip, (u8 *)&temp_u16,
				chip->base + CHARGE_CYCLE_STORAGE_LSB, 2);
	if (!rc && temp_u16 != 0xFFFF)
		chip->charge_cycles = temp_u16;

	return rc;
}

/*
 * Once a full set of samples with consistent charge cycles is available,
 * fold them into a new learned FCC.
 */
static void attempt_learning_new_fcc(struct qpnp_bms_chip *chip)
{
	pr_debug("Total FCC sample count=%d\n", chip->fcc_sample_count);

	if ((chip->fcc_sample_count == chip->min_fcc_learning_samples) &&
						chargecycles_in_range(chip))
		average_fcc_samples_and_readjust_fcc_table(chip);
}

/*
 * SOC from first principles at the given temperature: (OCV-derived
 * charge minus CC drain) as a percentage of FCC.  Callers supply cc_uah
 * already in uAh.
 */
static int calculate_real_soc(struct qpnp_bms_chip *chip,
		int batt_temp, struct raw_soc_params *raw, int cc_uah)
{
	int fcc_uah, rc_uah;

	fcc_uah = calculate_fcc(chip, batt_temp);
	rc_uah = calculate_ocv_charge(chip, raw, fcc_uah);

	return ((rc_uah - cc_uah) * 100) / fcc_uah;
}

#define MAX_U8_VALUE		((u8)(~0U))

/*
 * Store a newly-learned FCC sample (mAh) and its charge-cycle count both
 * in RAM and in the PMIC backup registers.  When the sample array is
 * full, the slot with the oldest (smallest) charge-cycle count is
 * overwritten.  Values that exceed the u8 storage encoding are rejected
 * with -EINVAL.
 */
static int backup_new_fcc(struct qpnp_bms_chip *chip, int fcc_mah,
		int chargecycles)
{
	int rc, min_cycle, i;
	u8 fcc_new, chgcyl, pos = 0;
	struct fcc_sample *ft;

	if ((fcc_mah > (chip->fcc_resolution * MAX_U8_VALUE)) ||
		(chargecycles > (CHGCYL_RESOLUTION * MAX_U8_VALUE))) {
		pr_warn("FCC/Chgcyl beyond storage limit. FCC=%d, chgcyl=%d\n",
				fcc_mah, chargecycles);
		return -EINVAL;
	}

	if (chip->fcc_sample_count == chip->min_fcc_learning_samples) {
		/* array full: replace the entry with the lowest chargecycles */
		min_cycle = chip->fcc_learning_samples[0].chargecycles;
		for (i = 1; i < chip->min_fcc_learning_samples; i++) {
			if (min_cycle >
				chip->fcc_learning_samples[i].chargecycles)
				pos = i;
		}
	} else {
		/* find the first empty slot */
		for (i = 0; i < chip->min_fcc_learning_samples; i++) {
			ft = &chip->fcc_learning_samples[i];
			if (ft->fcc_new == 0 || (ft->fcc_new == 0xFF &&
						ft->chargecycles == 0xFF)) {
				pos = i;
				break;
			}
		}
		chip->fcc_sample_count++;
	}
	chip->fcc_learning_samples[pos].fcc_new = fcc_mah;
	chip->fcc_learning_samples[pos].chargecycles = chargecycles;

	fcc_new = DIV_ROUND_UP(fcc_mah, chip->fcc_resolution);
	rc = qpnp_write_wrapper(chip, (u8 *)&fcc_new,
			chip->base + BMS_FCC_BASE_REG + pos, 1);
	if (rc)
		return rc;

	chgcyl = DIV_ROUND_UP(chargecycles, CHGCYL_RESOLUTION);
	rc = qpnp_write_wrapper(chip, (u8 *)&chgcyl,
			chip->base + BMS_CHGCYL_BASE_REG + pos, 1);
	if (rc)
		return rc;

	pr_debug("Backup new FCC: fcc_new=%d, chargecycle=%d, pos=%d\n",
			fcc_new, chgcyl, pos);

	return rc;
}

/*
 * Normalize a measured FCC (at batt_temp) to FCC_DEFAULT_TEMP, back it
 * up, and attempt to commit a new learned FCC.
 */
static void update_fcc_learning_table(struct qpnp_bms_chip *chip,
		int new_fcc_uah, int chargecycles, int batt_temp)
{
	int rc, fcc_default, fcc_temp;

	fcc_default = calculate_fcc(chip, FCC_DEFAULT_TEMP) / 1000;
	fcc_temp = calculate_fcc(chip, batt_temp) / 1000;
	new_fcc_uah = (new_fcc_uah / fcc_temp) * fcc_default;

	rc = backup_new_fcc(chip, new_fcc_uah / 1000, chargecycles);
	if (rc) {
		pr_err("Unable to backup new FCC\n");
		return;
	}
	attempt_learning_new_fcc(chip);
}

/*
 * Sanity window for a learned FCC: accept between 50% and 105% of the
 * table FCC.
 */
static bool is_new_fcc_valid(int new_fcc_uah, int fcc_uah)
{
	if ((new_fcc_uah >= (fcc_uah / 2)) &&
		((new_fcc_uah * 100) <= (fcc_uah * 105)))
		return true;

	pr_debug("FCC rejected - not within valid limit\n");
	return false;
}

/*
 * FCC learning bracket.  start=true snapshots pc/CC/SOC at the beginning
 * of a charge cycle; start=false (at full charge) derives a new FCC from
 * the coulombs delivered over the remaining SOC span and, if plausible,
 * feeds it into the learning table.
 */
static void fcc_learning_config(struct qpnp_bms_chip *chip, bool start)
{
	int rc, batt_temp;
	struct raw_soc_params raw;
	struct qpnp_vadc_result result;
	int fcc_uah, new_fcc_uah, delta_cc_uah, delta_soc;

	rc = qpnp_vadc_read(chip->vadc_dev, LR_MUX1_BATT_THERM, &result);
	if (rc) {
		pr_err("Unable to read batt_temp\n");
		return;
	} else {
		batt_temp = (int)result.physical;
	}

	rc = read_soc_params_raw(chip, &raw, batt_temp);
	if (rc) {
		pr_err("Unable to read CC, cannot update FCC\n");
		return;
	}

	if (start) {
		chip->start_pc = interpolate_pc(chip->pc_temp_ocv_lut,
			batt_temp / 10, raw.last_good_ocv_uv / 1000);
		chip->start_cc_uah = calculate_cc(chip, raw.cc, CC, NORESET);
		chip->start_real_soc = calculate_real_soc(chip,
				batt_temp, &raw, chip->start_cc_uah);
		pr_debug("start_pc=%d, start_cc=%d, start_soc=%d real_soc=%d\n",
			chip->start_pc, chip->start_cc_uah,
			chip->start_soc, chip->start_real_soc);
	} else {
		chip->end_cc_uah = calculate_cc(chip, raw.cc, CC, NORESET);
		delta_soc = 100 - chip->start_real_soc;
		delta_cc_uah = abs(chip->end_cc_uah - chip->start_cc_uah);
		/* extrapolate the delivered charge to the full 100% span */
		new_fcc_uah = div_u64(delta_cc_uah * 100, delta_soc);
		fcc_uah = calculate_fcc(chip, batt_temp);
		pr_debug("start_soc=%d, start_pc=%d, start_real_soc=%d, start_cc=%d, end_cc=%d, new_fcc=%d\n",
			chip->start_soc, chip->start_pc, chip->start_real_soc,
			chip->start_cc_uah, chip->end_cc_uah, new_fcc_uah);

		if (is_new_fcc_valid(new_fcc_uah, fcc_uah))
			update_fcc_learning_table(chip, new_fcc_uah,
					chip->charge_cycles, batt_temp);
	}
}

#define MAX_CAL_TRIES	200
#define MIN_CAL_UA	3000
/*
 * With the batfet open (no battery current should flow) repeatedly
 * recalibrate the IADC until the measured current is within MIN_CAL_UA
 * of zero, or the batfet closes, or MAX_CAL_TRIES is reached.  The S1
 * sample delay is temporarily zeroed for the calibration and restored
 * at the end (see the tail of this function below).
 */
static void batfet_open_work(struct work_struct *work)
{
	int i;
	int rc;
	int result_ua;
	u8 orig_delay, sample_delay;
	struct qpnp_bms_chip *chip = container_of(work,
				struct qpnp_bms_chip,
				batfet_open_work);

	rc = qpnp_read_wrapper(chip, &orig_delay,
			chip->base + BMS1_S1_DELAY_CTL, 1);

	sample_delay = 0x0;
	rc = qpnp_write_wrapper(chip, &sample_delay,
			chip->base + BMS1_S1_DELAY_CTL, 1);

	for (i = 0; (!chip->batfet_closed) && i < MAX_CAL_TRIES; i++) {
		rc = qpnp_iadc_calibrate_for_trim(chip->iadc_dev, false);
		msleep(20);
		rc |= get_battery_current(chip, &result_ua);
		if (rc == 0 && abs(result_ua) <= MIN_CAL_UA) {
			pr_debug("good cal at %d attempt\n", i);
			break;
		}
	}
pr_debug("batfet_closed = %d i = %d result_ua = %d\n",
			chip->batfet_closed, i, result_ua);

	/* restore the original S1 sample delay saved above */
	rc = qpnp_write_wrapper(chip, &orig_delay,
			chip->base + BMS1_S1_DELAY_CTL, 1);
}

/*
 * Charging-start hook: reset catch-up timing, record the starting SOC,
 * begin FCC learning if enabled, and clear the CV-phase latches.
 */
static void charging_began(struct qpnp_bms_chip *chip)
{
	mutex_lock(&chip->last_soc_mutex);
	chip->charge_start_tm_sec = 0;
	chip->catch_up_time_sec = 0;
	mutex_unlock(&chip->last_soc_mutex);

	chip->start_soc = report_state_of_charge(chip);

	mutex_lock(&chip->last_ocv_uv_mutex);
	if (chip->enable_fcc_learning)
		fcc_learning_config(chip, true);
	chip->soc_at_cv = -EINVAL;
	chip->prev_chg_soc = -EINVAL;
	mutex_unlock(&chip->last_ocv_uv_mutex);
}

/*
 * Charging-stop hook: update charge-cycle accounting from the SOC gained,
 * finish FCC learning if the battery reached full, otherwise adopt the
 * charging-adjusted OCV as the new last_ocv_uv.
 */
static void charging_ended(struct qpnp_bms_chip *chip)
{
	mutex_lock(&chip->last_soc_mutex);
	chip->charge_start_tm_sec = 0;
	chip->catch_up_time_sec = 0;
	mutex_unlock(&chip->last_soc_mutex);

	chip->end_soc = report_state_of_charge(chip);

	mutex_lock(&chip->last_ocv_uv_mutex);
	chip->soc_at_cv = -EINVAL;
	chip->prev_chg_soc = -EINVAL;

	if (chip->end_soc > chip->start_soc) {
		/* every accumulated 100% of SOC gained = one charge cycle */
		chip->charge_increase += (chip->end_soc - chip->start_soc);
		if (chip->charge_increase > 100) {
			chip->charge_cycles++;
			chip->charge_increase = chip->charge_increase % 100;
		}
		if (chip->enable_fcc_learning)
			backup_charge_cycle(chip);
	}

	if (get_battery_status(chip) == POWER_SUPPLY_STATUS_FULL) {
		/* learn FCC only for cycles that started low enough */
		if (chip->enable_fcc_learning &&
			(chip->start_soc <= chip->min_fcc_learning_soc) &&
			(chip->start_pc <= chip->min_fcc_ocv_pc))
			fcc_learning_config(chip, false);
		chip->done_charging = true;
		chip->last_soc_invalid = true;
	} else if (chip->charging_adjusted_ocv > 0) {
		pr_debug("Charging stopped before full, adjusted OCV = %d\n",
				chip->charging_adjusted_ocv);
		chip->last_ocv_uv = chip->charging_adjusted_ocv;
	}

	chip->charging_adjusted_ocv = -EINVAL;
	mutex_unlock(&chip->last_ocv_uv_mutex);
}

/*
 * Detect charging state transitions against the shadowed battery_status,
 * invoke the began/ended hooks, gate the OCV-threshold IRQ on FULL, and
 * queue a recalculation.  Serialized by status_lock.
 */
static void battery_status_check(struct qpnp_bms_chip *chip)
{
	int status = get_battery_status(chip);

	mutex_lock(&chip->status_lock);
	if (chip->battery_status != status) {
		pr_debug("status = %d, shadow status = %d\n",
				status, chip->battery_status);
		if (status == POWER_SUPPLY_STATUS_CHARGING) {
			pr_debug("charging started\n");
			charging_began(chip);
		} else if (chip->battery_status
				== POWER_SUPPLY_STATUS_CHARGING) {
			pr_debug("charging ended\n");
			charging_ended(chip);
		}

		if (status == POWER_SUPPLY_STATUS_FULL) {
			pr_debug("battery full\n");
			enable_bms_irq(&chip->ocv_thr_irq);
			recalculate_soc(chip);
		} else if (chip->battery_status
				== POWER_SUPPLY_STATUS_FULL) {
			pr_debug("battery not full any more\n");
			disable_bms_irq(&chip->ocv_thr_irq);
		}

		chip->battery_status = status;
		schedule_work(&chip->recalc_work);
	}
	mutex_unlock(&chip->status_lock);
}

#define CALIB_WRKARND_DIG_MAJOR_MAX	0x03
/*
 * IADC calibration workaround for early silicon revisions: when the
 * batfet opens, skip calibration and run batfet_open_work instead; when
 * it closes, recalibrate and resume normal calibration.
 */
static void batfet_status_check(struct qpnp_bms_chip *chip)
{
	bool batfet_closed;

	if (chip->iadc_bms_revision2 > CALIB_WRKARND_DIG_MAJOR_MAX)
		return;

	batfet_closed = is_batfet_closed(chip);
	if (chip->batfet_closed != batfet_closed) {
		chip->batfet_closed = batfet_closed;
		if (batfet_closed == false) {
			schedule_work(&chip->batfet_open_work);
			qpnp_iadc_skip_calibration(chip->iadc_dev);
		} else {
			qpnp_iadc_calibrate_for_trim(chip->iadc_dev, true);
			qpnp_iadc_resume_calibration(chip->iadc_dev);
		}
	}
}

/*
 * Handle battery insertion/removal: on insertion record the insertion
 * OCV and (re)start vbat monitoring, on removal disable monitoring; then
 * queue a SOC recalculation.  Guarded by vbat_monitor_mutex.
 */
static void battery_insertion_check(struct qpnp_bms_chip *chip)
{
	int present = (int)is_battery_present(chip);
	int insertion_ocv_uv = get_battery_insertion_ocv_uv(chip);
	int insertion_ocv_taken = (insertion_ocv_uv > 0);

	mutex_lock(&chip->vbat_monitor_mutex);
	if (chip->battery_present != present
			&& (present == insertion_ocv_taken
				|| chip->battery_present == -EINVAL)) {
		pr_debug("status = %d, shadow status = %d, insertion_ocv_uv = %d\n",
				present, chip->battery_present,
				insertion_ocv_uv);
		if (chip->battery_present != -EINVAL) {
			if (present) {
				chip->insertion_ocv_uv = insertion_ocv_uv;
#if !(defined(CONFIG_HTC_BATT_8960))
				setup_vbat_monitoring(chip);
#endif
				chip->new_battery = true;
			} else {
				reset_vbat_monitoring(chip);
			}
		}
		chip->battery_present = present;
		schedule_work(&chip->recalc_work);
	}
	mutex_unlock(&chip->vbat_monitor_mutex);
}

#if !(defined(CONFIG_HTC_BATT_8960))
/* power_supply CAPACITY getter */
static int get_prop_bms_capacity(struct qpnp_bms_chip *chip)
{
	return report_state_of_charge(chip);
}

/* power_supply external-change callback: run all state checks */
static void qpnp_bms_external_power_changed(struct power_supply *psy)
{
	struct qpnp_bms_chip *chip = container_of(psy, struct qpnp_bms_chip,
								bms_psy);

	battery_insertion_check(chip);
	batfet_status_check(chip);
	battery_status_check(chip);
}

/* power_supply property getter for the bms_psy class device */
static int qpnp_bms_power_get_property(struct power_supply *psy,
					enum power_supply_property psp,
					union power_supply_propval *val)
{
	struct qpnp_bms_chip *chip = container_of(psy, struct qpnp_bms_chip,
								bms_psy);

	switch (psp) {
	case POWER_SUPPLY_PROP_CAPACITY:
		val->intval = get_prop_bms_capacity(chip);
		break;
	case POWER_SUPPLY_PROP_STATUS:
		val->intval = chip->battery_status;
		break;
	case POWER_SUPPLY_PROP_CURRENT_NOW:
		val->intval = get_prop_bms_current_now(chip);
		break;
	case POWER_SUPPLY_PROP_RESISTANCE:
		val->intval = get_prop_bms_batt_resistance(chip);
		break;
	case POWER_SUPPLY_PROP_CHARGE_COUNTER:
		val->intval = get_prop_bms_charge_counter(chip);
		break;
	case POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW:
		val->intval = get_prop_bms_charge_counter_shadow(chip);
		break;
	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
		val->intval = get_prop_bms_charge_full_design(chip);
		break;
	case POWER_SUPPLY_PROP_CHARGE_FULL:
		val->intval = get_prop_bms_charge_full(chip);
		break;
	case POWER_SUPPLY_PROP_CYCLE_COUNT:
		val->intval = chip->charge_cycles;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
#else
/*
 * HTC build: write from kernel space through vfs_write by temporarily
 * widening the address limit.  NOTE(review): shadows the kernel-global
 * kernel_write() helper on newer kernels — confirm for this tree.
 */
static ssize_t kernel_write(struct file *file, const char *buf,
	size_t count, loff_t pos)
{
	mm_segment_t old_fs;
	ssize_t res;

	old_fs = get_fs();
	set_fs(get_ds());

	res = vfs_write(file, (const char __user *)buf, count, &pos);

	set_fs(old_fs);
	return res;
}

/*
 * Write one int at the given offset of the eMMC "misc" partition.
 * Returns 1 on success, 0 if the partition is unknown, or a PTR_ERR
 * value if the block device cannot be opened.
 */
int emmc_misc_write(int val, int offset)
{
	char filename[32] = "";
	int w_val = val;
	struct file *filp = NULL;
	ssize_t nread;
	int pnum = get_partition_num_by_name("misc");

	if (pnum < 0) {
		pr_warn("unknown partition number for misc partition\n");
		return 0;
	}
	sprintf(filename, "/dev/block/mmcblk0p%d", pnum);

	filp = filp_open(filename, O_RDWR, 0);
	if (IS_ERR(filp)) {
		pr_info("unable to open file: %s\n", filename);
		return PTR_ERR(filp);
	}

	filp->f_pos = offset;
	nread = kernel_write(filp, (char *)&w_val, sizeof(int), filp->f_pos);
	pr_info("%X (%d)\n", w_val, nread);
	filp_close(filp, NULL);

	return 1;
}

/* HTC export: instantaneous battery current in uA */
int pm8941_bms_get_batt_current(int *result)
{
	if (!the_chip) {
		pr_warn("called before init\n");
		return -EINVAL;
	}
	*result = get_prop_bms_current_now(the_chip);
	return 0;
}

/*
 * HTC export: recalculate and return the battery SOC.  Also re-enables
 * OCV updates once the boot-time hold period expires, snapshots data for
 * the eMMC store when SOC is low, and accumulates the time since the
 * last HW OCV update.
 */
int pm8941_bms_get_batt_soc(int *result)
{
	int state_of_charge;
	struct timespec xtime;
	unsigned long currtime_ms;
	unsigned long time_since_last_update_ms, cur_jiffies;

	xtime = CURRENT_TIME;
	currtime_ms = xtime.tv_sec * MSEC_PER_SEC +
			xtime.tv_nsec / NSEC_PER_MSEC;

	if (!the_chip) {
		pr_warn("called before init\n");
		return -EINVAL;
	}

	batt_level = state_of_charge = *result = recalculate_soc(the_chip);

	if (new_boot_soc && allow_ocv_time &&
			(currtime_ms >= allow_ocv_time)) {
		pr_info("OCV can be update due to currtime(%lu) >= allow_ocv_time(%lu) "
				"(OCV_UPDATE_STOP_BIT_BOOT_UP)\n",
				currtime_ms, allow_ocv_time);
		new_boot_soc = 0;
		allow_ocv_time = 0;
		disable_ocv_update_with_reason(false,
				OCV_UPDATE_STOP_BIT_BOOT_UP);
	}

	if (the_chip->store_batt_data_soc_thre > 0
			&& state_of_charge <= the_chip->store_batt_data_soc_thre
			&& (store_soc_ui >= 0 && store_soc_ui <= 100)) {
		store_emmc.store_soc = store_soc_ui;
		store_emmc.store_currtime_ms = currtime_ms;
	}

	if (the_chip->criteria_sw_est_ocv > 0) {
		cur_jiffies = jiffies;
		time_since_last_update_ms =
			(cur_jiffies - htc_batt_bms_timer.batt_system_jiffies)
				* MSEC_PER_SEC / HZ;
		htc_batt_bms_timer.no_ocv_update_period_ms +=
					time_since_last_update_ms;
		htc_batt_bms_timer.batt_system_jiffies = cur_jiffies;
	}

	return 0;
}

/* HTC export: accumulated coulomb-counter charge */
int pm8941_bms_get_batt_cc(int *result)
{
	if (!the_chip) {
		pr_warn("called before init\n");
		return -EINVAL;
	}
	*result = get_prop_bms_charge_counter(the_chip);
	return 0;
}

int pm8941_bms_get_fcc(void)
{
	if (!the_chip) {
		pr_err("called 
before init\n"); return -EINVAL; } return get_prop_bms_charge_full(the_chip); } static int get_bms_reg(void *data, u64 *val) { int addr = (int)data; int rc; u8 bms_sts; if (!the_chip) { pr_err("called before init\n"); return -EINVAL; } rc = qpnp_read_wrapper(the_chip, &bms_sts, the_chip->base + addr, 1); if (rc) { pr_err("failed to read BMS1 register sts %d\n", rc); return -EAGAIN; } pr_debug("addr:0x%X, val:0x%X\n", (the_chip->base + addr), bms_sts); *val = bms_sts; return 0; } static int get_iadc_reg(void *data, u64 *val) { int addr = (int)data; int rc; u8 iadc_sts; if (!the_chip) { pr_err("called before init\n"); return -EINVAL; } rc = qpnp_read_wrapper(the_chip, &iadc_sts, the_chip->iadc_base + addr, 1); if (rc) { pr_err("failed to read IADC1 register sts %d\n", rc); return -EAGAIN; } pr_debug("addr:0x%X, val:0x%X\n", (the_chip->iadc_base + addr), iadc_sts); *val = iadc_sts; return 0; } static int dump_all(void) { u64 val; unsigned int len =0; int batt_temp, rc; struct raw_soc_params raw; struct qpnp_vadc_result result; unsigned long flags; memset(batt_log_buf, 0, sizeof(BATT_LOG_BUF_LEN)); get_bms_reg((void *)BMS1_STATUS1, &val); len += scnprintf(batt_log_buf + len, BATT_LOG_BUF_LEN - len, "BMS1_STATUS1=0x%02llx,", val); get_bms_reg((void *)BMS1_INT_RT_STS, &val); len += scnprintf(batt_log_buf + len, BATT_LOG_BUF_LEN - len, "BMS1_INT_RT_STS=0x%02llx,", val); get_bms_reg((void *)BMS1_TOL_CTL, &val); len += scnprintf(batt_log_buf + len, BATT_LOG_BUF_LEN - len, "TOL_CTL=0x%02llx,", val); get_bms_reg((void *)BMS1_OCV_USE_LOW_LIMIT_THR0, &val); len += scnprintf(batt_log_buf + len, BATT_LOG_BUF_LEN - len, "OCV_USE_LOW_LIMIT_THR0=0x%02llx,", val); get_bms_reg((void *)BMS1_OCV_USE_HIGH_LIMIT_THR0, &val); len += scnprintf(batt_log_buf + len, BATT_LOG_BUF_LEN - len, "OCV_USE_HIGH_LIMIT_THR0=0x%02llx,", val); get_bms_reg((void *)BMS1_OCV_USE_LIMIT_CTL, &val); len += scnprintf(batt_log_buf + len, BATT_LOG_BUF_LEN - len, "OCV_USE_LIMIT_CTL=0x%02llx,", val); 
get_bms_reg((void *)BMS1_S3_VSENSE_THR_CTL , &val); len += scnprintf(batt_log_buf + len, BATT_LOG_BUF_LEN - len, "S3_VSENSE_THR_CTL =0x%02llx,", val); rc = qpnp_vadc_read(the_chip->vadc_dev, LR_MUX1_BATT_THERM, &result); if (rc) { pr_err("error reading vadc LR_MUX1_BATT_THERM = %d, rc = %d\n", LR_MUX1_BATT_THERM, rc); return rc; } batt_temp = (int)result.physical; read_soc_params_raw(the_chip, &raw, batt_temp); len += scnprintf(batt_log_buf + len, BATT_LOG_BUF_LEN - len, "cc(uAh)=%d,", get_prop_bms_charge_counter(the_chip)); len += scnprintf(batt_log_buf + len, BATT_LOG_BUF_LEN - len, "last_good_ocv_uv=%d,", raw.last_good_ocv_uv); get_bms_reg((void *)SOC_STORAGE_REG, &val); len += scnprintf(batt_log_buf + len, BATT_LOG_BUF_LEN - len, "SOC_STORAGE_REG=0x%lld,", val); get_bms_reg((void *)IAVG_STORAGE_REG, &val); len += scnprintf(batt_log_buf + len, BATT_LOG_BUF_LEN - len, "IAVG_STORAGE_REG=0x%lld,", val); get_iadc_reg((void *)IADC1_BMS_ADC_CH_SEL_CTL, &val); len += scnprintf(batt_log_buf + len, BATT_LOG_BUF_LEN - len, "ADC_CH_SEL_CTL=0x%lld ", val); local_irq_save(flags); len += scnprintf(batt_log_buf + len, BATT_LOG_BUF_LEN - len, "[irq]%d%d%d%d %d%d%d%d", irq_read_line(the_chip->cc_thr_irq.irq), irq_read_line(the_chip->ocv_for_r_irq.irq), irq_read_line(the_chip->good_ocv_irq.irq), irq_read_line(the_chip->charge_begin_irq.irq), irq_read_line(the_chip->sw_cc_thr_irq.irq), irq_read_line(the_chip->ocv_thr_irq.irq), irq_read_line(the_chip->vsense_avg_irq.irq), irq_read_line(the_chip->vsense_for_r_irq.irq)); local_irq_restore(flags); if(BATT_LOG_BUF_LEN - len <= 1) pr_warn("batt log length maybe out of buffer range!!!"); pr_info("%s\n", batt_log_buf); return 0; } inline int pm8941_bms_dump_all(void) { if (!the_chip) { pr_err("called before init\n"); return -EINVAL; } dump_all(); return 0; } int pm8941_bms_get_attr_text(char *buf, int size) { struct raw_soc_params raw; int len = 0; u64 val = 0; struct soc_params params; int batt_temp, rc, soc_rbatt, shdw_cc_uah; int 
remaining_usable_charge_uah; struct qpnp_vadc_result result; if (!the_chip) { pr_err("driver not initialized\n"); return 0; } len += scnprintf(buf + len, size - len, "is_ocv_update_start: %d;\n", is_ocv_update_start); len += scnprintf(buf + len, size - len, "consistent_flag: %d;\n", consistent_flag); get_bms_reg((void *)BMS1_STATUS1, &val); len += scnprintf(buf + len, size - len, "BMS1_STATUS1: 0x%02llx;\n", val); get_bms_reg((void *)BMS1_INT_RT_STS, &val); len += scnprintf(buf + len, size - len, "BMS1_INT_RT_STS: 0x%02llx;\n", val); get_bms_reg((void *)BMS1_TOL_CTL, &val); len += scnprintf(buf + len, size - len, "TOL_CTL: 0x%02llx;\n", val); get_bms_reg((void *)BMS1_OCV_USE_LOW_LIMIT_THR0, &val); len += scnprintf(buf + len, size - len, "OCV_USE_LOW_LIMIT_THR0: 0x%02llx;\n", val); get_bms_reg((void *)BMS1_OCV_USE_HIGH_LIMIT_THR0, &val); len += scnprintf(buf + len, size - len, "OCV_USE_HIGH_LIMIT_THR0: 0x%02llx;\n", val); get_bms_reg((void *)BMS1_OCV_USE_LIMIT_CTL, &val); len += scnprintf(buf + len, size - len, "OCV_USE_LIMIT_CTL: 0x%02llx;\n", val); get_bms_reg((void *)BMS1_S3_VSENSE_THR_CTL , &val); len += scnprintf(buf + len, size - len, "S3_VSENSE_THR_CTL: 0x%02llx;\n", val); get_bms_reg((void *)SOC_STORAGE_REG, &val); len += scnprintf(buf + len, size - len, "SOC_STORAGE_REG: 0x%lld;\n", val); get_bms_reg((void *)IAVG_STORAGE_REG, &val); len += scnprintf(buf + len, size - len, "IAVG_STORAGE_REG: 0x%lld;\n", val); get_iadc_reg((void *)IADC1_BMS_ADC_CH_SEL_CTL, &val); len += scnprintf(buf + len, size - len, "ADC_CH_SEL_CTL: 0x%lld;\n", val); rc = qpnp_vadc_read(the_chip->vadc_dev, LR_MUX1_BATT_THERM, &result); if (rc) { pr_err("error reading vadc LR_MUX1_BATT_THERM = %d, rc = %d\n", LR_MUX1_BATT_THERM, rc); return len; } batt_temp = (int)result.physical; mutex_lock(&the_chip->last_ocv_uv_mutex); read_soc_params_raw(the_chip, &raw, batt_temp); len += scnprintf(buf + len, size - len, "LAST_GOOD_OCV_RAW: 0x%x;\n", raw.last_good_ocv_raw); len += scnprintf(buf + len, 
size - len, "CC_RAW: 0x%llx;\n", raw.cc); len += scnprintf(buf + len, size - len, "last_good_ocv_uv: %duV;\n", raw.last_good_ocv_uv); len += scnprintf(buf + len, size - len, "ori_last_good_ocv_uv: %duV;\n", bms_dbg.last_ocv_raw_uv); len += scnprintf(buf + len, size - len, "backup_last_good_ocv_uv: %duV;\n", the_chip->ocv_backup_uv); calculate_delta_time(&the_chip->tm_sec, &params.delta_time_s); params.fcc_uah = calculate_fcc(the_chip, batt_temp); params.ocv_charge_uah = calculate_ocv_charge(the_chip, &raw, params.fcc_uah); params.cc_uah = calculate_cc(the_chip, raw.cc, CC, NORESET); params.cc_uah -= the_chip->cc_backup_uah; shdw_cc_uah = calculate_cc(the_chip, raw.shdw_cc, SHDW_CC, NORESET); soc_rbatt = ((params.ocv_charge_uah - params.cc_uah) * 100) /params.fcc_uah; if (soc_rbatt < 0) soc_rbatt = 0; params.rbatt_mohm = get_rbatt(the_chip, soc_rbatt, batt_temp); calculate_iavg(the_chip, params.cc_uah, &params.iavg_ua, params.delta_time_s); params.uuc_uah = calculate_unusable_charge_uah(the_chip, &params, batt_temp); remaining_usable_charge_uah = params.ocv_charge_uah - params.cc_uah - params.uuc_uah; mutex_unlock(&the_chip->last_ocv_uv_mutex); len += scnprintf(buf + len, size - len, "rbatt(milliOhms): %d;\n", params.rbatt_mohm); len += scnprintf(buf + len, size - len, "rbatt_scalefactor: %d;\n", bms_dbg.rbatt_sf); len += scnprintf(buf + len, size - len, "soc_rbatt(%%): %d;\n", soc_rbatt); len += scnprintf(buf + len, size - len, "unusable_uv: %d;\n", bms_dbg.unusable_uv); len += scnprintf(buf + len, size - len, "pc_unusable(%%): %d;\n", bms_dbg.pc_unusable); len += scnprintf(buf + len, size - len, "rc_pc(%%): %d;\n", bms_dbg.rc_pc); len += scnprintf(buf + len, size - len, "fcc(uAh): %d;\n", params.fcc_uah); len += scnprintf(buf + len, size - len, "unusable_charge(uAh): %d;\n", params.uuc_uah); len += scnprintf(buf + len, size - len, "remaining_charge(uAh): %d;\n", params.ocv_charge_uah); len += scnprintf(buf + len, size - len, "remaining_usable_charge_uah: %d;\n", 
remaining_usable_charge_uah); len += scnprintf(buf + len, size - len, "uuc_iavg_ma: %d;\n", bms_dbg.uuc_iavg_ma); len += scnprintf(buf + len, size - len, "uuc_rbatt_mohm: %d;\n", bms_dbg.uuc_rbatt_mohm); len += scnprintf(buf + len, size - len, "ori_unusable_charge(uAh): %d;\n", bms_dbg.ori_uuc_uah); len += scnprintf(buf + len, size - len, "cc(uAh): %d;\n", params.cc_uah); len += scnprintf(buf + len, size - len, "ori_cc(uAh): %d;\n", bms_dbg.ori_cc_uah); len += scnprintf(buf + len, size - len, "backup_cc(uAh): %d;\n", the_chip->cc_backup_uah); len += scnprintf(buf + len, size - len, "start_soc: %d;\n", the_chip->start_soc); len += scnprintf(buf + len, size - len, "end_soc: %d;\n", the_chip->end_soc); len += scnprintf(buf + len, size - len, "shdw_cc_uah(uAh): %d;\n", shdw_cc_uah); len += scnprintf(buf + len, size - len, "pon_est_ocv: %d;\n", bms_dbg.pon_est_ocv); len += scnprintf(buf + len, size - len, "ibat_for_est_ocv: %d;\n", bms_dbg.ibat_for_est_ocv); len += scnprintf(buf + len, size - len, "vbat_for_est_ocv: %d;\n", bms_dbg.vbat_for_est_ocv); len += scnprintf(buf + len, size - len, "rbat_for_est_ocv: %d;\n", bms_dbg.rbat_for_est_ocv); return len; } int pm8941_get_batt_id(int *result) { int64_t battery_id_raw; int battery_id_mv; if (!the_chip) { pr_err("called before init\n"); return -EINVAL; } battery_id_raw = read_battery_id(the_chip); if (battery_id_raw < 0) { pr_err("cannot read battery id err = %lld\n", battery_id_raw); return -EINVAL; } battery_id_mv = (int)battery_id_raw / 1000; *result = htc_battery_cell_find_and_set_id_auto(battery_id_mv); return 0; } int pm8941_bms_get_percent_charge(struct qpnp_bms_chip *chip) { int rc, batt_temp, soc; struct raw_soc_params raw; struct qpnp_vadc_result result; rc = qpnp_vadc_read(chip->vadc_dev, LR_MUX1_BATT_THERM, &result); if (rc) { pr_err("error reading vadc LR_MUX1_BATT_THERM = %d, rc = %d\n", LR_MUX1_BATT_THERM, rc); return rc; } batt_temp = (int)result.physical; mutex_lock(&chip->last_ocv_uv_mutex); 
read_soc_params_raw(chip, &raw, batt_temp); soc = calculate_state_of_charge(chip, &raw, batt_temp); mutex_unlock(&chip->last_ocv_uv_mutex); return soc; } int pm8941_bms_store_battery_data_emmc(void) { if (the_chip->store_batt_data_soc_thre > 0 && store_emmc.store_soc > 0 && store_emmc.store_soc <= the_chip->store_batt_data_soc_thre) { emmc_misc_write(BMS_STORE_MAGIC_NUM, BMS_STORE_MAGIC_OFFSET); emmc_misc_write(store_emmc.store_soc, BMS_STORE_SOC_OFFSET); emmc_misc_write(store_emmc.store_ocv_uv, BMS_STORE_OCV_OFFSET); emmc_misc_write(store_emmc.store_cc_uah, BMS_STORE_CC_OFFSET); emmc_misc_write(store_emmc.store_currtime_ms, BMS_STORE_CURRTIME_OFFSET); pr_info("Stored soc=%d,OCV=%d,ori_cc_uah=%d,stored_cc_uah:%d,currtime_ms=%lu\n", store_emmc.store_soc, store_emmc.store_ocv_uv, bms_dbg.ori_cc_uah, store_emmc.store_cc_uah, store_emmc.store_currtime_ms); } return 0; } int pm8941_bms_store_battery_ui_soc(int soc_ui) { if (soc_ui < 0 || soc_ui > 100) return -EINVAL; store_soc_ui = soc_ui; return 0; } int pm8941_bms_get_battery_ui_soc(void) { if (!the_chip) { pr_err("called before init\n"); return -EINVAL; } pr_debug("batt_stored_soc: %d\n", the_chip->batt_stored_soc); if (the_chip->batt_stored_soc <= 0 || the_chip->batt_stored_soc > 100 || !consistent_flag) return -EINVAL; return the_chip->batt_stored_soc; } int pm8941_bms_stop_ocv_updates(void) { if (!the_chip) { pr_err("called before init\n"); return -EINVAL; } if (!is_ocv_update_start) { pr_info("ocv updates is already stopped"); return -EINVAL; } is_ocv_update_start = 0; pr_info("stopping ocv updates, is_ocv_update_start=%d", is_ocv_update_start); return qpnp_masked_write(the_chip, BMS1_TOL_CTL, OCV_TOL_MASK, OCV_TOL_NO_OCV); } int pm8941_bms_start_ocv_updates(void) { if (!the_chip) { pr_err("called before init\n"); return -EINVAL; } if (is_ocv_update_start) { pr_info("ocv updates is already started"); return -EINVAL; } is_ocv_update_start = 1; pr_info("starting ocv updates, is_ocv_update_start=%d", 
is_ocv_update_start); return qpnp_masked_write(the_chip, BMS1_TOL_CTL, OCV_TOL_MASK, OCV_TOL_DEFAULT); } static void disable_ocv_update_with_reason(bool disable, int reason) { int prev_ocv_update_stop_reason; mutex_lock(&ocv_update_lock); prev_ocv_update_stop_reason = ocv_update_stop_reason; if (ocv_update_stop_active_mask & reason) { if (disable) ocv_update_stop_reason |= reason; else ocv_update_stop_reason &= ~reason; if (prev_ocv_update_stop_reason ^ ocv_update_stop_reason) { pr_info("ocv_update_stop_reason:0x%x->0x%d\n", prev_ocv_update_stop_reason, ocv_update_stop_reason); if (!!prev_ocv_update_stop_reason != !!ocv_update_stop_reason) { if (!!ocv_update_stop_reason) pm8941_bms_stop_ocv_updates(); else pm8941_bms_start_ocv_updates(); } } } mutex_unlock(&ocv_update_lock); } static void pm8941_btm_voltage_alarm_notify(enum qpnp_tm_state state, void *ctx) { struct qpnp_bms_chip *chip = ctx; int vbat_uv; struct qpnp_vadc_result result; qpnp_vadc_read(chip->vadc_dev, VBAT_SNS, &result); pr_debug("vbat = %lld, raw = 0x%x\n", result.physical, result.adc_code); get_battery_voltage(chip, &vbat_uv); pr_info("vbat is at %d, state is at %d\n", vbat_uv, state); if (state == ADC_TM_LOW_STATE) { pr_debug("low voltage btm notification triggered\n"); if (vbat_uv - VBATT_ERROR_MARGIN < chip->vbat_monitor_params.low_thr) { pm8941_batt_lower_alarm_threshold_set(0); htc_gauge_event_notify(HTC_GAUGE_EVENT_LOW_VOLTAGE_ALARM); } else { pr_debug("faulty btm trigger, discarding\n"); qpnp_adc_tm_channel_measure(chip->adc_tm_dev, &chip->vbat_monitor_params); } } else if (state == ADC_TM_HIGH_STATE) { pr_debug("high voltage btm notification triggered\n"); } else { pr_debug("unknown voltage notification state: %d\n", state); } } int pm8941_batt_lower_alarm_threshold_set(int threshold_mV) { int rc; the_chip->vbat_monitor_params.low_thr = threshold_mV * 1000; the_chip->vbat_monitor_params.high_thr = the_chip->max_voltage_uv * 2; the_chip->vbat_monitor_params.state_request = 
ADC_TM_LOW_THR_ENABLE; the_chip->vbat_monitor_params.channel = VBAT_SNS; the_chip->vbat_monitor_params.btm_ctx = (void *)the_chip; the_chip->vbat_monitor_params.timer_interval = ADC_MEAS1_INTERVAL_1S; the_chip->vbat_monitor_params.threshold_notification = &pm8941_btm_voltage_alarm_notify; pr_debug("set low thr to %d and high to %d\n", the_chip->vbat_monitor_params.low_thr, the_chip->vbat_monitor_params.high_thr); if (!is_battery_present(the_chip)) { pr_debug("no battery inserted, do not enable vbat monitoring\n"); the_chip->vbat_monitor_params.state_request = ADC_TM_HIGH_LOW_THR_DISABLE; } else { rc = qpnp_adc_tm_channel_measure(the_chip->adc_tm_dev, &the_chip->vbat_monitor_params); if (rc) { pr_err("tm setup failed: %d\n", rc); return rc; } } pr_debug("setup complete\n"); return 0; } #endif #define OCV_USE_LIMIT_EN BIT(7) static int set_ocv_voltage_thresholds(struct qpnp_bms_chip *chip, int low_voltage_threshold, int high_voltage_threshold) { uint16_t low_voltage_raw, high_voltage_raw; int rc; low_voltage_raw = convert_vbatt_uv_to_raw(chip, low_voltage_threshold); high_voltage_raw = convert_vbatt_uv_to_raw(chip, high_voltage_threshold); rc = qpnp_write_wrapper(chip, (u8 *)&low_voltage_raw, chip->base + BMS1_OCV_USE_LOW_LIMIT_THR0, 2); if (rc) { pr_err("Failed to set ocv low voltage threshold: %d\n", rc); return rc; } rc = qpnp_write_wrapper(chip, (u8 *)&high_voltage_raw, chip->base + BMS1_OCV_USE_HIGH_LIMIT_THR0, 2); if (rc) { pr_err("Failed to set ocv high voltage threshold: %d\n", rc); return rc; } rc = qpnp_masked_write(chip, BMS1_OCV_USE_LIMIT_CTL, OCV_USE_LIMIT_EN, OCV_USE_LIMIT_EN); if (rc) { pr_err("Failed to enabled ocv voltage thresholds: %d\n", rc); return rc; } pr_debug("ocv low threshold set to %d uv or 0x%x raw\n", low_voltage_threshold, low_voltage_raw); pr_debug("ocv high threshold set to %d uv or 0x%x raw\n", high_voltage_threshold, high_voltage_raw); return 0; } static int read_shutdown_iavg_ma(struct qpnp_bms_chip *chip) { u8 iavg; int rc; rc = 
qpnp_read_wrapper(chip, &iavg, chip->base + IAVG_STORAGE_REG, 1); if (rc) { pr_err("failed to read addr = %d %d assuming %d\n", chip->base + IAVG_STORAGE_REG, rc, MIN_IAVG_MA); return MIN_IAVG_MA; } else if (iavg == IAVG_INVALID) { pr_err("invalid iavg read from BMS1_DATA_REG_1, using %d\n", MIN_IAVG_MA); return MIN_IAVG_MA; } else { if (iavg == 0) return MIN_IAVG_MA; else return MIN_IAVG_MA + IAVG_STEP_SIZE_MA * iavg; } } static int read_shutdown_soc(struct qpnp_bms_chip *chip) { u8 stored_soc; int rc, shutdown_soc; rc = qpnp_read_wrapper(chip, &stored_soc, chip->soc_storage_addr, 1); if (rc) { pr_err("failed to read addr = %d %d\n", chip->soc_storage_addr, rc); return SOC_INVALID; } if ((stored_soc >> 1) > 0) shutdown_soc = (stored_soc >> 1) - 1; else shutdown_soc = SOC_INVALID; pr_debug("stored soc = 0x%02x, shutdown_soc = %d\n", stored_soc, shutdown_soc); return shutdown_soc; } #define BAT_REMOVED_OFFMODE_BIT BIT(6) static bool is_battery_replaced_in_offmode(struct qpnp_bms_chip *chip) { u8 batt_pres; int rc; if (chip->batt_pres_addr) { rc = qpnp_read_wrapper(chip, &batt_pres, chip->batt_pres_addr, 1); pr_debug("offmode removed: %02x\n", batt_pres); if (!rc && (batt_pres & BAT_REMOVED_OFFMODE_BIT)) return true; } return false; } static void load_shutdown_data(struct qpnp_bms_chip *chip) { int calculated_soc, shutdown_soc; bool invalid_stored_soc; bool offmode_battery_replaced; bool shutdown_soc_out_of_limit; shutdown_soc = read_shutdown_soc(chip); bms_dbg.shutdown_soc = shutdown_soc; invalid_stored_soc = (shutdown_soc == SOC_INVALID); calculated_soc = recalculate_raw_soc(chip); shutdown_soc_out_of_limit = (abs(shutdown_soc - calculated_soc) > chip->shutdown_soc_valid_limit); pr_debug("calculated_soc = %d, valid_limit = %d\n", calculated_soc, chip->shutdown_soc_valid_limit); offmode_battery_replaced = is_battery_replaced_in_offmode(chip); if (chip->ignore_shutdown_soc || invalid_stored_soc || offmode_battery_replaced || shutdown_soc_out_of_limit) { 
chip->battery_removed = true; chip->shutdown_soc_invalid = true; chip->shutdown_iavg_ma = 0; pr_info("Ignoring shutdown SoC: invalid = %d, offmode = %d, out_of_limit = %d\n", invalid_stored_soc, offmode_battery_replaced, shutdown_soc_out_of_limit); } else { chip->shutdown_iavg_ma = read_shutdown_iavg_ma(chip); chip->shutdown_soc = shutdown_soc; } pr_info("raw_soc=%d,shutdown_soc=%d,shutdown_iavg=%d," "shutdown_soc_invalid=%d,battery_removed=%d\n", calculated_soc, chip->shutdown_soc, chip->shutdown_iavg_ma, chip->shutdown_soc_invalid, chip->battery_removed); } static irqreturn_t bms_ocv_thr_irq_handler(int irq, void *_chip) { struct qpnp_bms_chip *chip = _chip; pr_info("[irq]ocv_thr irq triggered\n"); bms_stay_awake(&chip->soc_wake_source); schedule_work(&chip->recalc_work); return IRQ_HANDLED; } static int64_t read_battery_id(struct qpnp_bms_chip *chip) { int rc; struct qpnp_vadc_result result; rc = qpnp_vadc_read(chip->vadc_dev, LR_MUX2_BAT_ID, &result); if (rc) { pr_err("error reading batt id channel = %d, rc = %d\n", LR_MUX2_BAT_ID, rc); return rc; } return result.physical; } static int set_battery_data(struct qpnp_bms_chip *chip) { int64_t battery_id; int rc = 0, dt_data = false; struct bms_battery_data *batt_data; struct device_node *node; int id_result; if (chip->batt_type == BATT_DESAY) { batt_data = &desay_5200_data; } else if (chip->batt_type == BATT_PALLADIUM) { batt_data = &palladium_1500_data; } else if (chip->batt_type == BATT_OEM) { batt_data = &oem_batt_data; } else if (chip->batt_type == BATT_QRD_4V35_2000MAH) { batt_data = &QRD_4v35_2000mAh_data; } else if (chip->batt_type == BATT_QRD_4V2_1300MAH) { batt_data = &qrd_4v2_1300mah_data; } else { #if !(defined(CONFIG_HTC_BATT_8960)) battery_id = read_battery_id(chip); if (battery_id < 0) { pr_err("cannot read battery id err = %lld\n", battery_id); return battery_id; } #else battery_id = (int)read_battery_id(chip) / 1000; id_result = htc_battery_cell_find_and_set_id_auto(battery_id); pr_info("batt ID 
vol= %lldmv, id_result= %d\n", battery_id, id_result); #endif node = of_find_node_by_name(chip->spmi->dev.of_node, "qcom,battery-data"); if (!node) { pr_warn("No available batterydata, using palladium 1500\n"); batt_data = &palladium_1500_data; goto assign_data; } batt_data = devm_kzalloc(chip->dev, sizeof(struct bms_battery_data), GFP_KERNEL); if (!batt_data) { pr_err("Could not alloc battery data\n"); batt_data = &palladium_1500_data; goto assign_data; } batt_data->fcc_temp_lut = devm_kzalloc(chip->dev, sizeof(struct single_row_lut), GFP_KERNEL); batt_data->pc_temp_ocv_lut = devm_kzalloc(chip->dev, sizeof(struct pc_temp_ocv_lut), GFP_KERNEL); batt_data->rbatt_sf_lut = devm_kzalloc(chip->dev, sizeof(struct sf_lut), GFP_KERNEL); batt_data->rbatt_est_ocv_lut = devm_kzalloc(chip->dev, sizeof(struct sf_lut), GFP_KERNEL); batt_data->max_voltage_uv = -1; batt_data->cutoff_uv = -1; batt_data->iterm_ua = -1; #if !(defined(CONFIG_HTC_BATT_8960)) rc = of_batterydata_read_data(node, batt_data, battery_id); #else rc = of_batterydata_read_data_by_id_result(node, batt_data, id_result); #endif if (rc == 0 && batt_data->fcc_temp_lut && batt_data->pc_temp_ocv_lut && batt_data->rbatt_sf_lut) { dt_data = true; } else { pr_err("battery data load failed, using palladium 1500\n"); devm_kfree(chip->dev, batt_data->fcc_temp_lut); devm_kfree(chip->dev, batt_data->pc_temp_ocv_lut); devm_kfree(chip->dev, batt_data->rbatt_sf_lut); devm_kfree(chip->dev, batt_data); batt_data = &palladium_1500_data; } } assign_data: chip->fcc_mah = batt_data->fcc; chip->fcc_temp_lut = batt_data->fcc_temp_lut; chip->fcc_sf_lut = batt_data->fcc_sf_lut; chip->pc_temp_ocv_lut = batt_data->pc_temp_ocv_lut; chip->pc_sf_lut = batt_data->pc_sf_lut; chip->rbatt_sf_lut = batt_data->rbatt_sf_lut; chip->default_rbatt_mohm = batt_data->default_rbatt_mohm; chip->rbatt_capacitive_mohm = batt_data->rbatt_capacitive_mohm; chip->flat_ocv_threshold_uv = batt_data->flat_ocv_threshold_uv; chip->rbatt_est_ocv_lut = 
batt_data->rbatt_est_ocv_lut; if (batt_data->max_voltage_uv >= 0 && dt_data) chip->max_voltage_uv = batt_data->max_voltage_uv; if (batt_data->cutoff_uv >= 0 && dt_data) chip->v_cutoff_uv = batt_data->cutoff_uv; if (batt_data->iterm_ua >= 0 && dt_data) chip->chg_term_ua = batt_data->iterm_ua; if (chip->pc_temp_ocv_lut == NULL) { pr_err("temp ocv lut table has not been loaded\n"); if (dt_data) { devm_kfree(chip->dev, batt_data->fcc_temp_lut); devm_kfree(chip->dev, batt_data->pc_temp_ocv_lut); devm_kfree(chip->dev, batt_data->rbatt_sf_lut); devm_kfree(chip->dev, batt_data); } return -EINVAL; } if (dt_data) devm_kfree(chip->dev, batt_data); return 0; } static int bms_get_adc(struct qpnp_bms_chip *chip, struct spmi_device *spmi) { int rc = 0; chip->vadc_dev = qpnp_get_vadc(&spmi->dev, "bms"); if (IS_ERR(chip->vadc_dev)) { rc = PTR_ERR(chip->vadc_dev); if (rc != -EPROBE_DEFER) pr_err("vadc property missing, rc=%d\n", rc); return rc; } chip->iadc_dev = qpnp_get_iadc(&spmi->dev, "bms"); if (IS_ERR(chip->iadc_dev)) { rc = PTR_ERR(chip->iadc_dev); if (rc != -EPROBE_DEFER) pr_err("iadc property missing, rc=%d\n", rc); return rc; } chip->adc_tm_dev = qpnp_get_adc_tm(&spmi->dev, "bms"); if (IS_ERR(chip->adc_tm_dev)) { rc = PTR_ERR(chip->adc_tm_dev); if (rc != -EPROBE_DEFER) pr_err("adc-tm not ready, defer probe\n"); return rc; } return 0; } #define SPMI_PROP_READ(chip_prop, qpnp_spmi_property, retval, optional)\ do { \ if (retval) \ break; \ retval = of_property_read_u32(chip->spmi->dev.of_node, \ "qcom," qpnp_spmi_property, \ &chip->chip_prop); \ if ((retval == -EINVAL) && optional) \ retval = 0; \ else if (retval) { \ pr_err("Error reading " #qpnp_spmi_property \ " property %d\n", rc); \ } \ } while (0) #define SPMI_PROP_READ_BOOL(chip_prop, qpnp_spmi_property) \ do { \ chip->chip_prop = of_property_read_bool(chip->spmi->dev.of_node,\ "qcom," qpnp_spmi_property); \ } while (0) static inline int bms_read_properties(struct qpnp_bms_chip *chip) { int rc = 0; 
SPMI_PROP_READ(r_sense_uohm, "r-sense-uohm", rc, false); SPMI_PROP_READ(v_cutoff_uv, "v-cutoff-uv", rc, false); SPMI_PROP_READ(max_voltage_uv, "max-voltage-uv", rc, false); SPMI_PROP_READ(r_conn_mohm, "r-conn-mohm", rc, false); SPMI_PROP_READ(chg_term_ua, "chg-term-ua", rc, false); SPMI_PROP_READ(shutdown_soc_valid_limit, "shutdown-soc-valid-limit", rc, false); SPMI_PROP_READ(adjust_soc_low_threshold, "adjust-soc-low-threshold", rc, false); SPMI_PROP_READ(batt_type, "batt-type", rc, false); SPMI_PROP_READ(low_soc_calc_threshold, "low-soc-calculate-soc-threshold", rc, false); SPMI_PROP_READ(low_soc_calculate_soc_ms, "low-soc-calculate-soc-ms", rc, false); SPMI_PROP_READ(low_voltage_calculate_soc_ms, "low-voltage-calculate-soc-ms", rc, false); SPMI_PROP_READ(calculate_soc_ms, "calculate-soc-ms", rc, false); SPMI_PROP_READ(high_ocv_correction_limit_uv, "high-ocv-correction-limit-uv", rc, false); SPMI_PROP_READ(low_ocv_correction_limit_uv, "low-ocv-correction-limit-uv", rc, false); SPMI_PROP_READ(hold_soc_est, "hold-soc-est", rc, false); SPMI_PROP_READ(criteria_sw_est_ocv, "criteria-sw-est-ocv", rc, true); SPMI_PROP_READ(rconn_mohm_sw_est_ocv, "rconn-mohm-sw-est-ocv", rc, true); SPMI_PROP_READ(ocv_high_threshold_uv, "ocv-voltage-high-threshold-uv", rc, false); SPMI_PROP_READ(ocv_low_threshold_uv, "ocv-voltage-low-threshold-uv", rc, false); SPMI_PROP_READ(low_voltage_threshold, "low-voltage-threshold", rc, false); SPMI_PROP_READ(temperature_margin, "tm-temp-margin", rc, false); SPMI_PROP_READ(shutdown_vol_criteria, "shutdown-vol-criteria", rc, true); SPMI_PROP_READ(batt_stored_magic_num, "stored-batt-magic-num", rc, true); SPMI_PROP_READ(batt_stored_soc, "stored-batt-soc", rc, true); SPMI_PROP_READ(batt_stored_update_time, "stored-batt-update-time", rc, true); SPMI_PROP_READ(store_batt_data_soc_thre, "store-batt-data-soc-thre", rc, true); SPMI_PROP_READ(enable_batt_full_fake_ocv, "enable-batt-full-fake-ocv", rc, true); chip->use_external_rsense = of_property_read_bool( 
chip->spmi->dev.of_node, "qcom,use-external-rsense"); chip->ignore_shutdown_soc = of_property_read_bool( chip->spmi->dev.of_node, "qcom,ignore-shutdown-soc"); chip->use_voltage_soc = of_property_read_bool(chip->spmi->dev.of_node, "qcom,use-voltage-soc"); chip->use_ocv_thresholds = of_property_read_bool( chip->spmi->dev.of_node, "qcom,use-ocv-thresholds"); if (chip->adjust_soc_low_threshold >= 45) chip->adjust_soc_low_threshold = 45; SPMI_PROP_READ_BOOL(enable_fcc_learning, "enable-fcc-learning"); if (chip->enable_fcc_learning) { SPMI_PROP_READ(min_fcc_learning_soc, "min-fcc-learning-soc", rc, false); SPMI_PROP_READ(min_fcc_ocv_pc, "min-fcc-ocv-pc", rc, false); SPMI_PROP_READ(min_fcc_learning_samples, "min-fcc-learning-samples", rc, false); SPMI_PROP_READ(fcc_resolution, "fcc-resolution", rc, false); if (chip->min_fcc_learning_samples > MAX_FCC_CYCLES) chip->min_fcc_learning_samples = MAX_FCC_CYCLES; chip->fcc_learning_samples = devm_kzalloc(&chip->spmi->dev, (sizeof(struct fcc_sample) * chip->min_fcc_learning_samples), GFP_KERNEL); if (chip->fcc_learning_samples == NULL) return -ENOMEM; pr_debug("min-fcc-soc=%d, min-fcc-pc=%d, min-fcc-cycles=%d\n", chip->min_fcc_learning_soc, chip->min_fcc_ocv_pc, chip->min_fcc_learning_samples); } if (rc) { pr_err("Missing required properties.\n"); return rc; } pr_debug("dts data: r_sense_uohm:%d, v_cutoff_uv:%d, max_v:%d\n", chip->r_sense_uohm, chip->v_cutoff_uv, chip->max_voltage_uv); pr_debug("r_conn:%d, shutdown_soc: %d, adjust_soc_low:%d\n", chip->r_conn_mohm, chip->shutdown_soc_valid_limit, chip->adjust_soc_low_threshold); pr_debug("chg_term_ua:%d, batt_type:%d\n", chip->chg_term_ua, chip->batt_type); pr_debug("ignore_shutdown_soc:%d, use_voltage_soc:%d\n", chip->ignore_shutdown_soc, chip->use_voltage_soc); pr_debug("use external rsense: %d\n", chip->use_external_rsense); pr_info("magic_num:0x%X, stored_soc:%d, update_time:%u, store_batt_data_soc_thre:%d\n", chip->batt_stored_magic_num, chip->batt_stored_soc, 
chip->batt_stored_update_time, chip->store_batt_data_soc_thre); return 0; } static inline int bms_read_batt_stored_properties(struct qpnp_bms_chip *chip) { int rc = 0; SPMI_PROP_READ(batt_stored_ocv_uv, "stored-batt-ocv-uv", rc, true); SPMI_PROP_READ(batt_stored_cc_uah, "stored-batt-cc-uah", rc, true); pr_info("stored_ocv_uv:%d, stored_cc_uah:%d\n", chip->batt_stored_ocv_uv, chip->batt_stored_cc_uah); return 0; } static inline void bms_initialize_constants(struct qpnp_bms_chip *chip) { chip->prev_pc_unusable = -EINVAL; chip->soc_at_cv = -EINVAL; chip->calculated_soc = -EINVAL; chip->last_soc = -EINVAL; chip->last_soc_est = -EINVAL; chip->battery_present = -EINVAL; chip->battery_status = POWER_SUPPLY_STATUS_UNKNOWN; chip->last_cc_uah = INT_MIN; chip->ocv_reading_at_100 = OCV_RAW_UNINITIALIZED; chip->prev_last_good_ocv_raw = OCV_RAW_UNINITIALIZED; chip->first_time_calc_soc = 1; chip->first_time_calc_uuc = 1; chip->batt_stored_ocv_uv = 0; chip->batt_stored_cc_uah = 0; chip->cc_backup_uah = 0; chip->ocv_backup_uv = 0; } #define SPMI_FIND_IRQ(chip, irq_name) \ do { \ chip->irq_name##_irq.irq = spmi_get_irq_byname(chip->spmi, \ resource, #irq_name); \ if (chip->irq_name##_irq.irq < 0) { \ pr_err("Unable to get " #irq_name " irq\n"); \ return -ENXIO; \ } \ } while (0) static int bms_find_irqs(struct qpnp_bms_chip *chip, struct spmi_resource *resource) { SPMI_FIND_IRQ(chip, cc_thr); SPMI_FIND_IRQ(chip, ocv_for_r); SPMI_FIND_IRQ(chip, good_ocv); SPMI_FIND_IRQ(chip, charge_begin); SPMI_FIND_IRQ(chip, sw_cc_thr); SPMI_FIND_IRQ(chip, ocv_thr); SPMI_FIND_IRQ(chip, vsense_avg); SPMI_FIND_IRQ(chip, vsense_for_r); return 0; } #define SPMI_REQUEST_IRQ(chip, rc, irq_name) \ do { \ rc = devm_request_irq(chip->dev, chip->irq_name##_irq.irq, \ bms_##irq_name##_irq_handler, \ IRQF_TRIGGER_RISING, #irq_name, chip); \ if (rc < 0) { \ pr_err("Unable to request " #irq_name " irq: %d\n", rc);\ return -ENXIO; \ } \ } while (0) static int bms_request_irqs(struct qpnp_bms_chip *chip) { int rc; 
SPMI_REQUEST_IRQ(chip, rc, ocv_thr); enable_irq_wake(chip->ocv_thr_irq.irq); return 0; } #define REG_OFFSET_PERP_TYPE 0x04 #define REG_OFFSET_PERP_SUBTYPE 0x05 #define BMS_BMS_TYPE 0xD #define BMS_BMS1_SUBTYPE 0x1 #define BMS_IADC_TYPE 0x8 #define BMS_IADC1_SUBTYPE 0x3 #define BMS_IADC2_SUBTYPE 0x5 static int register_spmi(struct qpnp_bms_chip *chip, struct spmi_device *spmi) { struct spmi_resource *spmi_resource; struct resource *resource; int rc; u8 type, subtype; chip->dev = &(spmi->dev); chip->spmi = spmi; spmi_for_each_container_dev(spmi_resource, spmi) { if (!spmi_resource) { pr_err("qpnp_bms: spmi resource absent\n"); return -ENXIO; } resource = spmi_get_resource(spmi, spmi_resource, IORESOURCE_MEM, 0); if (!(resource && resource->start)) { pr_err("node %s IO resource absent!\n", spmi->dev.of_node->full_name); return -ENXIO; } pr_debug("Node name = %s\n", spmi_resource->of_node->name); if (strcmp("qcom,batt-pres-status", spmi_resource->of_node->name) == 0) { chip->batt_pres_addr = resource->start; continue; } else if (strcmp("qcom,soc-storage-reg", spmi_resource->of_node->name) == 0) { chip->soc_storage_addr = resource->start; continue; } rc = qpnp_read_wrapper(chip, &type, resource->start + REG_OFFSET_PERP_TYPE, 1); if (rc) { pr_err("Peripheral type read failed rc=%d\n", rc); return rc; } rc = qpnp_read_wrapper(chip, &subtype, resource->start + REG_OFFSET_PERP_SUBTYPE, 1); if (rc) { pr_err("Peripheral subtype read failed rc=%d\n", rc); return rc; } if (type == BMS_BMS_TYPE && subtype == BMS_BMS1_SUBTYPE) { chip->base = resource->start; rc = bms_find_irqs(chip, spmi_resource); if (rc) { pr_err("Could not find irqs\n"); return rc; } } else if (type == BMS_IADC_TYPE && (subtype == BMS_IADC1_SUBTYPE || subtype == BMS_IADC2_SUBTYPE)) { chip->iadc_base = resource->start; } else { pr_warn("Invalid peripheral start=0x%x type=0x%x, subtype=0x%x\n", resource->start, type, subtype); } } if (chip->base == 0) { dev_err(&spmi->dev, "BMS peripheral was not registered\n"); 
return -EINVAL; } if (chip->iadc_base == 0) { dev_err(&spmi->dev, "BMS_IADC peripheral was not registered\n"); return -EINVAL; } if (chip->soc_storage_addr == 0) { chip->soc_storage_addr = chip->base + SOC_STORAGE_REG; } pr_debug("bms-base = 0x%04x, iadc-base = 0x%04x, bat-pres-reg = 0x%04x, soc-storage-reg = 0x%04x\n", chip->base, chip->iadc_base, chip->batt_pres_addr, chip->soc_storage_addr); return 0; } #define ADC_CH_SEL_MASK 0x7 #define ADC_INT_RSNSN_CTL_MASK 0x3 #define ADC_INT_RSNSN_CTL_VALUE_EXT_RENSE 0x2 #define FAST_AVG_EN_MASK 0x80 #define FAST_AVG_EN_VALUE_EXT_RSENSE 0x80 static int read_iadc_channel_select(struct qpnp_bms_chip *chip) { u8 iadc_channel_select; int32_t rds_rsense_nohm; int rc; rc = qpnp_read_wrapper(chip, &iadc_channel_select, chip->iadc_base + IADC1_BMS_ADC_CH_SEL_CTL, 1); if (rc) { pr_err("Error reading bms_iadc channel register %d\n", rc); return rc; } iadc_channel_select &= ADC_CH_SEL_MASK; if (iadc_channel_select != EXTERNAL_RSENSE && iadc_channel_select != INTERNAL_RSENSE) { pr_err("IADC1_BMS_IADC configured incorrectly. 
Selected channel = %d\n", iadc_channel_select); return -EINVAL; } if (chip->use_external_rsense) { pr_debug("External rsense selected\n"); if (iadc_channel_select == INTERNAL_RSENSE) { pr_info("Internal rsense detected; Changing rsense to external\n"); rc = qpnp_masked_write_iadc(chip, IADC1_BMS_ADC_CH_SEL_CTL, ADC_CH_SEL_MASK, EXTERNAL_RSENSE); if (rc) { pr_err("Unable to set IADC1_BMS channel %x to %x: %d\n", IADC1_BMS_ADC_CH_SEL_CTL, EXTERNAL_RSENSE, rc); return rc; } reset_cc(chip, CLEAR_CC | CLEAR_SHDW_CC); chip->software_cc_uah = 0; chip->software_shdw_cc_uah = 0; } } else { pr_debug("Internal rsense selected\n"); if (iadc_channel_select == EXTERNAL_RSENSE) { pr_info("External rsense detected; Changing rsense to internal\n"); rc = qpnp_masked_write_iadc(chip, IADC1_BMS_ADC_CH_SEL_CTL, ADC_CH_SEL_MASK, INTERNAL_RSENSE); if (rc) { pr_err("Unable to set IADC1_BMS channel %x to %x: %d\n", IADC1_BMS_ADC_CH_SEL_CTL, INTERNAL_RSENSE, rc); return rc; } reset_cc(chip, CLEAR_CC | CLEAR_SHDW_CC); chip->software_shdw_cc_uah = 0; } rc = qpnp_iadc_get_rsense(chip->iadc_dev, &rds_rsense_nohm); if (rc) { pr_err("Unable to read RDS resistance value from IADC; rc = %d\n", rc); return rc; } chip->r_sense_uohm = rds_rsense_nohm/1000; pr_debug("rds_rsense = %d nOhm, saved as %d uOhm\n", rds_rsense_nohm, chip->r_sense_uohm); } if (chip->use_external_rsense) { if (chip->iadc_bms_revision2 > CALIB_WRKARND_DIG_MAJOR_MAX) { rc = qpnp_masked_write_iadc(chip, IADC1_BMS_ADC_INT_RSNSN_CTL, ADC_INT_RSNSN_CTL_MASK, ADC_INT_RSNSN_CTL_VALUE_EXT_RENSE); if (rc) { pr_err("Unable to set batfet config %x to %x: %d\n", IADC1_BMS_ADC_INT_RSNSN_CTL, ADC_INT_RSNSN_CTL_VALUE_EXT_RENSE, rc); return rc; } } else { rc = qpnp_masked_write_iadc(chip, IADC1_BMS_FAST_AVG_EN, FAST_AVG_EN_MASK, FAST_AVG_EN_VALUE_EXT_RSENSE); if (rc) { pr_err("Unable to set batfet config %x to %x: %d\n", IADC1_BMS_FAST_AVG_EN, FAST_AVG_EN_VALUE_EXT_RSENSE, rc); return rc; } } } return 0; } #if !(defined(CONFIG_HTC_BATT_8960)) 
static int refresh_die_temp_monitor(struct qpnp_bms_chip *chip) { struct qpnp_vadc_result result; int rc; rc = qpnp_vadc_read(chip->vadc_dev, DIE_TEMP, &result); pr_debug("low = %lld, high = %lld\n", result.physical - chip->temperature_margin, result.physical + chip->temperature_margin); chip->die_temp_monitor_params.high_temp = result.physical + chip->temperature_margin; chip->die_temp_monitor_params.low_temp = result.physical - chip->temperature_margin; chip->die_temp_monitor_params.state_request = ADC_TM_HIGH_LOW_THR_ENABLE; return qpnp_adc_tm_channel_measure(chip->adc_tm_dev, &chip->die_temp_monitor_params); } static void btm_notify_die_temp(enum qpnp_tm_state state, void *ctx) { struct qpnp_bms_chip *chip = ctx; struct qpnp_vadc_result result; int rc; rc = qpnp_vadc_read(chip->vadc_dev, DIE_TEMP, &result); if (state == ADC_TM_LOW_STATE) pr_debug("low state triggered\n"); else if (state == ADC_TM_HIGH_STATE) pr_debug("high state triggered\n"); pr_debug("die temp = %lld, raw = 0x%x\n", result.physical, result.adc_code); schedule_work(&chip->recalc_work); refresh_die_temp_monitor(chip); } static int setup_die_temp_monitoring(struct qpnp_bms_chip *chip) { int rc; chip->die_temp_monitor_params.channel = DIE_TEMP; chip->die_temp_monitor_params.btm_ctx = (void *)chip; chip->die_temp_monitor_params.timer_interval = ADC_MEAS1_INTERVAL_1S; chip->die_temp_monitor_params.threshold_notification = &btm_notify_die_temp; rc = refresh_die_temp_monitor(chip); if (rc) { pr_err("tm setup failed: %d\n", rc); return rc; } pr_debug("setup complete\n"); return 0; } #endif static int __devinit qpnp_bms_probe(struct spmi_device *spmi) { struct qpnp_bms_chip *chip; bool warm_reset; int rc, vbatt, curr_soc, batt_temp; struct timespec xtime; unsigned long currtime_ms; struct raw_soc_params raw; struct qpnp_vadc_result result; struct soc_params params; chip = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_bms_chip), GFP_KERNEL); if (chip == NULL) { pr_err("kzalloc() failed.\n"); return 
-ENOMEM; } rc = bms_get_adc(chip, spmi); if (rc < 0) goto error_read; mutex_init(&chip->bms_output_lock); mutex_init(&chip->last_ocv_uv_mutex); mutex_init(&chip->vbat_monitor_mutex); mutex_init(&chip->soc_invalidation_mutex); mutex_init(&chip->last_soc_mutex); mutex_init(&chip->status_lock); mutex_init(&ocv_update_lock); init_waitqueue_head(&chip->bms_wait_queue); warm_reset = qpnp_pon_is_warm_reset(); rc = warm_reset; if (rc < 0) goto error_read; rc = register_spmi(chip, spmi); if (rc) { pr_err("error registering spmi resource %d\n", rc); goto error_resource; } rc = qpnp_read_wrapper(chip, &chip->revision1, chip->base + REVISION1, 1); if (rc) { pr_err("error reading version register %d\n", rc); goto error_read; } rc = qpnp_read_wrapper(chip, &chip->revision2, chip->base + REVISION2, 1); if (rc) { pr_err("Error reading version register %d\n", rc); goto error_read; } pr_debug("BMS version: %hhu.%hhu\n", chip->revision2, chip->revision1); rc = qpnp_read_wrapper(chip, &chip->iadc_bms_revision2, chip->iadc_base + REVISION2, 1); if (rc) { pr_err("Error reading version register %d\n", rc); goto error_read; } rc = qpnp_read_wrapper(chip, &chip->iadc_bms_revision1, chip->iadc_base + REVISION1, 1); if (rc) { pr_err("Error reading version register %d\n", rc); goto error_read; } pr_debug("IADC_BMS version: %hhu.%hhu\n", chip->iadc_bms_revision2, chip->iadc_bms_revision1); rc = bms_read_properties(chip); if (rc) { pr_err("Unable to read all bms properties, rc = %d\n", rc); goto error_read; } rc = read_iadc_channel_select(chip); if (rc) { pr_err("Unable to get iadc selected channel = %d\n", rc); goto error_read; } if (chip->use_ocv_thresholds) { rc = set_ocv_voltage_thresholds(chip, chip->ocv_low_threshold_uv, chip->ocv_high_threshold_uv); if (rc) { pr_err("Could not set ocv voltage thresholds: %d\n", rc); goto error_read; } } rc = set_battery_data(chip); if (rc) { pr_err("Bad battery data %d\n", rc); goto error_read; } bms_initialize_constants(chip); 
wakeup_source_init(&chip->soc_wake_source.source, "qpnp_soc_wake"); #if !(defined(CONFIG_HTC_BATT_8960)) wake_lock_init(&chip->low_voltage_wake_lock, WAKE_LOCK_SUSPEND, "qpnp_low_voltage_lock"); wake_lock_init(&chip->cv_wake_lock, WAKE_LOCK_SUSPEND, "qpnp_cv_lock"); #endif INIT_DELAYED_WORK(&chip->calculate_soc_delayed_work, calculate_soc_work); INIT_WORK(&chip->recalc_work, recalculate_work); INIT_WORK(&chip->batfet_open_work, batfet_open_work); dev_set_drvdata(&spmi->dev, chip); device_init_wakeup(&spmi->dev, 1); load_shutdown_data(chip); if (chip->criteria_sw_est_ocv) chip->criteria_sw_est_ocv = FIRST_SW_EST_OCV_THR_MS; if (chip->enable_fcc_learning) { if (chip->battery_removed) { rc = discard_backup_fcc_data(chip); if (rc) pr_err("Could not discard backed-up FCC data\n"); } else { rc = read_chgcycle_data_from_backup(chip); if (rc) pr_err("Unable to restore charge-cycle data\n"); rc = read_fcc_data_from_backup(chip); if (rc) pr_err("Unable to restore FCC-learning data\n"); else attempt_learning_new_fcc(chip); } } #if !(defined(CONFIG_HTC_BATT_8960)) rc = setup_vbat_monitoring(chip); if (rc < 0) { pr_err("failed to set up voltage notifications: %d\n", rc); goto error_setup; } rc = setup_die_temp_monitoring(chip); if (rc < 0) { pr_err("failed to set up die temp notifications: %d\n", rc); goto error_setup; } #endif battery_insertion_check(chip); batfet_status_check(chip); battery_status_check(chip); the_chip = chip; htc_batt_bms_timer.batt_system_jiffies = jiffies; rc = pm8941_bms_start_ocv_updates(); if (rc) { pr_err("failed to enable HW OCV measurement: %d\n", rc); goto error_setup; } curr_soc = pm8941_bms_get_percent_charge(chip); xtime = CURRENT_TIME; currtime_ms = xtime.tv_sec * MSEC_PER_SEC + xtime.tv_nsec / NSEC_PER_MSEC; if (chip->batt_stored_magic_num == BMS_STORE_MAGIC_NUM && chip->store_batt_data_soc_thre > 0 && (curr_soc > chip->batt_stored_soc || chip->batt_stored_soc - curr_soc > 1) && (currtime_ms - chip->batt_stored_update_time) < 3600000 ) { rc = 
bms_read_batt_stored_properties(chip); if (rc) { pr_err("Unable to read all bms properties, rc = %d\n", rc); goto error_read; } rc = qpnp_vadc_read(chip->vadc_dev, LR_MUX1_BATT_THERM, &result); if (rc) { pr_err("error reading vadc LR_MUX1_BATT_THERM = %d, rc = %d\n", LR_MUX1_BATT_THERM, rc); return rc; } batt_temp = (int)result.physical; read_soc_params_raw(chip, &raw, batt_temp); calculate_soc_params(chip, &raw, &params, batt_temp); chip->ocv_backup_uv = chip->last_ocv_uv = chip->batt_stored_ocv_uv; chip->cc_backup_uah = params.cc_uah - chip->batt_stored_cc_uah; new_boot_soc = pm8941_bms_get_percent_charge(chip); disable_ocv_update_with_reason(true, OCV_UPDATE_STOP_BIT_BOOT_UP); allow_ocv_time = currtime_ms + 3600000; consistent_flag = true; } #if !(defined(CONFIG_HTC_BATT_8960)) chip->bms_psy.name = "bms"; chip->bms_psy.type = POWER_SUPPLY_TYPE_BMS; chip->bms_psy.properties = msm_bms_power_props; chip->bms_psy.num_properties = ARRAY_SIZE(msm_bms_power_props); chip->bms_psy.get_property = qpnp_bms_power_get_property; chip->bms_psy.external_power_changed = qpnp_bms_external_power_changed; chip->bms_psy.supplied_to = qpnp_bms_supplicants; chip->bms_psy.num_supplicants = ARRAY_SIZE(qpnp_bms_supplicants); rc = power_supply_register(chip->dev, &chip->bms_psy); if (rc < 0) { pr_err("power_supply_register bms failed rc = %d\n", rc); goto unregister_dc; } chip->bms_psy_registered = true; #endif vbatt = 0; rc = get_battery_voltage(chip, &vbatt); if (rc) { pr_err("error reading vbat_sns adc channel = %d, rc = %d\n", VBAT_SNS, rc); goto unregister_dc; } rc = bms_request_irqs(chip); if (rc) { pr_err("error requesting bms irqs, rc = %d\n", rc); goto unregister_dc; } htc_gauge_event_notify(HTC_GAUGE_EVENT_READY); pm8941_fake_chg_gone_irq_handler(); pm8941_fake_usbin_valid_irq_handler(); pm8941_fake_coarse_det_usb_irq_handler(); pr_info("curr_soc=%d,new_boot_soc:%d,stored_soc:%d,vbatt=%d,OCV=%d,r_sense_uohm=%u," 
"warm_reset=%d,raw.cc:%lld,stored_cc:%d,cc_backup:%d,stored_ocv:%d," "boot_currtime_ms:%lu,allow_ocv_time:%lu,stored_time:%u\n", curr_soc, new_boot_soc, chip->batt_stored_soc, vbatt, chip->last_ocv_uv, chip->r_sense_uohm, warm_reset, raw.cc, chip->batt_stored_cc_uah, chip->cc_backup_uah, chip->batt_stored_ocv_uv, currtime_ms, allow_ocv_time, chip->batt_stored_update_time); return 0; unregister_dc: if (chip->bms_psy_registered) { power_supply_unregister(&chip->bms_psy); chip->bms_psy_registered = false; } error_setup: dev_set_drvdata(&spmi->dev, NULL); wakeup_source_trash(&chip->soc_wake_source.source); #if !(defined(CONFIG_HTC_BATT_8960)) wake_lock_destroy(&chip->low_voltage_wake_lock); wake_lock_destroy(&chip->cv_wake_lock); #endif error_resource: error_read: return rc; } static int qpnp_bms_remove(struct spmi_device *spmi) { dev_set_drvdata(&spmi->dev, NULL); the_chip = NULL; return 0; } static int bms_suspend(struct device *dev) { struct qpnp_bms_chip *chip = dev_get_drvdata(dev); cancel_delayed_work_sync(&chip->calculate_soc_delayed_work); chip->was_charging_at_sleep = is_battery_charging(chip); return 0; } static int bms_resume(struct device *dev) { int rc; int soc_calc_period; int time_until_next_recalc = 0; unsigned long time_since_last_recalc; unsigned long tm_now_sec; struct qpnp_bms_chip *chip = dev_get_drvdata(dev); rc = get_current_time(&tm_now_sec); if (rc) { pr_err("Could not read current time: %d\n", rc); } else { soc_calc_period = get_calculation_delay_ms(chip); time_since_last_recalc = tm_now_sec - chip->last_recalc_time; pr_debug("Time since last recalc: %lu\n", time_since_last_recalc); time_until_next_recalc = max(0, soc_calc_period - (int)(time_since_last_recalc * 1000)); } if (time_until_next_recalc == 0) bms_stay_awake(&chip->soc_wake_source); schedule_delayed_work(&chip->calculate_soc_delayed_work, round_jiffies_relative(msecs_to_jiffies (time_until_next_recalc))); return 0; } static int bms_prepare(struct device *dev) { unsigned long 
time_since_last_update_ms, cur_jiffies; struct timespec xtime; if (the_chip->criteria_sw_est_ocv <= 0) return 0; cur_jiffies = jiffies; time_since_last_update_ms = (cur_jiffies - htc_batt_bms_timer.batt_system_jiffies) * MSEC_PER_SEC / HZ; htc_batt_bms_timer.no_ocv_update_period_ms += time_since_last_update_ms; htc_batt_bms_timer.batt_system_jiffies = cur_jiffies; xtime = CURRENT_TIME; htc_batt_bms_timer.batt_suspend_ms = xtime.tv_sec * MSEC_PER_SEC + xtime.tv_nsec / NSEC_PER_MSEC; return 0; } static void bms_complete(struct device *dev) { struct timespec xtime; unsigned long resume_ms, sr_time_period_ms; if (the_chip->criteria_sw_est_ocv <= 0) return; xtime = CURRENT_TIME; htc_batt_bms_timer.batt_system_jiffies = jiffies; resume_ms = xtime.tv_sec * MSEC_PER_SEC + xtime.tv_nsec / NSEC_PER_MSEC; sr_time_period_ms = resume_ms - htc_batt_bms_timer.batt_suspend_ms; htc_batt_bms_timer.no_ocv_update_period_ms += sr_time_period_ms; if (htc_batt_bms_timer.no_ocv_update_period_ms > the_chip->criteria_sw_est_ocv && batt_level > DISABLE_SW_OCV_LEVEL_THRESHOLD) pm8941_bms_estimate_ocv(); } static const struct dev_pm_ops qpnp_bms_pm_ops = { .prepare = bms_prepare, .complete = bms_complete, .resume = bms_resume, .suspend = bms_suspend, }; static struct spmi_driver qpnp_bms_driver = { .probe = qpnp_bms_probe, .remove = __devexit_p(qpnp_bms_remove), .driver = { .name = QPNP_BMS_DEV_NAME, .owner = THIS_MODULE, .of_match_table = qpnp_bms_match_table, .pm = &qpnp_bms_pm_ops, }, }; static int __init qpnp_bms_init(void) { pr_info("QPNP BMS INIT\n"); flag_enable_bms_charger_log = (get_kernel_flag() & KERNEL_FLAG_ENABLE_BMS_CHARGER_LOG) ? 1 : 0; return spmi_driver_register(&qpnp_bms_driver); } static void __exit qpnp_bms_exit(void) { pr_info("QPNP BMS EXIT\n"); return spmi_driver_unregister(&qpnp_bms_driver); } module_init(qpnp_bms_init); module_exit(qpnp_bms_exit); MODULE_DESCRIPTION("QPNP BMS Driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" QPNP_BMS_DEV_NAME);
jameshilliard/m8-3.4.0-gb1fa77f
drivers/power/qpnp-bms.c
C
gpl-2.0
152,658
<?php /** * Akeeba Engine * The modular PHP5 site backup engine * @copyright Copyright (c)2009-2014 Nicholas K. Dionysopoulos * @license GNU GPL version 3 or, at your option, any later version * @package akeebaengine * */ namespace Akeeba\Engine\Filter; // Protection against direct access defined('AKEEBAENGINE') or die(); use Akeeba\Engine\Factory; /** * System Restore Point - Database Tables */ class SRPData extends Base { private $params = array(); function __construct() { $this->object = 'dbobject'; $this->subtype = 'all'; $this->method = 'api'; $this->filter_name = 'SRPData'; if (Factory::getKettenrad()->getTag() != 'restorepoint') { $this->enabled = false; } else { $this->init(); } } private function init() { // Fetch the configuration $config = Factory::getConfiguration(); $this->params = array( 'name' => $config->get('core.filters.srp.name', 'name'), 'extraprefixes' => $config->get('core.filters.srp.extraprefixes', array()), 'customtables' => $config->get('core.filters.srp.customtables', array()), 'skiptables' => $config->get('core.filters.srp.skiptables', array()) ); } protected function is_excluded_by_api($test, $root) { $barename = (substr($test, 0, 3) == '#__') ? substr($test, 3) : $test; // Is it one of our customtables? if (in_array($barename, $this->params['customtables'])) { return false; } // Does it start with the name prefix? foreach($this->params['name'] as $name) { if (strpos($barename, $name . '_') === 0) { return false; } } // Does it start with any of our extra prefixes? foreach ($this->params['extraprefixes'] as $prefix) { if (substr($prefix, -1) != '_') { $prefix .= '_'; } if (strpos($barename, $prefix) === 0) { return false; } } // Exclude all other tables return true; } }
PAKOTxx/midmu
administrator/components/com_akeeba/platform/joomla25/Filter/SRPData.php
PHP
gpl-2.0
1,958
/* * Copyright (C) 2010-2011 Project SkyFire <http://www.projectskyfire.org/> * Copyright (C) 2008-2011 TrinityCore <http://www.trinitycore.org/> * Copyright (C) 2005-2011 MaNGOS <http://getmangos.com/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "vortex_pinnacle.h" #include "ScriptPCH.h" #define MAX_ENCOUNTER 3 class instance_vortex_pinnacle : public InstanceMapScript { public: instance_vortex_pinnacle() : InstanceMapScript("instance_vortex_pinnacle", 657) { } struct instance_vortex_pinnacle_InstanceMapScript : public InstanceScript { instance_vortex_pinnacle_InstanceMapScript(InstanceMap* map) : InstanceScript(map) { GrandVizierErtan = 0; memset(&Encounter, 0, sizeof(Encounter)); } void OnCreatureCreate(Creature* creature) { switch (creature->GetEntry()) { case BOSS_GRAND_VIZIER_ERTAN: GrandVizierErtan = creature->GetGUID(); break; } } void OnGameObjectCreate(GameObject* go) {} uint64 GetData64(uint32 type) { return 0; } uint32 GetData(uint32 type) { return Encounter[type]; } void SetData(uint32 uiType, uint32 uiData) { Encounter[uiType] = uiData; if (uiData == DONE) SaveToDB(); } std::string GetSaveData() { OUT_SAVE_INST_DATA; std::ostringstream saveStream; saveStream << "H O O " << GetBossSaveData(); OUT_SAVE_INST_DATA_COMPLETE; return saveStream.str(); } void Load(const char* in) { if (!in) { OUT_LOAD_INST_DATA_FAIL; return; } OUT_LOAD_INST_DATA(in); char dataHead1, 
dataHead2, dataHead3; std::istringstream loadStream(in); loadStream >> dataHead1 >> dataHead2 >> dataHead3; /*if (dataHead1 == 'H' && dataHead2 == 'O' && dataHead3 == 'O') { for (uint8 i = 0; i < MAX_ENCOUNTER; ++i) { uint32 tmpState; loadStream >> tmpState; if (tmpState == IN_PROGRESS || tmpState > SPECIAL) tmpState = NOT_STARTED; SetBossState(i, EncounterState(tmpState)); } } else OUT_LOAD_INST_DATA_FAIL;*/ OUT_LOAD_INST_DATA_COMPLETE; } private: uint32 Encounter[MAX_ENCOUNTER]; uint64 GrandVizierErtan; }; InstanceScript* GetInstanceScript(InstanceMap* map) const { return new instance_vortex_pinnacle_InstanceMapScript(map); } }; void AddSC_instance_vortex_pinnacle() { new instance_vortex_pinnacle(); }
sourceleaker/gaycore
src/server/scripts/Kalimdor/VortexPinnacle/instance_vortex_pinnacle.cpp
C++
gpl-2.0
3,828
<?php /** * @copyright Copyright (C) 2009 - 2011 Ready Bytes Software Labs Pvt. Ltd. All rights reserved. * @license http://www.gnu.org/licenses/gpl-2.0.html GNU/GPL * @package PayPlans * @subpackage Frontend * @contact shyam@readybytes.in * website http://www.jpayplans.com * Technical Support : Forum - http://www.jpayplans.com/support/support-forum.html */ if(defined('_JEXEC')===false) die(); class PayplansadminControllerLog extends XiController { protected $_defaultOrderingDirection = 'DESC'; function _remove($itemId=null, $userId=null) { //get the model $model = $this->getModel(); //find the user, if nothing mentioned if($userId == null){ $userId = XiFactory::getUser()->id; } if(!$model->delete($itemId)){ //we need to set error message $this->setError($model->getError()); return false; } return true; } }
Xervmon/b2b
administrator/components/com_payplans/controllers/log.php
PHP
gpl-2.0
866
@charset "utf-8"; /* CSS Document */ #message ul li { font-size: 8pt; }
EasyLovine/ZencTbi
ezl_utile/ezl_genFichierSitemapXML.css
CSS
gpl-2.0
76
<?php /* vim: set expandtab sw=4 ts=4 sts=4: */ /** ** Test for PMA_Util::checkParameters from Util.class.php * * @package PhpMyAdmin-test * @group common.lib-tests */ /* * Include to test. */ require_once 'libraries/core.lib.php'; require_once 'libraries/Util.class.php'; require_once 'libraries/Theme.class.php'; require_once 'libraries/Config.class.php'; require_once 'libraries/select_lang.lib.php'; require_once 'libraries/sanitizing.lib.php'; class PMA_CheckParameters_Test extends PHPUnit_Framework_TestCase { function setup() { $GLOBALS['PMA_Config'] = new PMA_Config(); $_SESSION['PMA_Theme'] = new PMA_Theme(); $GLOBALS['cfg'] = array('ServerDefault' => 1); $GLOBALS['pmaThemeImage'] = 'theme/'; $GLOBALS['text_dir'] = 'ltr'; } function testCheckParameterMissing() { $GLOBALS['PMA_PHP_SELF'] = PMA_getenv('PHP_SELF'); $GLOBALS['pmaThemePath'] = $_SESSION['PMA_Theme']->getPath(); $this->expectOutputRegex("/Missing parameter: field/"); PMA_Util::checkParameters( array('db', 'table', 'field') ); } function testCheckParameter() { $GLOBALS['PMA_PHP_SELF'] = PMA_getenv('PHP_SELF'); $GLOBALS['pmaThemePath'] = $_SESSION['PMA_Theme']->getPath(); $GLOBALS['db'] = "dbDatabase"; $GLOBALS['table'] = "tblTable"; $GLOBALS['field'] = "test_field"; $GLOBALS['sql_query'] = "SELECT * FROM tblTable;"; $this->expectOutputString(""); PMA_Util::checkParameters( array('db', 'table', 'field', 'sql_query') ); } }
raedkleelsame/phpmyadmin
test/libraries/common/PMA_checkParameters_test.php
PHP
gpl-2.0
1,635
using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Xml.Serialization; using System.Runtime.Serialization; namespace IronCow.Rest { [DataContract] public class RawNote : RawRtmElement { [XmlAttribute("created")] [DataMember] public string Created { get; set; } [XmlAttribute("modified")] [DataMember] public string Modified { get; set; } [XmlAttribute("title")] [DataMember] public string Title { get; set; } [XmlText] [DataMember] public string Body { get; set; } } }
mbmccormick/Milkman
IronCow/Rest/RawNote.cs
C#
gpl-2.0
635
<?php defined( 'ABSPATH' ) or die( 'No script kiddies please!' ); /** * UFBL Library Class * Class with all the necessary functions */ if ( !class_exists( 'UFBL_Lib' ) ) { class UFBL_Lib { /** * * @param string $view_file * @returns void */ public static function load_view( $view_file, $variable_array = array() ) { if ( !empty( $variable_array ) && is_array( $variable_array ) ) { /** * Creating a variable for each key */ foreach ( $variable_array as $key => $val ) { $$key = $val; } } if ( file_exists( UFBL_PATH . 'inc/views/' . $view_file . '.php' ) ) { include UFBL_PATH . 'inc/views/' . $view_file . '.php'; } else { echo UFBL_PATH . 'inc/views/' . $view_file . '.php File Not found'; } } /** * * @param string $core_file * @return void */ public static function load_core( $core_file, $variable_array = array() ) { if ( !empty( $variable_array ) && is_array( $variable_array ) ) { /** * Creating a variable for each key */ foreach ( $variable_array as $key => $val ) { $$key = $val; } } if ( file_exists( UFBL_PATH . 'inc/cores/' . $core_file . '.php' ) ) { include UFBL_PATH . 'inc/cores/' . $core_file . '.php'; } else { echo UFBL_PATH . 'inc/cores/' . $core_file . 
'.php File Not Found'; } } /** * * @param array $array * @return void */ public static function print_array( $array ) { echo "<pre>"; print_r( $array ); echo "</pre>"; } /** * Returns Form default values * @return array */ public static function get_default_detail() { $default_detail = array(); $default_detail['field_data'] = array(); $default_detail['form_design'] = array( 'plugin_style' => 1, 'form_width' => '', 'form_template' => 'ufbl-default-template' ); $default_detail['email_settings'] = array( 'email_reciever' => array( get_option( 'admin_email' ) ), 'from_name' => '', 'from_email' => '', 'from_subject' => '' ); return $default_detail; } public static function do_form_process() { $form_data = array(); foreach ( $_POST['form_data'] as $val ) { if ( strpos( $val['name'], '[]' ) !== false ) { $form_data_name = str_replace( '[]', '', $val['name'] ); if ( !isset( $form_data[$form_data_name] ) ) { $form_data[$form_data_name] = array(); } $form_data[$form_data_name][] = $val['value']; } else { $form_data[$val['name']] = $val['value']; } } $form_id = sanitize_text_field( $form_data['form_id'] ); $form_temp_data = $form_data; $form_row = UFBL_Model::get_form_detail( $form_id ); $form_detail = maybe_unserialize( $form_row['form_detail'] ); $field_data = $form_detail['field_data']; //self::print_array( $form_data ); $form_response = array(); $form_response['error_keys'] = array(); $error_flag = 0; $email_reference_array = array(); foreach ( $field_data as $key => $value ) { switch ( $field_data[$key]['field_type'] ) { case 'textfield': $val = isset( $form_data[$key] ) ? sanitize_text_field( $form_data[$key] ) : ''; if ( isset( $field_data[$key]['required'] ) && $field_data[$key]['required'] == 1 && $val == '' ) { $error_message = (isset( $field_data[$key]['error_message'] ) && $field_data[$key]['error_message'] != '') ? 
$field_data[$key]['error_message'] : __( 'This field is required', 'ultimate-form-builder-lite' ); $error_flag = 1; $form_response['error_keys'][$key] = $error_message; } else { if ( $field_data[$key]['max_chars'] != '' && strlen( $val ) > $field_data[$key]['max_chars'] ) { $error_message = (isset( $field_data[$key]['error_message'] ) && $field_data[$key]['error_message'] != '') ? $field_data[$key]['error_message'] : __( 'Characters exceeded.Max characters allowed is :', 'ultimate-form-builder-lite' ) . $field_data[$key]['max_chars']; $error_flag = 1; $form_response['error_keys'][$key] = $error_message; } if ( $field_data[$key]['min_chars'] != '' && strlen( $val ) < $field_data[$key]['min_chars'] ) { $error_message = (isset( $field_data[$key]['error_message'] ) && $field_data[$key]['error_message'] != '') ? $field_data[$key]['error_message'] : __( 'Mininum characters required is :', 'ultimate-form-builder-lite' ) . $field_data[$key]['min_chars']; $error_flag = 1; $form_response['error_keys'][$key] = $error_message; } } if ( $error_flag == 0 ) { $email_reference_array[$key] = array( 'label' => $field_data[$key]['field_label'], 'value' => $val ); } break; case 'textarea': $val = isset( $form_data[$key] ) ? sanitize_text_field( $form_data[$key] ) : ''; if ( isset( $field_data[$key]['required'] ) && $field_data[$key]['required'] == 1 && $val == '' ) { $error_message = (isset( $field_data[$key]['error_message'] ) && $field_data[$key]['error_message'] != '') ? $field_data[$key]['error_message'] : __( 'This field is required', 'ultimate-form-builder-lite' ); $error_flag = 1; $form_response['error_keys'][$key] = $error_message; } else { if ( $field_data[$key]['max_chars'] != '' && strlen( $val ) > $field_data[$key]['max_chars'] ) { $error_message = (isset( $field_data[$key]['error_message'] ) && $field_data[$key]['error_message'] != '') ? $field_data[$key]['error_message'] : __( 'Characters exceeded.Max characters allowed is :', 'ultimate-form-builder-lite' ) . 
$field_data[$key]['max_chars']; $error_flag = 1; $form_response['error_keys'][$key] = $error_message; } if ( $field_data[$key]['min_chars'] != '' && strlen( $val ) < $field_data[$key]['min_chars'] ) { $error_message = (isset( $field_data[$key]['error_message'] ) && $field_data[$key]['error_message'] != '') ? $field_data[$key]['error_message'] : __( 'Mininum characters required is :', 'ultimate-form-builder-lite' ) . $field_data[$key]['min_chars']; $error_flag = 1; $form_response['error_keys'][$key] = $error_message; } } if ( $error_flag == 0 ) { $email_reference_array[$key] = array( 'label' => $field_data[$key]['field_label'], 'value' => $val ); } break; case 'email': $val = isset( $form_data[$key] ) ? sanitize_text_field( $form_data[$key] ) : ''; if ( isset( $field_data[$key]['required'] ) && $field_data[$key]['required'] == 1 && $val == '' ) { $error_message = (isset( $field_data[$key]['error_message'] ) && $field_data[$key]['error_message'] != '') ? $field_data[$key]['error_message'] : __( 'This field is required', 'ultimate-form-builder-lite' ); $error_flag = 1; $form_response['error_keys'][$key] = $error_message; } else { if ( !is_email( $val ) && $val != '' ) { $error_message = (isset( $field_data[$key]['error_message'] ) && $field_data[$key]['error_message'] != '') ? $field_data[$key]['error_message'] : __( 'Please enter the valid email address.', 'ultimate-form-builder-lite' ); $error_flag = 1; $form_response['error_keys'][$key] = $error_message; } } if ( $error_flag == 0 ) { $email_reference_array[$key] = array( 'label' => $field_data[$key]['field_label'], 'value' => $val ); } break; case 'checkbox': if ( isset( $form_data[$key] ) ) { $val = array_map( 'sanitize_text_field', $form_data[$key] ); $val = implode( ',', $val ); } else { $val = ''; } if ( isset( $field_data[$key]['required'] ) && $field_data[$key]['required'] == 1 && $val == '' ) { $error_message = (isset( $field_data[$key]['error_message'] ) && $field_data[$key]['error_message'] != '') ? 
$field_data[$key]['error_message'] : __( 'This field is required', 'ultimate-form-builder-lite' ); $error_flag = 1; $form_response['error_keys'][$key] = $error_message; } if ( $error_flag == 0 ) { $email_reference_array[$key] = array( 'label' => $field_data[$key]['field_label'], 'value' => $val ); } break; case 'radio': $val = isset( $form_data[$key] ) ? sanitize_text_field( $form_data[$key] ) : ''; if ( isset( $field_data[$key]['required'] ) && $field_data[$key]['required'] == 1 && $val == '' ) { $error_message = (isset( $field_data[$key]['error_message'] ) && $field_data[$key]['error_message'] != '') ? $field_data[$key]['error_message'] : __( 'This field is required', 'ultimate-form-builder-lite' ); $error_flag = 1; $form_response['error_keys'][$key] = $error_message; } if ( $error_flag == 0 ) { $email_reference_array[$key] = array( 'label' => $field_data[$key]['field_label'], 'value' => $val ); } if ( $error_flag == 0 ) { $email_reference_array[$key] = array( 'label' => $field_data[$key]['field_label'], 'value' => $val ); } break; case 'dropdown': if ( isset( $form_data[$key] ) ) { if ( is_array( $form_data[$key] ) ) { $val = array_map( 'sanitize_text_field', $form_data[$key] ); $val = implode( ',', $val ); } else { $val = sanitize_text_field( $form_data[$key] ); } } else { $val = ''; } if ( isset( $field_data[$key]['required'] ) && $field_data[$key]['required'] == 1 && $val == '' ) { $error_message = (isset( $field_data[$key]['error_message'] ) && $field_data[$key]['error_message'] != '') ? $field_data[$key]['error_message'] : __( 'This field is required', 'ultimate-form-builder-lite' ); $error_flag = 1; $form_response['error_keys'][$key] = $error_message; } if ( $error_flag == 0 ) { $val = (is_array($val))?implode(',',$val):$val; $email_reference_array[$key] = array( 'label' => $field_data[$key]['field_label'], 'value' => $val ); } break; case 'password': $val = isset( $form_data[$key] ) ? 
$form_data[$key] : ''; if ( isset( $field_data[$key]['required'] ) && $field_data[$key]['required'] == 1 && $val == '' ) { $error_message = (isset( $field_data[$key]['error_message'] ) && $field_data[$key]['error_message'] != '') ? $field_data[$key]['error_message'] : __( 'This field is required', 'ultimate-form-builder-lite' ); $error_flag = 1; $form_response['error_keys'][$key] = $error_message; } else { if ( $field_data[$key]['max_chars'] != '' && strlen( $val ) > $field_data[$key]['max_chars'] ) { $error_message = (isset( $field_data[$key]['error_message'] ) && $field_data[$key]['error_message'] != '') ? $field_data[$key]['error_message'] : __( 'Characters exceeded.Max characters allowed is :', 'ultimate-form-builder-lite' ) . $field_data[$key]['max_chars']; $error_flag = 1; $form_response['error_keys'][$key] = $error_message; } if ( $field_data[$key]['min_chars'] != '' && strlen( $val ) < $field_data[$key]['min_chars'] ) { $error_message = (isset( $field_data[$key]['error_message'] ) && $field_data[$key]['error_message'] != '') ? $field_data[$key]['error_message'] : __( 'Mininum characters required is :', 'ultimate-form-builder-lite' ) . $field_data[$key]['min_chars']; $error_flag = 1; $form_response['error_keys'][$key] = $error_message; } } if ( $error_flag == 0 ) { $email_reference_array[$key] = array( 'label' => $field_data[$key]['field_label'], 'value' => $val ); } break; case 'number': $val = isset( $form_data[$key] ) ? sanitize_text_field( $form_data[$key] ) : ''; if ( isset( $field_data[$key]['required'] ) && $field_data[$key]['required'] == 1 && $val == '' ) { $error_message = (isset( $field_data[$key]['error_message'] ) && $field_data[$key]['error_message'] != '') ? 
$field_data[$key]['error_message'] : __( 'This field is required', 'ultimate-form-builder-lite' ); $error_flag = 1; $form_response['error_keys'][$key] = $error_message; } else { if ( $val!='' && !is_numeric( $val ) ) { $error_message = (isset( $field_data[$key]['error_message'] ) && $field_data[$key]['error_message'] != '') ? $field_data[$key]['error_message'] : __( 'Only numbers allowed.', 'ultimate-form-builder-lite' ); $error_flag = 1; $form_response['error_keys'][$key] = $error_message; } else { if ( $field_data[$key]['max_value'] != '' && intval( $val ) > $field_data[$key]['max_value'] ) { $error_message = (isset( $field_data[$key]['error_message'] ) && $field_data[$key]['error_message'] != '') ? $field_data[$key]['error_message'] : __( 'Characters exceeded.Max characters allowed is :', 'ultimate-form-builder-lite' ) . $field_data[$key]['max_value']; $error_flag = 1; $form_response['error_keys'][$key] = $error_message; } if ( $field_data[$key]['min_value'] != '' && intval( $val ) < $field_data[$key]['min_value'] ) { $error_message = (isset( $field_data[$key]['error_message'] ) && $field_data[$key]['error_message'] != '') ? $field_data[$key]['error_message'] : __( 'Mininum characters required is :', 'ultimate-form-builder-lite' ) . $field_data[$key]['min_value']; $error_flag = 1; $form_response['error_keys'][$key] = $error_message; } } } if ( $error_flag == 0 ) { $email_reference_array[$key] = array( 'label' => $field_data[$key]['field_label'], 'value' => $val ); } break; case 'hidden': $val = isset( $form_data[$key] ) ? sanitize_text_field( $form_data[$key] ) : ''; if ( $error_flag == 0 ) { $email_reference_array[$key] = array( 'label' => $field_data[$key]['field_label'], 'value' => $val ); } break; case 'captcha': if ( $value['captcha_type'] == 'mathematical' ) { $val = isset( $form_data[$key] ) ? sanitize_text_field( $form_data[$key] ) : 0; if ( $val != 0 ) { $num1_key = $key . '_num_1'; $num2_key = $key . 
'_num_2'; $num1 = $form_data[$num1_key]; $num2 = $form_data[$num2_key]; $result = $num1 + $num2; if ( $result != $val ) { $error_message = (isset( $field_data[$key]['error_message'] ) && $field_data[$key]['error_message'] != '') ? $field_data[$key]['error_message'] : __( 'Please enter the correct sum.', 'ultimate-form-builder-lite' ); $error_flag = 1; $form_response['error_keys'][$key] = $error_message; } } else { $error_message = (isset( $field_data[$key]['error_message'] ) && $field_data[$key]['error_message'] != '') ? $field_data[$key]['error_message'] : __( 'Please enter the correct sum.', 'ultimate-form-builder-lite' ); $error_flag = 1; $form_response['error_keys'][$key] = $error_message; } } else { $captcha = sanitize_text_field( $_POST['captchaResponse'] ); // get the captchaResponse parameter sent from our ajax if ( !$captcha ) { $error_message = (isset( $field_data[$key]['error_message'] ) && $field_data[$key]['error_message'] != '') ? $field_data[$key]['error_message'] : __( 'Please check the captcha.', 'ultimate-form-builder-lite' ); $error_flag = 1; $form_response['error_keys'][$key] = $error_message; } else { $secret_key = $value['secret_key']; $response_html = wp_remote_get( "https://www.google.com/recaptcha/api/siteverify?secret=" . $secret_key . "&response=" . $captcha ); //self::print_array($response_html); $response = json_decode( $response_html['body'] ); if ( $response->success == false ) { $error_message = (isset( $field_data[$key]['error_message'] ) && $field_data[$key]['error_message'] != '') ? $field_data[$key]['error_message'] : __( 'Please check the captcha.', 'ultimate-form-builder-lite' ); $error_flag = 1; $form_response['error_keys'][$key] = $error_message; } } } break; default: break; }//switch close }//foreach form data close $form_response['error_flag'] = $error_flag; $form_submission_message = (isset( $form_detail['form_design']['form_submission_message'] ) && $form_detail['form_design']['form_submission_message'] != '') ? 
esc_attr( $form_detail['form_design']['form_submission_message'] ) : __( 'Form submitted successfully.', 'ultimate-form-builder-lite' ); $form_error_message = ( isset( $form_detail['form_design']['form_error_message'] ) && $form_detail['form_design']['form_error_message'] != '') ? esc_attr( $form_detail['form_design']['form_error_message'] ) : __( 'Validation Errors Occured.Please check and submit the form again.', 'ultimate-form-builder-lite' ); $form_response['response_message'] = ($error_flag == 1) ? $form_error_message : $form_submission_message; if ( $error_flag == 0 ) { self::do_email_process( $email_reference_array, $form_row ); UFBL_Model::save_to_db( $form_data ); } echo json_encode( $form_response ); die(); } /** * Do the email sending process after form validation * return void * @param array $form_data */ public static function do_email_process( $email_reference_array = array(), $form_row = array() ) { if ( !empty( $form_row ) && !empty( $email_reference_array ) ) { $form_title = $form_row['form_title']; $form_detail = maybe_unserialize( $form_row['form_detail'] ); $field_data = $form_detail['field_data']; $fields_html = ''; $count = 0; foreach ( $email_reference_array as $key => $val ) { $field_label = ($field_data[$key]['field_label'] != '') ? $field_data[$key]['field_label'] : __( 'Untitled', 'ultimate-form-builder-lite' ) . ' ' . $field_data[$key]['field_type']; $count++; if ( $count % 2 == 0 ) { $fields_html .= '<tr style="background-color:#eee;"><td style="width:150px;border:1px solid #D54E21;" ><strong>' . $field_label . ':</strong></td> <td style="border:1px solid #D54E21;">' . $val['value'] . '</td><tr>'; } else { $fields_html .= '<tr><td style="width:150px;border:1px solid #D54E21;" ><strong>' . $field_label . ':</strong></td> <td style="border:1px solid #D54E21;">' . $val['value'] . 
'</td><tr>'; } } $form_html = '<html> <head><title></title></head> <body> <table style="border:1px solid #D54E21" cellspacing="0" cellpadding="10" align="center" style="width:600px;"> <tr><td colspan="2" style="text-align:center;"><h2>' . $form_title . '</h2></td></tr> ' . $fields_html . '</table></body> </html>'; $site_url = str_replace( 'http://', '', site_url() ); $site_url = str_replace( 'https://', '', $site_url ); $email_subject = ($form_detail['email_settings']['from_subject'] != '') ? esc_attr( $form_detail['email_settings']['from_subject'] ) : __( 'New Form Submission', 'ultimate-form-builder-lite' ); $from_name = ($form_detail['email_settings']['from_name'] != '') ? esc_attr( $form_detail['email_settings']['from_name'] ) : __( 'No Name', 'ultimate-form-builder-lite' ); $from_email = ($form_detail['email_settings']['from_email'] != '') ? esc_attr( $form_detail['email_settings']['from_email'] ) : 'noreply@' . $site_url; $admin_email = get_option( 'admin_email' ); $email_recievers = $form_detail['email_settings']['email_reciever']; $headers = array(); $headers[] = 'Content-Type: text/html; charset=UTF-8'; $headers[] = 'From: ' . $from_name . '<' . $from_email . '>' ; foreach ( $email_recievers as $email_reciever ) { $to_email = ($email_reciever == '') ? $admin_email : esc_attr( $email_reciever ); //$mail = mail( $to_email, $email_subject, $form_html, $headers ); $mail = wp_mail( $to_email,$email_subject, $form_html, $headers ); } } } /** * Function to generate CSV for form entries * @param array $form_data * @param array $entry_rows */ public static function generate_csv( $form_data, $entry_rows ) { //self::print_array( $form_data ); //self::print_array( $entry_rows ); $output = ''; foreach ( $form_data['form_labels'] as $label ) { //$output .=$label . ','; $output .='"' . $label . '",'; } $output .='"' . __( 'Entry Created On', 'ultimate-form-builder-lite' ) . 
'",'; $output .="\n"; foreach ( $entry_rows as $entry_row ) { $entry_detail = maybe_unserialize( $entry_row['entry_detail'] ); foreach ( $form_data['form_keys'] as $form_key ) { if ( isset( $entry_detail[$form_key] ) ) { if ( is_array( $entry_detail[$form_key] ) ) { $entry_value = array_map( 'esc_attr', $entry_detail[$form_key] ); $entry_value = implode( ', ', $entry_value ); } else { $entry_value = esc_attr( $entry_detail[$form_key] ); } } else { $entry_value = ''; } //$output .=$entry_value . ','; $output .='"' . $entry_value . '",'; } $output .='"' . $entry_row['entry_created'] . '",'; $output .="\n"; } $filename = "form-entries.csv"; header( 'Content-type: application/csv' ); header( 'Content-Disposition: attachment; filename=' . $filename ); echo $output; exit; } } //class termination }//class exists check
hle25-micros/misc-my
wp-content/plugins/ultimate-form-builder-lite/classes/ufbl-lib.php
PHP
gpl-2.0
21,413
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * BestFirst.java * Copyright (C) 1999 University of Waikato, Hamilton, New Zealand * */ package weka.attributeSelection; import weka.core.FastVector; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.Range; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.Utils; import java.io.Serializable; import java.util.BitSet; import java.util.Enumeration; import java.util.Hashtable; import java.util.Vector; /** <!-- globalinfo-start --> * BestFirst:<br/> * <br/> * Searches the space of attribute subsets by greedy hillclimbing augmented with a backtracking facility. Setting the number of consecutive non-improving nodes allowed controls the level of backtracking done. Best first may start with the empty set of attributes and search forward, or start with the full set of attributes and search backward, or start at any point and search in both directions (by considering all possible single attribute additions and deletions at a given point).<br/> * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -P &lt;start set&gt; * Specify a starting set of attributes. * Eg. 
1,3,5-7.</pre> * * <pre> -D &lt;0 = backward | 1 = forward | 2 = bi-directional&gt; * Direction of search. (default = 1).</pre> * * <pre> -N &lt;num&gt; * Number of non-improving nodes to * consider before terminating search.</pre> * * <pre> -S &lt;num&gt; * Size of lookup cache for evaluated subsets. * Expressed as a multiple of the number of * attributes in the data set. (default = 1)</pre> * <!-- options-end --> * * @author Mark Hall (mhall@cs.waikato.ac.nz) * Martin Guetlein (cashing merit of expanded nodes) * @version $Revision: 1.29 $ */ public class BestFirst extends ASSearch implements OptionHandler, StartSetHandler { /** for serialization */ static final long serialVersionUID = 7841338689536821867L; // Inner classes /** * Class for a node in a linked list. Used in best first search. * @author Mark Hall (mhall@cs.waikato.ac.nz) **/ public class Link2 implements Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = -8236598311516351420L; /* BitSet group; */ Object [] m_data; double m_merit; /** * Constructor */ public Link2 (Object [] data, double mer) { // group = (BitSet)gr.clone(); m_data = data; m_merit = mer; } /** Get a group */ public Object [] getData () { return m_data; } public String toString () { return ("Node: " + m_data.toString() + " " + m_merit); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.29 $"); } } /** * Class for handling a linked list. Used in best first search. * Extends the Vector class. * @author Mark Hall (mhall@cs.waikato.ac.nz) **/ public class LinkedList2 extends FastVector { /** for serialization */ static final long serialVersionUID = 3250538292330398929L; /** Max number of elements in the list */ int m_MaxSize; // ================ // Public methods // ================ public LinkedList2 (int sz) { super(); m_MaxSize = sz; } /** * removes an element (Link) at a specific index from the list. 
* @param index the index of the element to be removed. **/ public void removeLinkAt (int index) throws Exception { if ((index >= 0) && (index < size())) { removeElementAt(index); } else { throw new Exception("index out of range (removeLinkAt)"); } } /** * returns the element (Link) at a specific index from the list. * @param index the index of the element to be returned. **/ public Link2 getLinkAt (int index) throws Exception { if (size() == 0) { throw new Exception("List is empty (getLinkAt)"); } else {if ((index >= 0) && (index < size())) { return ((Link2)(elementAt(index))); } else { throw new Exception("index out of range (getLinkAt)"); } } } /** * adds an element (Link) to the list. * @param data the attribute set specification * @param mer the "merit" of this attribute set **/ public void addToList (Object [] data, double mer) throws Exception { Link2 newL = new Link2(data, mer); if (size() == 0) { addElement(newL); } else {if (mer > ((Link2)(firstElement())).m_merit) { if (size() == m_MaxSize) { removeLinkAt(m_MaxSize - 1); } //---------- insertElementAt(newL, 0); } else { int i = 0; int size = size(); boolean done = false; //------------ // don't insert if list contains max elements an this // is worst than the last if ((size == m_MaxSize) && (mer <= ((Link2)(lastElement())).m_merit)) { } //--------------- else { while ((!done) && (i < size)) { if (mer > ((Link2)(elementAt(i))).m_merit) { if (size == m_MaxSize) { removeLinkAt(m_MaxSize - 1); } // --------------------- insertElementAt(newL, i); done = true; } else {if (i == size - 1) { addElement(newL); done = true; } else { i++; } } } } } } } /** * Returns the revision string. 
* * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.29 $"); } } // member variables /** maximum number of stale nodes before terminating search */ protected int m_maxStale; /** 0 == backward search, 1 == forward search, 2 == bidirectional */ protected int m_searchDirection; /** search direction: backward */ protected static final int SELECTION_BACKWARD = 0; /** search direction: forward */ protected static final int SELECTION_FORWARD = 1; /** search direction: bidirectional */ protected static final int SELECTION_BIDIRECTIONAL = 2; /** search directions */ public static final Tag [] TAGS_SELECTION = { new Tag(SELECTION_BACKWARD, "Backward"), new Tag(SELECTION_FORWARD, "Forward"), new Tag(SELECTION_BIDIRECTIONAL, "Bi-directional"), }; /** holds an array of starting attributes */ protected int[] m_starting; /** holds the start set for the search as a Range */ protected Range m_startRange; /** does the data have a class */ protected boolean m_hasClass; /** holds the class index */ protected int m_classIndex; /** number of attributes in the data */ protected int m_numAttribs; /** total number of subsets evaluated during a search */ protected int m_totalEvals; /** for debugging */ protected boolean m_debug; /** holds the merit of the best subset found */ protected double m_bestMerit; /** holds the maximum size of the lookup cache for evaluated subsets */ protected int m_cacheSize; /** * Returns a string describing this search method * @return a description of the search method suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "BestFirst:\n\n" +"Searches the space of attribute subsets by greedy hillclimbing " +"augmented with a backtracking facility. Setting the number of " +"consecutive non-improving nodes allowed controls the level of " +"backtracking done. 
Best first may start with the empty set of " +"attributes and search forward, or start with the full set of " +"attributes and search backward, or start at any point and search " +"in both directions (by considering all possible single attribute " +"additions and deletions at a given point).\n"; } /** *Constructor */ public BestFirst () { resetOptions(); } /** * Returns an enumeration describing the available options. * @return an enumeration of all the available options. * **/ public Enumeration listOptions () { Vector newVector = new Vector(4); newVector.addElement(new Option("\tSpecify a starting set of attributes." + "\n\tEg. 1,3,5-7." ,"P",1 , "-P <start set>")); newVector.addElement(new Option("\tDirection of search. (default = 1)." , "D", 1 , "-D <0 = backward | 1 = forward " + "| 2 = bi-directional>")); newVector.addElement(new Option("\tNumber of non-improving nodes to" + "\n\tconsider before terminating search." , "N", 1, "-N <num>")); newVector.addElement(new Option("\tSize of lookup cache for evaluated subsets." +"\n\tExpressed as a multiple of the number of" +"\n\tattributes in the data set. (default = 1)", "S", 1, "-S <num>")); return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -P &lt;start set&gt; * Specify a starting set of attributes. * Eg. 1,3,5-7.</pre> * * <pre> -D &lt;0 = backward | 1 = forward | 2 = bi-directional&gt; * Direction of search. (default = 1).</pre> * * <pre> -N &lt;num&gt; * Number of non-improving nodes to * consider before terminating search.</pre> * * <pre> -S &lt;num&gt; * Size of lookup cache for evaluated subsets. * Expressed as a multiple of the number of * attributes in the data set. 
(default = 1)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported * **/ public void setOptions (String[] options) throws Exception { String optionString; resetOptions(); optionString = Utils.getOption('P', options); if (optionString.length() != 0) { setStartSet(optionString); } optionString = Utils.getOption('D', options); if (optionString.length() != 0) { setDirection(new SelectedTag(Integer.parseInt(optionString), TAGS_SELECTION)); } else { setDirection(new SelectedTag(SELECTION_FORWARD, TAGS_SELECTION)); } optionString = Utils.getOption('N', options); if (optionString.length() != 0) { setSearchTermination(Integer.parseInt(optionString)); } optionString = Utils.getOption('S', options); if (optionString.length() != 0) { setLookupCacheSize(Integer.parseInt(optionString)); } m_debug = Utils.getFlag('Z', options); } /** * Set the maximum size of the evaluated subset cache (hashtable). This is * expressed as a multiplier for the number of attributes in the data set. * (default = 1). * * @param size the maximum size of the hashtable */ public void setLookupCacheSize(int size) { if (size >= 0) { m_cacheSize = size; } } /** * Return the maximum size of the evaluated subset cache (expressed as a multiplier * for the number of attributes in a data set. * * @return the maximum size of the hashtable. */ public int getLookupCacheSize() { return m_cacheSize; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String lookupCacheSizeTipText() { return "Set the maximum size of the lookup cache of evaluated subsets. This is " +"expressed as a multiplier of the number of attributes in the data set. 
" +"(default = 1)."; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String startSetTipText() { return "Set the start point for the search. This is specified as a comma " +"seperated list off attribute indexes starting at 1. It can include " +"ranges. Eg. 1,2,5-9,17."; } /** * Sets a starting set of attributes for the search. It is the * search method's responsibility to report this start set (if any) * in its toString() method. * @param startSet a string containing a list of attributes (and or ranges), * eg. 1,2,6,10-15. * @throws Exception if start set can't be set. */ public void setStartSet (String startSet) throws Exception { m_startRange.setRanges(startSet); } /** * Returns a list of attributes (and or attribute ranges) as a String * @return a list of attributes (and or attribute ranges) */ public String getStartSet () { return m_startRange.getRanges(); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String searchTerminationTipText() { return "Specify the number of consecutive non-improving nodes to allow " + "before terminating the search."; } /** * Set the numnber of non-improving nodes to consider before terminating * search. * * @param t the number of non-improving nodes * @throws Exception if t is less than 1 */ public void setSearchTermination (int t) throws Exception { if (t < 1) { throw new Exception("Value of -N must be > 0."); } m_maxStale = t; } /** * Get the termination criterion (number of non-improving nodes). 
* * @return the number of non-improving nodes */ public int getSearchTermination () { return m_maxStale; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String directionTipText() { return "Set the direction of the search."; } /** * Set the search direction * * @param d the direction of the search */ public void setDirection (SelectedTag d) { if (d.getTags() == TAGS_SELECTION) { m_searchDirection = d.getSelectedTag().getID(); } } /** * Get the search direction * * @return the direction of the search */ public SelectedTag getDirection () { return new SelectedTag(m_searchDirection, TAGS_SELECTION); } /** * Gets the current settings of BestFirst. * @return an array of strings suitable for passing to setOptions() */ public String[] getOptions () { String[] options = new String[6]; int current = 0; if (!(getStartSet().equals(""))) { options[current++] = "-P"; options[current++] = ""+startSetToString(); } options[current++] = "-D"; options[current++] = "" + m_searchDirection; options[current++] = "-N"; options[current++] = "" + m_maxStale; while (current < options.length) { options[current++] = ""; } return options; } /** * converts the array of starting attributes to a string. This is * used by getOptions to return the actual attributes specified * as the starting set. This is better than using m_startRanges.getRanges() * as the same start set can be specified in different ways from the * command line---eg 1,2,3 == 1-3. This is to ensure that stuff that * is stored in a database is comparable. 
* @return a comma seperated list of individual attribute numbers as a String */ private String startSetToString() { StringBuffer FString = new StringBuffer(); boolean didPrint; if (m_starting == null) { return getStartSet(); } for (int i = 0; i < m_starting.length; i++) { didPrint = false; if ((m_hasClass == false) || (m_hasClass == true && i != m_classIndex)) { FString.append((m_starting[i] + 1)); didPrint = true; } if (i == (m_starting.length - 1)) { FString.append(""); } else { if (didPrint) { FString.append(","); } } } return FString.toString(); } /** * returns a description of the search as a String * @return a description of the search */ public String toString () { StringBuffer BfString = new StringBuffer(); BfString.append("\tBest first.\n\tStart set: "); if (m_starting == null) { BfString.append("no attributes\n"); } else { BfString.append(startSetToString()+"\n"); } BfString.append("\tSearch direction: "); if (m_searchDirection == SELECTION_BACKWARD) { BfString.append("backward\n"); } else {if (m_searchDirection == SELECTION_FORWARD) { BfString.append("forward\n"); } else { BfString.append("bi-directional\n"); } } BfString.append("\tStale search after " + m_maxStale + " node expansions\n"); BfString.append("\tTotal number of subsets evaluated: " + m_totalEvals + "\n"); BfString.append("\tMerit of best subset found: " +Utils.doubleToString(Math.abs(m_bestMerit),8,3)+"\n"); return BfString.toString(); } protected void printGroup (BitSet tt, int numAttribs) { int i; for (i = 0; i < numAttribs; i++) { if (tt.get(i) == true) { System.out.print((i + 1) + " "); } } System.out.println(); } /** * Searches the attribute subset space by best first search * * @param ASEval the attribute evaluator to guide the search * @param data the training instances. 
* @return an array (not necessarily ordered) of selected attribute indexes * @throws Exception if the search can't be completed */ public int[] search (ASEvaluation ASEval, Instances data) throws Exception { m_totalEvals = 0; if (!(ASEval instanceof SubsetEvaluator)) { throw new Exception(ASEval.getClass().getName() + " is not a " + "Subset evaluator!"); } if (ASEval instanceof UnsupervisedSubsetEvaluator) { m_hasClass = false; } else { m_hasClass = true; m_classIndex = data.classIndex(); } SubsetEvaluator ASEvaluator = (SubsetEvaluator)ASEval; m_numAttribs = data.numAttributes(); int i, j; int best_size = 0; int size = 0; int done; int sd = m_searchDirection; BitSet best_group, temp_group; int stale; double best_merit; double merit; boolean z; boolean added; Link2 tl; Hashtable lookup = new Hashtable(m_cacheSize * m_numAttribs); int insertCount = 0; int cacheHits = 0; LinkedList2 bfList = new LinkedList2(m_maxStale); best_merit = -Double.MAX_VALUE; stale = 0; best_group = new BitSet(m_numAttribs); m_startRange.setUpper(m_numAttribs-1); if (!(getStartSet().equals(""))) { m_starting = m_startRange.getSelection(); } // If a starting subset has been supplied, then initialise the bitset if (m_starting != null) { for (i = 0; i < m_starting.length; i++) { if ((m_starting[i]) != m_classIndex) { best_group.set(m_starting[i]); } } best_size = m_starting.length; m_totalEvals++; } else { if (m_searchDirection == SELECTION_BACKWARD) { setStartSet("1-last"); m_starting = new int[m_numAttribs]; // init initial subset to all attributes for (i = 0, j = 0; i < m_numAttribs; i++) { if (i != m_classIndex) { best_group.set(i); m_starting[j++] = i; } } best_size = m_numAttribs - 1; m_totalEvals++; } } // evaluate the initial subset best_merit = ASEvaluator.evaluateSubset(best_group); // add the initial group to the list and the hash table Object [] best = new Object[1]; best[0] = best_group.clone(); bfList.addToList(best, best_merit); BitSet tt = (BitSet)best_group.clone(); String 
hashC = tt.toString(); lookup.put(hashC, new Double(best_merit)); while (stale < m_maxStale) { added = false; if (m_searchDirection == SELECTION_BIDIRECTIONAL) { // bi-directional search done = 2; sd = SELECTION_FORWARD; } else { done = 1; } // finished search? if (bfList.size() == 0) { stale = m_maxStale; break; } // copy the attribute set at the head of the list tl = bfList.getLinkAt(0); temp_group = (BitSet)(tl.getData()[0]); temp_group = (BitSet)temp_group.clone(); // remove the head of the list bfList.removeLinkAt(0); // count the number of bits set (attributes) int kk; for (kk = 0, size = 0; kk < m_numAttribs; kk++) { if (temp_group.get(kk)) { size++; } } do { for (i = 0; i < m_numAttribs; i++) { if (sd == SELECTION_FORWARD) { z = ((i != m_classIndex) && (!temp_group.get(i))); } else { z = ((i != m_classIndex) && (temp_group.get(i))); } if (z) { // set the bit (attribute to add/delete) if (sd == SELECTION_FORWARD) { temp_group.set(i); size++; } else { temp_group.clear(i); size--; } /* if this subset has been seen before, then it is already in the list (or has been fully expanded) */ tt = (BitSet)temp_group.clone(); hashC = tt.toString(); if (lookup.containsKey(hashC) == false) { merit = ASEvaluator.evaluateSubset(temp_group); m_totalEvals++; // insert this one in the hashtable if (insertCount > m_cacheSize * m_numAttribs) { lookup = new Hashtable(m_cacheSize * m_numAttribs); insertCount = 0; } hashC = tt.toString(); lookup.put(hashC, new Double(merit)); insertCount++; } else { merit = ((Double)lookup.get(hashC)).doubleValue(); cacheHits++; } // insert this one in the list Object[] add = new Object[1]; add[0] = tt.clone(); bfList.addToList(add, merit); if (m_debug) { System.out.print("Group: "); printGroup(tt, m_numAttribs); System.out.println(Thread.currentThread().getStackTrace()[1].getClassName() +"Merit: " + merit); } // is this better than the best? 
if (sd == SELECTION_FORWARD) { z = ((merit - best_merit) > 0.00001); } else { if (merit == best_merit) { z = (size < best_size); } else { z = (merit > best_merit); } } if (z) { added = true; stale = 0; best_merit = merit; // best_size = (size + best_size); best_size = size; best_group = (BitSet)(temp_group.clone()); } // unset this addition(deletion) if (sd == SELECTION_FORWARD) { temp_group.clear(i); size--; } else { temp_group.set(i); size++; } } } if (done == 2) { sd = SELECTION_BACKWARD; } done--; } while (done > 0); /* if we haven't added a new attribute subset then full expansion of this node hasen't resulted in anything better */ if (!added) { stale++; } } m_bestMerit = best_merit; return attributeList(best_group); } /** * Reset options to default values */ protected void resetOptions () { m_maxStale = 5; m_searchDirection = SELECTION_FORWARD; m_starting = null; m_startRange = new Range(); m_classIndex = -1; m_totalEvals = 0; m_cacheSize = 1; m_debug = false; } /** * converts a BitSet into a list of attribute indexes * @param group the BitSet to convert * @return an array of attribute indexes **/ protected int[] attributeList (BitSet group) { int count = 0; // count how many were selected for (int i = 0; i < m_numAttribs; i++) { if (group.get(i)) { count++; } } int[] list = new int[count]; count = 0; for (int i = 0; i < m_numAttribs; i++) { if (group.get(i)) { list[count++] = i; } } return list; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.29 $"); } }
williamClanton/jbossBA
weka/src/main/java/weka/attributeSelection/BestFirst.java
Java
gpl-2.0
24,824
/** * @version $Id: $Revision * @package mootool * @subpackage lofslidernews * @copyright Copyright (C) JAN 2010 LandOfCoder.com <@emai:landofcoder@gmail.com>. All rights reserved. * @website http://landofcoder.com * @license This plugin is dual-licensed under the GNU General Public License and the MIT License */ Element.Events.extend({ 'wheelup': { type: Element.Events.mousewheel.type, map: function(event){ event = new Event(event); if (event.wheel >= 0) this.fireEvent('wheelup', event) } }, 'wheeldown': { type: Element.Events.mousewheel.type, map: function(event){ event = new Event(event); if (event.wheel <= 0) this.fireEvent('wheeldown', event) } } }); //// if( typeof(LofSlideshow) == 'undefined' ){ var LofSlideshow = new Class( { initialize:function( eMain, eNavigator, eNavOuter, options ){ this.setting = Object.extend({ autoStart : true, descStyle : 'sliding', mainItemSelector : 'div.lof-main-item', navSelector : 'li' , navigatorEvent : 'click', interval : 2000, auto : false, navItemsDisplay : 3, startItem : 0, navItemHeight : 100, navItemWidth : 310 }, options || {} ); this.currentNo = 0; this.nextNo = null; this.previousNo = null; this.fxItems = []; this.minSize = 0; if( $defined(eMain) ){ this.slides = eMain.getElements( this.setting.mainItemSelector ); this.maxWidth = eMain.getStyle('width').toInt(); this.maxHeight = eMain.getStyle('height').toInt(); this.styleMode = this.__getStyleMode(); var fx = Object.extend({waiting:false}, this.setting.fxObject ); this.slides.each( function(item, index) { item.setStyles( eval('({"'+this.styleMode[0]+'": index * this.maxSize,"'+this.styleMode[1]+'":Math.abs(this.maxSize),"display" : "block"})') ); this.fxItems[index] = new Fx.Styles( item, fx ); }.bind(this) ); if( this.styleMode[0] == 'opacity' || this.styleMode[0] =='z-index' ){ this.slides[0].setStyle(this.styleMode[0],'1'); } eMain.addEvents( { 'mouseenter' : this.stop.bind(this), 'mouseleave' :function(e){ if( this.setting.auto ){ this.play( 
this.setting.interval,'next', true )};}.bind(this) } ); } if( $defined(eNavigator) && $defined(eNavOuter) ){ this.navigatorItems = eNavigator.getElements( this.setting.navSelector ); if( this.setting.navItemsDisplay > this.navigatorItems.length ){ this.setting.navItemsDisplay = this.navigatorItems.length; } eNavOuter.setStyles( {'height':this.setting.navItemsDisplay*this.setting.navItemHeight, 'width':this.setting.navItemWidth}); this.navigatorFx = new Fx.Style( eNavigator, 'top', {transition:Fx.Transitions.Quad.easeInOut,duration:800} ); this.registerMousewheelHandler( eNavigator ); // allow to use the srcoll this.navigatorItems.each( function(item,index) { item.addEvent( this.setting.navigatorEvent, function(){ this.jumping( index, true ); this.setNavActive( index, item ); }.bind(this) ); item.setStyles( { 'height':this.setting.navItemHeight, 'width' : this.setting.navItemWidth} ); }.bind(this) ); this.setNavActive( 0 ); } }, navivationAnimate:function( currentIndex ) { if (currentIndex <= this.setting.startItem || currentIndex - this.setting.startItem >= this.setting.navItemsDisplay-1) { this.setting.startItem = currentIndex - this.setting.navItemsDisplay+2; if (this.setting.startItem < 0) this.setting.startItem = 0; if (this.setting.startItem >this.slides.length-this.setting.navItemsDisplay) { this.setting.startItem = this.slides.length-this.setting.navItemsDisplay; } } this.navigatorFx.stop().start( -this.setting.startItem*this.setting.navItemHeight ); }, setNavActive:function( index, item ){ if( $defined(this.navigatorItems) ){ this.navigatorItems.removeClass('active'); this.navigatorItems[index].addClass('active'); this.navivationAnimate( this.currentNo ); } }, __getStyleMode:function(){ switch( this.setting.direction ){ case 'opacity': this.maxSize=0; this.minSize=1; return ['opacity','opacity']; case 'vrup': this.maxSize=this.maxHeight; return ['top','height']; case 'vrdown': this.maxSize=-this.maxHeight; return ['top','height']; case 'hrright': 
this.maxSize=-this.maxWidth; return ['left','width']; case 'hrleft': default: this.maxSize=this.maxWidth; return ['left','width']; } }, registerMousewheelHandler:function( element ){ element.addEvents({ 'wheelup': function(e) { e = new Event(e).stop(); this.previous(true); }.bind(this), 'wheeldown': function(e) { e = new Event(e).stop(); this.next(true); }.bind(this) } ); }, registerButtonsControl:function( eventHandler, objects, isHover ){ if( $defined(objects) && this.slides.length > 1 ){ for( var action in objects ){ if( $defined(this[action.toString()]) && $defined(objects[action]) ){ objects[action].addEvent( eventHandler, this[action.toString()].bind(this, [true]) ); } } } return this; }, start:function( isStart, obj ){ this.setting.auto = isStart; // if use the preload image. if( obj ) { this.preloadImages( this.onComplete(obj) ); } else { if( this.setting.auto && this.slides.length > 1 ){ this.play( this.setting.interval,'next', true );} } }, onComplete:function( obj ){ (function(){ new Fx.Style( obj ,'opacity',{ transition:Fx.Transitions.Quad.easeInOut, duration:800} ).start(1,0)}).delay(600); if( this.setting.auto && this.slides.length > 1 ){ this.play( this.setting.interval,'next', true );} }, preloadImages:function( _options ){ var options=Object.extend({ onComplete:function(){}, onProgress:function(){} },_options||{}); var loaded=[]; var counter=0; var self = this; this.slides.getElements('img').each( function(image, index){ if( !image.complete ){ image.onload =function(){ count++; if( count >= images.length ){ self.onComplete(); } } image.onerror =function(){ count++; if( count >= images.length ){ self.onComplete(); } } }else { count++; if( count >= images.length ){ self.onComplete(); } } } ); }, onProcessing:function( manual, start, end ){ this.previousNo = this.currentNo + (this.currentNo>0 ? -1 : this.slides.length-1); this.nextNo = this.currentNo + (this.currentNo < this.slides.length-1 ? 
1 : 1- this.slides.length); return this; }, finishFx:function( manual ){ if( manual ) this.stop(); this.setNavActive( this.currentNo ); if( manual && this.setting.auto ){ this.play( this.setting.interval,'next', true ); } }, getObjectDirection:function( start, end ){ return eval("({'"+this.styleMode[0]+"':["+start+", "+end+"]})"); }, fxStart:function( index, obj ){ this.fxItems[index].stop().start( obj ); return this; }, jumping:function( no, manual ){ this.stop(); if( this.currentNo == no ) return; this.onProcessing( null, manual, 0, this.maxSize ) .fxStart( no, this.getObjectDirection(this.maxSize , this.minSize) ) .fxStart( this.currentNo, this.getObjectDirection(this.minSize, -this.maxSize) ) .finishFx( manual ); this.currentNo = no; }, next:function( manual , item){ this.currentNo += (this.currentNo < this.slides.length-1) ? 1 : (1 - this.slides.length); this.onProcessing( item, manual, 0, this.maxSize ) .fxStart( this.currentNo, this.getObjectDirection(this.maxSize ,this.minSize) ) .fxStart( this.previousNo, this.getObjectDirection(this.minSize, -this.maxSize) ) .finishFx( manual ); }, previous:function( manual, item ){ this.currentNo += this.currentNo > 0 ? -1 : this.slides.length - 1; this.onProcessing( item, manual, -this.maxWidth, this.minSize ) .fxStart( this.nextNo, this.getObjectDirection(this.minSize, this.maxSize) ) .fxStart( this.currentNo, this.getObjectDirection(-this.maxSize, this.minSize) ) .finishFx( manual ); }, play:function( delay, direction, wait ){ this.stop(); if(!wait){ this[direction](false); } this.isRun = this[direction].periodical(delay,this,true); },stop:function(){; $clear(this.isRun ); } } ); }
vuchannguyen/dayhoc
modules/mod_lofk2news/assets/js.js
JavaScript
gpl-2.0
8,624
/* * Copyright (c) 2012-2013 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #ifndef _LINUX_IOPOLL_H #define _LINUX_IOPOLL_H #include <linux/kernel.h> #include <linux/types.h> #include <linux/jiffies.h> #include <linux/delay.h> #include <asm-generic/errno.h> #include <asm/io.h> /** * readl_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs * @addr: Address to poll * @val: Variable to read the value into * @cond: Break condition (usually involving @val) * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops) * @timeout_us: Timeout in us, 0 means never timeout * * Returns 0 on success and -ETIMEDOUT upon a timeout. In either * case, the last read value at @addr is stored in @val. Must not * be called from atomic context if sleep_us or timeout_us are used. */ #define readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) \ ({ \ unsigned long timeout = jiffies + usecs_to_jiffies(timeout_us); \ might_sleep_if(timeout_us); \ for (;;) { \ (val) = readl(addr); \ if (cond) \ break; \ if (timeout_us && time_after(jiffies, timeout)) { \ (val) = readl(addr); \ break; \ } \ if (sleep_us) \ usleep_range(DIV_ROUND_UP(sleep_us, 4), sleep_us); \ } \ (cond) ? 
0 : -ETIMEDOUT; \ }) /** * readl_poll_timeout_noirq - Periodically poll an address until a condition is met or a timeout occurs * @addr: Address to poll * @val: Variable to read the value into * @cond: Break condition (usually involving @val) * @max_reads: Maximum number of reads before giving up * @time_between_us: Time to udelay() between successive reads * * Returns 0 on success and -ETIMEDOUT upon a timeout. */ #define readl_poll_timeout_noirq(addr, val, cond, max_reads, time_between_us) \ ({ \ int count; \ for (count = (max_reads); count > 0; count--) { \ (val) = readl(addr); \ if (cond) \ break; \ udelay(time_between_us); \ } \ (cond) ? 0 : -ETIMEDOUT; \ }) /** * readl_poll - Periodically poll an address until a condition is met * @addr: Address to poll * @val: Variable to read the value into * @cond: Break condition (usually involving @val) * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops) * * Must not be called from atomic context if sleep_us is used. */ #define readl_poll(addr, val, cond, sleep_us) \ readl_poll_timeout(addr, val, cond, sleep_us, 0) /** * readl_tight_poll_timeout - Tight-loop on an address until a condition is met or a timeout occurs * @addr: Address to poll * @val: Variable to read the value into * @cond: Break condition (usually involving @val) * @timeout_us: Timeout in us, 0 means never timeout * * Returns 0 on success and -ETIMEDOUT upon a timeout. In either * case, the last read value at @addr is stored in @val. Must not * be called from atomic context if timeout_us is used. */ #define readl_tight_poll_timeout(addr, val, cond, timeout_us) \ readl_poll_timeout(addr, val, cond, 0, timeout_us) /** * readl_tight_poll - Tight-loop on an address until a condition is met * @addr: Address to poll * @val: Variable to read the value into * @cond: Break condition (usually involving @val) * * May be called from atomic context. 
*/ #define readl_tight_poll(addr, val, cond) \ readl_poll_timeout(addr, val, cond, 0, 0) #endif /* _LINUX_IOPOLL_H */
defconoi/Unleashed-N5
include/linux/iopoll.h
C
gpl-2.0
3,789
// Copyright (c) 2013- PPSSPP Project. // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, version 2.0 or later versions. // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License 2.0 for more details. // A copy of the GPL 2.0 should have been included with the program. // If not, see http://www.gnu.org/licenses/ // Official git repository and contact information can be found at // https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/. #include "math/math_util.h" #include "gfx_es2/gpu_features.h" #include "Core/Config.h" #include "GPU/GPUState.h" #include "GPU/Math3D.h" #include "GPU/Common/VertexDecoderCommon.h" #include "GPU/Common/TransformCommon.h" #include "GPU/Common/FramebufferCommon.h" #include "GPU/Common/TextureCacheCommon.h" #include "GPU/Common/SoftwareTransformCommon.h" // This is the software transform pipeline, which is necessary for supporting RECT // primitives correctly without geometry shaders, and may be easier to use for // debugging than the hardware transform pipeline. // There's code here that simply expands transformed RECTANGLES into plain triangles. // We're gonna have to keep software transforming RECTANGLES, unless we use a geom shader which we can't on OpenGL ES 2.0 or DX9. // Usually, though, these primitives don't use lighting etc so it's no biggie performance wise, but it would be nice to get rid of // this code. // Actually, if we find the camera-relative right and down vectors, it might even be possible to add the extra points in pre-transformed // space and thus make decent use of hardware transform. 
// Actually again, single quads could be drawn more efficiently using GL_TRIANGLE_STRIP, no need to duplicate verts as for // GL_TRIANGLES. Still need to sw transform to compute the extra two corners though. // // The verts are in the order: BR BL TL TR static void SwapUVs(TransformedVertex &a, TransformedVertex &b) { float tempu = a.u; float tempv = a.v; a.u = b.u; a.v = b.v; b.u = tempu; b.v = tempv; } // 2 3 3 2 0 3 2 1 // to to or // 1 0 0 1 1 2 3 0 // See comment below where this was called before. /* static void RotateUV(TransformedVertex v[4]) { float x1 = v[2].x; float x2 = v[0].x; float y1 = v[2].y; float y2 = v[0].y; if ((x1 < x2 && y1 < y2) || (x1 > x2 && y1 > y2)) SwapUVs(v[1], v[3]); }*/ static void RotateUVThrough(TransformedVertex v[4]) { float x1 = v[2].x; float x2 = v[0].x; float y1 = v[2].y; float y2 = v[0].y; if ((x1 < x2 && y1 > y2) || (x1 > x2 && y1 < y2)) SwapUVs(v[1], v[3]); } // Clears on the PSP are best done by drawing a series of vertical strips // in clear mode. This tries to detect that. static bool IsReallyAClear(const TransformedVertex *transformed, int numVerts) { if (transformed[0].x != 0.0f || transformed[0].y != 0.0f) return false; u32 matchcolor = transformed[0].color0_32; float matchz = transformed[0].z; int bufW = gstate_c.curRTWidth; int bufH = gstate_c.curRTHeight; float prevX = 0.0f; for (int i = 1; i < numVerts; i++) { if (transformed[i].color0_32 != matchcolor || transformed[i].z != matchz) return false; if ((i & 1) == 0) { // Top left of a rectangle if (transformed[i].y != 0) return false; if (i > 0 && transformed[i].x != transformed[i - 1].x) return false; } else { // Bottom right if (transformed[i].y != bufH) return false; if (transformed[i].x <= transformed[i - 1].x) return false; } } // The last vertical strip often extends outside the drawing area. 
if (transformed[numVerts - 1].x < bufW) return false; return true; } void SoftwareTransform( int prim, u8 *decoded, int vertexCount, u32 vertType, u16 *&inds, int indexType, const DecVtxFormat &decVtxFormat, int &maxIndex, FramebufferManagerCommon *fbman, TextureCacheCommon *texCache, TransformedVertex *transformed, TransformedVertex *transformedExpanded, TransformedVertex *&drawBuffer, int &numTrans, bool &drawIndexed, SoftwareTransformResult *result) { bool throughmode = (vertType & GE_VTYPE_THROUGH_MASK) != 0; bool lmode = gstate.isUsingSecondaryColor() && gstate.isLightingEnabled(); // TODO: Split up into multiple draw calls for GLES 2.0 where you can't guarantee support for more than 0x10000 verts. #if defined(MOBILE_DEVICE) if (vertexCount > 0x10000/3) vertexCount = 0x10000/3; #endif float uscale = 1.0f; float vscale = 1.0f; bool scaleUV = false; if (throughmode) { uscale /= gstate_c.curTextureWidth; vscale /= gstate_c.curTextureHeight; } else { scaleUV = !g_Config.bPrescaleUV; } bool skinningEnabled = vertTypeIsSkinningEnabled(vertType); const int w = gstate.getTextureWidth(0); const int h = gstate.getTextureHeight(0); float widthFactor = (float) w / (float) gstate_c.curTextureWidth; float heightFactor = (float) h / (float) gstate_c.curTextureHeight; Lighter lighter(vertType); float fog_end = getFloat24(gstate.fog1); float fog_slope = getFloat24(gstate.fog2); // Same fixup as in ShaderManager.cpp if (my_isinf(fog_slope)) { // not really sure what a sensible value might be. fog_slope = fog_slope < 0.0f ? -10000.0f : 10000.0f; } if (my_isnan(fog_slope)) { // Workaround for https://github.com/hrydgard/ppsspp/issues/5384#issuecomment-38365988 // Just put the fog far away at a large finite distance. // Infinities and NaNs are rather unpredictable in shaders on many GPUs // so it's best to just make it a sane calculation. 
fog_end = 100000.0f; fog_slope = 1.0f; } VertexReader reader(decoded, decVtxFormat, vertType); // We flip in the fragment shader for GE_TEXMAP_TEXTURE_MATRIX. const bool flipV = gstate_c.flipTexture && gstate.getUVGenMode() != GE_TEXMAP_TEXTURE_MATRIX; for (int index = 0; index < maxIndex; index++) { reader.Goto(index); float v[3] = {0, 0, 0}; Vec4f c0 = Vec4f(1, 1, 1, 1); Vec4f c1 = Vec4f(0, 0, 0, 0); float uv[3] = {0, 0, 1}; float fogCoef = 1.0f; if (throughmode) { // Do not touch the coordinates or the colors. No lighting. reader.ReadPos(v); if (reader.hasColor0()) { reader.ReadColor0(&c0.x); // c1 is already 0. } else { c0 = Vec4f::FromRGBA(gstate.getMaterialAmbientRGBA()); } if (reader.hasUV()) { reader.ReadUV(uv); uv[0] *= uscale; uv[1] *= vscale; } fogCoef = 1.0f; // Scale UV? } else { // We do software T&L for now float out[3]; float pos[3]; Vec3f normal(0, 0, 1); Vec3f worldnormal(0, 0, 1); reader.ReadPos(pos); if (!skinningEnabled) { Vec3ByMatrix43(out, pos, gstate.worldMatrix); if (reader.hasNormal()) { reader.ReadNrm(normal.AsArray()); if (gstate.areNormalsReversed()) { normal = -normal; } Norm3ByMatrix43(worldnormal.AsArray(), normal.AsArray(), gstate.worldMatrix); worldnormal = worldnormal.Normalized(); } } else { float weights[8]; reader.ReadWeights(weights); if (reader.hasNormal()) reader.ReadNrm(normal.AsArray()); // Skinning Vec3f psum(0, 0, 0); Vec3f nsum(0, 0, 0); for (int i = 0; i < vertTypeGetNumBoneWeights(vertType); i++) { if (weights[i] != 0.0f) { Vec3ByMatrix43(out, pos, gstate.boneMatrix+i*12); Vec3f tpos(out); psum += tpos * weights[i]; if (reader.hasNormal()) { Vec3f norm; Norm3ByMatrix43(norm.AsArray(), normal.AsArray(), gstate.boneMatrix+i*12); nsum += norm * weights[i]; } } } // Yes, we really must multiply by the world matrix too. 
Vec3ByMatrix43(out, psum.AsArray(), gstate.worldMatrix); if (reader.hasNormal()) { normal = nsum; if (gstate.areNormalsReversed()) { normal = -normal; } Norm3ByMatrix43(worldnormal.AsArray(), normal.AsArray(), gstate.worldMatrix); worldnormal = worldnormal.Normalized(); } } // Perform lighting here if enabled. don't need to check through, it's checked above. Vec4f unlitColor = Vec4f(1, 1, 1, 1); if (reader.hasColor0()) { reader.ReadColor0(&unlitColor.x); } else { unlitColor = Vec4f::FromRGBA(gstate.getMaterialAmbientRGBA()); } if (gstate.isLightingEnabled()) { float litColor0[4]; float litColor1[4]; lighter.Light(litColor0, litColor1, unlitColor.AsArray(), out, worldnormal); // Don't ignore gstate.lmode - we should send two colors in that case for (int j = 0; j < 4; j++) { c0[j] = litColor0[j]; } if (lmode) { // Separate colors for (int j = 0; j < 4; j++) { c1[j] = litColor1[j]; } } else { // Summed color into c0 (will clamp in ToRGBA().) for (int j = 0; j < 4; j++) { c0[j] += litColor1[j]; } } } else { if (reader.hasColor0()) { for (int j = 0; j < 4; j++) { c0[j] = unlitColor[j]; } } else { c0 = Vec4f::FromRGBA(gstate.getMaterialAmbientRGBA()); } if (lmode) { // c1 is already 0. } } float ruv[2] = {0.0f, 0.0f}; if (reader.hasUV()) reader.ReadUV(ruv); // Perform texture coordinate generation after the transform and lighting - one style of UV depends on lights. switch (gstate.getUVGenMode()) { case GE_TEXMAP_TEXTURE_COORDS: // UV mapping case GE_TEXMAP_UNKNOWN: // Seen in Riviera. Unsure of meaning, but this works. // Texture scale/offset is only performed in this mode. 
if (scaleUV) { uv[0] = ruv[0]*gstate_c.uv.uScale + gstate_c.uv.uOff; uv[1] = ruv[1]*gstate_c.uv.vScale + gstate_c.uv.vOff; } else { uv[0] = ruv[0]; uv[1] = ruv[1]; } uv[2] = 1.0f; break; case GE_TEXMAP_TEXTURE_MATRIX: { // Projection mapping Vec3f source; switch (gstate.getUVProjMode()) { case GE_PROJMAP_POSITION: // Use model space XYZ as source source = pos; break; case GE_PROJMAP_UV: // Use unscaled UV as source source = Vec3f(ruv[0], ruv[1], 0.0f); break; case GE_PROJMAP_NORMALIZED_NORMAL: // Use normalized normal as source source = normal.Normalized(); if (!reader.hasNormal()) { ERROR_LOG_REPORT(G3D, "Normal projection mapping without normal?"); } break; case GE_PROJMAP_NORMAL: // Use non-normalized normal as source! source = normal; if (!reader.hasNormal()) { ERROR_LOG_REPORT(G3D, "Normal projection mapping without normal?"); } break; } float uvw[3]; Vec3ByMatrix43(uvw, &source.x, gstate.tgenMatrix); uv[0] = uvw[0]; uv[1] = uvw[1]; uv[2] = uvw[2]; } break; case GE_TEXMAP_ENVIRONMENT_MAP: // Shade mapping - use two light sources to generate U and V. { Vec3f lightpos0 = Vec3f(&lighter.lpos[gstate.getUVLS0() * 3]).Normalized(); Vec3f lightpos1 = Vec3f(&lighter.lpos[gstate.getUVLS1() * 3]).Normalized(); uv[0] = (1.0f + Dot(lightpos0, worldnormal))/2.0f; uv[1] = (1.0f + Dot(lightpos1, worldnormal))/2.0f; uv[2] = 1.0f; } break; default: // Illegal ERROR_LOG_REPORT(G3D, "Impossible UV gen mode? %d", gstate.getUVGenMode()); break; } uv[0] = uv[0] * widthFactor; uv[1] = uv[1] * heightFactor; // Transform the coord by the view matrix. Vec3ByMatrix43(v, out, gstate.viewMatrix); fogCoef = (v[2] + fog_end) * fog_slope; } // TODO: Write to a flexible buffer, we don't always need all four components. 
memcpy(&transformed[index].x, v, 3 * sizeof(float)); transformed[index].fog = fogCoef; memcpy(&transformed[index].u, uv, 3 * sizeof(float)); if (flipV) { transformed[index].v = 1.0f - transformed[index].v; } transformed[index].color0_32 = c0.ToRGBA(); transformed[index].color1_32 = c1.ToRGBA(); } // Here's the best opportunity to try to detect rectangles used to clear the screen, and // replace them with real clears. This can provide a speedup on certain mobile chips. // // An alternative option is to simply ditch all the verts except the first and last to create a single // rectangle out of many. Quite a small optimization though. // Experiment: Disable on PowerVR (see issue #6290) // TODO: This bleeds outside the play area in non-buffered mode. Big deal? Probably not. if (maxIndex > 1 && gstate.isModeClear() && prim == GE_PRIM_RECTANGLES && IsReallyAClear(transformed, maxIndex) && gl_extensions.gpuVendor != GPU_VENDOR_POWERVR) { // && g_Config.iRenderingMode != FB_NON_BUFFERED_MODE) { result->color = transformed[0].color0_32; result->depth = transformed[0].z; result->action = SW_CLEAR; return; } // This means we're using a framebuffer (and one that isn't big enough.) if (gstate_c.curTextureHeight < (u32)h && maxIndex >= 2) { // Even if not rectangles, this will detect if either of the first two are outside the framebuffer. // HACK: Adding one pixel margin to this detection fixes issues in Assassin's Creed : Bloodlines, // while still keeping BOF working (see below). const float invTexH = 1.0f / gstate_c.curTextureHeight; // size of one texel. bool tlOutside; bool tlAlmostOutside; bool brOutside; if (gstate_c.flipTexture) { // This is flipped for OpenGL, but the same logic as unflipped, so look there. 
tlOutside = transformed[0].v < -invTexH && transformed[0].v >= 1.0f - heightFactor; brOutside = transformed[1].v < -invTexH && transformed[1].v >= 1.0f - heightFactor; tlAlmostOutside = transformed[0].v <= 0.5f && transformed[0].v >= 1.0f - heightFactor; } else { // If we're outside heightFactor, then v must be wrapping or clamping. Avoid this workaround. // If we're <= 1.0f, we're inside the framebuffer (workaround not needed.) // We buffer that 1.0f a little more with a texel to avoid some false positives. tlOutside = transformed[0].v <= heightFactor && transformed[0].v > 1.0f + invTexH; brOutside = transformed[1].v <= heightFactor && transformed[1].v > 1.0f + invTexH; // Careful: if br is outside, but tl is well inside, this workaround still doesn't make sense. // We go with halfway, since we overestimate framebuffer heights sometimes but not by much. tlAlmostOutside = transformed[0].v <= heightFactor && transformed[0].v >= 0.5f; } if (tlOutside || (brOutside && tlAlmostOutside)) { // Okay, so we're texturing from outside the framebuffer, but inside the texture height. // Breath of Fire 3 does this to access a render surface at an offset. const u32 bpp = fbman->GetTargetFormat() == GE_FORMAT_8888 ? 4 : 2; const u32 fb_size = bpp * fbman->GetTargetStride() * gstate_c.curTextureHeight; const u32 prevH = gstate_c.curTextureHeight; const u32 prevYOffset = gstate_c.curTextureYOffset; if (texCache->SetOffsetTexture(fb_size)) { const float oldWidthFactor = widthFactor; const float oldHeightFactor = heightFactor; widthFactor = (float) w / (float) gstate_c.curTextureWidth; heightFactor = (float) h / (float) gstate_c.curTextureHeight; // We've already baked in the old gstate_c.curTextureYOffset, so correct. 
const float yDiff = (float) (prevH + prevYOffset - gstate_c.curTextureYOffset) / (float) h; for (int index = 0; index < maxIndex; ++index) { transformed[index].u *= widthFactor / oldWidthFactor; // Inverse it back to scale to the new FBO, and add 1.0f to account for old FBO. if (gstate_c.flipTexture) { transformed[index].v = (1.0f - transformed[index].v) / oldHeightFactor; transformed[index].v -= yDiff; transformed[index].v = 1.0f - (transformed[index].v * heightFactor); } else { transformed[index].v = (transformed[index].v / oldHeightFactor - yDiff) * heightFactor; } } } } } // Step 2: expand rectangles. drawBuffer = transformed; numTrans = 0; drawIndexed = false; if (prim != GE_PRIM_RECTANGLES) { // We can simply draw the unexpanded buffer. numTrans = vertexCount; drawIndexed = true; } else { //rectangles always need 2 vertices, disregard the last one if there's an odd number vertexCount = vertexCount & ~1; numTrans = 0; drawBuffer = transformedExpanded; TransformedVertex *trans = &transformedExpanded[0]; const u16 *indsIn = (const u16 *)inds; u16 *newInds = inds + vertexCount; u16 *indsOut = newInds; maxIndex = 4 * vertexCount; for (int i = 0; i < vertexCount; i += 2) { const TransformedVertex &transVtxTL = transformed[indsIn[i + 0]]; const TransformedVertex &transVtxBR = transformed[indsIn[i + 1]]; // We have to turn the rectangle into two triangles, so 6 points. // This is 4 verts + 6 indices. // bottom right trans[0] = transVtxBR; // top right trans[1] = transVtxBR; trans[1].y = transVtxTL.y; trans[1].v = transVtxTL.v; // top left trans[2] = transVtxBR; trans[2].x = transVtxTL.x; trans[2].y = transVtxTL.y; trans[2].u = transVtxTL.u; trans[2].v = transVtxTL.v; // bottom left trans[3] = transVtxBR; trans[3].x = transVtxTL.x; trans[3].u = transVtxTL.u; // That's the four corners. Now process UV rotation. if (throughmode) RotateUVThrough(trans); // Apparently, non-through RotateUV just breaks things. 
// If we find a game where it helps, we'll just have to figure out how they differ. // Possibly, it has something to do with flipped viewport Y axis, which a few games use. // One game might be one of the Metal Gear ones, can't find the issue right now though. // else // RotateUV(trans); // Triangle: BR-TR-TL indsOut[0] = i * 2 + 0; indsOut[1] = i * 2 + 1; indsOut[2] = i * 2 + 2; // Triangle: BL-BR-TL indsOut[3] = i * 2 + 3; indsOut[4] = i * 2 + 0; indsOut[5] = i * 2 + 2; trans += 4; indsOut += 6; numTrans += 6; } inds = newInds; drawIndexed = true; // We don't know the color until here, so we have to do it now, instead of in StateMapping. // Might want to reconsider the order of things later... if (gstate.isModeClear() && gstate.isClearModeAlphaMask()) { result->setStencil = true; if (vertexCount > 1) { // Take the bottom right alpha value of the first rect as the stencil value. // Technically, each rect should individually fill its stencil, but most of the // time they use the same one. result->stencilValue = transformed[indsIn[1]].color0[3]; } else { result->stencilValue = 0; } } } result->action = SW_DRAW_PRIMITIVES; }
GamerzHell9137/ppsspp
GPU/Common/SoftwareTransformCommon.cpp
C++
gpl-2.0
18,655
/* * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Function naming determines intended use: * * <x>_r(void) : Returns the offset for register <x>. * * <x>_o(void) : Returns the offset for element <x>. * * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. * * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. * * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted * and masked to place it at field <y> of register <x>. This value * can be |'d with others to produce a full register value for * register <x>. * * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This * value can be ~'d and then &'d to clear the value of field <y> for * register <x>. * * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted * to place it at field <y> of register <x>. This value can be |'d * with others to produce a full register value for <x>. * * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register * <x> value 'r' after being shifted to place its LSB at bit 0. * This value is suitable for direct comparison with other unshifted * values appropriate for use in field <y> of register <x>. * * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for * field <y> of register <x>. 
This value is suitable for direct * comparison with unshifted values appropriate for use in field <y> * of register <x>. */ #ifndef _hw_gmmu_gm20b_h_ #define _hw_gmmu_gm20b_h_ static inline u32 gmmu_pde_aperture_big_w(void) { return 0; } static inline u32 gmmu_pde_aperture_big_invalid_f(void) { return 0x0; } static inline u32 gmmu_pde_aperture_big_video_memory_f(void) { return 0x1; } static inline u32 gmmu_pde_size_w(void) { return 0; } static inline u32 gmmu_pde_size_full_f(void) { return 0x0; } static inline u32 gmmu_pde_address_big_sys_f(u32 v) { return (v & 0xfffffff) << 4; } static inline u32 gmmu_pde_address_big_sys_w(void) { return 0; } static inline u32 gmmu_pde_aperture_small_w(void) { return 1; } static inline u32 gmmu_pde_aperture_small_invalid_f(void) { return 0x0; } static inline u32 gmmu_pde_aperture_small_video_memory_f(void) { return 0x1; } static inline u32 gmmu_pde_vol_small_w(void) { return 1; } static inline u32 gmmu_pde_vol_small_true_f(void) { return 0x4; } static inline u32 gmmu_pde_vol_small_false_f(void) { return 0x0; } static inline u32 gmmu_pde_vol_big_w(void) { return 1; } static inline u32 gmmu_pde_vol_big_true_f(void) { return 0x8; } static inline u32 gmmu_pde_vol_big_false_f(void) { return 0x0; } static inline u32 gmmu_pde_address_small_sys_f(u32 v) { return (v & 0xfffffff) << 4; } static inline u32 gmmu_pde_address_small_sys_w(void) { return 1; } static inline u32 gmmu_pde_address_shift_v(void) { return 0x0000000c; } static inline u32 gmmu_pde__size_v(void) { return 0x00000008; } static inline u32 gmmu_pte__size_v(void) { return 0x00000008; } static inline u32 gmmu_pte_valid_w(void) { return 0; } static inline u32 gmmu_pte_valid_true_f(void) { return 0x1; } static inline u32 gmmu_pte_valid_false_f(void) { return 0x0; } static inline u32 gmmu_pte_privilege_w(void) { return 0; } static inline u32 gmmu_pte_privilege_true_f(void) { return 0x2; } static inline u32 gmmu_pte_privilege_false_f(void) { return 0x0; } static inline u32 
gmmu_pte_address_sys_f(u32 v) { return (v & 0xfffffff) << 4; } static inline u32 gmmu_pte_address_sys_w(void) { return 0; } static inline u32 gmmu_pte_vol_w(void) { return 1; } static inline u32 gmmu_pte_vol_true_f(void) { return 0x1; } static inline u32 gmmu_pte_vol_false_f(void) { return 0x0; } static inline u32 gmmu_pte_aperture_w(void) { return 1; } static inline u32 gmmu_pte_aperture_video_memory_f(void) { return 0x0; } static inline u32 gmmu_pte_read_only_w(void) { return 0; } static inline u32 gmmu_pte_read_only_true_f(void) { return 0x4; } static inline u32 gmmu_pte_write_disable_w(void) { return 1; } static inline u32 gmmu_pte_write_disable_true_f(void) { return 0x80000000; } static inline u32 gmmu_pte_read_disable_w(void) { return 1; } static inline u32 gmmu_pte_read_disable_true_f(void) { return 0x40000000; } static inline u32 gmmu_pte_comptagline_f(u32 v) { return (v & 0x1ffff) << 12; } static inline u32 gmmu_pte_comptagline_w(void) { return 1; } static inline u32 gmmu_pte_address_shift_v(void) { return 0x0000000c; } static inline u32 gmmu_pte_kind_f(u32 v) { return (v & 0xff) << 4; } static inline u32 gmmu_pte_kind_w(void) { return 1; } static inline u32 gmmu_pte_kind_invalid_v(void) { return 0x000000ff; } static inline u32 gmmu_pte_kind_pitch_v(void) { return 0x00000000; } static inline u32 gmmu_pte_kind_z16_v(void) { return 0x00000001; } static inline u32 gmmu_pte_kind_z16_2c_v(void) { return 0x00000002; } static inline u32 gmmu_pte_kind_z16_ms2_2c_v(void) { return 0x00000003; } static inline u32 gmmu_pte_kind_z16_ms4_2c_v(void) { return 0x00000004; } static inline u32 gmmu_pte_kind_z16_ms8_2c_v(void) { return 0x00000005; } static inline u32 gmmu_pte_kind_z16_ms16_2c_v(void) { return 0x00000006; } static inline u32 gmmu_pte_kind_z16_2z_v(void) { return 0x00000007; } static inline u32 gmmu_pte_kind_z16_ms2_2z_v(void) { return 0x00000008; } static inline u32 gmmu_pte_kind_z16_ms4_2z_v(void) { return 0x00000009; } static inline u32 
gmmu_pte_kind_z16_ms8_2z_v(void) { return 0x0000000a; } static inline u32 gmmu_pte_kind_z16_ms16_2z_v(void) { return 0x0000000b; } static inline u32 gmmu_pte_kind_z16_4cz_v(void) { return 0x0000000c; } static inline u32 gmmu_pte_kind_z16_ms2_4cz_v(void) { return 0x0000000d; } static inline u32 gmmu_pte_kind_z16_ms4_4cz_v(void) { return 0x0000000e; } static inline u32 gmmu_pte_kind_z16_ms8_4cz_v(void) { return 0x0000000f; } static inline u32 gmmu_pte_kind_z16_ms16_4cz_v(void) { return 0x00000010; } static inline u32 gmmu_pte_kind_s8z24_v(void) { return 0x00000011; } static inline u32 gmmu_pte_kind_s8z24_1z_v(void) { return 0x00000012; } static inline u32 gmmu_pte_kind_s8z24_ms2_1z_v(void) { return 0x00000013; } static inline u32 gmmu_pte_kind_s8z24_ms4_1z_v(void) { return 0x00000014; } static inline u32 gmmu_pte_kind_s8z24_ms8_1z_v(void) { return 0x00000015; } static inline u32 gmmu_pte_kind_s8z24_ms16_1z_v(void) { return 0x00000016; } static inline u32 gmmu_pte_kind_s8z24_2cz_v(void) { return 0x00000017; } static inline u32 gmmu_pte_kind_s8z24_ms2_2cz_v(void) { return 0x00000018; } static inline u32 gmmu_pte_kind_s8z24_ms4_2cz_v(void) { return 0x00000019; } static inline u32 gmmu_pte_kind_s8z24_ms8_2cz_v(void) { return 0x0000001a; } static inline u32 gmmu_pte_kind_s8z24_ms16_2cz_v(void) { return 0x0000001b; } static inline u32 gmmu_pte_kind_s8z24_2cs_v(void) { return 0x0000001c; } static inline u32 gmmu_pte_kind_s8z24_ms2_2cs_v(void) { return 0x0000001d; } static inline u32 gmmu_pte_kind_s8z24_ms4_2cs_v(void) { return 0x0000001e; } static inline u32 gmmu_pte_kind_s8z24_ms8_2cs_v(void) { return 0x0000001f; } static inline u32 gmmu_pte_kind_s8z24_ms16_2cs_v(void) { return 0x00000020; } static inline u32 gmmu_pte_kind_s8z24_4cszv_v(void) { return 0x00000021; } static inline u32 gmmu_pte_kind_s8z24_ms2_4cszv_v(void) { return 0x00000022; } static inline u32 gmmu_pte_kind_s8z24_ms4_4cszv_v(void) { return 0x00000023; } static inline u32 
gmmu_pte_kind_s8z24_ms8_4cszv_v(void) { return 0x00000024; } static inline u32 gmmu_pte_kind_s8z24_ms16_4cszv_v(void) { return 0x00000025; } static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_v(void) { return 0x00000026; } static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_v(void) { return 0x00000027; } static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_v(void) { return 0x00000028; } static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_v(void) { return 0x00000029; } static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_1zv_v(void) { return 0x0000002e; } static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_1zv_v(void) { return 0x0000002f; } static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_1zv_v(void) { return 0x00000030; } static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_1zv_v(void) { return 0x00000031; } static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_2cs_v(void) { return 0x00000032; } static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_2cs_v(void) { return 0x00000033; } static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_2cs_v(void) { return 0x00000034; } static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_2cs_v(void) { return 0x00000035; } static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_2czv_v(void) { return 0x0000003a; } static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_2czv_v(void) { return 0x0000003b; } static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_2czv_v(void) { return 0x0000003c; } static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_2czv_v(void) { return 0x0000003d; } static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_2zv_v(void) { return 0x0000003e; } static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_2zv_v(void) { return 0x0000003f; } static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_2zv_v(void) { return 0x00000040; } static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_2zv_v(void) { return 0x00000041; } static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_4cszv_v(void) { return 0x00000042; } static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_4cszv_v(void) { return 0x00000043; } static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_4cszv_v(void) { return 
0x00000044; } static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_4cszv_v(void) { return 0x00000045; } static inline u32 gmmu_pte_kind_z24s8_v(void) { return 0x00000046; } static inline u32 gmmu_pte_kind_z24s8_1z_v(void) { return 0x00000047; } static inline u32 gmmu_pte_kind_z24s8_ms2_1z_v(void) { return 0x00000048; } static inline u32 gmmu_pte_kind_z24s8_ms4_1z_v(void) { return 0x00000049; } static inline u32 gmmu_pte_kind_z24s8_ms8_1z_v(void) { return 0x0000004a; } static inline u32 gmmu_pte_kind_z24s8_ms16_1z_v(void) { return 0x0000004b; } static inline u32 gmmu_pte_kind_z24s8_2cs_v(void) { return 0x0000004c; } static inline u32 gmmu_pte_kind_z24s8_ms2_2cs_v(void) { return 0x0000004d; } static inline u32 gmmu_pte_kind_z24s8_ms4_2cs_v(void) { return 0x0000004e; } static inline u32 gmmu_pte_kind_z24s8_ms8_2cs_v(void) { return 0x0000004f; } static inline u32 gmmu_pte_kind_z24s8_ms16_2cs_v(void) { return 0x00000050; } static inline u32 gmmu_pte_kind_z24s8_2cz_v(void) { return 0x00000051; } static inline u32 gmmu_pte_kind_z24s8_ms2_2cz_v(void) { return 0x00000052; } static inline u32 gmmu_pte_kind_z24s8_ms4_2cz_v(void) { return 0x00000053; } static inline u32 gmmu_pte_kind_z24s8_ms8_2cz_v(void) { return 0x00000054; } static inline u32 gmmu_pte_kind_z24s8_ms16_2cz_v(void) { return 0x00000055; } static inline u32 gmmu_pte_kind_z24s8_4cszv_v(void) { return 0x00000056; } static inline u32 gmmu_pte_kind_z24s8_ms2_4cszv_v(void) { return 0x00000057; } static inline u32 gmmu_pte_kind_z24s8_ms4_4cszv_v(void) { return 0x00000058; } static inline u32 gmmu_pte_kind_z24s8_ms8_4cszv_v(void) { return 0x00000059; } static inline u32 gmmu_pte_kind_z24s8_ms16_4cszv_v(void) { return 0x0000005a; } static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_v(void) { return 0x0000005b; } static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_v(void) { return 0x0000005c; } static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_v(void) { return 0x0000005d; } static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_v(void) { return 
0x0000005e; } static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_1zv_v(void) { return 0x00000063; } static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_1zv_v(void) { return 0x00000064; } static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_1zv_v(void) { return 0x00000065; } static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_1zv_v(void) { return 0x00000066; } static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_2cs_v(void) { return 0x00000067; } static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_2cs_v(void) { return 0x00000068; } static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_2cs_v(void) { return 0x00000069; } static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_2cs_v(void) { return 0x0000006a; } static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_2czv_v(void) { return 0x0000006f; } static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_2czv_v(void) { return 0x00000070; } static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_2czv_v(void) { return 0x00000071; } static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_2czv_v(void) { return 0x00000072; } static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_2zv_v(void) { return 0x00000073; } static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_2zv_v(void) { return 0x00000074; } static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_2zv_v(void) { return 0x00000075; } static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_2zv_v(void) { return 0x00000076; } static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_4cszv_v(void) { return 0x00000077; } static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_4cszv_v(void) { return 0x00000078; } static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_4cszv_v(void) { return 0x00000079; } static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_4cszv_v(void) { return 0x0000007a; } static inline u32 gmmu_pte_kind_zf32_v(void) { return 0x0000007b; } static inline u32 gmmu_pte_kind_zf32_1z_v(void) { return 0x0000007c; } static inline u32 gmmu_pte_kind_zf32_ms2_1z_v(void) { return 0x0000007d; } static inline u32 gmmu_pte_kind_zf32_ms4_1z_v(void) { return 0x0000007e; } static inline u32 gmmu_pte_kind_zf32_ms8_1z_v(void) { 
return 0x0000007f; } static inline u32 gmmu_pte_kind_zf32_ms16_1z_v(void) { return 0x00000080; } static inline u32 gmmu_pte_kind_zf32_2cs_v(void) { return 0x00000081; } static inline u32 gmmu_pte_kind_zf32_ms2_2cs_v(void) { return 0x00000082; } static inline u32 gmmu_pte_kind_zf32_ms4_2cs_v(void) { return 0x00000083; } static inline u32 gmmu_pte_kind_zf32_ms8_2cs_v(void) { return 0x00000084; } static inline u32 gmmu_pte_kind_zf32_ms16_2cs_v(void) { return 0x00000085; } static inline u32 gmmu_pte_kind_zf32_2cz_v(void) { return 0x00000086; } static inline u32 gmmu_pte_kind_zf32_ms2_2cz_v(void) { return 0x00000087; } static inline u32 gmmu_pte_kind_zf32_ms4_2cz_v(void) { return 0x00000088; } static inline u32 gmmu_pte_kind_zf32_ms8_2cz_v(void) { return 0x00000089; } static inline u32 gmmu_pte_kind_zf32_ms16_2cz_v(void) { return 0x0000008a; } static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_v(void) { return 0x0000008b; } static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_v(void) { return 0x0000008c; } static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_v(void) { return 0x0000008d; } static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_v(void) { return 0x0000008e; } static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_1cs_v(void) { return 0x0000008f; } static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_1cs_v(void) { return 0x00000090; } static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_1cs_v(void) { return 0x00000091; } static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_1cs_v(void) { return 0x00000092; } static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_1zv_v(void) { return 0x00000097; } static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_1zv_v(void) { return 0x00000098; } static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_1zv_v(void) { return 0x00000099; } static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_1zv_v(void) { return 0x0000009a; } static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_1czv_v(void) { return 0x0000009b; } 
static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_1czv_v(void) { return 0x0000009c; } static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_1czv_v(void) { return 0x0000009d; } static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_1czv_v(void) { return 0x0000009e; } static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_2cs_v(void) { return 0x0000009f; } static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_2cs_v(void) { return 0x000000a0; } static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_2cs_v(void) { return 0x000000a1; } static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_2cs_v(void) { return 0x000000a2; } static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_2cszv_v(void) { return 0x000000a3; } static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_2cszv_v(void) { return 0x000000a4; } static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_2cszv_v(void) { return 0x000000a5; } static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_2cszv_v(void) { return 0x000000a6; } static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_v(void) { return 0x000000a7; } static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_v(void) { return 0x000000a8; } static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_v(void) { return 0x000000a9; } static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_v(void) { return 0x000000aa; } static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_1cs_v(void) { return 0x000000ab; } static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_1cs_v(void) { return 0x000000ac; } static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_1cs_v(void) { return 0x000000ad; } static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_1cs_v(void) { return 0x000000ae; } static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_1zv_v(void) { return 0x000000b3; } static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_1zv_v(void) { return 0x000000b4; } static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_1zv_v(void) { return 0x000000b5; } static inline u32 
gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_1zv_v(void) { return 0x000000b6; } static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_1czv_v(void) { return 0x000000b7; } static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_1czv_v(void) { return 0x000000b8; } static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_1czv_v(void) { return 0x000000b9; } static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_1czv_v(void) { return 0x000000ba; } static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_2cs_v(void) { return 0x000000bb; } static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_2cs_v(void) { return 0x000000bc; } static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_2cs_v(void) { return 0x000000bd; } static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_2cs_v(void) { return 0x000000be; } static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_2cszv_v(void) { return 0x000000bf; } static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_2cszv_v(void) { return 0x000000c0; } static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_2cszv_v(void) { return 0x000000c1; } static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_2cszv_v(void) { return 0x000000c2; } static inline u32 gmmu_pte_kind_zf32_x24s8_v(void) { return 0x000000c3; } static inline u32 gmmu_pte_kind_zf32_x24s8_1cs_v(void) { return 0x000000c4; } static inline u32 gmmu_pte_kind_zf32_x24s8_ms2_1cs_v(void) { return 0x000000c5; } static inline u32 gmmu_pte_kind_zf32_x24s8_ms4_1cs_v(void) { return 0x000000c6; } static inline u32 gmmu_pte_kind_zf32_x24s8_ms8_1cs_v(void) { return 0x000000c7; } static inline u32 gmmu_pte_kind_zf32_x24s8_ms16_1cs_v(void) { return 0x000000c8; } static inline u32 gmmu_pte_kind_zf32_x24s8_2cszv_v(void) { return 0x000000ce; } static inline u32 gmmu_pte_kind_zf32_x24s8_ms2_2cszv_v(void) { return 0x000000cf; } static inline u32 gmmu_pte_kind_zf32_x24s8_ms4_2cszv_v(void) { return 0x000000d0; } static inline u32 gmmu_pte_kind_zf32_x24s8_ms8_2cszv_v(void) { return 0x000000d1; } static inline u32 
gmmu_pte_kind_zf32_x24s8_ms16_2cszv_v(void) { return 0x000000d2; } static inline u32 gmmu_pte_kind_zf32_x24s8_2cs_v(void) { return 0x000000d3; } static inline u32 gmmu_pte_kind_zf32_x24s8_ms2_2cs_v(void) { return 0x000000d4; } static inline u32 gmmu_pte_kind_zf32_x24s8_ms4_2cs_v(void) { return 0x000000d5; } static inline u32 gmmu_pte_kind_zf32_x24s8_ms8_2cs_v(void) { return 0x000000d6; } static inline u32 gmmu_pte_kind_zf32_x24s8_ms16_2cs_v(void) { return 0x000000d7; } static inline u32 gmmu_pte_kind_generic_16bx2_v(void) { return 0x000000fe; } static inline u32 gmmu_pte_kind_c32_2c_v(void) { return 0x000000d8; } static inline u32 gmmu_pte_kind_c32_2cbr_v(void) { return 0x000000d9; } static inline u32 gmmu_pte_kind_c32_2cba_v(void) { return 0x000000da; } static inline u32 gmmu_pte_kind_c32_2cra_v(void) { return 0x000000db; } static inline u32 gmmu_pte_kind_c32_2bra_v(void) { return 0x000000dc; } static inline u32 gmmu_pte_kind_c32_ms2_2c_v(void) { return 0x000000dd; } static inline u32 gmmu_pte_kind_c32_ms2_2cbr_v(void) { return 0x000000de; } static inline u32 gmmu_pte_kind_c32_ms2_2cra_v(void) { return 0x000000cc; } static inline u32 gmmu_pte_kind_c32_ms4_2c_v(void) { return 0x000000df; } static inline u32 gmmu_pte_kind_c32_ms4_2cbr_v(void) { return 0x000000e0; } static inline u32 gmmu_pte_kind_c32_ms4_2cba_v(void) { return 0x000000e1; } static inline u32 gmmu_pte_kind_c32_ms4_2cra_v(void) { return 0x000000e2; } static inline u32 gmmu_pte_kind_c32_ms4_2bra_v(void) { return 0x000000e3; } static inline u32 gmmu_pte_kind_c32_ms8_ms16_2c_v(void) { return 0x000000e4; } static inline u32 gmmu_pte_kind_c32_ms8_ms16_2cra_v(void) { return 0x000000e5; } static inline u32 gmmu_pte_kind_c64_2c_v(void) { return 0x000000e6; } static inline u32 gmmu_pte_kind_c64_2cbr_v(void) { return 0x000000e7; } static inline u32 gmmu_pte_kind_c64_2cba_v(void) { return 0x000000e8; } static inline u32 gmmu_pte_kind_c64_2cra_v(void) { return 0x000000e9; } static inline u32 
gmmu_pte_kind_c64_2bra_v(void) { return 0x000000ea; } static inline u32 gmmu_pte_kind_c64_ms2_2c_v(void) { return 0x000000eb; } static inline u32 gmmu_pte_kind_c64_ms2_2cbr_v(void) { return 0x000000ec; } static inline u32 gmmu_pte_kind_c64_ms2_2cra_v(void) { return 0x000000cd; } static inline u32 gmmu_pte_kind_c64_ms4_2c_v(void) { return 0x000000ed; } static inline u32 gmmu_pte_kind_c64_ms4_2cbr_v(void) { return 0x000000ee; } static inline u32 gmmu_pte_kind_c64_ms4_2cba_v(void) { return 0x000000ef; } static inline u32 gmmu_pte_kind_c64_ms4_2cra_v(void) { return 0x000000f0; } static inline u32 gmmu_pte_kind_c64_ms4_2bra_v(void) { return 0x000000f1; } static inline u32 gmmu_pte_kind_c64_ms8_ms16_2c_v(void) { return 0x000000f2; } static inline u32 gmmu_pte_kind_c64_ms8_ms16_2cra_v(void) { return 0x000000f3; } static inline u32 gmmu_pte_kind_c128_2c_v(void) { return 0x000000f4; } static inline u32 gmmu_pte_kind_c128_2cr_v(void) { return 0x000000f5; } static inline u32 gmmu_pte_kind_c128_ms2_2c_v(void) { return 0x000000f6; } static inline u32 gmmu_pte_kind_c128_ms2_2cr_v(void) { return 0x000000f7; } static inline u32 gmmu_pte_kind_c128_ms4_2c_v(void) { return 0x000000f8; } static inline u32 gmmu_pte_kind_c128_ms4_2cr_v(void) { return 0x000000f9; } static inline u32 gmmu_pte_kind_c128_ms8_ms16_2c_v(void) { return 0x000000fa; } static inline u32 gmmu_pte_kind_c128_ms8_ms16_2cr_v(void) { return 0x000000fb; } static inline u32 gmmu_pte_kind_x8c24_v(void) { return 0x000000fc; } static inline u32 gmmu_pte_kind_pitch_no_swizzle_v(void) { return 0x000000fd; } static inline u32 gmmu_pte_kind_smsked_message_v(void) { return 0x000000ca; } static inline u32 gmmu_pte_kind_smhost_message_v(void) { return 0x000000cb; } static inline u32 gmmu_pte_kind_s8_v(void) { return 0x0000002a; } static inline u32 gmmu_pte_kind_s8_2s_v(void) { return 0x0000002b; } #endif
arter97/android_kernel_nvidia_shieldtablet
drivers/gpu/nvgpu/gm20b/hw_gmmu_gm20b.h
C
gpl-2.0
24,084
/* ** Zabbix ** Copyright (C) 2001-2016 Zabbix SIA ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU General Public License as published by ** the Free Software Foundation; either version 2 of the License, or ** (at your option) any later version. ** ** This program is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** GNU General Public License for more details. ** ** You should have received a copy of the GNU General Public License ** along with this program; if not, write to the Free Software ** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. **/ #include "common.h" #include "sysinfo.h" ZBX_METRIC parameters_specific[] = /* KEY FLAG FUNCTION TEST PARAMETERS */ { {"kernel.maxfiles", 0, KERNEL_MAXFILES, NULL}, {"kernel.maxproc", 0, KERNEL_MAXPROC, NULL}, {"vfs.fs.size", CF_HAVEPARAMS, VFS_FS_SIZE, "/,free"}, {"vfs.fs.inode", CF_HAVEPARAMS, VFS_FS_INODE, "/,free"}, {"vfs.fs.discovery", 0, VFS_FS_DISCOVERY, NULL}, {"vfs.dev.read", CF_HAVEPARAMS, VFS_DEV_READ, "da0,operations"}, {"vfs.dev.write", CF_HAVEPARAMS, VFS_DEV_WRITE, "da0,operations"}, {"net.tcp.listen", CF_HAVEPARAMS, NET_TCP_LISTEN, "80"}, {"net.udp.listen", CF_HAVEPARAMS, NET_UDP_LISTEN, "68"}, {"net.if.in", CF_HAVEPARAMS, NET_IF_IN, "lo0,bytes"}, {"net.if.out", CF_HAVEPARAMS, NET_IF_OUT, "lo0,bytes"}, {"net.if.total", CF_HAVEPARAMS, NET_IF_TOTAL, "lo0,bytes"}, {"net.if.collisions", CF_HAVEPARAMS, NET_IF_COLLISIONS, "lo0"}, {"net.if.discovery", 0, NET_IF_DISCOVERY, "lo0"}, {"vm.memory.size", CF_HAVEPARAMS, VM_MEMORY_SIZE, "free"}, {"proc.num", CF_HAVEPARAMS, PROC_NUM, "inetd"}, {"proc.mem", CF_HAVEPARAMS, PROC_MEM, "inetd"}, {"system.cpu.switches", 0, SYSTEM_CPU_SWITCHES, NULL}, {"system.cpu.intr", 0, SYSTEM_CPU_INTR, NULL}, {"system.cpu.util", CF_HAVEPARAMS, SYSTEM_CPU_UTIL, 
"all,user,avg1"}, {"system.cpu.load", CF_HAVEPARAMS, SYSTEM_CPU_LOAD, "all,avg1"}, {"system.cpu.num", CF_HAVEPARAMS, SYSTEM_CPU_NUM, "online"}, {"system.cpu.discovery",0, SYSTEM_CPU_DISCOVERY, NULL}, {"system.uname", 0, SYSTEM_UNAME, NULL}, {"system.sw.arch", 0, SYSTEM_SW_ARCH, NULL}, {"system.swap.size", CF_HAVEPARAMS, SYSTEM_SWAP_SIZE, "all,free"}, {"system.uptime", 0, SYSTEM_UPTIME, NULL}, {"system.boottime", 0, SYSTEM_BOOTTIME, NULL}, {NULL} };
ruhip/zabbix_2.4.8
src/libs/zbxsysinfo/freebsd/freebsd.c
C
gpl-2.0
2,518
! ! Copyright (C) 2001-2015 Quantum ESPRESSO group ! This file is distributed under the terms of the ! GNU General Public License. See the file `License' ! in the root directory of the present distribution, ! or http://www.gnu.org/copyleft/gpl.txt . ! !---------------------------------------------------------------------------- SUBROUTINE set_mpi_comm_4_solvers(parent_comm, intra_bgrp_comm_, inter_bgrp_comm_ ) !---------------------------------------------------------------------------- ! USE mp_bands_util ! IMPLICIT NONE ! INTEGER, INTENT(IN) :: parent_comm, intra_bgrp_comm_, inter_bgrp_comm_ ! local variables INTEGER :: parent_nproc, parent_mype, ortho_parent_comm_, ierr ! !write(*,*) ' enter set_mpi_comm_4_davidson' intra_bgrp_comm = intra_bgrp_comm_ inter_bgrp_comm = inter_bgrp_comm_ ! #if defined (__MPI) ! CALL mpi_comm_size(parent_comm,parent_nproc,ierr) IF (ierr/=0) CALL errore( ' set_mpi_comm_4_solvers ', ' problem getting MPI size ', 1 ) CALL mpi_comm_rank(parent_comm,parent_mype,ierr) IF (ierr/=0) CALL errore( ' set_mpi_comm_4_solvers ', ' problem getting MPI rank ', 1 ) ! ! ... Set number of processors per band group ! CALL mpi_comm_size(intra_bgrp_comm,nproc_bgrp,ierr) IF (ierr/=0) CALL errore( ' set_mpi_comm_4_solvers ', ' problem getting MPI size ', 1 ) ! nbgrp = parent_nproc / nproc_bgrp IF ( nbgrp < 1 .OR. nbgrp > parent_nproc ) & CALL errore( 'set_mpi_comm_4_solvers','invalid number of band groups, out of range', 1 ) IF ( MOD( parent_nproc, nbgrp ) /= 0 ) & CALL errore( 'set_mpi_comm_4_solvers','n. of band groups must be divisor of parent_nproc', 1 ) ! ! set logical flag so that band parallelization in H\psi is allowed ! (can be disabled before calling H\psi if not desired) ! use_bgrp_in_hpsi = ( nbgrp > 1 ) ! ! ... set index of band group for this processor ( 0 : nbgrp - 1 ) ! my_bgrp_id = parent_mype / nproc_bgrp ! ! ... set index of processor within the image ( 0 : nproc_image - 1 ) ! me_bgrp = MOD( parent_mype, nproc_bgrp ) ! 
CALL mpi_barrier( parent_comm, ierr ) IF (ierr/=0) & CALL errore( 'set_mpi_comm_4_solvers','n. of band groups must be divisor of parent_nproc', 1 ) ! #else parent_nproc = 1 parent_mype = 0 nproc_bgrp = 1 nbgrp = 1 use_bgrp_in_hpsi = .false. my_bgrp_id = 0 me_bgrp = 0 #endif !write(*,*) ' exit set_mpi_comm_4_davidson' RETURN ! END SUBROUTINE set_mpi_comm_4_solvers
QEF/q-e_schrodinger
UtilXlib/set_mpi_comm_4_solvers.f90
FORTRAN
gpl-2.0
2,588
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> <title>Evince Programmer's Reference Manual: Evince Programmer's Reference Manual</title> <meta name="generator" content="DocBook XSL Stylesheets V1.78.1"> <link rel="home" href="index.html" title="Evince Programmer's Reference Manual"> <link rel="next" href="evince-frontend.html" title="Part I. Frontend"> <meta name="generator" content="GTK-Doc V1.19.1 (XML mode)"> <link rel="stylesheet" href="style.css" type="text/css"> </head> <body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"> <div class="book"> <div class="titlepage"> <div> <div><table class="navigation" id="top" width="100%" cellpadding="2" cellspacing="0"><tr><th valign="middle"><p class="title">Evince Programmer's Reference Manual</p></th></tr></table></div> <div><p class="releaseinfo"> For Evince version 3.10.3 . The latest version of this documentation can be found on-line at the <a class="ulink" href="http://library.gnome.org/devel/evince/index.html" target="_top">GNOME Library</a>. </p></div> <div><p class="copyright">Copyright © 2007, 2008, 2009 Nickolay V. Shmyrev</p></div> <div><p class="copyright">Copyright © 2008, 2009, 2010 Carlos Garcia Campos</p></div> <div><p class="copyright">Copyright © 2009, 2010 Christian Persch</p></div> <div><div class="legalnotice"> <a name="id-1.1.6"></a><p> Permission is granted to copy, distribute and/or modify this document under the terms of the <em class="citetitle">GNU General Public Licence</em> published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. 
You may obtain a copy of the <em class="citetitle">GNU General Public Licence</em> from the Free Software Foundation at <a class="ulink" href="http://www.gnu.org/licences/" target="_top">GNU Licences web site</a> or by writing to: </p> <div class="address"><p><br>           The Free Software Foundation, Inc.,<br>           <span class="street">51 Franklin St</span> – Fifth Floor,<br>           <span class="city">Boston</span>, <span class="state">MA</span> <span class="postcode">02110-1301</span>,<br>           <span class="country">USA</span><br>         </p></div> <p> </p> </div></div> </div> <hr> </div> <div class="toc"><dl class="toc"> <dt><span class="part"><a href="evince-frontend.html">I. Frontend</a></span></dt> <dd><dl> <dt> <span class="refentrytitle"><a href="EvSidebarAttachments.html">EvSidebarAttachments</a></span><span class="refpurpose"></span> </dt> <dt> <span class="refentrytitle"><a href="evince-ev-properties-dialog.html">ev-properties-dialog</a></span><span class="refpurpose"></span> </dt> <dt> <span class="refentrytitle"><a href="evince-ev-properties-fonts.html">ev-properties-fonts</a></span><span class="refpurpose"></span> </dt> <dt> <span class="refentrytitle"><a href="EvSidebarPage.html">EvSidebarPage</a></span><span class="refpurpose"></span> </dt> <dt> <span class="refentrytitle"><a href="EvOpenRecentAction.html">EvOpenRecentAction</a></span><span class="refpurpose"></span> </dt> <dt> <span class="refentrytitle"><a href="evince-ev-application.html">ev-application</a></span><span class="refpurpose"></span> </dt> <dt> <span class="refentrytitle"><a href="EvSidebarThumbnails.html">EvSidebarThumbnails</a></span><span class="refpurpose"></span> </dt> <dt> <span class="refentrytitle"><a href="EvSidebarLinks.html">EvSidebarLinks</a></span><span class="refpurpose"></span> </dt> <dt> <span class="refentrytitle"><a href="EvSidebar.html">EvSidebar</a></span><span class="refpurpose"></span> </dt> <dt> <span class="refentrytitle"><a 
href="evince-ev-properties-license.html">ev-properties-license</a></span><span class="refpurpose"></span> </dt> <dt> <span class="refentrytitle"><a href="EvMessageArea.html">EvMessageArea</a></span><span class="refpurpose"></span> </dt> <dt> <span class="refentrytitle"><a href="EvProgressMessageArea.html">EvProgressMessageArea</a></span><span class="refpurpose"></span> </dt> <dt> <span class="refentrytitle"><a href="evince-ev-metadata.html">ev-metadata</a></span><span class="refpurpose"></span> </dt> <dt> <span class="refentrytitle"><a href="EvWindow.html">EvWindow</a></span><span class="refpurpose"></span> </dt> <dt> <span class="refentrytitle"><a href="EvHistory.html">EvHistory</a></span><span class="refpurpose"></span> </dt> <dt> <span class="refentrytitle"><a href="EvSidebarLayers.html">EvSidebarLayers</a></span><span class="refpurpose"></span> </dt> <dt> <span class="refentrytitle"><a href="EvPasswordView.html">EvPasswordView</a></span><span class="refpurpose"></span> </dt> <dt> <span class="refentrytitle"><a href="EggFindBar.html">EggFindBar</a></span><span class="refpurpose"></span> </dt> <dt> <span class="refentrytitle"><a href="evince-ev-media-player-keys.html">ev-media-player-keys</a></span><span class="refpurpose"></span> </dt> <dt> <span class="refentrytitle"><a href="EvFileMonitor.html">EvFileMonitor</a></span><span class="refpurpose"></span> </dt> <dt> <span class="refentrytitle"><a href="EvPageAction.html">EvPageAction</a></span><span class="refpurpose"></span> </dt> <dt> <span class="refentrytitle"><a href="evince-ev-window-title.html">ev-window-title</a></span><span class="refpurpose"></span> </dt> <dt> <span class="refentrytitle"><a href="evince-ev-keyring.html">ev-keyring</a></span><span class="refpurpose"></span> </dt> <dt> <span class="refentrytitle"><a href="evince-ev-utils.html">ev-utils</a></span><span class="refpurpose"></span> </dt> <dt> <span class="refentrytitle"><a href="EvPageActionWidget.html">EvPageActionWidget</a></span><span 
class="refpurpose"></span> </dt> </dl></dd> <dt><span class="chapter"><a href="object-tree.html">Object Hierarchy</a></span></dt> <dt><span class="index"><a href="api-index-full.html">API Index</a></span></dt> <dt><span class="appendix"><a href="licence.html">A. Licence</a></span></dt> </dl></div> </div> <div class="footer"> <hr> Generated by GTK-Doc V1.19.1</div> </body> </html>
Vongo/evince
help/reference/shell/html/index.html
HTML
gpl-2.0
6,245
# Ember-cli-browser-update This README outlines the details of collaborating on this Ember addon. This project is an ember-cli addon for the [Browser Update](http://browser-update.org/ "Browser Update") project. ## What does it do? It warns the user with a small header bar that their browser is out of date and needs to be upgraded. You can use this to ensure that users do not experience missing functionality in your app because their browser does not support something that your code requires. You can customise the warning message to inform users about the degradation they will experience if they do not update. You can choose your minimum browser specs amongst other things. By default the minimum recommended browser is: + IE > 9 + Firefox > 2 + Opera > 9.63 + Safari > 2 + Chrome > 10 ## Content Security Policy If you are using [CSP](http://www.html5rocks.com/en/tutorials/security/content-security-policy/) you must add browser-update.org as a script-src and img-src: ``` Content-Security-Policy: script-src 'self' browser-update.org; img-src 'self' browser-update.org; ``` ## Configuration Add a `browserUpdate` object to your environment: ``` var ENV = { browserUpdate: { vs: {i:9,f:2,o:9.63,s:2,c:10}, test: true }, ... // rest of environment ``` This config object is passed to browser-update as its `buoop` variable. Full documentation can be found at [Browser-Update](http://browser-update.org/customize.html). The `test: true` parameter always shows the header bar regardless of the browser - useful for testing. Default values set up in the service are: ``` browserUpdate: { vs: {i:9,f:2,o:9.63,s:2,c:10} } ``` The `onshow`, `onclick` and `onclose` browser-update.org callbacks are hooked by the browser-update service and are exposed as events `show`, `click` and `close`. 
## Installation ``` npm install --save-dev ember-cli-browser-update ``` ## Running Tests * `ember test` * `ember test --server` ## Building * `ember build` For more information on using ember-cli, visit [http://www.ember-cli.com/](http://www.ember-cli.com/).
BryanCrotaz/ember-cli-browser-update
README.md
Markdown
gpl-2.0
2,106
/*! * jQuery postMessage - v0.5 - 9/11/2009 * http://benalman.com/projects/jquery-postmessage-plugin/ * * Copyright (c) 2009 "Cowboy" Ben Alman * Dual licensed under the MIT and GPL licenses. * http://benalman.com/about/license/ */ // Script: jQuery postMessage: Cross-domain scripting goodness // // *Version: 0.5, Last updated: 9/11/2009* // // Project Home - http://benalman.com/projects/jquery-postmessage-plugin/ // GitHub - http://github.com/cowboy/jquery-postmessage/ // Source - http://github.com/cowboy/jquery-postmessage/raw/master/jquery.ba-postmessage.js // (Minified) - http://github.com/cowboy/jquery-postmessage/raw/master/jquery.ba-postmessage.min.js (0.9kb) // // About: License // // Copyright (c) 2009 "Cowboy" Ben Alman, // Dual licensed under the MIT and GPL licenses. // http://benalman.com/about/license/ // // About: Examples // // This working example, complete with fully commented code, illustrates one // way in which this plugin can be used. // // Iframe resizing - http://benalman.com/code/projects/jquery-postmessage/examples/iframe/ // // About: Support and Testing // // Information about what version or versions of jQuery this plugin has been // tested with and what browsers it has been tested in. // // jQuery Versions - 1.3.2 // Browsers Tested - Internet Explorer 6-8, Firefox 3, Safari 3-4, Chrome, Opera 9. // // About: Release History // // 0.5 - (9/11/2009) Improved cache-busting // 0.4 - (8/25/2009) Initial release // JAH Extensions (9/2011) // Modifed to remove jQuery dependency. Messages must be plain text. window.xdc = {}; (function($){ '$:nomunge'; // Used by YUI compressor. // A few vars used in non-awesome browsers. var interval_id, last_hash, cache_bust = 1, // A var used in awesome browsers. rm_callback, // A few convenient shortcuts. window = this, FALSE = !1, // Reused internal strings. 
postMessage = 'postMessage', addEventListener = 'addEventListener', p_receiveMessage, // I couldn't get window.postMessage to actually work in Opera 9.64! has_postMessage = window[postMessage] && true; var isFunction = function(fn) { return fn && Object.prototype.toString.call( fn ) === '[object Function]'; }; // Method: jQuery.postMessage // // This method will call window.postMessage if available, setting the // targetOrigin parameter to the base of the target_url parameter for maximum // security in browsers that support it. If window.postMessage is not available, // the target window's location.hash will be used to pass the message. If an // object is passed as the message param, it will be serialized into a string // using the jQuery.param method. // // Usage: // // > jQuery.postMessage( message, target_url [, target ] ); // // Arguments: // // message - (String) A message to be passed to the other frame. // message - (Object) An object to be serialized into a params string, using // the jQuery.param method. // target_url - (String) The URL of the other frame this window is // attempting to communicate with. This must be the exact URL (including // any query string) of the other window for this script to work in // browsers that don't support window.postMessage. // target - (Object) A reference to the other frame this window is // attempting to communicate with. If omitted, defaults to `parent`. // // Returns: // // Nothing. $[postMessage] = function( message, target_url, target ) { if ( !target_url ) { return; } // Default to parent if unspecified. target = target || parent; if ( has_postMessage ) { // The browser supports window.postMessage, so call it with a targetOrigin // set appropriately, based on the target_url parameter. target[postMessage]( message, target_url.replace( /([^:]+:\/\/[^\/]+).*/, '$1' ) ); } else if ( target_url ) { // The browser does not support window.postMessage, so set the location // of the target to target_url#message. 
A bit ugly, but it works! A cache // bust parameter is added to ensure that repeat messages trigger the // callback. target.location = target_url.replace( /#.*$/, '' ) + '#' + (+new Date) + (cache_bust++) + '&' + message; } }; // Method: jQuery.receiveMessage // // Register a single callback for either a window.postMessage call, if // supported, or if unsupported, for any change in the current window // location.hash. If window.postMessage is supported and source_origin is // specified, the source window will be checked against this for maximum // security. If window.postMessage is unsupported, a polling loop will be // started to watch for changes to the location.hash. // // Note that for simplicity's sake, only a single callback can be registered // at one time. Passing no params will unbind this event (or stop the polling // loop), and calling this method a second time with another callback will // unbind the event (or stop the polling loop) first, before binding the new // callback. // // Also note that if window.postMessage is available, the optional // source_origin param will be used to test the event.origin property. From // the MDC window.postMessage docs: This string is the concatenation of the // protocol and "://", the host name if one exists, and ":" followed by a port // number if a port is present and differs from the default port for the given // protocol. Examples of typical origins are https://example.org (implying // port 443), http://example.net (implying port 80), and http://example.com:8080. // // Usage: // // > jQuery.receiveMessage( callback [, source_origin ] [, delay ] ); // // Arguments: // // callback - (Function) This callback will execute whenever a <jQuery.postMessage> // message is received, provided the source_origin matches. If callback is // omitted, any existing receiveMessage event bind or polling loop will be // canceled. 
// source_origin - (String) If window.postMessage is available and this value // is not equal to the event.origin property, the callback will not be // called. // source_origin - (Function) If window.postMessage is available and this // function returns false when passed the event.origin property, the // callback will not be called. // delay - (Number) An optional zero-or-greater delay in milliseconds at // which the polling loop will execute (for browser that don't support // window.postMessage). If omitted, defaults to 100. // // Returns: // // Nothing! $.receiveMessage = p_receiveMessage = function( callback, source_origin, delay ) { if ( has_postMessage ) { // Since the browser supports window.postMessage, the callback will be // bound to the actual event associated with window.postMessage. if ( callback ) { // Unbind an existing callback if it exists. rm_callback && p_receiveMessage(); // Bind the callback. A reference to the callback is stored for ease of // unbinding. rm_callback = function(e) { if ( ( typeof source_origin === 'string' && e.origin !== source_origin ) || ( isFunction( source_origin ) && source_origin( e.origin ) === FALSE ) ) { return FALSE; } callback( e ); }; } if ( window[addEventListener] ) { window[ callback ? addEventListener : 'removeEventListener' ]( 'message', rm_callback, FALSE ); } else { window[ callback ? 'attachEvent' : 'detachEvent' ]( 'onmessage', rm_callback ); } } else { // Since the browser sucks, a polling loop will be started, and the // callback will be called whenever the location.hash changes. interval_id && clearInterval( interval_id ); interval_id = null; if ( callback ) { delay = typeof source_origin === 'number' ? source_origin : typeof delay === 'number' ? delay : 100; interval_id = setInterval(function(){ var hash = document.location.hash, re = /^#?\d+&/; if ( hash !== last_hash && re.test( hash ) ) { last_hash = hash; callback({ data: hash.replace( re, '' ) }); } }, delay ); } } }; })(xdc);
young-geng/StatNews
SFGateBusiness/test_files/Swzl_TrafficDriverTypeAhead_data_002/postMessage.js
JavaScript
gpl-2.0
8,792
{% extends "admin/base_site.html" %} {% load i18n %} {% block custom_stylesheet %} {% endblock %} {% block breadcrumbs %} <div class="breadcrumbs"> <a href="../../../../">{% trans "Home" %}</a> &rsaquo; <a href="../../../">{{ app_label|capfirst }}</a> &rsaquo; <a href="../../">{{ opts.verbose_name_plural|capfirst }}</a> &rsaquo; <a href="../">{{ object|truncatewords:"18" }}</a> &rsaquo; {% trans 'Delete' %} </div> {% endblock %} {% block contents %} {% if perms_lacking %} <p>{% blocktrans with object as escaped_object %}Deleting the {{ object_name }} '{{ escaped_object }}' would result in deleting related objects, but your account doesn't have permission to delete the following types of objects:{% endblocktrans %}</p> <ul> {% for obj in perms_lacking %} <li>{{ obj }}</li> {% endfor %} </ul> {% else %} <p>{% blocktrans with object as escaped_object %}Are you sure you want to delete the {{ object_name }} "{{ escaped_object }}"? All of the following related items will be deleted:{% endblocktrans %}</p> <ul>{{ deleted_objects|unordered_list }}</ul> <form action="" method="post"> <div> <input type="hidden" name="post" value="yes" /> <input type="submit" value="{% trans "Yes, I'm sure" %}" /> </div> </form> {% endif %} {% endblock %}
ShaolongHu/Nitrate
tcms/templates/admin/delete_confirmation.html
HTML
gpl-2.0
1,337
; (function ($, scope) { function NextendElementStyle(id, parameters) { this.element = $('#' + id); this.parameters = parameters; this.defaultSetId = parameters.set; this.element.parent() .on('click', $.proxy(this.show, this)); this.element.siblings('.n2-form-element-clear') .on('click', $.proxy(this.clear, this)); this.name = this.element.siblings('input'); nextend.styleManager.$.on('visualDelete', $.proxy(this.styleDeleted, this)); this.updateName(this.element.val()); NextendElement.prototype.constructor.apply(this, arguments); }; NextendElementStyle.prototype = Object.create(NextendElement.prototype); NextendElementStyle.prototype.constructor = NextendElementStyle; NextendElementStyle.prototype.show = function (e) { e.preventDefault(); if (this.parameters.font != '') { nextend.styleManager.setConnectedFont(this.parameters.font); } if (this.parameters.font2 != '') { nextend.styleManager.setConnectedFont2(this.parameters.font2); } if (this.parameters.style2 != '') { nextend.styleManager.setConnectedStyle(this.parameters.style2); } if (this.defaultSetId) { nextend.styleManager.changeSetById(this.defaultSetId); } nextend.styleManager.show(this.element.val(), $.proxy(this.save, this), { previewMode: this.parameters.previewmode, previewHTML: this.parameters.preview }); }; NextendElementStyle.prototype.clear = function (e) { e.preventDefault(); e.stopPropagation(); this.val(''); }; NextendElementStyle.prototype.save = function (e, value) { nextend.styleManager.addVisualUsage(this.parameters.previewmode, value, window.nextend.pre); this.val(value); }; NextendElementStyle.prototype.val = function (value) { this.element.val(value); this.updateName(value); this.triggerOutsideChange(); }; NextendElementStyle.prototype.insideChange = function (value) { this.element.val(value); this.updateName(value); this.triggerInsideChange(); }; NextendElementStyle.prototype.updateName = function (value) { $.when(nextend.styleManager.getVisual(value)) .done($.proxy(function (style) { 
this.name.val(style.name); }, this)); }; NextendElementStyle.prototype.styleDeleted = function (e, id) { if (id == this.element.val()) { this.insideChange(''); } }; NextendElementStyle.prototype.renderStyle = function () { var style = this.element.val(); nextend.styleManager.addVisualUsage(this.parameters.previewmode, style, ''); return nextend.styleManager.getClass(style, this.parameters.previewmode); }; scope.NextendElementStyle = NextendElementStyle; $(window).ready(function () { new NextendElementContextMenu('.n2-form-element-style', 'style'); }); })(n2, window);
ttthanhDC/ProjectTeamWP
wp-content/plugins/smart-slider-3/nextend/media/js/element/style.js
JavaScript
gpl-2.0
3,135
/* FCE Ultra - NES/Famicom Emulator * * Copyright notice for this file: * Copyright (C) 2009 CaH4e3 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "mapinc.h" static uint8 chrlo[8], chrhi[8], prg[2], mirr, vlock; static int32 IRQa, IRQCount, IRQLatch, IRQClock; static uint8 *WRAM=NULL; static uint32 WRAMSIZE; static uint8 *CHRRAM=NULL; static uint32 CHRRAMSIZE; static SFORMAT StateRegs[]= { {chrlo, 8, "CHRLO"}, {chrhi, 8, "CHRHI"}, {prg, 2, "PRGR"}, {&mirr, 1, "MIRR"}, {&vlock, 1, "VLOCK"}, {&IRQa, 4, "IRQA"}, {&IRQCount, 4, "IRQC"}, {&IRQLatch, 4, "IRQL"}, {&IRQClock, 4, "IRQCL"}, {0} }; static void Sync(void) { uint8 i; setprg8r(0x10,0x6000,0); setprg8(0x8000,prg[0]); setprg8(0xa000,prg[1]); setprg8(0xc000,~1); setprg8(0xe000,~0); for(i=0; i<8; i++) { uint32 chr = chrlo[i]|(chrhi[i]<<8); if(chrlo[i]==0xc8) { vlock = 0; continue; } else if(chrlo[i]==0x88) { vlock = 1; continue; } if(((chrlo[i]==4)||(chrlo[i]==5))&&!vlock) setchr1r(0x10,i<<10,chr&1); else setchr1(i<<10,chr); } switch(mirr) { case 0: setmirror(MI_V); break; case 1: setmirror(MI_H); break; case 2: setmirror(MI_0); break; case 3: setmirror(MI_1); break; } } static DECLFW(M253Write) { if((A>=0xB000)&&(A<=0xE00C)) { uint8 ind=((((A&8)|(A>>8))>>3)+2)&7; uint8 sar=A&4; chrlo[ind]=(chrlo[ind]&(0xF0>>sar))|((V&0x0F)<<sar); if(A&4) chrhi[ind]=V>>4; Sync(); } 
else switch(A) { case 0x8010: prg[0]=V; Sync(); break; case 0xA010: prg[1]=V; Sync(); break; case 0x9400: mirr=V&3; Sync(); break; case 0xF000: IRQLatch = (IRQLatch & 0xF0) | (V & 0x0F); break; case 0xF004: IRQLatch = (IRQLatch & 0x0F) | (V << 4); break; case 0xF008: IRQa = V&3; if(IRQa&2) { IRQCount = IRQLatch; IRQClock = 0; } X6502_IRQEnd(FCEU_IQEXT); break; } } static void M253Power(void) { Sync(); SetReadHandler(0x6000,0x7FFF,CartBR); SetWriteHandler(0x6000,0x7FFF,CartBW); SetReadHandler(0x8000,0xFFFF,CartBR); SetWriteHandler(0x8000,0xFFFF,M253Write); } static void M253Close(void) { if(WRAM) FCEU_gfree(WRAM); if(CHRRAM) FCEU_gfree(CHRRAM); WRAM=CHRRAM=NULL; } static void M253IRQ(int cycles) { if(IRQa&2) { if((IRQClock+=cycles)>=0x72) { IRQClock -= 0x72; if(IRQCount==0xFF) { IRQCount = IRQLatch; IRQa = IRQa|((IRQa&1)<<1); X6502_IRQBegin(FCEU_IQEXT); } else IRQCount++; } } } static void StateRestore(int version) { Sync(); } void Mapper253_Init(CartInfo *info) { info->Power=M253Power; info->Close=M253Close; MapIRQHook=M253IRQ; GameStateRestore=StateRestore; CHRRAMSIZE=4096; CHRRAM=(uint8*)FCEU_gmalloc(CHRRAMSIZE); SetupCartCHRMapping(0x10,CHRRAM,CHRRAMSIZE,1); AddExState(CHRRAM, CHRRAMSIZE, 0, "CRAM"); WRAMSIZE=8192; WRAM=(uint8*)FCEU_gmalloc(WRAMSIZE); SetupCartPRGMapping(0x10,WRAM,WRAMSIZE,1); AddExState(WRAM, WRAMSIZE, 0, "WRAM"); if(info->battery) { info->SaveGame[0]=WRAM; info->SaveGameLen[0]=WRAMSIZE; } AddExState(&StateRegs, ~0, 0, 0); }
bear24rw/gamingcape_fceu
src/boards/253.cpp
C++
gpl-2.0
4,113
/****************************************************************************** * Product: Adempiere ERP & CRM Smart Business Solution * * Copyright (C) 1999-2007 ComPiere, Inc. All Rights Reserved. * * This program is free software, you can redistribute it and/or modify it * * under the terms version 2 of the GNU General Public License as published * * by the Free Software Foundation. This program is distributed in the hope * * that it will be useful, but WITHOUT ANY WARRANTY, without even the implied * * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * * with this program, if not, write to the Free Software Foundation, Inc., * * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * For the text or an alternative of this public license, you may reach us * * ComPiere, Inc., 2620 Augustine Dr. #245, Santa Clara, CA 95054, USA * * or via info@compiere.org or http://www.compiere.org/license.html * *****************************************************************************/ /** Generated Model - DO NOT CHANGE */ package org.compiere.model; import java.sql.ResultSet; import java.util.Properties; /** Generated Model for CM_AccessNewsChannel * @author Adempiere (generated) * @version Release 3.5.4a - $Id$ */ public class X_CM_AccessNewsChannel extends PO implements I_CM_AccessNewsChannel, I_Persistent { /** * */ private static final long serialVersionUID = 20090915L; /** Standard Constructor */ public X_CM_AccessNewsChannel (Properties ctx, int CM_AccessNewsChannel_ID, String trxName) { super (ctx, CM_AccessNewsChannel_ID, trxName); /** if (CM_AccessNewsChannel_ID == 0) { setCM_AccessProfile_ID (0); setCM_NewsChannel_ID (0); } */ } /** Load Constructor */ public X_CM_AccessNewsChannel (Properties ctx, ResultSet rs, String trxName) { super (ctx, rs, trxName); } /** AccessLevel * @return 6 - System - Client */ 
protected int get_AccessLevel() { return accessLevel.intValue(); } /** Load Meta Data */ protected POInfo initPO (Properties ctx) { POInfo poi = POInfo.getPOInfo (ctx, Table_ID, get_TrxName()); return poi; } public String toString() { StringBuffer sb = new StringBuffer ("X_CM_AccessNewsChannel[") .append(get_ID()).append("]"); return sb.toString(); } public I_CM_AccessProfile getCM_AccessProfile() throws RuntimeException { return (I_CM_AccessProfile)MTable.get(getCtx(), I_CM_AccessProfile.Table_Name) .getPO(getCM_AccessProfile_ID(), get_TrxName()); } /** Set Web Access Profile. @param CM_AccessProfile_ID Web Access Profile */ public void setCM_AccessProfile_ID (int CM_AccessProfile_ID) { if (CM_AccessProfile_ID < 1) set_ValueNoCheck (COLUMNNAME_CM_AccessProfile_ID, null); else set_ValueNoCheck (COLUMNNAME_CM_AccessProfile_ID, Integer.valueOf(CM_AccessProfile_ID)); } /** Get Web Access Profile. @return Web Access Profile */ public int getCM_AccessProfile_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_CM_AccessProfile_ID); if (ii == null) return 0; return ii.intValue(); } public I_CM_NewsChannel getCM_NewsChannel() throws RuntimeException { return (I_CM_NewsChannel)MTable.get(getCtx(), I_CM_NewsChannel.Table_Name) .getPO(getCM_NewsChannel_ID(), get_TrxName()); } /** Set News Channel. @param CM_NewsChannel_ID News channel for rss feed */ public void setCM_NewsChannel_ID (int CM_NewsChannel_ID) { if (CM_NewsChannel_ID < 1) set_ValueNoCheck (COLUMNNAME_CM_NewsChannel_ID, null); else set_ValueNoCheck (COLUMNNAME_CM_NewsChannel_ID, Integer.valueOf(CM_NewsChannel_ID)); } /** Get News Channel. @return News channel for rss feed */ public int getCM_NewsChannel_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_CM_NewsChannel_ID); if (ii == null) return 0; return ii.intValue(); } }
klst-com/metasfresh
de.metas.adempiere.adempiere/base/src/main/java-gen/org/compiere/model/X_CM_AccessNewsChannel.java
Java
gpl-2.0
4,233
/* * drivers/mmc/host/sdhci-tegra.c * * SDHCI-compatible driver for NVIDIA Tegra SoCs * * Copyright (c) 2009-2010, NVIDIA Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #define NV_DEBUG 0 #include <linux/mmc/host.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/irq.h> #include <linux/mmc/card.h> #include <linux/clk.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/regulator/consumer.h> #include <mach/sdhci.h> #include <mach/pinmux.h> #include <nvodm_sdio.h> #include "sdhci.h" #define DRIVER_DESC "NVIDIA Tegra SDHCI compliant driver" #define DRIVER_NAME "tegra-sdhci" #define WLAN_SDHCI_HOST_ID 0 static struct sdhci_host *wlan_sdhci_host_ptr = NULL; struct tegra_sdhci { struct platform_device *pdev; struct clk *clk; NvOdmSdioHandle hOdmSdio; const struct tegra_pingroup_config *pinmux; int nr_pins; int gpio_cd; int gpio_polarity_cd; int irq_cd; int gpio_wp; int gpio_polarity_wp; unsigned int debounce; unsigned long max_clk; bool card_present; bool clk_enable; bool card_always_on; struct mmc_ios resume_ios; struct work_struct card_detection_wq; struct sdhci_host *sdhost; }; static inline unsigned long res_size(struct resource *res) { return res->end - res->start + 1; } static int tegra_sdhci_enable_dma(struct sdhci_host *sdhost) { return 0; } static void 
do_card_detect(struct work_struct *work) { struct tegra_sdhci *host = container_of(work, struct tegra_sdhci, card_detection_wq); sdhci_card_detect(host->sdhost); } void sdhci_tegra_wlan_detect(void) { if ( NULL != wlan_sdhci_host_ptr) { printk("%s Detecting WLAN\n", __FUNCTION__); sdhci_card_detect(wlan_sdhci_host_ptr); } else printk("%s WLAN host ptr NOT SAVED\n", __FUNCTION__); } EXPORT_SYMBOL(sdhci_tegra_wlan_detect); #ifdef CONFIG_MACH_MOT void tegra_sdhci_status_notify_cb(void *dev_id) { struct sdhci_host *sdhost = dev_id; struct tegra_sdhci *host = sdhci_priv(sdhost); dev_info(&host->pdev->dev, "%s\n", __func__); sdhci_card_detect(sdhost); } #endif static irqreturn_t card_detect_isr(int irq, void *dev_id) { struct sdhci_host *sdhost = dev_id; struct tegra_sdhci *host = sdhci_priv(sdhost); host->card_present = (gpio_get_value(host->gpio_cd)==host->gpio_polarity_cd); smp_wmb(); schedule_work(&(host->card_detection_wq)); return IRQ_HANDLED; } static bool tegra_sdhci_card_detect(struct sdhci_host *sdhost) { struct tegra_sdhci *host = sdhci_priv(sdhost); #ifdef CONFIG_TEGRA_ODM_AROWANA if (host->gpio_cd != -1) host->card_present = (gpio_get_value(host->gpio_cd)==host->gpio_polarity_cd); #endif smp_rmb(); return host->card_present; } static int tegra_sdhci_get_ro(struct sdhci_host *sdhost) { struct tegra_sdhci *host = sdhci_priv(sdhost); BUG_ON(host->gpio_wp == -1); return (gpio_get_value(host->gpio_wp)==host->gpio_polarity_wp); } static void tegra_sdhci_set_clock(struct sdhci_host *sdhost, unsigned int clock) { struct tegra_sdhci *host = sdhci_priv(sdhost); if (clock) { clk_set_rate(host->clk, clock); sdhost->max_clk = clk_get_rate(host->clk); dev_dbg(&host->pdev->dev, "clock request: %uKHz. 
currently " "%uKHz\n", clock/1000, sdhost->max_clk/1000); } if (clock && !host->clk_enable) { clk_enable(host->clk); host->clk_enable = true; } else if (!clock && host->clk_enable) { clk_disable(host->clk); host->clk_enable = false; } } static struct sdhci_ops tegra_sdhci_wp_cd_ops = { .enable_dma = tegra_sdhci_enable_dma, .get_ro = tegra_sdhci_get_ro, .card_detect = tegra_sdhci_card_detect, .set_clock = tegra_sdhci_set_clock, }; static struct sdhci_ops tegra_sdhci_cd_ops = { .enable_dma = tegra_sdhci_enable_dma, .card_detect = tegra_sdhci_card_detect, .set_clock = tegra_sdhci_set_clock, }; static struct sdhci_ops tegra_sdhci_wp_ops = { .enable_dma = tegra_sdhci_enable_dma, .get_ro = tegra_sdhci_get_ro, .set_clock = tegra_sdhci_set_clock, }; static struct sdhci_ops tegra_sdhci_ops = { .enable_dma = tegra_sdhci_enable_dma, .set_clock = tegra_sdhci_set_clock, }; int __init tegra_sdhci_probe(struct platform_device *pdev) { struct sdhci_host *sdhost; struct tegra_sdhci *host; struct tegra_sdhci_platform_data *plat = pdev->dev.platform_data; struct resource *res; int ret = -ENODEV; if (pdev->id == -1) { dev_err(&pdev->dev, "dynamic instance assignment not allowed\n"); return -ENODEV; } sdhost = sdhci_alloc_host(&pdev->dev, sizeof(struct tegra_sdhci)); if (IS_ERR_OR_NULL(sdhost)) { dev_err(&pdev->dev, "unable to allocate driver structure\n"); return (!sdhost) ? 
-ENOMEM : PTR_ERR(sdhost); } sdhost->hw_name = dev_name(&pdev->dev); host = sdhci_priv(sdhost); host->sdhost = sdhost; host->hOdmSdio = NvOdmSdioOpen(pdev->id); if (!host->hOdmSdio) dev_info(&pdev->dev, "no ODM SDIO adaptation\n"); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "no memory I/O resource provided\n"); ret = -ENODEV; goto err_sdhci_alloc; } if (!request_mem_region(res->start, res_size(res), dev_name(&pdev->dev))) { dev_err(&pdev->dev, "memory in use\n"); ret = -EBUSY; goto err_sdhci_alloc; } sdhost->ioaddr = ioremap(res->start, res_size(res)); if (!sdhost->ioaddr) { dev_err(&pdev->dev, "failed to map registers\n"); ret = -ENXIO; goto err_request_mem; } res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(&pdev->dev, "no IRQ resource provided\n"); ret = -ENODEV; goto err_ioremap; } sdhost->irq = res->start; INIT_WORK(&(host->card_detection_wq), do_card_detect); host->clk = clk_get(&pdev->dev, NULL); if (!host->clk) { dev_err(&pdev->dev, "unable to get clock\n"); ret = -ENODEV; goto err_ioremap; } host->pdev = pdev; host->pinmux = plat->pinmux; host->nr_pins = plat->nr_pins; host->gpio_cd = plat->gpio_nr_cd; host->gpio_polarity_cd = plat->gpio_polarity_cd; host->gpio_wp = plat->gpio_nr_wp; host->gpio_polarity_wp = plat->gpio_polarity_wp; host->card_always_on = plat->is_always_on; dev_info(&pdev->dev, "write protect: %d, card detect: %d, always on: %d\n", host->gpio_wp, host->gpio_cd, host->card_always_on); host->irq_cd = -1; host->debounce = plat->debounce; if (plat->max_clk) host->max_clk = min_t(unsigned int, 52000000, plat->max_clk); else { dev_info(&pdev->dev, "no max_clk specified, default to 52MHz\n"); host->max_clk = 52000000; } #ifdef CONFIG_EMBEDDED_MMC_START_OFFSET sdhost->start_offset = plat->offset; #endif if (host->gpio_cd != -1) { ret = gpio_request(host->gpio_cd, "card_detect"); if (ret < 0) { dev_err(&pdev->dev, "request cd gpio failed\n"); host->gpio_cd = -1; goto skip_gpio_cd; 
} host->irq_cd = gpio_to_irq(host->gpio_cd); if (host->irq_cd < 0) { /* fall back to non-GPIO card detect mode */ dev_err(&pdev->dev, "invalid card detect GPIO\n"); host->gpio_cd = -1; host->irq_cd = -1; goto skip_gpio_cd; } ret = gpio_direction_input(host->gpio_cd); if (ret < 0) { dev_err(&pdev->dev, "failed to configure GPIO\n"); gpio_free(host->gpio_cd); host->gpio_cd = -1; goto skip_gpio_cd; } #ifndef CONFIG_TEGRA_ODM_AROWANA ret = request_irq(host->irq_cd, card_detect_isr, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, mmc_hostname(sdhost->mmc), sdhost); if (ret) { dev_err(&pdev->dev, "unable to request IRQ\n"); gpio_free(host->gpio_cd); host->gpio_cd = -1; host->irq_cd = -1; goto skip_gpio_cd; } #endif host->card_present = (gpio_get_value(host->gpio_cd)==host->gpio_polarity_cd); } skip_gpio_cd: ret = 0; if (host->gpio_wp != -1) { ret = gpio_request(host->gpio_wp, "write_protect"); if (ret < 0) { dev_err(&pdev->dev, "request wp gpio failed\n"); host->gpio_wp = -1; goto skip_gpio_wp; } ret = gpio_direction_input(host->gpio_wp); if (ret < 0) { dev_err(&pdev->dev, "configure wp gpio failed\n"); gpio_free(host->gpio_wp); host->gpio_wp = -1; } } skip_gpio_wp: ret = 0; if (host->pinmux && host->nr_pins) tegra_pinmux_config_tristate_table(host->pinmux, host->nr_pins, TEGRA_TRI_NORMAL); clk_set_rate(host->clk, host->max_clk); clk_enable(host->clk); host->max_clk = clk_get_rate(host->clk); host->clk_enable = true; if (host->gpio_wp != -1 && (host->gpio_cd != -1 || !plat->is_removable)) sdhost->ops = &tegra_sdhci_wp_cd_ops; else if (host->gpio_wp != -1) sdhost->ops = &tegra_sdhci_wp_ops; else if (host->gpio_cd != -1 || !plat->is_removable) sdhost->ops = &tegra_sdhci_cd_ops; else sdhost->ops = &tegra_sdhci_ops; sdhost->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | SDHCI_QUIRK_SINGLE_POWER_WRITE | SDHCI_QUIRK_ENABLE_INTERRUPT_AT_BLOCK_GAP | SDHCI_QUIRK_BROKEN_WRITE_PROTECT | SDHCI_QUIRK_BROKEN_CARD_DETECTION | SDHCI_QUIRK_BROKEN_CTRL_HISPD | SDHCI_QUIRK_RUNTIME_DISABLE; #ifdef 
CONFIG_ARCH_TEGRA_2x_SOC sdhost->quirks |= SDHCI_QUIRK_BROKEN_SPEC_VERSION | SDHCI_QUIRK_NO_64KB_ADMA; sdhost->version = SDHCI_SPEC_200; #endif if (!plat->is_removable) host->card_present = true; #ifdef CONFIG_MACH_MOT if (plat->ocr_mask) sdhost->mmc->ocr_avail = plat->ocr_mask; if (plat->register_status_notify) plat->register_status_notify(tegra_sdhci_status_notify_cb, sdhost); #endif sdhost->data_width = plat->bus_width; sdhost->dma_mask = DMA_BIT_MASK(32); if (plat->regulator_str != NULL) { sdhost->regulator = regulator_get(NULL, plat->regulator_str); if (IS_ERR(sdhost->regulator)) { pr_err("Unable to acquire sdio regulator: %s due to error %ld\n", plat->regulator_str, PTR_ERR(sdhost->regulator)); sdhost->regulator = NULL; } } sdhost->max_power_class = plat->max_power_class; ret = sdhci_add_host(sdhost); if (ret) goto fail; platform_set_drvdata(pdev, sdhost); if (pdev->id == WLAN_SDHCI_HOST_ID) wlan_sdhci_host_ptr = sdhost; dev_info(&pdev->dev, "probe complete\n"); return 0; fail: if (host->irq_cd != -1) free_irq(host->irq_cd, sdhost); if (host->gpio_cd != -1) gpio_free(host->gpio_cd); if (host->gpio_wp != -1) gpio_free(host->gpio_wp); if (host->pinmux && host->nr_pins) tegra_pinmux_config_tristate_table(host->pinmux, host->nr_pins, TEGRA_TRI_TRISTATE); clk_disable(host->clk); clk_put(host->clk); err_ioremap: iounmap(sdhost->ioaddr); err_request_mem: res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, res_size(res)); err_sdhci_alloc: if (host->hOdmSdio) NvOdmSdioClose(host->hOdmSdio); sdhci_free_host(sdhost); dev_err(&pdev->dev, "probe failed\n"); return ret; } static int tegra_sdhci_remove(struct platform_device *pdev) { struct sdhci_host *sdhost = platform_get_drvdata(pdev); struct tegra_sdhci *host = sdhci_priv(sdhost); if (host->irq_cd != -1) free_irq(host->irq_cd, sdhost); if (host->gpio_cd != -1) gpio_free(host->gpio_cd); if (host->gpio_wp != -1) gpio_free(host->gpio_wp); if (host->pinmux && host->nr_pins) 
tegra_pinmux_config_tristate_table(host->pinmux, host->nr_pins, TEGRA_TRI_TRISTATE); if (host->clk_enable) clk_disable(host->clk); clk_put(host->clk); iounmap(sdhost->ioaddr); sdhost->ioaddr = NULL; if (host->hOdmSdio) NvOdmSdioClose(host->hOdmSdio); sdhci_free_host(sdhost); return 0; } #define is_card_sdio(_card) \ ((_card) && ((_card)->type == MMC_TYPE_SDIO)) #if defined(CONFIG_PM) #define dev_to_host(_dev) platform_get_drvdata(to_platform_device(_dev)) static void tegra_sdhci_configure_interrupts(struct sdhci_host *sdhost, bool enable) { u32 ierr; u32 clear = SDHCI_INT_ALL_MASK; u32 set; if (enable) { /* enable required MMC INTs */ set = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE; ierr = sdhci_readl(sdhost, SDHCI_INT_ENABLE); ierr &= clear; ierr |= set; sdhci_writel(sdhost, ierr, SDHCI_INT_ENABLE); sdhci_writel(sdhost, ierr, SDHCI_SIGNAL_ENABLE); } else { /* disable the interrupts */ ierr = sdhci_readl(sdhost, SDHCI_INT_ENABLE); /* Card interrupt masking is done by sdio client driver */ ierr &= SDHCI_INT_CARD_INT; sdhci_writel(sdhost, ierr, SDHCI_INT_ENABLE); sdhci_writel(sdhost, ierr, SDHCI_SIGNAL_ENABLE); } } static int tegra_sdhci_restore(struct sdhci_host *sdhost) { unsigned long timeout; u8 mask = SDHCI_RESET_ALL; sdhci_writeb(sdhost, mask, SDHCI_SOFTWARE_RESET); sdhost->clock = 0; /* Wait max 100 ms */ timeout = 100; /* hw clears the bit when it's done */ while (sdhci_readb(sdhost, SDHCI_SOFTWARE_RESET) & mask) { if (timeout == 0) { pr_err("%s: Reset 0x%x never completed.\n", mmc_hostname(sdhost->mmc), (int)mask); return -EIO; } timeout--; mdelay(1); } tegra_sdhci_configure_interrupts(sdhost, true); sdhost->last_clk = 0; return 0; } static int tegra_sdhci_suspend(struct device *dev) { struct sdhci_host *sdhost = dev_to_host(dev); struct tegra_sdhci *host = sdhci_priv(sdhost); struct pm_message 
event = { PM_EVENT_SUSPEND }; int ret = 0; if (host->card_always_on && is_card_sdio(sdhost->mmc->card)) { struct mmc_ios ios; memcpy(&host->resume_ios, &sdhost->mmc->ios, sizeof(struct mmc_ios)); memcpy(&ios, &sdhost->mmc->ios, sizeof(struct mmc_ios)); ios.clock = 0; ios.vdd = 0; ios.power_mode = MMC_POWER_OFF; ios.bus_width = MMC_BUS_WIDTH_1; ios.timing = MMC_TIMING_LEGACY; sdhost->mmc->ops->set_ios(sdhost->mmc, &ios); /* Disable the interrupts */ tegra_sdhci_configure_interrupts(sdhost, false); return ret; } ret = sdhci_suspend_host(sdhost, event); if (ret) { dev_err(dev, "failed to suspend host\n"); return ret; } if (host->hOdmSdio) NvOdmSdioSuspend(host->hOdmSdio); return ret; } static int tegra_sdhci_resume(struct device *dev) { struct sdhci_host *sdhost = dev_to_host(dev); struct tegra_sdhci *host = sdhci_priv(sdhost); if (!host->clk_enable) { clk_enable(host->clk); host->clk_enable = true; } if (host->gpio_cd != -1) host->card_present = (gpio_get_value(host->gpio_cd) == host->gpio_polarity_cd); if (host->card_always_on && is_card_sdio(sdhost->mmc->card)) { int ret = 0; /* soft reset SD host controller and enable MMC INTs */ ret = tegra_sdhci_restore(sdhost); if (ret) { dev_err(dev, "failed to resume host\n"); return ret; } mmiowb(); sdhost->mmc->ops->set_ios(sdhost->mmc, &host->resume_ios); return 0; } if (host->hOdmSdio) NvOdmSdioResume(host->hOdmSdio); return sdhci_resume_host(sdhost); } static struct dev_pm_ops tegra_sdhci_pm = { .suspend = tegra_sdhci_suspend, .resume = tegra_sdhci_resume, }; #define tegra_sdhci_pm_ops &tegra_sdhci_pm #else #define tegra_sdhci_pm_ops NULL #endif struct platform_driver tegra_sdhci_driver = { .probe = tegra_sdhci_probe, .remove = tegra_sdhci_remove, .driver = { .name = "tegra-sdhci", .owner = THIS_MODULE, .pm = tegra_sdhci_pm_ops, }, }; static int __init tegra_sdhci_init(void) { return platform_driver_register(&tegra_sdhci_driver); } static void __exit tegra_sdhci_exit(void) { 
platform_driver_unregister(&tegra_sdhci_driver); } module_init(tegra_sdhci_init); module_exit(tegra_sdhci_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(DRIVER_DESC);
mingkaidox/kernel_tegra
drivers/mmc/host/sdhci-tegra.c
C
gpl-2.0
15,999
package com.nightscout.android.upload;

import java.io.Serializable;

/**
 * A {@link Record} that originated from a specific device, identified by
 * {@link #deviceId} and an optional human-readable {@link #deviceName}.
 */
public class DeviceRecord extends Record implements Serializable{

    private static final long serialVersionUID = 6321618305992689901L;

    // Stable identifier of the originating device.
    public String deviceId = "";
    // Display name of the originating device; set via setDeviceName(String).
    protected String deviceName = "";

    /** @return the device's display name (never null; defaults to ""). */
    public String getDeviceName(){
        return deviceName;
    }

    /**
     * Sets the device's display name.
     *
     * @param deviceName the new display name
     * @return the name that was just stored
     */
    public String setDeviceName(String deviceName){
        this.deviceName = deviceName;
        return this.deviceName;
    }

    /**
     * @deprecated This zero-argument form never mutated anything; it merely
     *             mirrored {@link #getDeviceName()}. Kept only for source
     *             compatibility — use {@link #setDeviceName(String)} instead.
     */
    @Deprecated
    public String setDeviceName(){
        return deviceName;
    }
}
wright-david/MedtronicUploader
src/com/nightscout/android/upload/DeviceRecord.java
Java
gpl-2.0
406
module Katello
  # API v2 endpoints for CRUD on the rules attached to a content view filter.
  # The concrete rule class depends on the filter's content type (package,
  # package group, erratum, ...), so every action resolves it through
  # ContentViewFilter.rule_class_for(@filter).
  class Api::V2::ContentViewFilterRulesController < Api::V2::ApiController
    before_filter :find_filter
    # index/create work on the collection; every other action needs @rule.
    before_filter :find_rule, :except => [:index, :create]

    api :GET, "/content_view_filters/:content_view_filter_id/rules", N_("List filter rules")
    param :content_view_filter_id, :identifier, :desc => N_("filter identifier"), :required => true
    # Returns every rule of the filter. Note: :results is de-duplicated but
    # :subtotal/:total reflect the un-deduplicated count.
    def index
      ids = ContentViewFilter.rule_ids_for(@filter)
      results = ids.map { |id| ContentViewFilter.rule_class_for(@filter).find(id) }
      collection = { :results => results.uniq,
                     :subtotal => results.count,
                     :total => results.count }
      respond :collection => collection
    end

    api :POST, "/content_view_filters/:content_view_filter_id/rules", N_("Create a filter rule. The parameters included should be based upon the filter type.")
    param :content_view_filter_id, :identifier, :desc => N_("filter identifier"), :required => true
    param :name, String, :desc => N_("package or package group: name")
    param :version, String, :desc => N_("package: version")
    param :min_version, String, :desc => N_("package: minimum version")
    param :max_version, String, :desc => N_("package: maximum version")
    param :errata_id, String, :desc => N_("erratum: id")
    param :errata_ids, Array, :desc => N_("erratum: IDs or a select all object")
    param :start_date, String, :desc => N_("erratum: start date (YYYY-MM-DD)")
    param :end_date, String, :desc => N_("erratum: end date (YYYY-MM-DD)")
    param :types, Array, :desc => N_("erratum: types (enhancement, bugfix, security)")
    param :date_type, String, :desc => N_("erratum: search using the 'Issued On' or 'Updated On' column of the errata. Values are 'issued'/'updated'")
    def create
      rule_clazz = ContentViewFilter.rule_class_for(@filter)
      if rule_params.key?(:errata_ids)
        # Batch mode: one erratum rule is created per supplied id.
        rules = []
        rule_params[:errata_ids].each do |errata_id|
          rules << rule_clazz.create!({:errata_id => errata_id}.merge(:filter => @filter))
        end
      else
        # Single-rule mode: build one rule from the whitelisted params.
        rule = rule_clazz.create!(rule_params.merge(:filter => @filter))
      end
      # NOTE(review): this relies on Ruby yielding nil for a local assigned
      # only in the branch that was not taken -- `rules` is nil on the
      # single-rule path and `rule` is nil on the batch path.
      if rules && rule.nil?
        respond_for_index(:collection => {:results => rules}, :template => 'index')
      else
        respond :resource => rule
      end
    end

    api :GET, "/content_view_filters/:content_view_filter_id/rules/:id", N_("Show filter rule info")
    param :content_view_filter_id, :identifier, :desc => N_("filter identifier"), :required => true
    param :id, :identifier, :desc => N_("rule identifier"), :required => true
    def show
      respond :resource => @rule
    end

    api :PUT, "/content_view_filters/:content_view_filter_id/rules/:id", N_("Update a filter rule. The parameters included should be based upon the filter type.")
    param :content_view_filter_id, :identifier, :desc => N_("filter identifier"), :required => true
    param :id, :identifier, :desc => N_("rule identifier"), :required => true
    param :name, String, :desc => N_("package or package group: name")
    param :version, String, :desc => N_("package: version")
    param :min_version, String, :desc => N_("package: minimum version")
    param :max_version, String, :desc => N_("package: maximum version")
    param :errata_id, String, :desc => N_("erratum: id")
    param :start_date, String, :desc => N_("erratum: start date (YYYY-MM-DD)")
    param :end_date, String, :desc => N_("erratum: end date (YYYY-MM-DD)")
    param :types, Array, :desc => N_("erratum: types (enhancement, bugfix, security)")
    def update
      update_params = rule_params
      if @rule.filter.content_type == 'package'
        # For package filters, version bounds omitted from the request are
        # blanked out so an update can clear a previously set bound.
        update_params[:version] = "" unless rule_params[:version]
        update_params[:min_version] = "" unless rule_params[:min_version]
        update_params[:max_version] = "" unless rule_params[:max_version]
      end
      @rule.update_attributes!(update_params)
      respond :resource => @rule
    end

    api :DELETE, "/content_view_filters/:content_view_filter_id/rules/:id", N_("Delete a filter rule")
    param :content_view_filter_id, :identifier, :desc => N_("filter identifier"), :required => true
    param :id, :identifier, :desc => N_("rule identifier"), :required => true
    def destroy
      @rule.destroy
      respond_for_show :resource => @rule
    end

    private

    # Loads the parent filter from the route; runs before every action.
    def find_filter
      @filter = ContentViewFilter.find(params[:content_view_filter_id])
    end

    # Loads the rule (of the filter-type-specific class) from the route.
    def find_rule
      rule_clazz = ContentViewFilter.rule_class_for(@filter)
      @rule = rule_clazz.find(params[:id])
    end

    # Strong-parameter whitelist for rule attributes. A Hash under
    # :errata_ids is a "select all" object coming from the UI; it is expanded
    # into a concrete id list before whitelisting.
    def rule_params
      if params[:content_view_filter_rule][:errata_ids].is_a?(Hash)
        ids = process_errata_ids(params[:content_view_filter_rule][:errata_ids])
        params[:content_view_filter_rule][:errata_ids] = ids
      end
      params.fetch(:content_view_filter_rule, {}).permit(:uuid, :name, :version, :min_version, :max_version, :errata_id, :start_date, :end_date, :date_type, :types => [], :errata_ids => [])
    end

    # Expands a "select all" object into concrete erratum ids.
    def process_errata_ids(select_all_params)
      if select_all_params[:included][:ids].blank?
        # "Select all": every erratum applicable to the filter's repos except
        # those already covered by a rule or explicitly excluded.
        current_errata_ids = @filter.erratum_rules.map(&:errata_id) + select_all_params[:excluded][:ids]
        Erratum.where('errata_id not in (?)', current_errata_ids).in_repositories(@filter.applicable_repos).pluck(:errata_id)
      else
        # An explicit include list is present -- presumably consumed directly
        # by the caller, so nothing is expanded here. TODO confirm.
        []
      end
    end
  end
end
dLobatog/katello
app/controllers/katello/api/v2/content_view_filter_rules_controller.rb
Ruby
gpl-2.0
5,494
# The valid parentheses problem

The goal is to write a method that takes a string as input and returns a boolean: `true` if the parentheses in the string are valid (properly matched and nested), `false` if not. E.g. `()` is valid; `)(` is not.
MarkWaldron/Challenges
Maths/11-Valid-Parentheses/readme.md
Markdown
gpl-2.0
227
define(function () {

    'use strict';

    // RequireJS configuration fragment for the FAOSTAT UI commons module:
    // path aliases plus a shim declaring Bootstrap's jQuery dependency.
    return {
        paths: {
            FAOSTAT_UI_COMMONS: 'faostat-ui-commons',
            faostat_ui_commons: '../'
        },
        shim: {
            // Bootstrap is not an AMD module; make sure jQuery loads first.
            bootstrap: {
                deps: ['jquery']
            }
        }
    };
});
FAOSTAT4/faostat-ui-commons
js/paths.js
JavaScript
gpl-2.0
289
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.telegram.messenger.volley.toolbox;

import org.telegram.messenger.volley.AuthFailureError;
import org.telegram.messenger.volley.Request;
import org.telegram.messenger.volley.Request.Method;

import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.HttpStatus;
import org.apache.http.ProtocolVersion;
import org.apache.http.StatusLine;
import org.apache.http.entity.BasicHttpEntity;
import org.apache.http.message.BasicHeader;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicStatusLine;

import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.SSLSocketFactory;

/**
 * An {@link HttpStack} based on {@link HttpURLConnection}. Each request is
 * executed on a fresh connection and the result is adapted back into an
 * Apache {@link HttpResponse} for the rest of Volley.
 */
public class HurlStack implements HttpStack {

    private static final String HEADER_CONTENT_TYPE = "Content-Type";

    /**
     * An interface for transforming URLs before use.
     */
    public interface UrlRewriter {
        /**
         * Returns a URL to use instead of the provided one, or null to indicate
         * this URL should not be used at all.
         */
        String rewriteUrl(String originalUrl);
    }

    // Optional URL rewriter; null means URLs are used verbatim.
    private final UrlRewriter mUrlRewriter;
    // Optional SSL socket factory applied to HTTPS connections only.
    private final SSLSocketFactory mSslSocketFactory;

    public HurlStack() {
        this(null);
    }

    /**
     * @param urlRewriter Rewriter to use for request URLs
     */
    public HurlStack(UrlRewriter urlRewriter) {
        this(urlRewriter, null);
    }

    /**
     * @param urlRewriter Rewriter to use for request URLs
     * @param sslSocketFactory SSL factory to use for HTTPS connections
     */
    public HurlStack(UrlRewriter urlRewriter, SSLSocketFactory sslSocketFactory) {
        mUrlRewriter = urlRewriter;
        mSslSocketFactory = sslSocketFactory;
    }

    /**
     * Performs the given request over an {@link HttpURLConnection} and adapts
     * the result into an Apache {@link HttpResponse}.
     *
     * @param request the request to perform
     * @param additionalHeaders extra headers; these override the request's own
     *        headers on name collision (they are merged in last)
     * @throws IOException on connection failure or when the rewriter rejects
     *         the URL
     * @throws AuthFailureError propagated from the request's body/header accessors
     */
    @Override
    public HttpResponse performRequest(Request<?> request, Map<String, String> additionalHeaders)
            throws IOException, AuthFailureError {
        String url = request.getUrl();
        HashMap<String, String> map = new HashMap<String, String>();
        // Merge order matters: additionalHeaders win over the request's headers.
        map.putAll(request.getHeaders());
        map.putAll(additionalHeaders);
        if (mUrlRewriter != null) {
            String rewritten = mUrlRewriter.rewriteUrl(url);
            if (rewritten == null) {
                throw new IOException("URL blocked by rewriter: " + url);
            }
            url = rewritten;
        }
        URL parsedUrl = new URL(url);
        HttpURLConnection connection = openConnection(parsedUrl, request);
        for (String headerName : map.keySet()) {
            connection.addRequestProperty(headerName, map.get(headerName));
        }
        setConnectionParametersForRequest(connection, request);
        // Initialize HttpResponse with data from the HttpURLConnection.
        ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1);
        int responseCode = connection.getResponseCode();
        if (responseCode == -1) {
            // -1 is returned by getResponseCode() if the response code could not be retrieved.
            // Signal to the caller that something was wrong with the connection.
            throw new IOException("Could not retrieve response code from HttpUrlConnection.");
        }
        StatusLine responseStatus = new BasicStatusLine(protocolVersion,
                connection.getResponseCode(), connection.getResponseMessage());
        BasicHttpResponse response = new BasicHttpResponse(responseStatus);
        if (hasResponseBody(request.getMethod(), responseStatus.getStatusCode())) {
            response.setEntity(entityFromConnection(connection));
        }
        // Copy response headers; only the first value of multi-valued headers
        // is carried over. A null key is the status line pseudo-header.
        for (Entry<String, List<String>> header : connection.getHeaderFields().entrySet()) {
            if (header.getKey() != null) {
                Header h = new BasicHeader(header.getKey(), header.getValue().get(0));
                response.addHeader(h);
            }
        }
        return response;
    }

    /**
     * Checks if a response message contains a body.
     * @see <a href="https://tools.ietf.org/html/rfc7230#section-3.3">RFC 7230 section 3.3</a>
     * @param requestMethod request method
     * @param responseCode response status code
     * @return whether the response has a body
     */
    private static boolean hasResponseBody(int requestMethod, int responseCode) {
        return requestMethod != Request.Method.HEAD
            && !(HttpStatus.SC_CONTINUE <= responseCode && responseCode < HttpStatus.SC_OK)
            && responseCode != HttpStatus.SC_NO_CONTENT
            && responseCode != HttpStatus.SC_NOT_MODIFIED;
    }

    /**
     * Initializes an {@link HttpEntity} from the given {@link HttpURLConnection}.
     * Falls back to the error stream when the input stream is unavailable
     * (e.g. non-2xx responses).
     * @param connection
     * @return an HttpEntity populated with data from <code>connection</code>.
     */
    private static HttpEntity entityFromConnection(HttpURLConnection connection) {
        BasicHttpEntity entity = new BasicHttpEntity();
        InputStream inputStream;
        try {
            inputStream = connection.getInputStream();
        } catch (IOException ioe) {
            inputStream = connection.getErrorStream();
        }
        entity.setContent(inputStream);
        entity.setContentLength(connection.getContentLength());
        entity.setContentEncoding(connection.getContentEncoding());
        entity.setContentType(connection.getContentType());
        return entity;
    }

    /**
     * Create an {@link HttpURLConnection} for the specified {@code url}.
     * Protected so subclasses can substitute their own connection factory.
     */
    protected HttpURLConnection createConnection(URL url) throws IOException {
        return (HttpURLConnection) url.openConnection();
    }

    /**
     * Opens an {@link HttpURLConnection} with parameters.
     * @param url
     * @return an open connection
     * @throws IOException
     */
    private HttpURLConnection openConnection(URL url, Request<?> request) throws IOException {
        HttpURLConnection connection = createConnection(url);

        int timeoutMs = request.getTimeoutMs();
        connection.setConnectTimeout(timeoutMs);
        connection.setReadTimeout(timeoutMs);
        connection.setUseCaches(false);
        connection.setDoInput(true);

        // use caller-provided custom SslSocketFactory, if any, for HTTPS
        if ("https".equals(url.getProtocol()) && mSslSocketFactory != null) {
            ((HttpsURLConnection)connection).setSSLSocketFactory(mSslSocketFactory);
        }

        return connection;
    }

    @SuppressWarnings("deprecation")
    /* package */ static void setConnectionParametersForRequest(HttpURLConnection connection,
            Request<?> request) throws IOException, AuthFailureError {
        switch (request.getMethod()) {
            case Method.DEPRECATED_GET_OR_POST:
                // This is the deprecated way that needs to be handled for backwards compatibility.
                // If the request's post body is null, then the assumption is that the request is
                // GET.  Otherwise, it is assumed that the request is a POST.
                byte[] postBody = request.getPostBody();
                if (postBody != null) {
                    // Prepare output. There is no need to set Content-Length explicitly,
                    // since this is handled by HttpURLConnection using the size of the prepared
                    // output stream.
                    connection.setDoOutput(true);
                    connection.setRequestMethod("POST");
                    connection.addRequestProperty(HEADER_CONTENT_TYPE,
                            request.getPostBodyContentType());
                    DataOutputStream out = new DataOutputStream(connection.getOutputStream());
                    out.write(postBody);
                    out.close();
                }
                break;
            case Method.GET:
                // Not necessary to set the request method because connection defaults to GET but
                // being explicit here.
                connection.setRequestMethod("GET");
                break;
            case Method.DELETE:
                connection.setRequestMethod("DELETE");
                break;
            case Method.POST:
                connection.setRequestMethod("POST");
                addBodyIfExists(connection, request);
                break;
            case Method.PUT:
                connection.setRequestMethod("PUT");
                addBodyIfExists(connection, request);
                break;
            case Method.HEAD:
                connection.setRequestMethod("HEAD");
                break;
            case Method.OPTIONS:
                connection.setRequestMethod("OPTIONS");
                break;
            case Method.TRACE:
                connection.setRequestMethod("TRACE");
                break;
            case Method.PATCH:
                connection.setRequestMethod("PATCH");
                addBodyIfExists(connection, request);
                break;
            default:
                throw new IllegalStateException("Unknown method type.");
        }
    }

    // Writes the request body, if any, and sets its Content-Type header.
    private static void addBodyIfExists(HttpURLConnection connection, Request<?> request)
            throws IOException, AuthFailureError {
        byte[] body = request.getBody();
        if (body != null) {
            connection.setDoOutput(true);
            connection.addRequestProperty(HEADER_CONTENT_TYPE, request.getBodyContentType());
            DataOutputStream out = new DataOutputStream(connection.getOutputStream());
            out.write(body);
            out.close();
        }
    }
}
DigitalLabApp/Gramy
Telegram-master/TMessagesProj/src/main/java/org/telegram/messenger/volley/toolbox/HurlStack.java
Java
gpl-2.0
10,456
{ "collection" : { "version" : "1.0", "href" : "{{ url }}", "links" : [ {"rel" : "profile" , "href" : "http://schema.org/VideoGame"}, {"rel" : "collection", "prompt" : "Peliculas", "href" : "{{ url }}/../movies"}, {"rel" : "collection", "prompt" : "Libros", "href" : "{{ url }}/../books"}, {"rel" : "collection", "prompt" : "Musica", "href" : "{{ url }}/../musicalbums"}, {"rel" : "collection", "prompt" : "Video Juegos", "href" : "{{ url }}/../videogames"} ], "items" : [ {% for item in items %} { "href" : "{{ url }}/{{ item.id }}", "data" : [ {"name" : "name", "value" : "{{ item.name }}", "prompt" : "Nombre del videojuego"} ] } {% if not loop.last %},{% endif %} {% endfor %} ], "template" : { "data" : [ {"name" : "name", "value" : "{{ item.name }}", "prompt" : "Nombre del videojuego"}, {"name" : "description", "value" : "{{ item.description }}", "prompt" : "Descripción del videojuego"}, {"name" : "gamePlatform", "value" : "{{ item.gamePlatform }}", "prompt" : "Plataforma"}, {"name" : "applicationSubCategory", "value" : "{{ item.applicationSubCategory }}", "prompt" : "Sub-Categoria del videojuego"}, {"name" : "screenshot", "value" : "{{ item.screenshot }}", "prompt" : "Captura"}, {"name" : "embedUrl", "value" : "{{ item.embedUrl }}", "prompt" : "Trailer en Youtube"}, {"name" : "datePublished", "value" : "{{ item.datePublished }}", "prompt" : "Fecha de lanzamiento"} ] } } }
interfacesweb/s2_marsattack
api_1/templates/videogamelist_template.php
PHP
gpl-2.0
1,597
/*
 * This file is part of lanterna (http://code.google.com/p/lanterna/).
 *
 * lanterna is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2010-2012 Martin
 */
package com.googlecode.lanterna.gui;

import com.googlecode.lanterna.terminal.Terminal.Color;

import java.util.*;

/**
 * Extend this class to create your own themes. A {@code Theme} consists of several
 * {@code Theme.Definition}s, one for each {@code Theme.Category} value. When components are setting
 * their colors according to the theme, they do so by calling {@code TextGraphics.applyTheme}.
 *
 * @author Martin
 */
public class Theme {

    // Fallback style: black on white, not highlighted.
    private static final Definition DEFAULT = new Definition(Color.BLACK, Color.WHITE, false);
    // Style shared by all "selected" categories: white on blue, highlighted.
    private static final Definition SELECTED = new Definition(Color.WHITE, Color.BLUE, true);

    // Per-category styles; a missing/null entry falls back to getDefault().
    private Map<Category, Definition> styles = new EnumMap<Category, Definition>(Category.class);

    // Shared instance returned by getDefaultTheme().
    private static final Theme DEFAULT_INSTANCE = new Theme();

    /**
     * Represents things which can be styled.
     */
    public enum Category {
        DIALOG_AREA,
        SCREEN_BACKGROUND,
        SHADOW,
        RAISED_BORDER,
        BORDER,
        BUTTON_ACTIVE,
        BUTTON_INACTIVE,
        BUTTON_LABEL_INACTIVE,
        BUTTON_LABEL_ACTIVE,
        LIST_ITEM,
        LIST_ITEM_SELECTED,
        CHECKBOX,
        CHECKBOX_SELECTED,
        TEXTBOX,
        TEXTBOX_FOCUSED,
        PROGRESS_BAR_COMPLETED,
        PROGRESS_BAR_REMAINING
    }

    // Populates every category with the built-in default color scheme.
    protected Theme() {
        setDefinition(Category.DIALOG_AREA, DEFAULT);
        setDefinition(Category.SCREEN_BACKGROUND, new Definition(Color.CYAN, Color.BLUE, true));
        setDefinition(Category.SHADOW, new Definition(Color.BLACK, Color.BLACK, true));
        setDefinition(Category.BORDER, new Definition(Color.BLACK, Color.WHITE, true));
        setDefinition(Category.RAISED_BORDER, new Definition(Color.WHITE, Color.WHITE, true));
        setDefinition(Category.BUTTON_LABEL_ACTIVE, new Definition(Color.YELLOW, Color.BLUE, true));
        setDefinition(Category.BUTTON_LABEL_INACTIVE, new Definition(Color.BLACK, Color.WHITE, true));
        setDefinition(Category.BUTTON_ACTIVE, SELECTED);
        setDefinition(Category.BUTTON_INACTIVE, DEFAULT);
        setDefinition(Category.LIST_ITEM, DEFAULT);
        setDefinition(Category.LIST_ITEM_SELECTED, SELECTED);
        setDefinition(Category.CHECKBOX, DEFAULT);
        setDefinition(Category.CHECKBOX_SELECTED, SELECTED);
        setDefinition(Category.TEXTBOX, SELECTED);
        setDefinition(Category.TEXTBOX_FOCUSED, new Definition(Color.YELLOW, Color.BLUE, true));
        setDefinition(Category.PROGRESS_BAR_COMPLETED, new Definition(Color.GREEN, Color.BLACK, false));
        setDefinition(Category.PROGRESS_BAR_REMAINING, new Definition(Color.RED, Color.BLACK, false));
    }

    /** Returns the style for {@code category}, or the default style if unset. */
    public Theme.Definition getDefinition(Category category) {
        if (styles.containsKey(category) && styles.get(category) != null) {
            return styles.get(category);
        }

        return getDefault();
    }

    /** Sets (or, with a null {@code def}, clears) the style for a category. */
    protected void setDefinition(Category category, Definition def) {
        if (def == null) {
            styles.remove(category);
        } else {
            styles.put(category, def);
        }
    }

    /**
     * Gets the default style to use when no Category-specific style is set.
     */
    protected Definition getDefaultStyle() {
        return DEFAULT;
    }

    /**
     * @deprecated use
     * {@code getDefaultStyle()} instead
     */
    @Deprecated
    protected Definition getDefault() {
        return getDefaultStyle();
    }

    /**
     * @deprecated use {@code getDefinition} instead.
     */
    @Deprecated
    protected Definition getDialogEmptyArea() {
        return getDefinition(Category.DIALOG_AREA);
    }

    /**
     * @deprecated use {@code getDefinition} instead.
     */
    @Deprecated
    protected Definition getScreenBackground() {
        return getDefinition(Category.SCREEN_BACKGROUND);
    }

    /**
     * @deprecated use {@code getDefinition} instead.
     */
    @Deprecated
    protected Definition getShadow() {
        return getDefinition(Category.SHADOW);
    }

    /**
     * @deprecated use {@code getDefinition} instead.
     */
    @Deprecated
    protected Definition getBorder() {
        return getDefinition(Category.BORDER);
    }

    /**
     * @deprecated use {@code getDefinition} instead.
     */
    @Deprecated
    protected Definition getRaisedBorder() {
        return getDefinition(Category.RAISED_BORDER);
    }

    /**
     * @deprecated use {@code getDefinition} instead.
     */
    @Deprecated
    protected Definition getButtonLabelActive() {
        return getDefinition(Category.BUTTON_LABEL_ACTIVE);
    }

    /**
     * @deprecated use {@code getDefinition} instead.
     */
    @Deprecated
    protected Definition getButtonLabelInactive() {
        return getDefinition(Category.BUTTON_LABEL_INACTIVE);
    }

    /**
     * @deprecated use {@code getDefinition} instead.
     */
    @Deprecated
    protected Definition getButtonActive() {
        return getDefinition(Category.BUTTON_ACTIVE);
    }

    /**
     * @deprecated use {@code getDefinition} instead.
     */
    @Deprecated
    protected Definition getButtonInactive() {
        return getDefinition(Category.BUTTON_INACTIVE);
    }

    /**
     * @deprecated use {@code getDefinition} instead.
     */
    @Deprecated
    protected Definition getItem() {
        return getDefinition(Category.LIST_ITEM);
    }

    /**
     * @deprecated use {@code getDefinition} instead.
     */
    @Deprecated
    protected Definition getItemSelected() {
        return getDefinition(Category.LIST_ITEM_SELECTED);
    }

    /**
     * @deprecated use {@code getDefinition} instead.
     */
    @Deprecated
    protected Definition getCheckBox() {
        return getDefinition(Category.CHECKBOX);
    }

    /**
     * @deprecated use {@code getDefinition} instead.
     */
    @Deprecated
    protected Definition getCheckBoxSelected() {
        return getDefinition(Category.CHECKBOX_SELECTED);
    }

    /**
     * @deprecated use {@code getDefinition} instead.
     */
    @Deprecated
    protected Definition getTextBoxFocused() {
        return getDefinition(Category.TEXTBOX_FOCUSED);
    }

    /**
     * @deprecated use {@code getDefinition} instead.
     */
    @Deprecated
    protected Definition getTextBox() {
        return getDefinition(Category.TEXTBOX);
    }

    /** Returns the shared built-in theme instance. */
    public static Theme getDefaultTheme() {
        return DEFAULT_INSTANCE;
    }

    /**
     * A style definition encompassing colors and effects.
     */
    public static class Definition {

        private Color foreground;
        private Color background;
        private boolean highlighted;
        private boolean underlined;

        public Definition(Color foreground, Color background) {
            this(foreground, background, false);
        }

        public Definition(Color foreground, Color background, boolean highlighted) {
            this(foreground, background, highlighted, false);
        }

        /**
         * @param foreground  text color; must not be null
         * @param background  fill color; must not be null
         * @param highlighted whether to render with the bold/bright attribute
         * @param underlined  whether to render underlined
         * @throws IllegalArgumentException if either color is null
         */
        public Definition(Color foreground, Color background, boolean highlighted, boolean underlined) {
            if (foreground == null) {
                throw new IllegalArgumentException("foreground color cannot be null");
            }

            if (background == null) {
                throw new IllegalArgumentException("background color cannot be null");
            }

            this.foreground = foreground;
            this.background = background;
            this.highlighted = highlighted;
            this.underlined = underlined;
        }

        public Color foreground() {
            return foreground;
        }

        public Color background() {
            return background;
        }

        public boolean isHighlighted() {
            return highlighted;
        }

        public boolean isUnderlined() {
            return underlined;
        }
    }
}
Tusamarco/stresstool
src/com/googlecode/lanterna/gui/Theme.java
Java
gpl-2.0
8,611
/*
 * s2mpb02.h - Driver for the Samsung s2mpb02
 *
 * Copyright (C) 2014 Samsung Electrnoics
 * XXX <xxx@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * S2MPB02 has Flash LED devices.
 * The devices share the same I2C bus and included in
 * this mfd driver.
 */

#ifndef __S2MPB02_H__
#define __S2MPB02_H__

#include <linux/i2c.h>

#define MFD_DEV_NAME "s2mpb02"

/* 8-bit bus address 0xB2 shifted down to the 7-bit I2C address. */
#define S2MPB02_I2C_ADDR	(0xB2 >> 1)
/* Sentinel for "no such register". */
#define S2MPB02_REG_INVALID	(0xff)

/* Convenience accessor for the chip revision stored in the iodev. */
#define S2MPB02_PMIC_REV(iodev)	(iodev)->rev_num

/* Chip variants handled by this MFD driver. */
enum s2mpb02_types {
	TYPE_S2MPB02,
};

/* Interrupt register groups; only the LED block raises interrupts. */
enum s2mpb02_irq_source {
	LED_INT = 0,
	S2MPB02_IRQ_GROUP_NR,
};

enum s2mpb02_irq {
	/* FLASH */
	S2MPB02_LED_IRQ_IRLED_END,
	S2MPB02_IRQ_NR,
};

/* Runtime state shared by the s2mpb02 MFD core and its sub-devices. */
struct s2mpb02_dev {
	struct device *dev;
	struct i2c_client *i2c; /* 0xB2; PMIC, Flash LED */
	struct mutex i2c_lock;	/* serializes register accesses on the bus */
	int type;		/* enum s2mpb02_types */
	u8 rev_num; /* pmic Rev */

	int irq;
	int irq_base;
	int irq_gpio;
	bool wakeup;
	struct mutex irqlock;	/* protects the IRQ mask caches below */
	int irq_masks_cur[S2MPB02_IRQ_GROUP_NR];
	int irq_masks_cache[S2MPB02_IRQ_GROUP_NR];

	struct pinctrl *max_pinctrl;
	struct pinctrl_state *gpio_state_active;
	struct pinctrl_state *gpio_state_suspend;

	struct s2mpb02_platform_data *pdata;
};

#ifdef CONFIG_LEDS_S2MPB02
struct s2mpb02_led_platform_data;
#endif

/* Per-regulator init data parsed from platform data / device tree. */
struct s2mpb02_regulator_data {
	int id;
	struct regulator_init_data *initdata;
	struct device_node *reg_node;
};

struct s2mpb02_platform_data {
	/* IRQ */
	int irq_base;
	int irq_gpio;
	bool wakeup;

	int num_regulators;
	struct s2mpb02_regulator_data *regulators;

#ifdef CONFIG_LEDS_S2MPB02
	/* led (flash/torch) data */
	struct s2mpb02_led_platform_data *led_data;
#endif
};

extern int s2mpb02_irq_init(struct s2mpb02_dev *s2mpb02);
extern void s2mpb02_irq_exit(struct s2mpb02_dev *s2mpb02);
extern int s2mpb02_irq_resume(struct s2mpb02_dev *s2mpb02);

/* S2MPB02 shared i2c API function */
extern int s2mpb02_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest);
extern int s2mpb02_bulk_read(struct i2c_client *i2c, u8 reg, int count,
				u8 *buf);
extern int s2mpb02_write_reg(struct i2c_client *i2c, u8 reg, u8 value);
extern int s2mpb02_bulk_write(struct i2c_client *i2c, u8 reg, int count,
				u8 *buf);
extern int s2mpb02_update_reg(struct i2c_client *i2c, u8 reg, u8 val, u8 mask);

#endif /* __S2MPB02_H__ */
ashyx/kernel_gts28ve-gts210ve
include/linux/mfd/s2mpb02.h
C
gpl-2.0
2,938
/* Common declarations for all of GNU Fortran libcaf implementations.
   Copyright (C) 2011-2015 Free Software Foundation, Inc.
   Contributed by Tobias Burnus <burnus@net-b.de>

This file is part of the GNU Fortran Coarray Runtime Library (libcaf).

Libcaf is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

Libcaf is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#ifndef LIBCAF_H
#define LIBCAF_H

#include <stdbool.h>
#include <stddef.h>	/* For size_t.  */
#include <stdint.h>	/* For int32_t.  */

#include "libgfortran.h"

/* Dead code kept for reference: compiler-compatibility shims and the
   Fortran 2008 STAT_* constants (now taken from libgfortran.h).  */
#if 0
#ifndef __GNUC__
#define __attribute__(x)
#define likely(x)       (x)
#define unlikely(x)     (x)
#else
#define likely(x)       __builtin_expect(!!(x), 1)
#define unlikely(x)     __builtin_expect(!!(x), 0)
#endif

/* Definitions of the Fortran 2008 standard; need to kept in sync with
   ISO_FORTRAN_ENV, cf. libgfortran.h.  */

#define STAT_UNLOCKED		0
#define STAT_LOCKED		1
#define STAT_LOCKED_OTHER_IMAGE	2
#define STAT_STOPPED_IMAGE 	6000
#endif

/* Describes what type of array we are registerring. Keep in sync with
   gcc/fortran/trans.h.  */
typedef enum caf_register_t {
  CAF_REGTYPE_COARRAY_STATIC,
  CAF_REGTYPE_COARRAY_ALLOC,
  CAF_REGTYPE_LOCK_STATIC,
  CAF_REGTYPE_LOCK_ALLOC,
  CAF_REGTYPE_CRITICAL,
  CAF_REGTYPE_EVENT_STATIC,
  CAF_REGTYPE_EVENT_ALLOC
}
caf_register_t;

/* Opaque per-coarray handle owned by the CAF implementation.  */
typedef void* caf_token_t;
typedef gfc_array_void gfc_descriptor_t;

/* Linked list of static coarrays registered.  */
typedef struct caf_static_t {
  caf_token_t token;
  struct caf_static_t *prev;
}
caf_static_t;

/* When there is a vector subscript in this dimension, nvec == 0, otherwise,
   lower_bound, upper_bound, stride contains the bounds relative to the declared
   bounds; kind denotes the integer kind of the elements of vector[].  */
typedef struct caf_vector_t {
  size_t nvec;
  union {
    struct {
      void *vector;
      int kind;
    } v;
    struct {
      ptrdiff_t lower_bound, upper_bound, stride;
    } triplet;
  } u;
}
caf_vector_t;

/* Library startup/teardown and image inquiry.  */
void _gfortran_caf_init (int *, char ***);
void _gfortran_caf_finalize (void);

int _gfortran_caf_this_image (int);
int _gfortran_caf_num_images (int, int);

/* Coarray registration/deregistration.  */
void *_gfortran_caf_register (size_t, caf_register_t, caf_token_t *, int *,
			      char *, int);
void _gfortran_caf_deregister (caf_token_t *, int *, char *, int);

/* Synchronization and error-stop support.  */
void _gfortran_caf_sync_all (int *, char *, int);
void _gfortran_caf_sync_memory (int *, char *, int);
void _gfortran_caf_sync_images (int, int[], int *, char *, int);

void _gfortran_caf_error_stop_str (const char *, int32_t)
     __attribute__ ((noreturn));
void _gfortran_caf_error_stop (int32_t) __attribute__ ((noreturn));

/* Collective operations.  */
void _gfortran_caf_co_broadcast (gfc_descriptor_t *, int, int *, char *, int);
void _gfortran_caf_co_sum (gfc_descriptor_t *, int, int *, char *, int);
void _gfortran_caf_co_min (gfc_descriptor_t *, int, int *, char *, int, int);
void _gfortran_caf_co_max (gfc_descriptor_t *, int, int *, char *, int, int);
void _gfortran_caf_co_reduce (gfc_descriptor_t *, void* (*) (void *, void*),
			      int, int, int *, char *, int, int);

/* One-sided remote data transfer.  */
void _gfortran_caf_get (caf_token_t, size_t, int, gfc_descriptor_t *,
			caf_vector_t *, gfc_descriptor_t *, int, int, bool);
void _gfortran_caf_send (caf_token_t, size_t, int, gfc_descriptor_t *,
			 caf_vector_t *, gfc_descriptor_t *, int, int, bool);

void _gfortran_caf_sendget (caf_token_t, size_t, int, gfc_descriptor_t *,
			    caf_vector_t *, caf_token_t, size_t, int,
			    gfc_descriptor_t *, caf_vector_t *, int, int,
			    bool);

/* Atomic operations on remote (or local) coarray data.  */
void _gfortran_caf_atomic_define (caf_token_t, size_t, int, void *, int *,
				  int, int);
void _gfortran_caf_atomic_ref (caf_token_t, size_t, int, void *, int *,
			       int, int);
void _gfortran_caf_atomic_cas (caf_token_t, size_t, int, void *, void *,
			       void *, int *, int, int);
void _gfortran_caf_atomic_op (int, caf_token_t, size_t, int, void *, void *,
			      int *, int, int);

/* Locks and events.  */
void _gfortran_caf_lock (caf_token_t, size_t, int, int *, int *, char *, int);
void _gfortran_caf_unlock (caf_token_t, size_t, int, int *, char *, int);
void _gfortran_caf_event_post (caf_token_t, size_t, int, int *, char *, int);
void _gfortran_caf_event_wait (caf_token_t, size_t, int, int *, char *, int);
void _gfortran_caf_event_query (caf_token_t, size_t, int, int *, int *);

#endif  /* LIBCAF_H  */
rofirrim/gcc-tiny
libgfortran/caf/libcaf.h
C
gpl-2.0
5,098
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/kthread.h> #include <linux/mutex.h> #include <linux/msm_tsens.h> #include <linux/workqueue.h> #include <linux/completion.h> #include <linux/cpu.h> #include <linux/cpufreq.h> #include <linux/msm_tsens.h> #include <linux/msm_thermal.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/sysfs.h> #include <linux/types.h> #include <linux/thermal.h> #include <linux/regulator/rpm-smd-regulator.h> #include <linux/regulator/consumer.h> #include <linux/regulator/driver.h> #include <linux/msm_thermal_ioctl.h> #include <soc/qcom/rpm-smd.h> #include <soc/qcom/scm.h> #include <linux/sched/rt.h> #define CREATE_TRACE_POINTS #define TRACE_MSM_THERMAL #include <trace/trace_thermal.h> #define MAX_CURRENT_UA 100000 #define MAX_RAILS 5 #define MAX_THRESHOLD 2 #define MONITOR_ALL_TSENS -1 #define TSENS_NAME_MAX 20 #define TSENS_NAME_FORMAT "tsens_tz_sensor%d" #define THERM_SECURE_BITE_CMD 8 #define SENSOR_SCALING_FACTOR 1 static struct msm_thermal_data msm_thermal_info; static struct delayed_work check_temp_work; static bool core_control_enabled; static uint32_t cpus_offlined; static DEFINE_MUTEX(core_control_mutex); static struct kobject *cc_kobj; static struct kobject *mx_kobj; static struct task_struct *hotplug_task; static struct 
task_struct *freq_mitigation_task; static struct task_struct *thermal_monitor_task; static struct completion hotplug_notify_complete; static struct completion freq_mitigation_complete; static struct completion thermal_monitor_complete; static int enabled; static int polling_enabled; static int rails_cnt; static int sensor_cnt; static int psm_rails_cnt; static int ocr_rail_cnt; static int limit_idx; static int limit_idx_low; static int limit_idx_high; static int max_tsens_num; static struct cpufreq_frequency_table *table; static uint32_t usefreq; static int freq_table_get; static bool vdd_rstr_enabled; static bool vdd_rstr_nodes_called; static bool vdd_rstr_probed; static bool sensor_info_nodes_called; static bool sensor_info_probed; static bool psm_enabled; static bool psm_nodes_called; static bool psm_probed; static bool freq_mitigation_enabled; static bool ocr_enabled; static bool ocr_nodes_called; static bool ocr_probed; static bool ocr_reg_init_defer; static bool hotplug_enabled; static bool interrupt_mode_enable; static bool msm_thermal_probed; static bool gfx_crit_phase_ctrl_enabled; static bool gfx_warm_phase_ctrl_enabled; static bool cx_phase_ctrl_enabled; static bool vdd_mx_enabled; static bool therm_reset_enabled; static bool online_core; static bool cluster_info_probed; static bool cluster_info_nodes_called; //[BUGFIX]added By miao, bug 902619 #if defined(CONFIG_TCT_8X16_IDOL3) static bool boot_fmax_limit_enabled; #endif static int *tsens_id_map; static DEFINE_MUTEX(vdd_rstr_mutex); static DEFINE_MUTEX(psm_mutex); static DEFINE_MUTEX(cx_mutex); static DEFINE_MUTEX(gfx_mutex); static DEFINE_MUTEX(ocr_mutex); static DEFINE_MUTEX(vdd_mx_mutex); static uint32_t min_freq_limit; static uint32_t curr_gfx_band; static uint32_t curr_cx_band; static struct kobj_attribute cx_mode_attr; static struct kobj_attribute gfx_mode_attr; static struct kobj_attribute mx_enabled_attr; static struct attribute_group cx_attr_gp; static struct attribute_group gfx_attr_gp; static 
struct attribute_group mx_attr_group; static struct regulator *vdd_mx; static struct cpufreq_frequency_table *pending_freq_table_ptr; static int pending_cpu_freq = -1; static long *tsens_temp_at_panic; enum thermal_threshold { HOTPLUG_THRESHOLD_HIGH, HOTPLUG_THRESHOLD_LOW, FREQ_THRESHOLD_HIGH, FREQ_THRESHOLD_LOW, THRESHOLD_MAX_NR, }; enum sensor_id_type { THERM_ZONE_ID, THERM_TSENS_ID, THERM_ID_MAX_NR, }; struct cluster_info { int cluster_id; uint32_t entity_count; struct cluster_info *child_entity_ptr; struct cluster_info *parent_ptr; struct cpufreq_frequency_table *freq_table; int freq_idx; int freq_idx_low; int freq_idx_high; cpumask_t cluster_cores; bool sync_cluster; uint32_t limited_max_freq; uint32_t limited_min_freq; //[BUGFIX]added By miao, bug 902619 #if defined(CONFIG_TCT_8X16_IDOL3) uint32_t boot_max_freq_limit; #endif }; struct cpu_info { uint32_t cpu; const char *sensor_type; enum sensor_id_type id_type; uint32_t sensor_id; bool offline; bool user_offline; bool hotplug_thresh_clear; struct sensor_threshold threshold[THRESHOLD_MAX_NR]; bool max_freq; uint32_t user_max_freq; uint32_t user_min_freq; uint32_t limited_max_freq; uint32_t limited_min_freq; bool freq_thresh_clear; struct cluster_info *parent_ptr; }; struct threshold_info; struct therm_threshold { int32_t sensor_id; enum sensor_id_type id_type; struct sensor_threshold threshold[MAX_THRESHOLD]; int32_t trip_triggered; void (*notify)(struct therm_threshold *); struct threshold_info *parent; }; struct threshold_info { uint32_t thresh_ct; bool thresh_triggered; struct therm_threshold *thresh_list; }; struct rail { const char *name; uint32_t freq_req; uint32_t min_level; uint32_t num_levels; int32_t curr_level; uint32_t levels[3]; struct kobj_attribute value_attr; struct kobj_attribute level_attr; struct regulator *reg; struct attribute_group attr_gp; }; struct msm_sensor_info { const char *name; const char *alias; const char *type; uint32_t scaling_factor; }; struct psm_rail { const char *name; 
uint8_t init; uint8_t mode; struct kobj_attribute mode_attr; struct rpm_regulator *reg; struct regulator *phase_reg; struct attribute_group attr_gp; }; enum msm_thresh_list { MSM_THERM_RESET, MSM_VDD_RESTRICTION, MSM_CX_PHASE_CTRL_HOT, MSM_GFX_PHASE_CTRL_WARM, MSM_GFX_PHASE_CTRL_HOT, MSM_OCR, MSM_VDD_MX_RESTRICTION, MSM_LIST_MAX_NR, }; enum msm_thermal_phase_ctrl { MSM_CX_PHASE_CTRL, MSM_GFX_PHASE_CTRL, MSM_PHASE_CTRL_NR, }; enum msm_temp_band { MSM_COLD_CRITICAL = 1, MSM_COLD, MSM_COOL, MSM_NORMAL, MSM_WARM, MSM_HOT, MSM_HOT_CRITICAL, MSM_TEMP_MAX_NR, }; static struct psm_rail *psm_rails; static struct psm_rail *ocr_rails; static struct rail *rails; static struct msm_sensor_info *sensors; static struct cpu_info cpus[NR_CPUS]; static struct threshold_info *thresh; static bool mx_restr_applied; static struct cluster_info *core_ptr; struct vdd_rstr_enable { struct kobj_attribute ko_attr; uint32_t enabled; }; /* For SMPS only*/ enum PMIC_SW_MODE { PMIC_AUTO_MODE = RPM_REGULATOR_MODE_AUTO, PMIC_IPEAK_MODE = RPM_REGULATOR_MODE_IPEAK, PMIC_PWM_MODE = RPM_REGULATOR_MODE_HPM, }; enum ocr_request { OPTIMUM_CURRENT_MIN, OPTIMUM_CURRENT_MAX, OPTIMUM_CURRENT_NR, }; //[BUGFIX]added By miao, bug 902619 #ifdef CONFIG_TCT_8X16_IDOL3 #define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store) #endif #define SYNC_CORE(_cpu) \ (core_ptr && cpus[_cpu].parent_ptr->sync_cluster) #define VDD_RES_RO_ATTRIB(_rail, ko_attr, j, _name) \ ko_attr.attr.name = __stringify(_name); \ ko_attr.attr.mode = 0444; \ ko_attr.show = vdd_rstr_reg_##_name##_show; \ ko_attr.store = NULL; \ sysfs_attr_init(&ko_attr.attr); \ _rail.attr_gp.attrs[j] = &ko_attr.attr; #define VDD_RES_RW_ATTRIB(_rail, ko_attr, j, _name) \ ko_attr.attr.name = __stringify(_name); \ ko_attr.attr.mode = 0644; \ ko_attr.show = vdd_rstr_reg_##_name##_show; \ ko_attr.store = vdd_rstr_reg_##_name##_store; \ sysfs_attr_init(&ko_attr.attr); \ _rail.attr_gp.attrs[j] = &ko_attr.attr; #define VDD_RSTR_ENABLE_FROM_ATTRIBS(attr) \ 
(container_of(attr, struct vdd_rstr_enable, ko_attr)); #define VDD_RSTR_REG_VALUE_FROM_ATTRIBS(attr) \ (container_of(attr, struct rail, value_attr)); #define VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr) \ (container_of(attr, struct rail, level_attr)); #define OCR_RW_ATTRIB(_rail, ko_attr, j, _name) \ ko_attr.attr.name = __stringify(_name); \ ko_attr.attr.mode = 0644; \ ko_attr.show = ocr_reg_##_name##_show; \ ko_attr.store = ocr_reg_##_name##_store; \ sysfs_attr_init(&ko_attr.attr); \ _rail.attr_gp.attrs[j] = &ko_attr.attr; #define PSM_RW_ATTRIB(_rail, ko_attr, j, _name) \ ko_attr.attr.name = __stringify(_name); \ ko_attr.attr.mode = 0644; \ ko_attr.show = psm_reg_##_name##_show; \ ko_attr.store = psm_reg_##_name##_store; \ sysfs_attr_init(&ko_attr.attr); \ _rail.attr_gp.attrs[j] = &ko_attr.attr; #define PSM_REG_MODE_FROM_ATTRIBS(attr) \ (container_of(attr, struct psm_rail, mode_attr)); #define PHASE_RW_ATTR(_phase, _name, _attr, j, _attr_gr) \ _attr.attr.name = __stringify(_name); \ _attr.attr.mode = 0644; \ _attr.show = _phase##_phase_show; \ _attr.store = _phase##_phase_store; \ sysfs_attr_init(&_attr.attr); \ _attr_gr.attrs[j] = &_attr.attr; #define MX_RW_ATTR(ko_attr, _name, _attr_gp) \ ko_attr.attr.name = __stringify(_name); \ ko_attr.attr.mode = 0644; \ ko_attr.show = show_mx_##_name; \ ko_attr.store = store_mx_##_name; \ sysfs_attr_init(&ko_attr.attr); \ _attr_gp.attrs[0] = &ko_attr.attr; //[BUGFIX]added By miao, bug 902619 #if defined(CONFIG_TCT_8X16_IDOL3) static unsigned int big_freq_limit, small_freq_limit; static int __init big_freq(char *str) { get_option(&str, &big_freq_limit); return 0; } early_param("big_freq", big_freq); static int __init small_freq(char *str) { get_option(&str, &small_freq_limit); return 0; } early_param("small_freq", small_freq); static int probe_boot_freq_limit(struct device *dev) { int i, j; uint32_t val = 0; int cluster_cnt = 0, ret = 0; char *key = "qcom,cluster-boot-freq-limit"; bool match_found = false; struct cluster_info 
*cluster_ptr = NULL; if (!of_get_property(dev->of_node, key, &cluster_cnt) || cluster_cnt <= 0 || !core_ptr) return -EINVAL; if (cluster_cnt % (sizeof(__be32) * 2)) { pr_err("Invalid number(%d) of entry for %s\n", cluster_cnt, key); return -EINVAL; } cluster_cnt /= (sizeof(__be32) * 2); if (cluster_cnt > core_ptr->entity_count) { pr_err("Invalid cluster count:%d\n", cluster_cnt); return -EINVAL; } for (i = 0; i < cluster_cnt; i++) { ret = of_property_read_u32_index(dev->of_node, key, i * 2, &val); if (ret) { pr_err("Error reading index%d\n", i * 2); return -EINVAL; } for (j = 0; j < core_ptr->entity_count; j++) { cluster_ptr = &core_ptr->child_entity_ptr[j]; if (cluster_ptr->cluster_id != val) continue; if (!cluster_ptr->sync_cluster) { pr_err("Cluster%d is not synchronous\n", val); break; } ret = of_property_read_u32_index(dev->of_node, key, i * 2 + 1, &val); if (ret) { pr_err("Error reading index%d\n", i * 2 + 1); return -EINVAL; } cluster_ptr->boot_max_freq_limit = val; if (cluster_ptr->cluster_id) cluster_ptr->boot_max_freq_limit = big_freq_limit ? big_freq_limit : cluster_ptr->boot_max_freq_limit; else cluster_ptr->boot_max_freq_limit = small_freq_limit ? 
small_freq_limit : cluster_ptr->boot_max_freq_limit;
			pr_err("Boot freq limit for cluster[%d] is %d\n",
				cluster_ptr->cluster_id,
				cluster_ptr->boot_max_freq_limit);
			match_found = true;
			/* Setting initial limiting_max_freq equal to
			 * boot_max_freq_limit to limit to boot freq
			 * frequency even if there is no trigger from
			 * thermal during boot */
			cluster_ptr->limited_max_freq =
				min(cluster_ptr->limited_max_freq,
					cluster_ptr->boot_max_freq_limit);
			break;
		}
	}
	if (match_found)
		boot_fmax_limit_enabled = true;
	return 0;
}
#endif
//[BUGFIX]Added END

/*
 * msm_thermal_cpufreq_callback() - cpufreq policy notifier.
 *
 * CPUFREQ_INCOMPATIBLE: clamp the policy to the current thermal
 * mitigation limits.  For a CPU in a synchronous cluster the aggregated
 * per-cluster limits are used; otherwise the per-CPU limits apply.
 * CPUFREQ_CREATE_POLICY: capture the newly created policy's frequency
 * table for the cluster that init_cluster_freq_table() is waiting on
 * (pending_cpu_freq / pending_freq_table_ptr handshake).
 */
static int msm_thermal_cpufreq_callback(struct notifier_block *nfb,
		unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	uint32_t max_freq_req, min_freq_req;

	switch (event) {
	case CPUFREQ_INCOMPATIBLE:
		/* Synchronous clusters share one aggregated limit pair. */
		if (SYNC_CORE(policy->cpu)) {
			max_freq_req =
				cpus[policy->cpu].parent_ptr->limited_max_freq;
			min_freq_req =
				cpus[policy->cpu].parent_ptr->limited_min_freq;
		} else {
			max_freq_req = cpus[policy->cpu].limited_max_freq;
			min_freq_req = cpus[policy->cpu].limited_min_freq;
		}
		pr_debug("mitigating CPU%d to freq max: %u min: %u\n",
			policy->cpu, max_freq_req, min_freq_req);

		cpufreq_verify_within_limits(policy, min_freq_req,
			max_freq_req);

		/* Diagnostic only: an inverted request is still handed to
		 * cpufreq_verify_within_limits() above. */
		if (max_freq_req < min_freq_req)
			pr_err("Invalid frequency request Max:%u Min:%u\n",
				max_freq_req, min_freq_req);
		break;

	case CPUFREQ_CREATE_POLICY:
		if (pending_cpu_freq != -1 &&
			(cpumask_first(policy->related_cpus) ==
			pending_cpu_freq)) {
			pr_debug("Updating freq plan for cpu: %d\n",
				policy->cpu);
			pending_freq_table_ptr = cpufreq_frequency_get_table(
				policy->cpu);
			pending_cpu_freq = -1;
		}
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block msm_thermal_cpufreq_notifier = {
	.notifier_call = msm_thermal_cpufreq_callback,
};

/*
 * update_cpu_freq() - re-evaluate the cpufreq policy of @cpu so the
 * limits enforced by msm_thermal_cpufreq_callback() take effect.
 * Offline CPUs are skipped; their limits apply when they come online.
 */
static void update_cpu_freq(int cpu)
{
	int ret = 0;

	if (cpu_online(cpu)) {
		trace_thermal_pre_frequency_mit(cpu,
			cpus[cpu].limited_max_freq,
			cpus[cpu].limited_min_freq);
		ret = cpufreq_update_policy(cpu);
		trace_thermal_post_frequency_mit(cpu,
			cpufreq_quick_get_max(cpu),
cpus[cpu].limited_min_freq); if (ret) pr_err("Unable to update policy for cpu:%d. err:%d\n", cpu, ret); } } static int * __init get_sync_cluster(struct device *dev, int *cnt) { int *sync_cluster = NULL, cluster_cnt = 0, ret = 0; char *key = "qcom,synchronous-cluster-id"; if (!of_get_property(dev->of_node, key, &cluster_cnt) || cluster_cnt <= 0 || !core_ptr) return NULL; cluster_cnt /= sizeof(__be32); if (cluster_cnt > core_ptr->entity_count) { pr_err("Invalid cluster count:%d\n", cluster_cnt); return NULL; } sync_cluster = devm_kzalloc(dev, sizeof(int) * cluster_cnt, GFP_KERNEL); if (!sync_cluster) { pr_err("Memory alloc failed\n"); return NULL; } ret = of_property_read_u32_array(dev->of_node, key, sync_cluster, cluster_cnt); if (ret) { pr_err("Error in reading property:%s. err:%d\n", key, ret); devm_kfree(dev, sync_cluster); return NULL; } *cnt = cluster_cnt; return sync_cluster; } static void update_cpu_datastructure(struct cluster_info *cluster_ptr, int *sync_cluster, int sync_cluster_cnt) { int i = 0; bool is_sync_cluster = false; for (i = 0; (sync_cluster) && (i < sync_cluster_cnt); i++) { if (cluster_ptr->cluster_id != sync_cluster[i]) continue; is_sync_cluster = true; break; } cluster_ptr->sync_cluster = is_sync_cluster; pr_debug("Cluster ID:%d Sync cluster:%s Sibling mask:%lu\n", cluster_ptr->cluster_id, is_sync_cluster ? 
"Yes" : "No", *cluster_ptr->cluster_cores.bits); for_each_cpu_mask(i, cluster_ptr->cluster_cores) { cpus[i].parent_ptr = cluster_ptr; } } static ssize_t cluster_info_show( struct kobject *kobj, struct kobj_attribute *attr, char *buf) { uint32_t i = 0; ssize_t tot_size = 0, size = 0; for (; i < core_ptr->entity_count; i++) { struct cluster_info *cluster_ptr = &core_ptr->child_entity_ptr[i]; size = snprintf(&buf[tot_size], PAGE_SIZE - tot_size, "%d:%lu:%d ", cluster_ptr->cluster_id, *cluster_ptr->cluster_cores.bits, cluster_ptr->sync_cluster); if ((tot_size + size) >= PAGE_SIZE) { pr_err("Not enough buffer size"); break; } tot_size += size; } return tot_size; } static struct kobj_attribute cluster_info_attr = __ATTR_RO(cluster_info); static int create_cpu_topology_sysfs(void) { int ret = 0; struct kobject *module_kobj = NULL; if (!cluster_info_probed) { cluster_info_nodes_called = true; return ret; } if (!core_ptr) return ret; module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME); if (!module_kobj) { pr_err("cannot find kobject\n"); return -ENODEV; } sysfs_attr_init(&cluster_info_attr.attr); ret = sysfs_create_file(module_kobj, &cluster_info_attr.attr); if (ret) { pr_err("cannot create cluster info attr group. 
err:%d\n", ret); return ret; } return ret; } static int get_device_tree_cluster_info(struct device *dev, int *cluster_id, cpumask_t *cluster_cpus) { int i, cluster_cnt = 0, ret = 0; uint32_t val = 0; char *key = "qcom,synchronous-cluster-map"; if (!of_get_property(dev->of_node, key, &cluster_cnt) || cluster_cnt <= 0) { pr_debug("Property %s not defined.\n", key); return -ENODEV; } if (cluster_cnt % (sizeof(__be32) * 2)) { pr_err("Invalid number(%d) of entry for %s\n", cluster_cnt, key); return -EINVAL; } cluster_cnt /= (sizeof(__be32) * 2); for (i = 0; i < cluster_cnt; i++) { ret = of_property_read_u32_index(dev->of_node, key, i * 2, &val); if (ret) { pr_err("Error reading index%d\n", i * 2); return -EINVAL; } cluster_id[i] = val; of_property_read_u32_index(dev->of_node, key, i * 2 + 1, &val); if (ret) { pr_err("Error reading index%d\n", i * 2 + 1); return -EINVAL; } *cluster_cpus[i].bits = val; } return cluster_cnt; } static int get_kernel_cluster_info(int *cluster_id, cpumask_t *cluster_cpus) { uint32_t _cpu, cluster_index, cluster_cnt; for (_cpu = 0, cluster_cnt = 0; _cpu < num_possible_cpus(); _cpu++) { if (topology_physical_package_id(_cpu) < 0) { pr_err("CPU%d topology not initialized.\n", _cpu); return -ENODEV; } /* Do not use the sibling cpumask from topology module. ** kernel topology module updates the sibling cpumask ** only when the cores are brought online for the first time. ** KTM figures out the sibling cpumask using the ** cluster and core ID mapping. 
*/ for (cluster_index = 0; cluster_index < num_possible_cpus(); cluster_index++) { if (cluster_id[cluster_index] == -1) { cluster_id[cluster_index] = topology_physical_package_id(_cpu); *cluster_cpus[cluster_index].bits = 0; cpumask_set_cpu(_cpu, &cluster_cpus[cluster_index]); cluster_cnt++; break; } if (cluster_id[cluster_index] == topology_physical_package_id(_cpu)) { cpumask_set_cpu(_cpu, &cluster_cpus[cluster_index]); break; } } } return cluster_cnt; } static void update_cpu_topology(struct device *dev) { int cluster_id[NR_CPUS] = {[0 ... NR_CPUS-1] = -1}; cpumask_t cluster_cpus[NR_CPUS]; uint32_t i, j; int cluster_cnt, cpu, sync_cluster_cnt = 0; struct cluster_info *temp_ptr = NULL; int *sync_cluster_id = NULL; cluster_info_probed = true; cluster_cnt = get_kernel_cluster_info(cluster_id, cluster_cpus); if (cluster_cnt <= 0) { cluster_cnt = get_device_tree_cluster_info(dev, cluster_id, cluster_cpus); if (cluster_cnt <= 0) { core_ptr = NULL; pr_debug("Cluster Info not defined. KTM continues.\n"); return; } } core_ptr = devm_kzalloc(dev, sizeof(struct cluster_info), GFP_KERNEL); if (!core_ptr) { pr_err("Memory alloc failed\n"); return; } core_ptr->parent_ptr = NULL; core_ptr->entity_count = cluster_cnt; core_ptr->cluster_id = -1; core_ptr->sync_cluster = false; temp_ptr = devm_kzalloc(dev, sizeof(struct cluster_info) * cluster_cnt, GFP_KERNEL); if (!temp_ptr) { pr_err("Memory alloc failed\n"); devm_kfree(dev, core_ptr); core_ptr = NULL; return; } sync_cluster_id = get_sync_cluster(dev, &sync_cluster_cnt); for (i = 0; i < cluster_cnt; i++) { pr_debug("Cluster_ID:%d CPU's:%lu\n", cluster_id[i], *cluster_cpus[i].bits); temp_ptr[i].cluster_id = cluster_id[i]; temp_ptr[i].parent_ptr = core_ptr; temp_ptr[i].cluster_cores = cluster_cpus[i]; temp_ptr[i].limited_max_freq = UINT_MAX; temp_ptr[i].limited_min_freq = 0; //[BUGFIX]added By miao, bug 902619 #if defined(CONFIG_TCT_8X16_IDOL3) temp_ptr[i].boot_max_freq_limit = UINT_MAX; #endif temp_ptr[i].freq_idx = 0; 
temp_ptr[i].freq_idx_low = 0; temp_ptr[i].freq_idx_high = 0; temp_ptr[i].freq_table = NULL; j = 0; for_each_cpu_mask(cpu, cluster_cpus[i]) j++; temp_ptr[i].entity_count = j; temp_ptr[i].child_entity_ptr = NULL; update_cpu_datastructure(&temp_ptr[i], sync_cluster_id, sync_cluster_cnt); } core_ptr->child_entity_ptr = temp_ptr; } static int __ref init_cluster_freq_table(void) { uint32_t _cluster = 0, _cpu = 0, table_len = 0, idx = 0; int ret = 0; struct cluster_info *cluster_ptr = NULL; struct cpufreq_policy *policy = NULL; struct cpufreq_frequency_table *freq_table_ptr = NULL; for (; _cluster < core_ptr->entity_count; _cluster++, table_len = 0, (policy && freq_table_ptr) ? cpufreq_cpu_put(policy) : 0, policy = NULL, freq_table_ptr = NULL) { cluster_ptr = &core_ptr->child_entity_ptr[_cluster]; if (cluster_ptr->freq_table) continue; for_each_cpu_mask(_cpu, cluster_ptr->cluster_cores) { policy = cpufreq_cpu_get(_cpu); if (!policy) continue; freq_table_ptr = cpufreq_frequency_get_table( policy->cpu); if (!freq_table_ptr) { cpufreq_cpu_put(policy); continue; } else { break; } } if (!freq_table_ptr) { _cpu = first_cpu(cluster_ptr->cluster_cores); pr_debug( "Online cpu%d in cluster%d to read cpufreq table\n", cluster_ptr->cluster_id, _cpu); pending_cpu_freq = _cpu; if (!cpu_online(_cpu)) { #ifdef CONFIG_SMP cpu_up(_cpu); cpu_down(_cpu); #endif } freq_table_ptr = pending_freq_table_ptr; } if (!freq_table_ptr) { pr_debug("Error reading cluster%d cpufreq table\n", cluster_ptr->cluster_id); ret = -EAGAIN; continue; } while (freq_table_ptr[table_len].frequency != CPUFREQ_TABLE_END) table_len++; cluster_ptr->freq_idx_low = 0; cluster_ptr->freq_idx_high = cluster_ptr->freq_idx = table_len - 1; if (cluster_ptr->freq_idx_high < 0 || (cluster_ptr->freq_idx_high < cluster_ptr->freq_idx_low)) { cluster_ptr->freq_idx = cluster_ptr->freq_idx_low = cluster_ptr->freq_idx_high = 0; WARN(1, "Cluster%d frequency table length:%d\n", cluster_ptr->cluster_id, table_len); ret = -EINVAL; goto 
release_and_exit; } cluster_ptr->freq_table = devm_kzalloc( &msm_thermal_info.pdev->dev, sizeof(struct cpufreq_frequency_table) * table_len, GFP_KERNEL); if (!cluster_ptr->freq_table) { pr_err("memory alloc failed\n"); cluster_ptr->freq_idx = cluster_ptr->freq_idx_low = cluster_ptr->freq_idx_high = 0; ret = -ENOMEM; goto release_and_exit; } for (idx = 0; idx < table_len; idx++) cluster_ptr->freq_table[idx].frequency = freq_table_ptr[idx].frequency; } return ret; release_and_exit: cpufreq_cpu_put(policy); return ret; } static void update_cluster_freq(void) { int online_cpu = -1; struct cluster_info *cluster_ptr = NULL; uint32_t _cluster = 0, _cpu = 0, max = UINT_MAX, min = 0; if (!core_ptr) return; for (; _cluster < core_ptr->entity_count; _cluster++, _cpu = 0, online_cpu = -1, max = UINT_MAX, min = 0) { /* ** If a cluster is synchronous, go over the frequency limits ** of each core in that cluster and aggregate the minimum ** and maximum frequencies. After aggregating, request for ** frequency update on the first online core in that cluster. ** Cpufreq driver takes care of updating the frequency of ** other cores in a synchronous cluster. 
*/ cluster_ptr = &core_ptr->child_entity_ptr[_cluster]; if (!cluster_ptr->sync_cluster) continue; for_each_cpu_mask(_cpu, cluster_ptr->cluster_cores) { if (online_cpu == -1 && cpu_online(_cpu)) online_cpu = _cpu; max = min(max, cpus[_cpu].limited_max_freq); min = max(min, cpus[_cpu].limited_min_freq); } //[BUGFIX]added By miao, bug 902619 #if defined(CONFIG_TCT_8X16_IDOL3) max = min(max, cluster_ptr->boot_max_freq_limit); #endif if (cluster_ptr->limited_max_freq == max && cluster_ptr->limited_min_freq == min) continue; cluster_ptr->limited_max_freq = max; cluster_ptr->limited_min_freq = min; if (online_cpu != -1) update_cpu_freq(online_cpu); } } static void do_cluster_freq_ctrl(long temp) { uint32_t _cluster = 0; int _cpu = -1, freq_idx = 0; bool mitigate = false; struct cluster_info *cluster_ptr = NULL; if (temp >= msm_thermal_info.limit_temp_degC) mitigate = true; else if (temp < msm_thermal_info.limit_temp_degC - msm_thermal_info.temp_hysteresis_degC) mitigate = false; else return; get_online_cpus(); for (; _cluster < core_ptr->entity_count; _cluster++) { cluster_ptr = &core_ptr->child_entity_ptr[_cluster]; if (!cluster_ptr->freq_table) continue; if (mitigate) freq_idx = max_t(int, cluster_ptr->freq_idx_low, (cluster_ptr->freq_idx - msm_thermal_info.bootup_freq_step)); else freq_idx = min_t(int, cluster_ptr->freq_idx_high, (cluster_ptr->freq_idx + msm_thermal_info.bootup_freq_step)); if (freq_idx == cluster_ptr->freq_idx) continue; cluster_ptr->freq_idx = freq_idx; for_each_cpu_mask(_cpu, cluster_ptr->cluster_cores) { if (!(msm_thermal_info.bootup_freq_control_mask & BIT(_cpu))) continue; pr_info("Limiting CPU%d max frequency to %u. 
Temp:%ld\n" , _cpu , cluster_ptr->freq_table[freq_idx].frequency , temp); cpus[_cpu].limited_max_freq = cluster_ptr->freq_table[freq_idx].frequency; } } if (_cpu != -1) update_cluster_freq(); put_online_cpus(); } /* If freq table exists, then we can send freq request */ static int check_freq_table(void) { int ret = 0; uint32_t i = 0; static bool invalid_table; if (invalid_table) return -EINVAL; if (freq_table_get) return 0; if (core_ptr) { ret = init_cluster_freq_table(); if (!ret) freq_table_get = 1; else if (ret == -EINVAL) invalid_table = true; return ret; } table = cpufreq_frequency_get_table(0); if (!table) { pr_debug("error reading cpufreq table\n"); return -EINVAL; } while (table[i].frequency != CPUFREQ_TABLE_END) i++; limit_idx_low = 0; limit_idx_high = limit_idx = i - 1; if (limit_idx_high < 0 || limit_idx_high < limit_idx_low) { invalid_table = true; table = NULL; limit_idx_low = limit_idx_high = limit_idx = 0; WARN(1, "CPU0 frequency table length:%d\n", i); return -EINVAL; } freq_table_get = 1; return 0; } static int update_cpu_min_freq_all(uint32_t min) { uint32_t cpu = 0, _cluster = 0; int ret = 0; struct cluster_info *cluster_ptr = NULL; bool valid_table = false; if (!freq_table_get) { ret = check_freq_table(); if (ret && !core_ptr) { pr_err("Fail to get freq table. 
err:%d\n", ret); return ret; } } /* If min is larger than allowed max */ if (core_ptr) { for (; _cluster < core_ptr->entity_count; _cluster++) { cluster_ptr = &core_ptr->child_entity_ptr[_cluster]; if (!cluster_ptr->freq_table) continue; valid_table = true; min = min(min, cluster_ptr->freq_table[ cluster_ptr->freq_idx_high].frequency); } if (!valid_table) return ret; } else { min = min(min, table[limit_idx_high].frequency); } pr_debug("Requesting min freq:%u for all CPU's\n", min); if (freq_mitigation_task) { min_freq_limit = min; complete(&freq_mitigation_complete); } else { get_online_cpus(); for_each_possible_cpu(cpu) { cpus[cpu].limited_min_freq = min; if (!SYNC_CORE(cpu)) update_cpu_freq(cpu); } update_cluster_freq(); put_online_cpus(); } return ret; } static int vdd_restriction_apply_freq(struct rail *r, int level) { int ret = 0; if (level == r->curr_level) return ret; /* level = -1: disable, level = 0,1,2..n: enable */ if (level == -1) { ret = update_cpu_min_freq_all(r->min_level); if (ret) return ret; else r->curr_level = -1; } else if (level >= 0 && level < (r->num_levels)) { ret = update_cpu_min_freq_all(r->levels[level]); if (ret) return ret; else r->curr_level = level; } else { pr_err("level input:%d is not within range\n", level); return -EINVAL; } return ret; } static int vdd_restriction_apply_voltage(struct rail *r, int level) { int ret = 0; if (r->reg == NULL) { pr_err("%s don't have regulator handle. can't apply vdd\n", r->name); return -EFAULT; } if (level == r->curr_level) return ret; /* level = -1: disable, level = 0,1,2..n: enable */ if (level == -1) { ret = regulator_set_voltage(r->reg, r->min_level, r->levels[r->num_levels - 1]); if (!ret) r->curr_level = -1; pr_debug("Requested min level for %s. 
curr level: %d\n", r->name, r->curr_level); } else if (level >= 0 && level < (r->num_levels)) { ret = regulator_set_voltage(r->reg, r->levels[level], r->levels[r->num_levels - 1]); if (!ret) r->curr_level = level; pr_debug("Requesting level %d for %s. curr level: %d\n", r->levels[level], r->name, r->levels[r->curr_level]); } else { pr_err("level input:%d is not within range\n", level); return -EINVAL; } return ret; } /* Setting all rails the same mode */ static int psm_set_mode_all(int mode) { int i = 0; int fail_cnt = 0; int ret = 0; pr_debug("Requesting PMIC Mode: %d\n", mode); for (i = 0; i < psm_rails_cnt; i++) { if (psm_rails[i].mode != mode) { ret = rpm_regulator_set_mode(psm_rails[i].reg, mode); if (ret) { pr_err("Cannot set mode:%d for %s. err:%d", mode, psm_rails[i].name, ret); fail_cnt++; } else psm_rails[i].mode = mode; } } return fail_cnt ? (-EFAULT) : ret; } static ssize_t vdd_rstr_en_show( struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct vdd_rstr_enable *en = VDD_RSTR_ENABLE_FROM_ATTRIBS(attr); return snprintf(buf, PAGE_SIZE, "%d\n", en->enabled); } static ssize_t vdd_rstr_en_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int ret = 0; int i = 0; uint8_t en_cnt = 0; uint8_t dis_cnt = 0; uint32_t val = 0; struct kernel_param kp; struct vdd_rstr_enable *en = VDD_RSTR_ENABLE_FROM_ATTRIBS(attr); mutex_lock(&vdd_rstr_mutex); kp.arg = &val; ret = param_set_bool(buf, &kp); if (ret) { pr_err("Invalid input %s for enabled\n", buf); goto done_vdd_rstr_en; } if ((val == 0) && (en->enabled == 0)) goto done_vdd_rstr_en; for (i = 0; i < rails_cnt; i++) { if (rails[i].freq_req == 1 && freq_table_get) ret = vdd_restriction_apply_freq(&rails[i], (val) ? 0 : -1); else ret = vdd_restriction_apply_voltage(&rails[i], (val) ? 0 : -1); /* * Even if fail to set one rail, still try to set the * others. 
Continue the loop */ if (ret) pr_err("Set vdd restriction for %s failed\n", rails[i].name); else { if (val) en_cnt++; else dis_cnt++; } } /* As long as one rail is enabled, vdd rstr is enabled */ if (val && en_cnt) en->enabled = 1; else if (!val && (dis_cnt == rails_cnt)) en->enabled = 0; pr_debug("%s vdd restriction. curr: %d\n", (val) ? "Enable" : "Disable", en->enabled); done_vdd_rstr_en: mutex_unlock(&vdd_rstr_mutex); return count; } static int send_temperature_band(enum msm_thermal_phase_ctrl phase, enum msm_temp_band req_band) { int ret = 0; uint32_t msg_id; struct msm_rpm_request *rpm_req; unsigned int band = req_band; uint32_t key, resource, resource_id; if (phase < 0 || phase >= MSM_PHASE_CTRL_NR || req_band <= 0 || req_band >= MSM_TEMP_MAX_NR) { pr_err("Invalid input\n"); ret = -EINVAL; goto phase_ctrl_exit; } switch (phase) { case MSM_CX_PHASE_CTRL: key = msm_thermal_info.cx_phase_request_key; break; case MSM_GFX_PHASE_CTRL: key = msm_thermal_info.gfx_phase_request_key; break; default: goto phase_ctrl_exit; break; } resource = msm_thermal_info.phase_rpm_resource_type; resource_id = msm_thermal_info.phase_rpm_resource_id; pr_debug("Sending %s temperature band %d\n", (phase == MSM_CX_PHASE_CTRL) ? "CX" : "GFX", req_band); rpm_req = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET, resource, resource_id, 1); if (!rpm_req) { pr_err("Creating RPM request failed\n"); ret = -ENXIO; goto phase_ctrl_exit; } ret = msm_rpm_add_kvp_data(rpm_req, key, (const uint8_t *)&band, (int)sizeof(band)); if (ret) { pr_err("Adding KVP data failed. err:%d\n", ret); goto free_rpm_handle; } msg_id = msm_rpm_send_request(rpm_req); if (!msg_id) { pr_err("RPM send request failed\n"); ret = -ENXIO; goto free_rpm_handle; } ret = msm_rpm_wait_for_ack(msg_id); if (ret) { pr_err("RPM wait for ACK failed. 
err:%d\n", ret); goto free_rpm_handle; } free_rpm_handle: msm_rpm_free_request(rpm_req); phase_ctrl_exit: return ret; } static uint32_t msm_thermal_str_to_int(const char *inp) { int i, len; uint32_t output = 0; len = strnlen(inp, sizeof(uint32_t)); for (i = 0; i < len; i++) output |= inp[i] << (i * 8); return output; } static ssize_t sensor_info_show( struct kobject *kobj, struct kobj_attribute *attr, char *buf) { int i; ssize_t tot_size = 0, size = 0; for (i = 0; i < sensor_cnt; i++) { size = snprintf(&buf[tot_size], PAGE_SIZE - tot_size, "%s:%s:%s:%d ", sensors[i].type, sensors[i].name, sensors[i].alias ? : "", sensors[i].scaling_factor); if (tot_size + size >= PAGE_SIZE) { pr_err("Not enough buffer size\n"); break; } tot_size += size; } if (tot_size) buf[tot_size - 1] = '\n'; return tot_size; } static struct vdd_rstr_enable vdd_rstr_en = { .ko_attr.attr.name = __stringify(enabled), .ko_attr.attr.mode = 0644, .ko_attr.show = vdd_rstr_en_show, .ko_attr.store = vdd_rstr_en_store, .enabled = 1, }; static struct attribute *vdd_rstr_en_attribs[] = { &vdd_rstr_en.ko_attr.attr, NULL, }; static struct attribute_group vdd_rstr_en_attribs_gp = { .attrs = vdd_rstr_en_attribs, }; static ssize_t vdd_rstr_reg_value_show( struct kobject *kobj, struct kobj_attribute *attr, char *buf) { int val = 0; struct rail *reg = VDD_RSTR_REG_VALUE_FROM_ATTRIBS(attr); /* -1:disabled, -2:fail to get regualtor handle */ if (reg->curr_level < 0) val = reg->curr_level; else val = reg->levels[reg->curr_level]; return snprintf(buf, PAGE_SIZE, "%d\n", val); } static ssize_t vdd_rstr_reg_level_show( struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct rail *reg = VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr); return snprintf(buf, PAGE_SIZE, "%d\n", reg->curr_level); } static ssize_t vdd_rstr_reg_level_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int ret = 0; int val = 0; struct rail *reg = VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr); 
mutex_lock(&vdd_rstr_mutex); if (vdd_rstr_en.enabled == 0) goto done_store_level; ret = kstrtouint(buf, 10, &val); if (ret) { pr_err("Invalid input %s for level\n", buf); goto done_store_level; } if (val < 0 || val > reg->num_levels - 1) { pr_err(" Invalid number %d for level\n", val); goto done_store_level; } if (val != reg->curr_level) { if (reg->freq_req == 1 && freq_table_get) update_cpu_min_freq_all(reg->levels[val]); else { ret = vdd_restriction_apply_voltage(reg, val); if (ret) { pr_err( \ "Set vdd restriction for regulator %s failed. err:%d\n", reg->name, ret); goto done_store_level; } } reg->curr_level = val; pr_debug("Request level %d for %s\n", reg->curr_level, reg->name); } done_store_level: mutex_unlock(&vdd_rstr_mutex); return count; } static int request_optimum_current(struct psm_rail *rail, enum ocr_request req) { int ret = 0; if ((!rail) || (req >= OPTIMUM_CURRENT_NR) || (req < 0)) { pr_err("Invalid input %d\n", req); ret = -EINVAL; goto request_ocr_exit; } ret = regulator_set_optimum_mode(rail->phase_reg, (req == OPTIMUM_CURRENT_MAX) ? MAX_CURRENT_UA : 0); if (ret < 0) { pr_err("Optimum current request failed. 
err:%d\n", ret); goto request_ocr_exit; } ret = 0; /*regulator_set_optimum_mode returns the mode on success*/ pr_debug("Requested optimum current mode: %d\n", req); request_ocr_exit: return ret; } static int ocr_set_mode_all(enum ocr_request req) { int ret = 0, i; for (i = 0; i < ocr_rail_cnt; i++) { if (ocr_rails[i].mode == req) continue; ret = request_optimum_current(&ocr_rails[i], req); if (ret) goto ocr_set_mode_exit; ocr_rails[i].mode = req; } ocr_set_mode_exit: return ret; } static ssize_t ocr_reg_mode_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr); return snprintf(buf, PAGE_SIZE, "%d\n", reg->mode); } static ssize_t ocr_reg_mode_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int ret = 0; int val = 0; struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr); if (!ocr_enabled) return count; mutex_lock(&ocr_mutex); ret = kstrtoint(buf, 10, &val); if (ret) { pr_err("Invalid input %s for mode. err:%d\n", buf, ret); goto done_ocr_store; } if ((val != OPTIMUM_CURRENT_MAX) && (val != OPTIMUM_CURRENT_MIN)) { pr_err("Invalid value %d for mode\n", val); goto done_ocr_store; } if (val != reg->mode) { ret = request_optimum_current(reg, val); if (ret) goto done_ocr_store; reg->mode = val; } done_ocr_store: mutex_unlock(&ocr_mutex); return count; } static ssize_t store_phase_request(const char *buf, size_t count, bool is_cx) { int ret = 0, val; struct mutex *phase_mutex = (is_cx) ? (&cx_mutex) : (&gfx_mutex); enum msm_thermal_phase_ctrl phase_req = (is_cx) ? MSM_CX_PHASE_CTRL : MSM_GFX_PHASE_CTRL; ret = kstrtoint(buf, 10, &val); if (ret) { pr_err("Invalid input %s for %s temperature band\n", buf, (is_cx) ? "CX" : "GFX"); goto phase_store_exit; } if ((val <= 0) || (val >= MSM_TEMP_MAX_NR)) { pr_err("Invalid input %d for %s temperature band\n", val, (is_cx) ? "CX" : "GFX"); ret = -EINVAL; goto phase_store_exit; } mutex_lock(phase_mutex); if (val != ((is_cx) ? 
curr_cx_band : curr_gfx_band)) { ret = send_temperature_band(phase_req, val); if (!ret) { *((is_cx) ? &curr_cx_band : &curr_gfx_band) = val; } else { pr_err("Failed to send %d temp. band to %s rail\n", val, (is_cx) ? "CX" : "GFX"); goto phase_store_unlock_exit; } } ret = count; phase_store_unlock_exit: mutex_unlock(phase_mutex); phase_store_exit: return ret; } #define show_phase(_name, _variable) \ static ssize_t _name##_phase_show(struct kobject *kobj, \ struct kobj_attribute *attr, char *buf) \ { \ return snprintf(buf, PAGE_SIZE, "%u\n", _variable); \ } #define store_phase(_name, _variable, _iscx) \ static ssize_t _name##_phase_store(struct kobject *kobj, \ struct kobj_attribute *attr, const char *buf, size_t count) \ { \ return store_phase_request(buf, count, _iscx); \ } show_phase(gfx, curr_gfx_band) show_phase(cx, curr_cx_band) store_phase(gfx, curr_gfx_band, false) store_phase(cx, curr_cx_band, true) static ssize_t psm_reg_mode_show( struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr); return snprintf(buf, PAGE_SIZE, "%d\n", reg->mode); } static ssize_t psm_reg_mode_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int ret = 0; int val = 0; struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr); mutex_lock(&psm_mutex); ret = kstrtoint(buf, 10, &val); if (ret) { pr_err("Invalid input %s for mode\n", buf); goto done_psm_store; } if ((val != PMIC_PWM_MODE) && (val != PMIC_AUTO_MODE)) { pr_err("Invalid number %d for mode\n", val); goto done_psm_store; } if (val != reg->mode) { ret = rpm_regulator_set_mode(reg->reg, val); if (ret) { pr_err("Fail to set Mode:%d for %s. 
err:%d\n", val, reg->name, ret);
			goto done_psm_store;
		}
		reg->mode = val;
	}

done_psm_store:
	mutex_unlock(&psm_mutex);
	return count;
}

/* Validate that sensor_id is one of the known TSENS hardware ids. */
static int check_sensor_id(int sensor_id)
{
	int i = 0;
	bool hw_id_found = false;
	int ret = 0;

	for (i = 0; i < max_tsens_num; i++) {
		if (sensor_id == tsens_id_map[i]) {
			hw_id_found = true;
			break;
		}
	}
	if (!hw_id_found) {
		pr_err("Invalid sensor hw id:%d\n", sensor_id);
		return -EINVAL;
	}

	return ret;
}

/*
 * Build tsens_id_map[]: logical sensor index -> hardware sensor id.
 * Targets that do not implement tsens_get_hw_id_mapping() return -ENXIO,
 * in which case an identity mapping is used.  Frees the map again on any
 * other error.
 */
static int create_sensor_id_map(void)
{
	int i = 0;
	int ret = 0;

	tsens_id_map = kzalloc(sizeof(int) * max_tsens_num, GFP_KERNEL);
	if (!tsens_id_map) {
		pr_err("Cannot allocate memory for tsens_id_map\n");
		return -ENOMEM;
	}

	for (i = 0; i < max_tsens_num; i++) {
		ret = tsens_get_hw_id_mapping(i, &tsens_id_map[i]);
		/* If return -ENXIO, hw_id is default in sequence */
		if (ret) {
			if (ret == -ENXIO) {
				tsens_id_map[i] = i;
				ret = 0;
			} else {
				pr_err("Failed to get hw id for id:%d.err:%d\n",
					i, ret);
				goto fail;
			}
		}
	}

	return ret;
fail:
	kfree(tsens_id_map);
	return ret;
}

/*
 * Apply (en=1) or lift (en=0) the VDD restriction on every rail: a
 * frequency floor for frequency-based rails (only once the frequency
 * table is available), a voltage floor otherwise.  The restriction is
 * considered enabled as long as at least one rail took it, and only
 * considered disabled when every rail released it.
 */
/* 1:enable, 0:disable */
static int vdd_restriction_apply_all(int en)
{
	int i = 0;
	int en_cnt = 0;
	int dis_cnt = 0;
	int fail_cnt = 0;
	int ret = 0;

	for (i = 0; i < rails_cnt; i++) {
		if (rails[i].freq_req == 1)
			if (freq_table_get)
				ret = vdd_restriction_apply_freq(&rails[i],
					en ? 0 : -1);
			else
				continue;
		else
			ret = vdd_restriction_apply_voltage(&rails[i],
				en ? 0 : -1);
		if (ret) {
			pr_err("Failed to %s for %s. err:%d",
				(en) ? "enable" : "disable",
				rails[i].name, ret);
			fail_cnt++;
		} else {
			if (en)
				en_cnt++;
			else
				dis_cnt++;
		}
	}

	/* As long as one rail is enabled, vdd rstr is enabled */
	if (en && en_cnt)
		vdd_rstr_en.enabled = 1;
	else if (!en && (dis_cnt == rails_cnt))
		vdd_rstr_en.enabled = 0;

	/*
	 * Check fail_cnt again to make sure all of the rails applied the
	 * restriction successfully or not
	 */
	if (fail_cnt)
		return -EFAULT;
	return ret;
}

/* Program one trip point into the sensor framework and arm it. */
static int set_and_activate_threshold(uint32_t sensor_id,
	struct sensor_threshold *threshold)
{
	int ret = 0;

	ret = sensor_set_trip(sensor_id, threshold);
	if (ret != 0) {
		pr_err("sensor:%u Error in setting trip:%d. err:%d\n",
			sensor_id, threshold->trip, ret);
		goto set_done;
	}

	ret = sensor_activate_trip(sensor_id, threshold, true);
	if (ret != 0) {
		pr_err("sensor:%u Error in enabling trip:%d. err:%d\n",
			sensor_id, threshold->trip, ret);
		goto set_done;
	}

set_done:
	return ret;
}

/*
 * Read a temperature either through the thermal-zone layer
 * (THERM_ZONE_ID) or directly from the TSENS driver (THERM_TSENS_ID).
 * @id meaning depends on @type; result is stored in *temp.
 */
static int therm_get_temp(uint32_t id, enum sensor_id_type type, long *temp)
{
	int ret = 0;
	struct tsens_device tsens_dev;

	if (!temp) {
		pr_err("Invalid value\n");
		ret = -EINVAL;
		goto get_temp_exit;
	}

	switch (type) {
	case THERM_ZONE_ID:
		ret = sensor_get_temp(id, temp);
		if (ret) {
			pr_err("Unable to read thermal zone sensor:%d\n", id);
			goto get_temp_exit;
		}
		break;
	case THERM_TSENS_ID:
		tsens_dev.sensor_num = id;
		ret = tsens_get_temp(&tsens_dev, temp);
		if (ret) {
			pr_err("Unable to read TSENS sensor:%d\n",
				tsens_dev.sensor_num);
			goto get_temp_exit;
		}
		break;
	default:
		pr_err("Invalid type\n");
		ret = -EINVAL;
		goto get_temp_exit;
	}

get_temp_exit:
	return ret;
}

/* Panic notifier: snapshot every TSENS temperature for post-mortem. */
static int msm_thermal_panic_callback(struct notifier_block *nfb,
	unsigned long event, void *data)
{
	int i;

	for (i = 0; i < max_tsens_num; i++)
		therm_get_temp(tsens_id_map[i], THERM_TSENS_ID,
			&tsens_temp_at_panic[i]);

	return NOTIFY_OK;
}

static struct notifier_block msm_thermal_panic_notifier = {
	.notifier_call = msm_thermal_panic_callback,
};

/*
 * Arm every trip in @threshold that is on the "armed" side of the
 * current zone temperature (HI trips at/above, LOW trips at/below).
 */
static int set_threshold(uint32_t zone_id,
	struct sensor_threshold *threshold)
{
	int i = 0, ret = 
0;
	long temp;

	if (!threshold) {
		pr_err("Invalid input\n");
		ret = -EINVAL;
		goto set_threshold_exit;
	}

	ret = therm_get_temp(zone_id, THERM_ZONE_ID, &temp);
	if (ret) {
		pr_err("Unable to read temperature for zone:%d. err:%d\n",
			zone_id, ret);
		goto set_threshold_exit;
	}

	while (i < MAX_THRESHOLD) {
		switch (threshold[i].trip) {
		case THERMAL_TRIP_CONFIGURABLE_HI:
			/* only arm a HI trip that is still above us */
			if (threshold[i].temp >= temp) {
				ret = set_and_activate_threshold(zone_id,
					&threshold[i]);
				if (ret)
					goto set_threshold_exit;
			}
			break;
		case THERMAL_TRIP_CONFIGURABLE_LOW:
			/* only arm a LOW trip that is still below us */
			if (threshold[i].temp <= temp) {
				ret = set_and_activate_threshold(zone_id,
					&threshold[i]);
				if (ret)
					goto set_threshold_exit;
			}
			break;
		default:
			pr_err("zone:%u Invalid trip:%d\n", zone_id,
				threshold[i].trip);
			break;
		}
		i++;
	}
set_threshold_exit:
	return ret;
}

/*
 * Vote a voltage floor of vdd_mx_min on the MX rail and enable it.
 * Idempotent: a no-op while the restriction is already in place.
 */
static int apply_vdd_mx_restriction(void)
{
	int ret = 0;

	if (mx_restr_applied)
		goto done;

	ret = regulator_set_voltage(vdd_mx, msm_thermal_info.vdd_mx_min,
		INT_MAX);
	if (ret) {
		pr_err("Failed to add mx vote, error %d\n", ret);
		goto done;
	}

	ret = regulator_enable(vdd_mx);
	if (ret)
		pr_err("Failed to vote for mx voltage %d, error %d\n",
			msm_thermal_info.vdd_mx_min, ret);
	else
		mx_restr_applied = true;

done:
	return ret;
}

/*
 * Drop the MX rail vote again (disable + 0V floor).  Idempotent: a no-op
 * while no restriction is in place.
 */
static int remove_vdd_mx_restriction(void)
{
	int ret = 0;

	if (!mx_restr_applied)
		goto done;

	ret = regulator_disable(vdd_mx);
	if (ret) {
		pr_err("Failed to disable mx voting, error %d\n", ret);
		goto done;
	}

	ret = regulator_set_voltage(vdd_mx, 0, INT_MAX);
	if (ret)
		pr_err("Failed to remove mx vote, error %d\n", ret);
	else
		mx_restr_applied = false;

done:
	return ret;
}

/*
 * Polling-mode MX restriction: raise the MX floor while any monitored
 * sensor is at or below vdd_mx_temp_degC (cold), lift it once every
 * sensor is back above temp + hysteresis.  Unreadable sensors count
 * toward the "warm" side so a dead sensor cannot pin the restriction.
 */
static int do_vdd_mx(void)
{
	long temp = 0;
	int ret = 0;
	int i = 0;
	int dis_cnt = 0;

	if (!vdd_mx_enabled)
		return ret;

	mutex_lock(&vdd_mx_mutex);
	for (i = 0; i < thresh[MSM_VDD_MX_RESTRICTION].thresh_ct; i++) {
		ret = therm_get_temp(
			thresh[MSM_VDD_MX_RESTRICTION].thresh_list[i].sensor_id,
			thresh[MSM_VDD_MX_RESTRICTION].thresh_list[i].id_type,
			&temp);
		if (ret) {
			pr_err("Unable to read TSENS sensor:%d, err:%d\n",
				thresh[MSM_VDD_MX_RESTRICTION].thresh_list[i].
				sensor_id, ret);
			dis_cnt++;
			continue;
		}
		if (temp <= msm_thermal_info.vdd_mx_temp_degC) {
			ret = apply_vdd_mx_restriction();
			if (ret)
				pr_err("Failed to apply mx restriction\n");
			goto exit;
		} else if (temp >= (msm_thermal_info.vdd_mx_temp_degC +
				msm_thermal_info.vdd_mx_temp_hyst_degC)) {
			dis_cnt++;
		}
	}

	if ((dis_cnt == thresh[MSM_VDD_MX_RESTRICTION].thresh_ct)) {
		ret = remove_vdd_mx_restriction();
		if (ret)
			pr_err("Failed to remove vdd mx restriction\n");
	}
exit:
	mutex_unlock(&vdd_mx_mutex);
	return ret;
}

/*
 * Interrupt-mode MX restriction: track which sensors are below their LOW
 * trip in a bitmask and hold the MX floor while any bit is set.  Always
 * re-arms the sensor's thresholds before returning.
 */
static void vdd_mx_notify(struct therm_threshold *trig_thresh)
{
	static uint32_t mx_sens_status;
	int ret;

	pr_debug("Sensor%d trigger recevied for type %d\n",
		trig_thresh->sensor_id,
		trig_thresh->trip_triggered);

	if (!vdd_mx_enabled)
		return;

	mutex_lock(&vdd_mx_mutex);

	switch (trig_thresh->trip_triggered) {
	case THERMAL_TRIP_CONFIGURABLE_LOW:
		mx_sens_status |= BIT(trig_thresh->sensor_id);
		break;
	case THERMAL_TRIP_CONFIGURABLE_HI:
		if (mx_sens_status & BIT(trig_thresh->sensor_id))
			mx_sens_status ^= BIT(trig_thresh->sensor_id);
		break;
	default:
		pr_err("Unsupported trip type\n");
		break;
	}

	if (mx_sens_status) {
		ret = apply_vdd_mx_restriction();
		if (ret)
			pr_err("Failed to apply mx restriction\n");
	} else if (!mx_sens_status) {
		ret = remove_vdd_mx_restriction();
		if (ret)
			pr_err("Failed to remove vdd mx restriction\n");
	}
	mutex_unlock(&vdd_mx_mutex);
	set_threshold(trig_thresh->sensor_id, trig_thresh->threshold);
}

/*
 * Trigger a secure-watchdog ("thermal bite") reset through SCM when a
 * reset-level temperature is reached.  Does not return on success.
 */
static void msm_thermal_bite(int tsens_id, long temp)
{
	struct scm_desc desc;

	pr_err("TSENS:%d reached temperature:%ld. 
System reset\n", tsens_id, temp);
	if (!is_scm_armv8()) {
		scm_call_atomic1(SCM_SVC_BOOT, THERM_SECURE_BITE_CMD, 0);
	} else {
		desc.args[0] = 0;
		desc.arginfo = SCM_ARGS(1);
		scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT,
				THERM_SECURE_BITE_CMD), &desc);
	}
}

/*
 * Polling-mode thermal reset: read every reset sensor and bite (reset
 * the system) as soon as one reaches therm_reset_temp_degC.  Unreadable
 * sensors are skipped.
 */
static int do_therm_reset(void)
{
	int ret = 0, i;
	long temp = 0;

	if (!therm_reset_enabled)
		return ret;

	for (i = 0; i < thresh[MSM_THERM_RESET].thresh_ct; i++) {
		ret = therm_get_temp(
			thresh[MSM_THERM_RESET].thresh_list[i].sensor_id,
			thresh[MSM_THERM_RESET].thresh_list[i].id_type,
			&temp);
		if (ret) {
			pr_err("Unable to read TSENS sensor:%d. err:%d\n",
				thresh[MSM_THERM_RESET].thresh_list[i].sensor_id,
				ret);
			continue;
		}

		if (temp >= msm_thermal_info.therm_reset_temp_degC)
			msm_thermal_bite(
				thresh[MSM_THERM_RESET].thresh_list[i].sensor_id,
				temp);
	}

	return ret;
}

/*
 * Interrupt-mode thermal reset: a HI trip bites immediately; a LOW trip
 * is ignored.  Re-arms the thresholds before returning.
 * NOTE(review): 'temp' is not initialized and the bite still proceeds
 * when therm_get_temp() fails, so the logged temperature can be garbage
 * in that (already fatal) path — confirm intended.
 */
static void therm_reset_notify(struct therm_threshold *thresh_data)
{
	long temp;
	int ret = 0;

	if (!therm_reset_enabled)
		return;

	if (!thresh_data) {
		pr_err("Invalid input\n");
		return;
	}

	switch (thresh_data->trip_triggered) {
	case THERMAL_TRIP_CONFIGURABLE_HI:
		ret = therm_get_temp(thresh_data->sensor_id,
				thresh_data->id_type, &temp);
		if (ret)
			pr_err("Unable to read TSENS sensor:%d. 
err:%d\n",
				thresh_data->sensor_id, ret);
		msm_thermal_bite(tsens_id_map[thresh_data->sensor_id],
					temp);
		break;
	case THERMAL_TRIP_CONFIGURABLE_LOW:
		break;
	default:
		pr_err("Invalid trip type\n");
		break;
	}
	set_threshold(thresh_data->sensor_id, thresh_data->threshold);
}

#ifdef CONFIG_SMP
/*
 * Polling-mode core control: offline one mask-selected CPU per pass
 * while above core_limit_temp_degC; bring one back per pass once below
 * the limit minus hysteresis.
 * NOTE(review): the offline loop runs i = num_possible_cpus() down to 1,
 * so the first iteration tests BIT() one past the last CPU and CPU0 is
 * never considered for offlining — confirm this asymmetry is intended.
 */
static void __ref do_core_control(long temp)
{
	int i = 0;
	int ret = 0;

	if (!core_control_enabled)
		return;

	mutex_lock(&core_control_mutex);
	if (msm_thermal_info.core_control_mask &&
		temp >= msm_thermal_info.core_limit_temp_degC) {
		for (i = num_possible_cpus(); i > 0; i--) {
			if (!(msm_thermal_info.core_control_mask & BIT(i)))
				continue;
			if (cpus_offlined & BIT(i) && !cpu_online(i))
				continue;
			pr_info("Set Offline: CPU%d Temp: %ld\n",
					i, temp);
			trace_thermal_pre_core_offline(i);
			ret = cpu_down(i);
			if (ret)
				pr_err("Error %d offline core %d\n",
					ret, i);
			trace_thermal_post_core_offline(i,
				cpumask_test_cpu(i, cpu_online_mask));
			cpus_offlined |= BIT(i);
			break;
		}
	} else if (msm_thermal_info.core_control_mask && cpus_offlined &&
		temp <= (msm_thermal_info.core_limit_temp_degC -
			msm_thermal_info.core_temp_hysteresis_degC)) {
		for (i = 0; i < num_possible_cpus(); i++) {
			if (!(cpus_offlined & BIT(i)))
				continue;
			cpus_offlined &= ~BIT(i);
			pr_info("Allow Online CPU%d Temp: %ld\n",
					i, temp);
			/*
			 * If this core is already online, then bring up the
			 * next offlined core. 
*/
			if (cpu_online(i))
				continue;
			trace_thermal_pre_core_online(i);
			ret = cpu_up(i);
			if (ret)
				pr_err("Error %d online core %d\n",
						ret, i);
			trace_thermal_post_core_online(i,
				cpumask_test_cpu(i, cpu_online_mask));
			break;
		}
	}
	mutex_unlock(&core_control_mutex);
}

/*
 * Reconcile the actual online/offline CPU state with the requested mask
 * (restricted to core_control_mask).  CPUs newly offlined in the mask
 * are brought down; CPUs cleared from the previous mask are brought back
 * up when online_core allows it (NOTIFY_BAD vetoes are tolerated).
 */
/* Call with core_control_mutex locked */
static int __ref update_offline_cores(int val)
{
	uint32_t cpu = 0;
	int ret = 0;
	uint32_t previous_cpus_offlined = 0;

	if (!core_control_enabled)
		return 0;

	previous_cpus_offlined = cpus_offlined;
	cpus_offlined = msm_thermal_info.core_control_mask & val;

	for_each_possible_cpu(cpu) {
		if (cpus_offlined & BIT(cpu)) {
			if (!cpu_online(cpu))
				continue;
			trace_thermal_pre_core_offline(cpu);
			ret = cpu_down(cpu);
			if (ret)
				pr_err("Unable to offline CPU%d. err:%d\n",
					cpu, ret);
			else
				pr_debug("Offlined CPU%d\n", cpu);
			trace_thermal_post_core_offline(cpu,
				cpumask_test_cpu(cpu, cpu_online_mask));
		} else if (online_core && (previous_cpus_offlined &
			BIT(cpu))) {
			if (cpu_online(cpu))
				continue;
			trace_thermal_pre_core_online(cpu);
			ret = cpu_up(cpu);
			if (ret && ret == notifier_to_errno(NOTIFY_BAD))
				pr_debug("Onlining CPU%d is vetoed\n", cpu);
			else if (ret)
				pr_err("Unable to online CPU%d. err:%d\n",
					cpu, ret);
			else
				pr_debug("Onlined CPU%d\n", cpu);
			trace_thermal_post_core_online(cpu,
				cpumask_test_cpu(cpu, cpu_online_mask));
		}
	}
	return ret;
}

/*
 * Hotplug worker thread (SCHED_FIFO): waits for hotplug_notify_complete,
 * re-arms cleared per-CPU thresholds, folds thermal and user offline
 * requests into a mask, applies it and notifies sysfs watchers.
 */
static __ref int do_hotplug(void *data)
{
	int ret = 0;
	uint32_t cpu = 0, mask = 0;
	struct sched_param param = {.sched_priority = MAX_RT_PRIO-2};

	if (!core_control_enabled) {
		pr_debug("Core control disabled\n");
		return -EINVAL;
	}

	sched_setscheduler(current, SCHED_FIFO, &param);
	while (!kthread_should_stop()) {
		while (wait_for_completion_interruptible(
			&hotplug_notify_complete) != 0)
			;
		INIT_COMPLETION(hotplug_notify_complete);
		mask = 0;

		mutex_lock(&core_control_mutex);
		for_each_possible_cpu(cpu) {
			if (hotplug_enabled &&
				cpus[cpu].hotplug_thresh_clear) {
				set_threshold(cpus[cpu].sensor_id,
				&cpus[cpu].threshold[HOTPLUG_THRESHOLD_HIGH]);

				cpus[cpu].hotplug_thresh_clear = false;
			}
			if (cpus[cpu].offline || cpus[cpu].user_offline)
				mask |= BIT(cpu);
		}
		if (mask != cpus_offlined)
			update_offline_cores(mask);
		mutex_unlock(&core_control_mutex);
		sysfs_notify(cc_kobj, NULL, "cpus_offlined");
	}

	return ret;
}
#else
/* !CONFIG_SMP stubs: core control is meaningless on UP. */
static void __ref do_core_control(long temp)
{
	return;
}

static __ref int do_hotplug(void *data)
{
	return 0;
}

static int __ref update_offline_cores(int val)
{
	return 0;
}
#endif

/*
 * Polling-mode GFX phase control: read the warm sensor when warm control
 * is enabled, else the hot sensor, then run the band state machine.
 */
static int do_gfx_phase_cond(void)
{
	long temp = 0;
	int ret = 0;
	uint32_t new_req_band = curr_gfx_band;

	if (!gfx_warm_phase_ctrl_enabled && !gfx_crit_phase_ctrl_enabled)
		return ret;

	mutex_lock(&gfx_mutex);
	if (gfx_warm_phase_ctrl_enabled) {
		ret = therm_get_temp(
			thresh[MSM_GFX_PHASE_CTRL_WARM].thresh_list->sensor_id,
			thresh[MSM_GFX_PHASE_CTRL_WARM].thresh_list->id_type,
			&temp);
		if (ret) {
			pr_err("Unable to read TSENS sensor:%d. err:%d\n",
			thresh[MSM_GFX_PHASE_CTRL_WARM].thresh_list->sensor_id,
				ret);
			goto gfx_phase_cond_exit;
		}
	} else {
		ret = therm_get_temp(
			thresh[MSM_GFX_PHASE_CTRL_HOT].thresh_list->sensor_id,
			thresh[MSM_GFX_PHASE_CTRL_HOT].thresh_list->id_type,
			&temp);
		if (ret) {
			pr_err("Unable to read TSENS sensor:%d. 
err:%d\n", thresh[MSM_GFX_PHASE_CTRL_HOT].thresh_list->sensor_id, ret); goto gfx_phase_cond_exit; } } switch (curr_gfx_band) { case MSM_HOT_CRITICAL: if (temp < (msm_thermal_info.gfx_phase_hot_temp_degC - msm_thermal_info.gfx_phase_hot_temp_hyst_degC)) new_req_band = MSM_WARM; break; case MSM_WARM: if (temp >= msm_thermal_info.gfx_phase_hot_temp_degC) new_req_band = MSM_HOT_CRITICAL; else if (temp < (msm_thermal_info.gfx_phase_warm_temp_degC - msm_thermal_info.gfx_phase_warm_temp_hyst_degC)) new_req_band = MSM_NORMAL; break; case MSM_NORMAL: if (temp >= msm_thermal_info.gfx_phase_warm_temp_degC) new_req_band = MSM_WARM; break; default: if (temp >= msm_thermal_info.gfx_phase_hot_temp_degC) new_req_band = MSM_HOT_CRITICAL; else if (temp >= msm_thermal_info.gfx_phase_warm_temp_degC) new_req_band = MSM_WARM; else new_req_band = MSM_NORMAL; break; } if (new_req_band != curr_gfx_band) { ret = send_temperature_band(MSM_GFX_PHASE_CTRL, new_req_band); if (!ret) { pr_debug("Reached %d band. Temp:%ld\n", new_req_band, temp); curr_gfx_band = new_req_band; } else { pr_err("Error sending temp. band:%d. Temp:%ld. err:%d", new_req_band, temp, ret); } } gfx_phase_cond_exit: mutex_unlock(&gfx_mutex); return ret; } static int do_cx_phase_cond(void) { long temp = 0; int i, ret = 0, dis_cnt = 0; if (!cx_phase_ctrl_enabled) return ret; mutex_lock(&cx_mutex); for (i = 0; i < thresh[MSM_CX_PHASE_CTRL_HOT].thresh_ct; i++) { ret = therm_get_temp( thresh[MSM_CX_PHASE_CTRL_HOT].thresh_list[i].sensor_id, thresh[MSM_CX_PHASE_CTRL_HOT].thresh_list[i].id_type, &temp); if (ret) { pr_err("Unable to read TSENS sensor:%d. 
err:%d\n", thresh[MSM_CX_PHASE_CTRL_HOT].thresh_list[i].sensor_id, ret); dis_cnt++; continue; } if (temp >= msm_thermal_info.cx_phase_hot_temp_degC) { if (curr_cx_band != MSM_HOT_CRITICAL) { ret = send_temperature_band(MSM_CX_PHASE_CTRL, MSM_HOT_CRITICAL); if (!ret) { pr_debug("band:HOT_CRITICAL Temp:%ld\n", temp); curr_cx_band = MSM_HOT_CRITICAL; } else { pr_err("Error %d sending HOT_CRITICAL", ret); } } goto cx_phase_cond_exit; } else if (temp < (msm_thermal_info.cx_phase_hot_temp_degC - msm_thermal_info.cx_phase_hot_temp_hyst_degC)) dis_cnt++; } if (dis_cnt == max_tsens_num && curr_cx_band != MSM_WARM) { ret = send_temperature_band(MSM_CX_PHASE_CTRL, MSM_WARM); if (!ret) { pr_debug("band:WARM Temp:%ld\n", temp); curr_cx_band = MSM_WARM; } else { pr_err("Error sending WARM temp band. err:%d", ret); } } cx_phase_cond_exit: mutex_unlock(&cx_mutex); return ret; } static int do_ocr(void) { long temp = 0; int ret = 0; int i = 0, j = 0; int pfm_cnt = 0; if (!ocr_enabled) return ret; mutex_lock(&ocr_mutex); for (i = 0; i < thresh[MSM_OCR].thresh_ct; i++) { ret = therm_get_temp( thresh[MSM_OCR].thresh_list[i].sensor_id, thresh[MSM_OCR].thresh_list[i].id_type, &temp); if (ret) { pr_err("Unable to read TSENS sensor %d. err:%d\n", thresh[MSM_OCR].thresh_list[i].sensor_id, ret); pfm_cnt++; continue; } if (temp > msm_thermal_info.ocr_temp_degC) { if (ocr_rails[0].init != OPTIMUM_CURRENT_NR) for (j = 0; j < ocr_rail_cnt; j++) ocr_rails[j].init = OPTIMUM_CURRENT_NR; ret = ocr_set_mode_all(OPTIMUM_CURRENT_MAX); if (ret) pr_err("Error setting max ocr. err:%d\n", ret); else pr_debug("Requested MAX OCR. tsens:%d Temp:%ld", thresh[MSM_OCR].thresh_list[i].sensor_id, temp); goto do_ocr_exit; } else if (temp <= (msm_thermal_info.ocr_temp_degC - msm_thermal_info.ocr_temp_hyst_degC)) pfm_cnt++; } if (pfm_cnt == thresh[MSM_OCR].thresh_ct || ocr_rails[0].init != OPTIMUM_CURRENT_NR) { /* 'init' not equal to OPTIMUM_CURRENT_NR means this is the ** first polling iteration after device probe. 
During first ** iteration, if temperature is less than the set point, clear ** the max current request made and reset the 'init'. */ if (ocr_rails[0].init != OPTIMUM_CURRENT_NR) for (j = 0; j < ocr_rail_cnt; j++) ocr_rails[j].init = OPTIMUM_CURRENT_NR; ret = ocr_set_mode_all(OPTIMUM_CURRENT_MIN); if (ret) { pr_err("Error setting min ocr. err:%d\n", ret); goto do_ocr_exit; } else { pr_debug("Requested MIN OCR. Temp:%ld", temp); } } do_ocr_exit: mutex_unlock(&ocr_mutex); return ret; } static int do_vdd_restriction(void) { long temp = 0; int ret = 0; int i = 0; int dis_cnt = 0; if (!vdd_rstr_enabled) return ret; if (usefreq && !freq_table_get) { if (check_freq_table() && !core_ptr) return ret; } mutex_lock(&vdd_rstr_mutex); for (i = 0; i < thresh[MSM_VDD_RESTRICTION].thresh_ct; i++) { ret = therm_get_temp( thresh[MSM_VDD_RESTRICTION].thresh_list[i].sensor_id, thresh[MSM_VDD_RESTRICTION].thresh_list[i].id_type, &temp); if (ret) { pr_err("Unable to read TSENS sensor:%d. err:%d\n", thresh[MSM_VDD_RESTRICTION].thresh_list[i].sensor_id, ret); dis_cnt++; continue; } if (temp <= msm_thermal_info.vdd_rstr_temp_degC) { ret = vdd_restriction_apply_all(1); if (ret) { pr_err( \ "Enable vdd rstr for all failed. err:%d\n", ret); goto exit; } pr_debug("Enabled Vdd Restriction tsens:%d. Temp:%ld\n", thresh[MSM_VDD_RESTRICTION].thresh_list[i].sensor_id, temp); goto exit; } else if (temp > msm_thermal_info.vdd_rstr_temp_hyst_degC) dis_cnt++; } if (dis_cnt == max_tsens_num) { ret = vdd_restriction_apply_all(0); if (ret) { pr_err("Disable vdd rstr for all failed. err:%d\n", ret); goto exit; } pr_debug("Disabled Vdd Restriction\n"); } exit: mutex_unlock(&vdd_rstr_mutex); return ret; } static int do_psm(void) { long temp = 0; int ret = 0; int i = 0; int auto_cnt = 0; mutex_lock(&psm_mutex); for (i = 0; i < max_tsens_num; i++) { ret = therm_get_temp(tsens_id_map[i], THERM_TSENS_ID, &temp); if (ret) { pr_err("Unable to read TSENS sensor:%d. 
err:%d\n",
				tsens_id_map[i], ret);
			auto_cnt++;
			continue;
		}

		/*
		 * As long as one sensor is above the threshold, set PWM mode
		 * on all rails, and the loop stops.  Set auto mode when all
		 * rails are below the threshold.
		 */
		if (temp >  msm_thermal_info.psm_temp_degC) {
			ret = psm_set_mode_all(PMIC_PWM_MODE);
			if (ret) {
				pr_err("Set pwm mode for all failed. err:%d\n",
					ret);
				goto exit;
			}
			pr_debug("Requested PMIC PWM Mode tsens:%d. Temp:%ld\n",
					tsens_id_map[i], temp);
			break;
		} else if (temp <= msm_thermal_info.psm_temp_hyst_degC)
			auto_cnt++;
	}

	if (auto_cnt == max_tsens_num) {
		ret = psm_set_mode_all(PMIC_AUTO_MODE);
		if (ret) {
			pr_err("Set auto mode for all failed. err:%d\n", ret);
			goto exit;
		}
		pr_debug("Requested PMIC AUTO Mode\n");
	}

exit:
	mutex_unlock(&psm_mutex);
	return ret;
}

/*
 * Polling-mode boot-up frequency mitigation: step the shared frequency
 * cap down by bootup_freq_step while hot, back up when cool, and push
 * the new cap to every CPU in bootup_freq_control_mask.  Delegates to
 * the per-cluster path when a topology (core_ptr) is available.
 */
static void do_freq_control(long temp)
{
	uint32_t cpu = 0;
	uint32_t max_freq = cpus[cpu].limited_max_freq;

	if (core_ptr)
		return do_cluster_freq_ctrl(temp);
	if (!freq_table_get)
		return;

	if (temp >= msm_thermal_info.limit_temp_degC) {
		if (limit_idx == limit_idx_low)
			return;

		limit_idx -= msm_thermal_info.bootup_freq_step;
		if (limit_idx < limit_idx_low)
			limit_idx = limit_idx_low;
		max_freq = table[limit_idx].frequency;
	} else if (temp < msm_thermal_info.limit_temp_degC -
		 msm_thermal_info.temp_hysteresis_degC) {
		if (limit_idx == limit_idx_high)
			return;

		limit_idx += msm_thermal_info.bootup_freq_step;
		if (limit_idx >= limit_idx_high) {
			limit_idx = limit_idx_high;
			/* fully recovered: remove the cap entirely */
			max_freq = UINT_MAX;
		} else
			max_freq = table[limit_idx].frequency;
	}

	if (max_freq == cpus[cpu].limited_max_freq)
		return;

	/* Update new limits */
	get_online_cpus();
	for_each_possible_cpu(cpu) {
		if (!(msm_thermal_info.bootup_freq_control_mask & BIT(cpu)))
			continue;
		pr_info("Limiting CPU%d max frequency to %u. Temp:%ld\n",
			cpu, max_freq, temp);
		cpus[cpu].limited_max_freq = max_freq;
		if (!SYNC_CORE(cpu))
			update_cpu_freq(cpu);
	}
	update_cluster_freq();
	put_online_cpus();
}

/*
 * Main polling work: read the primary sensor and run every polling-mode
 * mitigation, then reschedule itself while polling is enabled.
 */
static void check_temp(struct work_struct *work)
{
	long temp = 0;
	int ret = 0;

	do_therm_reset();

	ret = therm_get_temp(msm_thermal_info.sensor_id, THERM_TSENS_ID,
			&temp);
	if (ret) {
		pr_err("Unable to read TSENS sensor:%d. err:%d\n",
				msm_thermal_info.sensor_id, ret);
		goto reschedule;
	}
	do_core_control(temp);
	do_vdd_mx();
	do_psm();
	do_gfx_phase_cond();
	do_cx_phase_cond();
	do_ocr();

	/*
	 * All mitigation involving CPU frequency should be placed below
	 * this check.  The mitigation following this frequency table
	 * check should be able to handle the failure case.
	 */
	if (!freq_table_get)
		check_freq_table();

	do_vdd_restriction();
	do_freq_control(temp);

reschedule:
	if (polling_enabled)
		schedule_delayed_work(&check_temp_work,
				msecs_to_jiffies(msm_thermal_info.poll_ms));
}

/*
 * CPU hotplug notifier: veto (NOTIFY_BAD) any attempt to online a CPU
 * that thermal core control currently holds offline.
 */
static int __ref msm_thermal_cpu_callback(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	uint32_t cpu = (uintptr_t)hcpu;

	if (action == CPU_UP_PREPARE || action == CPU_UP_PREPARE_FROZEN) {
		if (core_control_enabled &&
			(msm_thermal_info.core_control_mask & BIT(cpu)) &&
			(cpus_offlined & BIT(cpu))) {
			pr_debug("Preventing CPU%d from coming online.\n",
				cpu);
			return NOTIFY_BAD;
		}
	}

	pr_debug("voting for CPU%d to be online\n", cpu);
	return NOTIFY_OK;
}

static struct notifier_block __refdata msm_thermal_cpu_notifier = {
	.notifier_call = msm_thermal_cpu_callback,
};

/*
 * Per-CPU hotplug threshold callback: mark the CPU for offline on a HI
 * trip, clear it on a LOW trip, then wake the hotplug worker.
 */
static int hotplug_notify(enum thermal_trip_type type, int temp, void *data)
{
	struct cpu_info *cpu_node = (struct cpu_info *)data;

	pr_info("%s reach temp threshold: %d\n", cpu_node->sensor_type, temp);

	if (!(msm_thermal_info.core_control_mask & BIT(cpu_node->cpu)))
		return 0;
	switch (type) {
	case THERMAL_TRIP_CONFIGURABLE_HI:
		if (!(cpu_node->offline))
			cpu_node->offline = 1;
		break;
	case THERMAL_TRIP_CONFIGURABLE_LOW:
		if (cpu_node->offline)
			cpu_node->offline = 0;
		break;
	default:
break;
	}
	if (hotplug_task) {
		cpu_node->hotplug_thresh_clear = true;
		complete(&hotplug_notify_complete);
	} else
		pr_err("Hotplug task is not initialized\n");
	return 0;
}

/* Adjust cpus offlined bit based on temperature reading. */
static int hotplug_init_cpu_offlined(void)
{
	long temp = 0;
	uint32_t cpu = 0;

	if (!hotplug_enabled)
		return 0;

	mutex_lock(&core_control_mutex);
	for_each_possible_cpu(cpu) {
		if (!(msm_thermal_info.core_control_mask &
			BIT(cpus[cpu].cpu)))
			continue;
		if (therm_get_temp(cpus[cpu].sensor_id, cpus[cpu].id_type,
					&temp)) {
			pr_err("Unable to read TSENS sensor:%d.\n",
				cpus[cpu].sensor_id);
			mutex_unlock(&core_control_mutex);
			return -EINVAL;
		}

		if (temp >= msm_thermal_info.hotplug_temp_degC)
			cpus[cpu].offline = 1;
		else if (temp <= (msm_thermal_info.hotplug_temp_degC -
			msm_thermal_info.hotplug_temp_hysteresis_degC))
			cpus[cpu].offline = 0;
	}
	mutex_unlock(&core_control_mutex);

	if (hotplug_task)
		complete(&hotplug_notify_complete);
	else {
		pr_err("Hotplug task is not initialized\n");
		return -EINVAL;
	}
	return 0;
}

/*
 * One-time hotplug setup: resolve each controlled CPU's sensor, program
 * its high/low trip pair and start the hotplug worker thread.
 */
static void hotplug_init(void)
{
	uint32_t cpu = 0;
	struct sensor_threshold *hi_thresh = NULL, *low_thresh = NULL;

	if (hotplug_task)
		return;

	if (!hotplug_enabled)
		goto init_kthread;

	for_each_possible_cpu(cpu) {
		cpus[cpu].sensor_id =
			sensor_get_id((char *)cpus[cpu].sensor_type);
		cpus[cpu].id_type = THERM_ZONE_ID;
		if (!(msm_thermal_info.core_control_mask &
			BIT(cpus[cpu].cpu)))
			continue;
		hi_thresh = &cpus[cpu].threshold[HOTPLUG_THRESHOLD_HIGH];
		low_thresh = &cpus[cpu].threshold[HOTPLUG_THRESHOLD_LOW];
		hi_thresh->temp = msm_thermal_info.hotplug_temp_degC;
		hi_thresh->trip = THERMAL_TRIP_CONFIGURABLE_HI;
		low_thresh->temp = msm_thermal_info.hotplug_temp_degC -
				msm_thermal_info.hotplug_temp_hysteresis_degC;
		low_thresh->trip = THERMAL_TRIP_CONFIGURABLE_LOW;
		hi_thresh->notify = low_thresh->notify = hotplug_notify;
		hi_thresh->data = low_thresh->data = (void *)&cpus[cpu];

		set_threshold(cpus[cpu].sensor_id, hi_thresh);
	}
init_kthread:
	init_completion(&hotplug_notify_complete);
	hotplug_task = kthread_run(do_hotplug, NULL, "msm_thermal:hotplug");
	if (IS_ERR(hotplug_task)) {
		pr_err("Failed to create do_hotplug thread. err:%ld\n",
				PTR_ERR(hotplug_task));
		return;
	}
	/*
	 * Adjust cpus offlined bit when hotplug initializes so that the new
	 * cpus offlined state is based on hotplug threshold range
	 */
	if (hotplug_init_cpu_offlined())
		kthread_stop(hotplug_task);
}

/*
 * Frequency-mitigation worker thread (SCHED_FIFO): waits for
 * freq_mitigation_complete, recomputes each CPU's max/min limits from
 * thermal and userspace requests, applies any changes and re-arms
 * cleared thresholds.
 */
static __ref int do_freq_mitigation(void *data)
{
	int ret = 0;
	uint32_t cpu = 0, max_freq_req = 0, min_freq_req = 0;
	struct sched_param param = {.sched_priority = MAX_RT_PRIO-1};

	sched_setscheduler(current, SCHED_FIFO, &param);
	while (!kthread_should_stop()) {
		while (wait_for_completion_interruptible(
			&freq_mitigation_complete) != 0)
			;
		INIT_COMPLETION(freq_mitigation_complete);

		for_each_possible_cpu(cpu) {
			/* thermal cap, further clamped by the user cap */
			max_freq_req = (cpus[cpu].max_freq) ?
					msm_thermal_info.freq_limit :
					UINT_MAX;
			max_freq_req = min(max_freq_req,
					cpus[cpu].user_max_freq);

			min_freq_req = max(min_freq_limit,
					cpus[cpu].user_min_freq);

			if ((max_freq_req == cpus[cpu].limited_max_freq)
				&& (min_freq_req ==
				cpus[cpu].limited_min_freq))
				goto reset_threshold;

			cpus[cpu].limited_max_freq = max_freq_req;
			cpus[cpu].limited_min_freq = min_freq_req;
			if (!SYNC_CORE(cpu))
				update_cpu_freq(cpu);
reset_threshold:
			if (freq_mitigation_enabled &&
				cpus[cpu].freq_thresh_clear) {
				set_threshold(cpus[cpu].sensor_id,
				&cpus[cpu].threshold[FREQ_THRESHOLD_HIGH]);

				cpus[cpu].freq_thresh_clear = false;
			}
		}
		update_cluster_freq();
	}
	return ret;
}

/*
 * Per-CPU frequency threshold callback: set the thermal frequency cap on
 * a HI trip, remove it on a LOW trip, then wake the mitigation worker.
 */
static int freq_mitigation_notify(enum thermal_trip_type type,
	int temp, void *data)
{
	struct cpu_info *cpu_node = (struct cpu_info *) data;

	pr_debug("%s reached temp threshold: %d\n",
		cpu_node->sensor_type, temp);

	if (!(msm_thermal_info.freq_mitig_control_mask &
		BIT(cpu_node->cpu)))
		return 0;

	switch (type) {
	case THERMAL_TRIP_CONFIGURABLE_HI:
		if (!cpu_node->max_freq) {
			pr_info("Mitigating CPU%d frequency to %d\n",
				cpu_node->cpu, msm_thermal_info.freq_limit);
cpu_node->max_freq = true;
		}
		break;
	case THERMAL_TRIP_CONFIGURABLE_LOW:
		if (cpu_node->max_freq) {
			pr_info("Removing frequency mitigation for CPU%d\n",
				cpu_node->cpu);

			cpu_node->max_freq = false;
		}
		break;
	default:
		break;
	}

	if (freq_mitigation_task) {
		cpu_node->freq_thresh_clear = true;
		complete(&freq_mitigation_complete);
	} else {
		pr_err("Frequency mitigation task is not initialized\n");
	}

	return 0;
}

/*
 * One-time frequency-mitigation setup: program each controlled CPU's
 * high/low frequency trip pair and start the mitigation worker thread.
 */
static void freq_mitigation_init(void)
{
	uint32_t cpu = 0;
	struct sensor_threshold *hi_thresh = NULL, *low_thresh = NULL;

	if (freq_mitigation_task)
		return;
	if (!freq_mitigation_enabled)
		goto init_freq_thread;

	for_each_possible_cpu(cpu) {
		if (!(msm_thermal_info.freq_mitig_control_mask & BIT(cpu)))
			continue;
		hi_thresh = &cpus[cpu].threshold[FREQ_THRESHOLD_HIGH];
		low_thresh = &cpus[cpu].threshold[FREQ_THRESHOLD_LOW];

		hi_thresh->temp = msm_thermal_info.freq_mitig_temp_degc;
		hi_thresh->trip = THERMAL_TRIP_CONFIGURABLE_HI;
		low_thresh->temp = msm_thermal_info.freq_mitig_temp_degc -
			msm_thermal_info.freq_mitig_temp_hysteresis_degc;
		low_thresh->trip = THERMAL_TRIP_CONFIGURABLE_LOW;
		hi_thresh->notify = low_thresh->notify =
			freq_mitigation_notify;
		hi_thresh->data = low_thresh->data = (void *)&cpus[cpu];

		set_threshold(cpus[cpu].sensor_id, hi_thresh);
	}
init_freq_thread:
	init_completion(&freq_mitigation_complete);
	freq_mitigation_task = kthread_run(do_freq_mitigation, NULL,
			"msm_thermal:freq_mitig");

	if (IS_ERR(freq_mitigation_task)) {
		pr_err("Failed to create frequency mitigation thread. err:%ld\n",
				PTR_ERR(freq_mitigation_task));
		return;
	}
}

/*
 * Public API: report the clock-plan length (number of frequency steps)
 * of @cluster in *table_len.  Returns -ENODEV without topology, -EINVAL
 * for bad arguments, an unknown cluster, or an uninitialized plan.
 */
int msm_thermal_get_freq_plan_size(uint32_t cluster, unsigned int *table_len)
{
	uint32_t i = 0;
	struct cluster_info *cluster_ptr = NULL;

	if (!core_ptr) {
		pr_err("Topology ptr not initialized\n");
		return -ENODEV;
	}
	if (!table_len) {
		pr_err("Invalid input\n");
		return -EINVAL;
	}
	if (!freq_table_get)
		check_freq_table();

	for (; i < core_ptr->entity_count; i++) {
		cluster_ptr = &core_ptr->child_entity_ptr[i];
		if (cluster_ptr->cluster_id == cluster) {
			if (!cluster_ptr->freq_table) {
				pr_err("Cluster%d clock plan not initialized\n",
					cluster);
				return -EINVAL;
			}
			*table_len = cluster_ptr->freq_idx_high + 1;
			return 0;
		}
	}

	pr_err("Invalid cluster ID:%d\n", cluster);
	return -EINVAL;
}

/*
 * Public API: copy @cluster's clock plan (frequencies, ascending index)
 * into caller-provided @table_ptr, which must hold at least the count
 * reported by msm_thermal_get_freq_plan_size().
 */
int msm_thermal_get_cluster_freq_plan(uint32_t cluster,
	unsigned int *table_ptr)
{
	uint32_t i = 0;
	struct cluster_info *cluster_ptr = NULL;

	if (!core_ptr) {
		pr_err("Topology ptr not initialized\n");
		return -ENODEV;
	}
	if (!table_ptr) {
		pr_err("Invalid input\n");
		return -EINVAL;
	}
	if (!freq_table_get)
		check_freq_table();

	for (; i < core_ptr->entity_count; i++) {
		cluster_ptr = &core_ptr->child_entity_ptr[i];
		if (cluster_ptr->cluster_id == cluster)
			break;
	}
	if (i == core_ptr->entity_count) {
		pr_err("Invalid cluster ID:%d\n", cluster);
		return -EINVAL;
	}
	if (!cluster_ptr->freq_table) {
		pr_err("Cluster%d clock plan not initialized\n", cluster);
		return -EINVAL;
	}

	for (i = 0; i <= cluster_ptr->freq_idx_high; i++)
		table_ptr[i] = cluster_ptr->freq_table[i].frequency;

	return 0;
}

/*
 * Public API: set a user max (is_max) or min frequency request for every
 * core of a synchronous @cluster and wake the mitigation worker if any
 * per-CPU request actually changed.
 */
int msm_thermal_set_cluster_freq(uint32_t cluster, uint32_t freq, bool is_max)
{
	int ret = 0;
	uint32_t i = 0;
	struct cluster_info *cluster_ptr = NULL;
	bool notify = false;

	if (!core_ptr) {
		pr_err("Topology ptr not initialized\n");
		return -ENODEV;
	}

	for (; i < core_ptr->entity_count; i++) {
		cluster_ptr = &core_ptr->child_entity_ptr[i];
		if (cluster_ptr->cluster_id != cluster)
			continue;
		if (!cluster_ptr->sync_cluster) {
			pr_err("Cluster%d is not synchronous\n", cluster);
			return -EINVAL;
		} else {
			pr_debug("Update Cluster%d %s frequency to %d\n",
				cluster, (is_max) ? "max" : "min", freq);
			break;
		}
	}
	if (i == core_ptr->entity_count) {
		pr_err("Invalid cluster ID:%d\n", cluster);
		return -EINVAL;
	}

	for_each_cpu_mask(i, cluster_ptr->cluster_cores) {
		uint32_t *freq_ptr = (is_max) ? &cpus[i].user_max_freq
					: &cpus[i].user_min_freq;
		if (*freq_ptr == freq)
			continue;
		notify = true;
		*freq_ptr = freq;
	}

	if (freq_mitigation_task) {
		if (notify)
			complete(&freq_mitigation_complete);
	} else {
		pr_err("Frequency mitigation task is not initialized\n");
		return -ESRCH;
	}

	return ret;
}

/*
 * Public API: set a per-CPU user max (is_max) or min frequency request
 * and wake the mitigation worker to apply it.
 */
int msm_thermal_set_frequency(uint32_t cpu, uint32_t freq, bool is_max)
{
	int ret = 0;

	if (cpu >= num_possible_cpus()) {
		pr_err("Invalid input\n");
		ret = -EINVAL;
		goto set_freq_exit;
	}

	pr_debug("Userspace requested %s frequency %u for CPU%u\n",
			(is_max) ? "Max" : "Min", freq, cpu);
	if (is_max) {
		if (cpus[cpu].user_max_freq == freq)
			goto set_freq_exit;

		cpus[cpu].user_max_freq = freq;
	} else {
		if (cpus[cpu].user_min_freq == freq)
			goto set_freq_exit;

		cpus[cpu].user_min_freq = freq;
	}

	if (freq_mitigation_task) {
		complete(&freq_mitigation_complete);
	} else {
		pr_err("Frequency mitigation task is not initialized\n");
		ret = -ESRCH;
		goto set_freq_exit;
	}

set_freq_exit:
	return ret;
}

/*
 * Reset and re-arm every threshold in @thresh_inp.  Individual failures
 * are remembered (last error is returned) but do not stop the loop.
 */
int therm_set_threshold(struct threshold_info *thresh_inp)
{
	int ret = 0, i = 0, err = 0;
	struct therm_threshold *thresh_ptr;

	if (!thresh_inp) {
		pr_err("Invalid input\n");
		ret = -EINVAL;
		goto therm_set_exit;
	}

	thresh_inp->thresh_triggered = false;
	for (i = 0; i < thresh_inp->thresh_ct; i++) {
		thresh_ptr = &thresh_inp->thresh_list[i];
		thresh_ptr->trip_triggered = -1;
		err = set_threshold(thresh_ptr->sensor_id,
			thresh_ptr->threshold);
		if (err) {
			ret = err;
			err = 0;
		}
	}

therm_set_exit:
	return ret;
}

/*
 * Interrupt-mode CX phase control: track hot sensors in a bitmask and
 * request HOT_CRITICAL while any bit is set, WARM otherwise.  Always
 * re-arms the sensor's thresholds on exit.
 */
static void cx_phase_ctrl_notify(struct therm_threshold *trig_thresh)
{
	static uint32_t cx_sens_status;
	int ret = 0;

	if (!cx_phase_ctrl_enabled)
		return;

	if (trig_thresh->trip_triggered < 0)
		goto cx_phase_ctrl_exit;
	mutex_lock(&cx_mutex);
	pr_debug("sensor:%d reached %s thresh for CX\n",
		tsens_id_map[trig_thresh->sensor_id],
		(trig_thresh->trip_triggered == THERMAL_TRIP_CONFIGURABLE_HI)
		? "hot critical" : "warm");
	switch (trig_thresh->trip_triggered) {
	case THERMAL_TRIP_CONFIGURABLE_HI:
		/* Sensor crossed the hot trip: mark it in the bitmask. */
		cx_sens_status |= BIT(trig_thresh->sensor_id);
		break;
	case THERMAL_TRIP_CONFIGURABLE_LOW:
		if (cx_sens_status & BIT(trig_thresh->sensor_id))
			cx_sens_status ^= BIT(trig_thresh->sensor_id);
		break;
	default:
		pr_err("Unsupported trip type\n");
		goto cx_phase_unlock_exit;
		break;
	}
	/* No vote needed if we are already in the requested band. */
	if ((cx_sens_status && (curr_cx_band == MSM_HOT_CRITICAL)) ||
		(!cx_sens_status && (curr_cx_band == MSM_WARM)))
		goto cx_phase_unlock_exit;
	ret = send_temperature_band(MSM_CX_PHASE_CTRL,
		(cx_sens_status) ? MSM_HOT_CRITICAL : MSM_WARM);
	if (!ret)
		curr_cx_band = (cx_sens_status) ? MSM_HOT_CRITICAL : MSM_WARM;
cx_phase_unlock_exit:
	mutex_unlock(&cx_mutex);
cx_phase_ctrl_exit:
	/* Always re-arm the threshold for the next trip. */
	set_threshold(trig_thresh->sensor_id, trig_thresh->threshold);
	return;
}

/*
 * Threshold callback for GFX phase control.  Derives the requested band
 * from whichever of the hot-critical / warm threshold sets fired, votes
 * it if it changed, then re-arms the threshold sets relevant to the band
 * we ended up in.
 */
static void gfx_phase_ctrl_notify(struct therm_threshold *trig_thresh)
{
	uint32_t new_req_band = curr_gfx_band;
	int ret = 0;

	if (!gfx_warm_phase_ctrl_enabled && !gfx_crit_phase_ctrl_enabled)
		return;

	if (trig_thresh->trip_triggered < 0)
		goto gfx_phase_ctrl_exit;

	mutex_lock(&gfx_mutex);
	if (gfx_crit_phase_ctrl_enabled) {
		switch (
		thresh[MSM_GFX_PHASE_CTRL_HOT].thresh_list->trip_triggered) {
		case THERMAL_TRIP_CONFIGURABLE_HI:
			new_req_band = MSM_HOT_CRITICAL;
			pr_debug(
			"sensor:%d reached hot critical thresh for GFX\n",
				tsens_id_map[trig_thresh->sensor_id]);
			goto notify_new_band;
			break;
		case THERMAL_TRIP_CONFIGURABLE_LOW:
			new_req_band = MSM_WARM;
			pr_debug("sensor:%d reached warm thresh for GFX\n",
				tsens_id_map[trig_thresh->sensor_id]);
			goto notify_new_band;
			break;
		default:
			break;
		}
	}
	if (gfx_warm_phase_ctrl_enabled) {
		switch (
		thresh[MSM_GFX_PHASE_CTRL_WARM].thresh_list->trip_triggered) {
		case THERMAL_TRIP_CONFIGURABLE_HI:
			new_req_band = MSM_WARM;
			pr_debug("sensor:%d reached warm thresh for GFX\n",
				tsens_id_map[trig_thresh->sensor_id]);
			goto notify_new_band;
			break;
		case THERMAL_TRIP_CONFIGURABLE_LOW:
			new_req_band = MSM_NORMAL;
			pr_debug("sensor:%d reached normal thresh for GFX\n",
				tsens_id_map[trig_thresh->sensor_id]);
			goto notify_new_band;
			break;
		default:
			break;
		}
	}
notify_new_band:
	if (new_req_band != curr_gfx_band) {
		ret = send_temperature_band(MSM_GFX_PHASE_CTRL, new_req_band);
		if (!ret)
			curr_gfx_band = new_req_band;
	}
	mutex_unlock(&gfx_mutex);
gfx_phase_ctrl_exit:
	/* Re-arm only the threshold sets meaningful for the current band. */
	switch (curr_gfx_band) {
	case MSM_HOT_CRITICAL:
		if (gfx_crit_phase_ctrl_enabled)
			therm_set_threshold(&thresh[MSM_GFX_PHASE_CTRL_HOT]);
		break;
	case MSM_NORMAL:
		if (gfx_warm_phase_ctrl_enabled)
			therm_set_threshold(&thresh[MSM_GFX_PHASE_CTRL_WARM]);
		break;
	case MSM_WARM:
	default:
		if (gfx_crit_phase_ctrl_enabled)
			therm_set_threshold(&thresh[MSM_GFX_PHASE_CTRL_HOT]);
		if (gfx_warm_phase_ctrl_enabled)
			therm_set_threshold(&thresh[MSM_GFX_PHASE_CTRL_WARM]);
		break;
	}
	return;
}

/*
 * Threshold callback for vdd restriction.  Note the inverted bit logic
 * relative to the other callbacks: the HI trip CLEARS the sensor bit and
 * the LOW trip SETS it, because the restriction applies at low
 * temperatures.
 */
static void vdd_restriction_notify(struct therm_threshold *trig_thresh)
{
	int ret = 0;
	static uint32_t vdd_sens_status;

	if (!vdd_rstr_enabled)
		return;
	if (!trig_thresh) {
		pr_err("Invalid input\n");
		return;
	}
	if (trig_thresh->trip_triggered < 0)
		goto set_and_exit;

	mutex_lock(&vdd_rstr_mutex);
	pr_debug("sensor:%d reached %s thresh for Vdd restriction\n",
		tsens_id_map[trig_thresh->sensor_id],
		(trig_thresh->trip_triggered == THERMAL_TRIP_CONFIGURABLE_HI)
		? "high" : "low");
	switch (trig_thresh->trip_triggered) {
	case THERMAL_TRIP_CONFIGURABLE_HI:
		if (vdd_sens_status & BIT(trig_thresh->sensor_id))
			vdd_sens_status ^= BIT(trig_thresh->sensor_id);
		break;
	case THERMAL_TRIP_CONFIGURABLE_LOW:
		vdd_sens_status |= BIT(trig_thresh->sensor_id);
		break;
	default:
		pr_err("Unsupported trip type\n");
		goto unlock_and_exit;
		break;
	}
	ret = vdd_restriction_apply_all((vdd_sens_status) ? 1 : 0);
	if (ret) {
		/* NOTE(review): "votlage" typo in the log string kept as-is. */
		pr_err("%s vdd rstr votlage for all failed\n",
			(vdd_sens_status) ? "Enable" : "Disable");
		goto unlock_and_exit;
	}
unlock_and_exit:
	mutex_unlock(&vdd_rstr_mutex);
set_and_exit:
	set_threshold(trig_thresh->sensor_id, trig_thresh->threshold);
	return;
}

/*
 * Threshold callback for optimum current requests (OCR): any hot sensor
 * forces OPTIMUM_CURRENT_MAX on all OCR rails, MIN once all are cool.
 */
static void ocr_notify(struct therm_threshold *trig_thresh)
{
	int ret = 0;
	static uint32_t ocr_sens_status;

	if (!ocr_enabled)
		return;
	if (!trig_thresh) {
		pr_err("Invalid input\n");
		return;
	}
	if (trig_thresh->trip_triggered < 0)
		goto set_and_exit;

	mutex_lock(&ocr_mutex);
	pr_debug("sensor%d reached %d thresh for Optimum current request\n",
		tsens_id_map[trig_thresh->sensor_id],
		trig_thresh->trip_triggered);
	switch (trig_thresh->trip_triggered) {
	case THERMAL_TRIP_CONFIGURABLE_HI:
		ocr_sens_status |= BIT(trig_thresh->sensor_id);
		break;
	case THERMAL_TRIP_CONFIGURABLE_LOW:
		if (ocr_sens_status & BIT(trig_thresh->sensor_id))
			ocr_sens_status ^= BIT(trig_thresh->sensor_id);
		break;
	default:
		pr_err("Unsupported trip type\n");
		goto unlock_and_exit;
		break;
	}

	ret = ocr_set_mode_all(ocr_sens_status ? OPTIMUM_CURRENT_MAX :
				OPTIMUM_CURRENT_MIN);
	if (ret) {
		pr_err("%s Optimum current mode for all failed. err:%d\n",
			(ocr_sens_status) ?
"Enable" : "Disable", ret); goto unlock_and_exit; } unlock_and_exit: mutex_unlock(&ocr_mutex); set_and_exit: set_threshold(trig_thresh->sensor_id, trig_thresh->threshold); return; } static __ref int do_thermal_monitor(void *data) { int ret = 0, i, j; struct therm_threshold *sensor_list; while (!kthread_should_stop()) { while (wait_for_completion_interruptible( &thermal_monitor_complete) != 0) ; INIT_COMPLETION(thermal_monitor_complete); for (i = 0; i < MSM_LIST_MAX_NR; i++) { if (!thresh[i].thresh_triggered) continue; thresh[i].thresh_triggered = false; for (j = 0; j < thresh[i].thresh_ct; j++) { sensor_list = &thresh[i].thresh_list[j]; if (sensor_list->trip_triggered < 0) continue; sensor_list->notify(sensor_list); sensor_list->trip_triggered = -1; } } } return ret; } static int convert_to_zone_id(struct threshold_info *thresh_inp) { int ret = 0, i, zone_id; struct therm_threshold *thresh_array; if (!thresh_inp) { pr_err("Invalid input\n"); ret = -EINVAL; goto convert_to_exit; } thresh_array = thresh_inp->thresh_list; for (i = 0; i < thresh_inp->thresh_ct; i++) { char tsens_name[TSENS_NAME_MAX] = ""; if (thresh_array[i].id_type == THERM_ZONE_ID) continue; snprintf(tsens_name, TSENS_NAME_MAX, TSENS_NAME_FORMAT, thresh_array[i].sensor_id); zone_id = sensor_get_id(tsens_name); if (zone_id < 0) { pr_err("Error getting zone id for %s. err:%d\n", tsens_name, ret); ret = zone_id; goto convert_to_exit; } thresh_array[i].sensor_id = zone_id; thresh_array[i].id_type = THERM_ZONE_ID; } convert_to_exit: return ret; } static void thermal_monitor_init(void) { if (thermal_monitor_task) return; init_completion(&thermal_monitor_complete); thermal_monitor_task = kthread_run(do_thermal_monitor, NULL, "msm_thermal:therm_monitor"); if (IS_ERR(thermal_monitor_task)) { pr_err("Failed to create thermal monitor thread. 
err:%ld\n", PTR_ERR(thermal_monitor_task)); goto init_exit; } if (therm_reset_enabled && !(convert_to_zone_id(&thresh[MSM_THERM_RESET]))) therm_set_threshold(&thresh[MSM_THERM_RESET]); if ((cx_phase_ctrl_enabled) && !(convert_to_zone_id(&thresh[MSM_CX_PHASE_CTRL_HOT]))) therm_set_threshold(&thresh[MSM_CX_PHASE_CTRL_HOT]); if ((vdd_rstr_enabled) && !(convert_to_zone_id(&thresh[MSM_VDD_RESTRICTION]))) therm_set_threshold(&thresh[MSM_VDD_RESTRICTION]); if ((gfx_warm_phase_ctrl_enabled) && !(convert_to_zone_id(&thresh[MSM_GFX_PHASE_CTRL_WARM]))) { therm_set_threshold(&thresh[MSM_GFX_PHASE_CTRL_WARM]); } if ((gfx_crit_phase_ctrl_enabled) && !(convert_to_zone_id(&thresh[MSM_GFX_PHASE_CTRL_HOT]))) { therm_set_threshold(&thresh[MSM_GFX_PHASE_CTRL_HOT]); } if ((ocr_enabled) && !(convert_to_zone_id(&thresh[MSM_OCR]))) therm_set_threshold(&thresh[MSM_OCR]); if (vdd_mx_enabled && !(convert_to_zone_id(&thresh[MSM_VDD_MX_RESTRICTION]))) therm_set_threshold(&thresh[MSM_VDD_MX_RESTRICTION]); init_exit: return; } static int msm_thermal_notify(enum thermal_trip_type type, int temp, void *data) { struct therm_threshold *thresh_data = (struct therm_threshold *)data; if (thermal_monitor_task) { thresh_data->trip_triggered = type; thresh_data->parent->thresh_triggered = true; complete(&thermal_monitor_complete); } else { pr_err("Thermal monitor task is not initialized\n"); } return 0; } static int init_threshold(enum msm_thresh_list index, int sensor_id, int32_t hi_temp, int32_t low_temp, void (*callback)(struct therm_threshold *)) { int ret = 0, i; struct therm_threshold *thresh_ptr; if (!callback || index >= MSM_LIST_MAX_NR || index < 0 || sensor_id == -ENODEV) { pr_err("Invalid input. sensor:%d. index:%d\n", sensor_id, index); ret = -EINVAL; goto init_thresh_exit; } if (thresh[index].thresh_list) { pr_info("threshold id:%d already initialized\n", index); goto init_thresh_exit; } thresh[index].thresh_ct = (sensor_id == MONITOR_ALL_TSENS) ? 
		max_tsens_num : 1;
	thresh[index].thresh_triggered = false;
	thresh[index].thresh_list = kzalloc(sizeof(struct therm_threshold) *
		thresh[index].thresh_ct, GFP_KERNEL);
	if (!thresh[index].thresh_list) {
		pr_err("kzalloc failed for thresh index:%d\n", index);
		ret = -ENOMEM;
		goto init_thresh_exit;
	}

	thresh_ptr = thresh[index].thresh_list;
	if (sensor_id == MONITOR_ALL_TSENS) {
		/* One entry per TSENS sensor, all sharing the same limits. */
		for (i = 0; i < max_tsens_num; i++) {
			thresh_ptr[i].sensor_id = tsens_id_map[i];
			thresh_ptr[i].id_type = THERM_TSENS_ID;
			thresh_ptr[i].notify = callback;
			thresh_ptr[i].trip_triggered = -1;
			thresh_ptr[i].parent = &thresh[index];
			thresh_ptr[i].threshold[0].temp = hi_temp;
			thresh_ptr[i].threshold[0].trip =
				THERMAL_TRIP_CONFIGURABLE_HI;
			thresh_ptr[i].threshold[1].temp = low_temp;
			thresh_ptr[i].threshold[1].trip =
				THERMAL_TRIP_CONFIGURABLE_LOW;
			thresh_ptr[i].threshold[0].notify =
			thresh_ptr[i].threshold[1].notify = msm_thermal_notify;
			thresh_ptr[i].threshold[0].data =
			thresh_ptr[i].threshold[1].data =
				(void *)&thresh_ptr[i];
		}
	} else {
		thresh_ptr->sensor_id = sensor_id;
		thresh_ptr->id_type = THERM_TSENS_ID;
		thresh_ptr->notify = callback;
		thresh_ptr->trip_triggered = -1;
		thresh_ptr->parent = &thresh[index];
		thresh_ptr->threshold[0].temp = hi_temp;
		thresh_ptr->threshold[0].trip = THERMAL_TRIP_CONFIGURABLE_HI;
		thresh_ptr->threshold[1].temp = low_temp;
		thresh_ptr->threshold[1].trip = THERMAL_TRIP_CONFIGURABLE_LOW;
		thresh_ptr->threshold[0].notify =
		thresh_ptr->threshold[1].notify = msm_thermal_notify;
		thresh_ptr->threshold[0].data =
		thresh_ptr->threshold[1].data = (void *)thresh_ptr;
	}

init_thresh_exit:
	return ret;
}

/* Create /sys/module/<mod>/gfx_phase_ctrl/temp_band. */
static int msm_thermal_add_gfx_nodes(void)
{
	struct kobject *module_kobj = NULL;
	struct kobject *gfx_kobj = NULL;
	int ret = 0;

	if (!gfx_warm_phase_ctrl_enabled && !gfx_crit_phase_ctrl_enabled)
		return -EINVAL;

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("cannot find kobject\n");
		ret = -ENOENT;
		goto gfx_node_exit;
	}

	gfx_kobj = kobject_create_and_add("gfx_phase_ctrl", module_kobj);
	if (!gfx_kobj) {
		pr_err("cannot create gfx kobject\n");
		ret = -ENOMEM;
		goto gfx_node_exit;
	}

	gfx_attr_gp.attrs = kzalloc(sizeof(struct attribute *) * 2,
		GFP_KERNEL);
	if (!gfx_attr_gp.attrs) {
		pr_err("kzalloc failed\n");
		ret = -ENOMEM;
		goto gfx_node_fail;
	}

	PHASE_RW_ATTR(gfx, temp_band, gfx_mode_attr, 0, gfx_attr_gp);
	gfx_attr_gp.attrs[1] = NULL;

	ret = sysfs_create_group(gfx_kobj, &gfx_attr_gp);
	if (ret) {
		pr_err("cannot create GFX attribute group. err:%d\n", ret);
		goto gfx_node_fail;
	}

gfx_node_fail:
	if (ret) {
		kobject_put(gfx_kobj);
		kfree(gfx_attr_gp.attrs);
		gfx_attr_gp.attrs = NULL;
	}
gfx_node_exit:
	return ret;
}

/* Create /sys/module/<mod>/cx_phase_ctrl/temp_band. */
static int msm_thermal_add_cx_nodes(void)
{
	struct kobject *module_kobj = NULL;
	struct kobject *cx_kobj = NULL;
	int ret = 0;

	if (!cx_phase_ctrl_enabled)
		return -EINVAL;

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("cannot find kobject\n");
		ret = -ENOENT;
		goto cx_node_exit;
	}

	cx_kobj = kobject_create_and_add("cx_phase_ctrl", module_kobj);
	if (!cx_kobj) {
		pr_err("cannot create cx kobject\n");
		ret = -ENOMEM;
		goto cx_node_exit;
	}

	cx_attr_gp.attrs = kzalloc(sizeof(struct attribute *) * 2,
		GFP_KERNEL);
	if (!cx_attr_gp.attrs) {
		pr_err("kzalloc failed\n");
		ret = -ENOMEM;
		goto cx_node_fail;
	}

	PHASE_RW_ATTR(cx, temp_band, cx_mode_attr, 0, cx_attr_gp);
	cx_attr_gp.attrs[1] = NULL;

	ret = sysfs_create_group(cx_kobj, &cx_attr_gp);
	if (ret) {
		pr_err("cannot create CX attribute group. err:%d\n", ret);
		goto cx_node_fail;
	}

cx_node_fail:
	if (ret) {
		kobject_put(cx_kobj);
		kfree(cx_attr_gp.attrs);
		cx_attr_gp.attrs = NULL;
	}
cx_node_exit:
	return ret;
}

/*
 * We will reset the cpu frequencies limits here. The core online/offline
 * status will be carried over to the process stopping the msm_thermal, as
 * we dont want to online a core and bring in the thermal issues.
 */
static void __ref disable_msm_thermal(void)
{
	uint32_t cpu = 0;

	/* make sure check_temp is no longer running */
	cancel_delayed_work_sync(&check_temp_work);

	get_online_cpus();
	for_each_possible_cpu(cpu) {
		if (cpus[cpu].limited_max_freq == UINT_MAX &&
			cpus[cpu].limited_min_freq == 0)
			continue;
		pr_info("Max frequency reset for CPU%d\n", cpu);
		cpus[cpu].limited_max_freq = UINT_MAX;
		cpus[cpu].limited_min_freq = 0;
		if (!SYNC_CORE(cpu))
			update_cpu_freq(cpu);
	}
	update_cluster_freq();
	put_online_cpus();
}

/*
 * Switch from polling to interrupt-driven mitigation: stop the polling
 * work, reset frequency limits and bring up the hotplug / freq / monitor
 * tasks and their sysfs nodes.  If called before probe, just remember to
 * do it later.
 */
static void interrupt_mode_init(void)
{
	if (!msm_thermal_probed) {
		interrupt_mode_enable = true;
		return;
	}

	if (polling_enabled) {
		pr_info("Interrupt mode init\n");
		polling_enabled = 0;
		disable_msm_thermal();
		hotplug_init();
		freq_mitigation_init();
		thermal_monitor_init();
		msm_thermal_add_cx_nodes();
		msm_thermal_add_gfx_nodes();
	}
}

/*
 * "enabled" module parameter setter: writing 0 switches KTM over to
 * interrupt mode; writing 1 is a no-op.
 */
static int __ref set_enabled(const char *val, const struct kernel_param *kp)
{
	int ret = 0;

	ret = param_set_bool(val, kp);
	if (!enabled)
		interrupt_mode_init();
	else
		pr_info("no action for enabled = %d\n", enabled);

	pr_info("enabled = %d\n", enabled);

	return ret;
}

static struct kernel_param_ops module_ops = {
	.set = set_enabled,
	.get = param_get_bool,
};

module_param_cb(enabled, &module_ops, &enabled, 0644);
MODULE_PARM_DESC(enabled, "enforce thermal limit on cpu");

/* sysfs: show core_control/enabled. */
static ssize_t show_cc_enabled(struct kobject *kobj,
	struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", core_control_enabled);
}

/*
 * sysfs: enable/disable core control.  Enabling re-registers the CPU
 * notifier, re-evaluates offline state and re-arms hotplug thresholds.
 */
static ssize_t __ref store_cc_enabled(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret = 0;
	int val = 0;
	uint32_t cpu = 0;

	ret = kstrtoint(buf, 10, &val);
	if (ret) {
		pr_err("Invalid input %s. err:%d\n", buf, ret);
		goto done_store_cc;
	}

	if (core_control_enabled == !!val)
		goto done_store_cc;

	core_control_enabled = !!val;
	if (core_control_enabled) {
		pr_info("Core control enabled\n");
		register_cpu_notifier(&msm_thermal_cpu_notifier);
		/*
		 * Re-evaluate thermal core condition, update current status
		 * and set threshold for all cpus.
		 */
		hotplug_init_cpu_offlined();
		mutex_lock(&core_control_mutex);
		update_offline_cores(cpus_offlined);
		if (hotplug_enabled) {
			for_each_possible_cpu(cpu) {
				if (!(msm_thermal_info.core_control_mask &
					BIT(cpus[cpu].cpu)))
					continue;
				set_threshold(cpus[cpu].sensor_id,
				&cpus[cpu].threshold[HOTPLUG_THRESHOLD_HIGH]);
			}
		}
		mutex_unlock(&core_control_mutex);
	} else {
		pr_info("Core control disabled\n");
		unregister_cpu_notifier(&msm_thermal_cpu_notifier);
	}

done_store_cc:
	return count;
}

/* sysfs: show the thermally-offlined CPU bitmask. */
static ssize_t show_cpus_offlined(struct kobject *kobj,
	struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", cpus_offlined);
}

/*
 * sysfs: userspace-requested CPU offline bitmask.  Honoured only in
 * interrupt mode; the hotplug task applies the change.
 */
static ssize_t __ref store_cpus_offlined(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret = 0;
	uint32_t val = 0;
	uint32_t cpu;

	mutex_lock(&core_control_mutex);
	ret = kstrtouint(buf, 10, &val);
	if (ret) {
		pr_err("Invalid input %s. err:%d\n", buf, ret);
		goto done_cc;
	}

	if (polling_enabled) {
		pr_err("Ignoring request; polling thread is enabled.\n");
		goto done_cc;
	}

	for_each_possible_cpu(cpu) {
		if (!(msm_thermal_info.core_control_mask & BIT(cpu)))
			continue;
		cpus[cpu].user_offline = !!(val & BIT(cpu));
		pr_debug("\"%s\"(PID:%i) requests %s CPU%d.\n", current->comm,
			current->pid, (cpus[cpu].user_offline) ?
			"offline" : "online", cpu);
	}

	if (hotplug_task)
		complete(&hotplug_notify_complete);
	else
		pr_err("Hotplug task is not initialized\n");
done_cc:
	mutex_unlock(&core_control_mutex);
	return count;
}

static __refdata struct kobj_attribute cc_enabled_attr =
__ATTR(enabled, 0644, show_cc_enabled, store_cc_enabled);

static __refdata struct kobj_attribute cpus_offlined_attr =
__ATTR(cpus_offlined, 0644, show_cpus_offlined, store_cpus_offlined);

static __refdata struct attribute *cc_attrs[] = {
	&cc_enabled_attr.attr,
	&cpus_offlined_attr.attr,
	NULL,
};

static __refdata struct attribute_group cc_attr_group = {
	.attrs = cc_attrs,
};

/* Create /sys/module/<mod>/core_control and its attribute group. */
static __init int msm_thermal_add_cc_nodes(void)
{
	struct kobject *module_kobj = NULL;
	int ret = 0;

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("cannot find kobject\n");
		ret = -ENOENT;
		goto done_cc_nodes;
	}

	cc_kobj = kobject_create_and_add("core_control", module_kobj);
	if (!cc_kobj) {
		pr_err("cannot create core control kobj\n");
		ret = -ENOMEM;
		goto done_cc_nodes;
	}

	ret = sysfs_create_group(cc_kobj, &cc_attr_group);
	if (ret) {
		pr_err("cannot create sysfs group. 
err:%d\n", ret); goto done_cc_nodes; } return 0; done_cc_nodes: if (cc_kobj) kobject_del(cc_kobj); return ret; } static ssize_t show_mx_enabled(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", vdd_mx_enabled); } static ssize_t __ref store_mx_enabled(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int ret = 0; int val = 0; ret = kstrtoint(buf, 10, &val); if (ret) { pr_err("Invalid input %s\n", buf); goto done_store_mx; } if (vdd_mx_enabled == !!val) goto done_store_mx; vdd_mx_enabled = !!val; mutex_lock(&vdd_mx_mutex); if (!vdd_mx_enabled) remove_vdd_mx_restriction(); else if (!(convert_to_zone_id(&thresh[MSM_VDD_MX_RESTRICTION]))) therm_set_threshold(&thresh[MSM_VDD_MX_RESTRICTION]); mutex_unlock(&vdd_mx_mutex); done_store_mx: return count; } static __init int msm_thermal_add_mx_nodes(void) { struct kobject *module_kobj = NULL; int ret = 0; if (!vdd_mx_enabled) return -EINVAL; module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME); if (!module_kobj) { pr_err("cannot find kobject for module\n"); ret = -ENOENT; goto done_mx_nodes; } mx_kobj = kobject_create_and_add("vdd_mx", module_kobj); if (!mx_kobj) { pr_err("cannot create mx restriction kobj\n"); ret = -ENOMEM; goto done_mx_nodes; } mx_attr_group.attrs = kzalloc(sizeof(struct attribute *) * 2, GFP_KERNEL); if (!mx_attr_group.attrs) { ret = -ENOMEM; pr_err("cannot allocate memory for mx_attr_group.attrs"); goto done_mx_nodes; } MX_RW_ATTR(mx_enabled_attr, enabled, mx_attr_group); mx_attr_group.attrs[1] = NULL; ret = sysfs_create_group(mx_kobj, &mx_attr_group); if (ret) { pr_err("cannot create group\n"); goto done_mx_nodes; } done_mx_nodes: if (ret) { if (mx_kobj) kobject_del(mx_kobj); kfree(mx_attr_group.attrs); } return ret; } static void msm_thermal_panic_notifier_init(struct device *dev) { int i; tsens_temp_at_panic = devm_kzalloc(dev, sizeof(long) * max_tsens_num, GFP_KERNEL); if (!tsens_temp_at_panic) { 
pr_err("kzalloc failed\n"); return; } for (i = 0; i < max_tsens_num; i++) tsens_temp_at_panic[i] = LONG_MIN; atomic_notifier_chain_register(&panic_notifier_list, &msm_thermal_panic_notifier); } int msm_thermal_pre_init(struct device *dev) { int ret = 0; if (tsens_is_ready() <= 0) { pr_err("Tsens driver is not ready yet\n"); return -EPROBE_DEFER; } ret = tsens_get_max_sensor_num(&max_tsens_num); if (ret < 0) { pr_err("failed to get max sensor number, err:%d\n", ret); return ret; } if (create_sensor_id_map()) { pr_err("Creating sensor id map failed\n"); ret = -EINVAL; goto pre_init_exit; } if (!tsens_temp_at_panic) msm_thermal_panic_notifier_init(dev); if (!thresh) { thresh = kzalloc( sizeof(struct threshold_info) * MSM_LIST_MAX_NR, GFP_KERNEL); if (!thresh) { pr_err("kzalloc failed\n"); ret = -ENOMEM; goto pre_init_exit; } memset(thresh, 0, sizeof(struct threshold_info) * MSM_LIST_MAX_NR); } pre_init_exit: return ret; } int msm_thermal_init(struct msm_thermal_data *pdata) { int ret = 0; uint32_t cpu; for_each_possible_cpu(cpu) { cpus[cpu].cpu = cpu; cpus[cpu].offline = 0; cpus[cpu].user_offline = 0; cpus[cpu].hotplug_thresh_clear = false; cpus[cpu].max_freq = false; cpus[cpu].user_max_freq = UINT_MAX; cpus[cpu].user_min_freq = 0; cpus[cpu].limited_max_freq = UINT_MAX; cpus[cpu].limited_min_freq = 0; cpus[cpu].freq_thresh_clear = false; } BUG_ON(!pdata); memcpy(&msm_thermal_info, pdata, sizeof(struct msm_thermal_data)); if (check_sensor_id(msm_thermal_info.sensor_id)) { pr_err("Invalid sensor:%d for polling\n", msm_thermal_info.sensor_id); return -EINVAL; } enabled = 1; polling_enabled = 1; ret = cpufreq_register_notifier(&msm_thermal_cpufreq_notifier, CPUFREQ_POLICY_NOTIFIER); if (ret) pr_err("cannot register cpufreq notifier. 
err:%d\n", ret); INIT_DELAYED_WORK(&check_temp_work, check_temp); schedule_delayed_work(&check_temp_work, 0); if (num_possible_cpus() > 1) register_cpu_notifier(&msm_thermal_cpu_notifier); return ret; } static int ocr_reg_init(struct platform_device *pdev) { int ret = 0; int i, j; for (i = 0; i < ocr_rail_cnt; i++) { /* Check if vdd_restriction has already initialized any * regualtor handle. If so use the same handle.*/ for (j = 0; j < rails_cnt; j++) { if (!strcmp(ocr_rails[i].name, rails[j].name)) { if (rails[j].reg == NULL) break; ocr_rails[i].phase_reg = rails[j].reg; goto reg_init; } } ocr_rails[i].phase_reg = devm_regulator_get(&pdev->dev, ocr_rails[i].name); if (IS_ERR_OR_NULL(ocr_rails[i].phase_reg)) { ret = PTR_ERR(ocr_rails[i].phase_reg); if (ret != -EPROBE_DEFER) { pr_err("Could not get regulator: %s, err:%d\n", ocr_rails[i].name, ret); ocr_rails[i].phase_reg = NULL; ocr_rails[i].mode = 0; ocr_rails[i].init = 0; } return ret; } reg_init: ocr_rails[i].mode = OPTIMUM_CURRENT_MIN; } return ret; } static int vdd_restriction_reg_init(struct platform_device *pdev) { int ret = 0; int i; for (i = 0; i < rails_cnt; i++) { if (rails[i].freq_req == 1) { usefreq |= BIT(i); check_freq_table(); /* * Restrict frequency by default until we have made * our first temp reading */ if (freq_table_get) ret = vdd_restriction_apply_freq(&rails[i], 0); else pr_info("Defer vdd rstr freq init.\n"); } else { rails[i].reg = devm_regulator_get(&pdev->dev, rails[i].name); if (IS_ERR_OR_NULL(rails[i].reg)) { ret = PTR_ERR(rails[i].reg); if (ret != -EPROBE_DEFER) { pr_err( \ "could not get regulator: %s. 
err:%d\n", rails[i].name, ret); rails[i].reg = NULL; rails[i].curr_level = -2; return ret; } pr_info("Defer regulator %s probe\n", rails[i].name); return ret; } /* * Restrict votlage by default until we have made * our first temp reading */ ret = vdd_restriction_apply_voltage(&rails[i], 0); } } return ret; } static int psm_reg_init(struct platform_device *pdev) { int ret = 0; int i = 0; int j = 0; for (i = 0; i < psm_rails_cnt; i++) { psm_rails[i].reg = rpm_regulator_get(&pdev->dev, psm_rails[i].name); if (IS_ERR_OR_NULL(psm_rails[i].reg)) { ret = PTR_ERR(psm_rails[i].reg); if (ret != -EPROBE_DEFER) { pr_err("couldn't get rpm regulator %s. err%d\n", psm_rails[i].name, ret); psm_rails[i].reg = NULL; goto psm_reg_exit; } pr_info("Defer regulator %s probe\n", psm_rails[i].name); return ret; } /* Apps default vote for PWM mode */ psm_rails[i].init = PMIC_PWM_MODE; ret = rpm_regulator_set_mode(psm_rails[i].reg, psm_rails[i].init); if (ret) { pr_err("Cannot set PMIC PWM mode. err:%d\n", ret); return ret; } else psm_rails[i].mode = PMIC_PWM_MODE; } return ret; psm_reg_exit: if (ret) { for (j = 0; j < i; j++) { if (psm_rails[j].reg != NULL) rpm_regulator_put(psm_rails[j].reg); } } return ret; } //[BUGFIX]added By miao, bug 902619 #if defined(CONFIG_TCT_8X16_IDOL3) static ssize_t boot_freq_limit_enable_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int ret = 0, i; uint32_t val = 0; struct cluster_info *cluster_ptr; ret = kstrtouint(buf, 10, &val); if (ret) { pr_err("Invalid input:%s. ret:%d", buf, ret); goto done_store; } pr_debug("Request to %s boot freq limit\n", val ? "enable": "disable"); if (boot_fmax_limit_enabled == (val ? true : false)) goto done_store; boot_fmax_limit_enabled = val ? 
true : false; if (!boot_fmax_limit_enabled) { if (core_ptr) { for (i = 0; i < core_ptr->entity_count; i++) { cluster_ptr = &core_ptr->child_entity_ptr[i]; cluster_ptr->boot_max_freq_limit = UINT_MAX; } pr_info("Disabling boot freq limit\n"); if (freq_mitigation_task) { complete(&freq_mitigation_complete); } else { get_online_cpus(); update_cluster_freq(); put_online_cpus(); } } } done_store: return count; } static ssize_t boot_freq_limit_enable_show( struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", boot_fmax_limit_enabled ? 1: 0); } static struct kobj_attribute boot_freq_limit_attr = __ATTR_RW(boot_freq_limit_enable); static int boot_freq_limit_sysfs_node(void) { struct kobject *module_kobj = NULL; int ret = 0; if (!boot_fmax_limit_enabled) return ret; module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME); if (!module_kobj) { pr_err("cannot find kobject\n"); return -ENOENT; } sysfs_attr_init(&boot_freq_limit_attr.attr); ret = sysfs_create_file(module_kobj, &boot_freq_limit_attr.attr); if (ret) { pr_err( "cannot create boot_freq_limit kobject attribute. err:%d\n", ret); return ret; } return ret; } #endif //[BUGFIX]added END static struct kobj_attribute sensor_info_attr = __ATTR_RO(sensor_info); static int msm_thermal_add_sensor_info_nodes(void) { struct kobject *module_kobj = NULL; int ret = 0; if (!sensor_info_probed) { sensor_info_nodes_called = true; return ret; } if (sensor_info_probed && sensor_cnt == 0) return ret; module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME); if (!module_kobj) { pr_err("cannot find kobject\n"); return -ENOENT; } sysfs_attr_init(&sensor_info_attr.attr); ret = sysfs_create_file(module_kobj, &sensor_info_attr.attr); if (ret) { pr_err( "cannot create sensor info kobject attribute. 
err:%d\n", ret); return ret; } return ret; } static int msm_thermal_add_vdd_rstr_nodes(void) { struct kobject *module_kobj = NULL; struct kobject *vdd_rstr_kobj = NULL; struct kobject *vdd_rstr_reg_kobj[MAX_RAILS] = {0}; int rc = 0; int i = 0; if (!vdd_rstr_probed) { vdd_rstr_nodes_called = true; return rc; } if (vdd_rstr_probed && rails_cnt == 0) return rc; module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME); if (!module_kobj) { pr_err("cannot find kobject\n"); rc = -ENOENT; goto thermal_sysfs_add_exit; } vdd_rstr_kobj = kobject_create_and_add("vdd_restriction", module_kobj); if (!vdd_rstr_kobj) { pr_err("cannot create vdd_restriction kobject\n"); rc = -ENOMEM; goto thermal_sysfs_add_exit; } rc = sysfs_create_group(vdd_rstr_kobj, &vdd_rstr_en_attribs_gp); if (rc) { pr_err("cannot create kobject attribute group. err:%d\n", rc); rc = -ENOMEM; goto thermal_sysfs_add_exit; } for (i = 0; i < rails_cnt; i++) { vdd_rstr_reg_kobj[i] = kobject_create_and_add(rails[i].name, vdd_rstr_kobj); if (!vdd_rstr_reg_kobj[i]) { pr_err("cannot create kobject for %s\n", rails[i].name); rc = -ENOMEM; goto thermal_sysfs_add_exit; } rails[i].attr_gp.attrs = kzalloc(sizeof(struct attribute *) * 3, GFP_KERNEL); if (!rails[i].attr_gp.attrs) { pr_err("kzalloc failed\n"); rc = -ENOMEM; goto thermal_sysfs_add_exit; } VDD_RES_RW_ATTRIB(rails[i], rails[i].level_attr, 0, level); VDD_RES_RO_ATTRIB(rails[i], rails[i].value_attr, 1, value); rails[i].attr_gp.attrs[2] = NULL; rc = sysfs_create_group(vdd_rstr_reg_kobj[i], &rails[i].attr_gp); if (rc) { pr_err("cannot create attribute group for %s. 
err:%d\n", rails[i].name, rc); goto thermal_sysfs_add_exit; } } return rc; thermal_sysfs_add_exit: if (rc) { for (i = 0; i < rails_cnt; i++) { kobject_del(vdd_rstr_reg_kobj[i]); kfree(rails[i].attr_gp.attrs); } if (vdd_rstr_kobj) kobject_del(vdd_rstr_kobj); } return rc; } static int msm_thermal_add_ocr_nodes(void) { struct kobject *module_kobj = NULL; struct kobject *ocr_kobj = NULL; struct kobject *ocr_reg_kobj[MAX_RAILS] = {0}; int rc = 0; int i = 0; if (!ocr_probed) { ocr_nodes_called = true; return rc; } if (ocr_probed && ocr_rail_cnt == 0) return rc; module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME); if (!module_kobj) { pr_err("Cannot find kobject\n"); rc = -ENOENT; goto ocr_node_exit; } ocr_kobj = kobject_create_and_add("opt_curr_req", module_kobj); if (!ocr_kobj) { pr_err("Cannot create ocr kobject\n"); rc = -ENOMEM; goto ocr_node_exit; } for (i = 0; i < ocr_rail_cnt; i++) { ocr_reg_kobj[i] = kobject_create_and_add(ocr_rails[i].name, ocr_kobj); if (!ocr_reg_kobj[i]) { pr_err("Cannot create kobject for %s\n", ocr_rails[i].name); rc = -ENOMEM; goto ocr_node_exit; } ocr_rails[i].attr_gp.attrs = kzalloc( sizeof(struct attribute *) * 2, GFP_KERNEL); if (!ocr_rails[i].attr_gp.attrs) { pr_err("Fail to allocate memory for attribute for %s\n", ocr_rails[i].name); rc = -ENOMEM; goto ocr_node_exit; } OCR_RW_ATTRIB(ocr_rails[i], ocr_rails[i].mode_attr, 0, mode); ocr_rails[i].attr_gp.attrs[1] = NULL; rc = sysfs_create_group(ocr_reg_kobj[i], &ocr_rails[i].attr_gp); if (rc) { pr_err("Cannot create attribute group for %s. 
err:%d\n", ocr_rails[i].name, rc); goto ocr_node_exit; } } ocr_node_exit: if (rc) { for (i = 0; i < ocr_rail_cnt; i++) { if (ocr_reg_kobj[i]) kobject_del(ocr_reg_kobj[i]); kfree(ocr_rails[i].attr_gp.attrs); ocr_rails[i].attr_gp.attrs = NULL; } if (ocr_kobj) kobject_del(ocr_kobj); } return rc; } static int msm_thermal_add_psm_nodes(void) { struct kobject *module_kobj = NULL; struct kobject *psm_kobj = NULL; struct kobject *psm_reg_kobj[MAX_RAILS] = {0}; int rc = 0; int i = 0; if (!psm_probed) { psm_nodes_called = true; return rc; } if (psm_probed && psm_rails_cnt == 0) return rc; module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME); if (!module_kobj) { pr_err("cannot find kobject\n"); rc = -ENOENT; goto psm_node_exit; } psm_kobj = kobject_create_and_add("pmic_sw_mode", module_kobj); if (!psm_kobj) { pr_err("cannot create psm kobject\n"); rc = -ENOMEM; goto psm_node_exit; } for (i = 0; i < psm_rails_cnt; i++) { psm_reg_kobj[i] = kobject_create_and_add(psm_rails[i].name, psm_kobj); if (!psm_reg_kobj[i]) { pr_err("cannot create kobject for %s\n", psm_rails[i].name); rc = -ENOMEM; goto psm_node_exit; } psm_rails[i].attr_gp.attrs = kzalloc( \ sizeof(struct attribute *) * 2, GFP_KERNEL); if (!psm_rails[i].attr_gp.attrs) { pr_err("kzalloc failed\n"); rc = -ENOMEM; goto psm_node_exit; } PSM_RW_ATTRIB(psm_rails[i], psm_rails[i].mode_attr, 0, mode); psm_rails[i].attr_gp.attrs[1] = NULL; rc = sysfs_create_group(psm_reg_kobj[i], &psm_rails[i].attr_gp); if (rc) { pr_err("cannot create attribute group for %s. 
err:%d\n", psm_rails[i].name, rc); goto psm_node_exit; } } return rc; psm_node_exit: if (rc) { for (i = 0; i < psm_rails_cnt; i++) { kobject_del(psm_reg_kobj[i]); kfree(psm_rails[i].attr_gp.attrs); } if (psm_kobj) kobject_del(psm_kobj); } return rc; } static int probe_vdd_mx(struct device_node *node, struct msm_thermal_data *data, struct platform_device *pdev) { int ret = 0; char *key = NULL; key = "qcom,disable-vdd-mx"; if (of_property_read_bool(node, key)) { vdd_mx_enabled = false; return ret; } key = "qcom,mx-restriction-temp"; ret = of_property_read_u32(node, key, &data->vdd_mx_temp_degC); if (ret) goto read_node_done; key = "qcom,mx-restriction-temp-hysteresis"; ret = of_property_read_u32(node, key, &data->vdd_mx_temp_hyst_degC); if (ret) goto read_node_done; key = "qcom,mx-retention-min"; ret = of_property_read_u32(node, key, &data->vdd_mx_min); if (ret) goto read_node_done; vdd_mx = devm_regulator_get(&pdev->dev, "vdd-mx"); if (IS_ERR_OR_NULL(vdd_mx)) { ret = PTR_ERR(vdd_mx); if (ret != -EPROBE_DEFER) { pr_err( "Could not get regulator: vdd-mx, err:%d\n", ret); } goto read_node_done; } ret = init_threshold(MSM_VDD_MX_RESTRICTION, MONITOR_ALL_TSENS, data->vdd_mx_temp_degC + data->vdd_mx_temp_hyst_degC, data->vdd_mx_temp_degC, vdd_mx_notify); read_node_done: if (!ret) vdd_mx_enabled = true; else if (ret != -EPROBE_DEFER) dev_info(&pdev->dev, "%s:Failed reading node=%s, key=%s. 
KTM continues\n", __func__, node->full_name, key); return ret; } static int probe_vdd_rstr(struct device_node *node, struct msm_thermal_data *data, struct platform_device *pdev) { int ret = 0; int i = 0; int arr_size; char *key = NULL; struct device_node *child_node = NULL; rails = NULL; key = "qcom,disable-vdd-rstr"; if (of_property_read_bool(node, key)) { vdd_rstr_probed = true; vdd_rstr_enabled = false; rails_cnt = 0; return ret; } key = "qcom,vdd-restriction-temp"; ret = of_property_read_u32(node, key, &data->vdd_rstr_temp_degC); if (ret) goto read_node_fail; key = "qcom,vdd-restriction-temp-hysteresis"; ret = of_property_read_u32(node, key, &data->vdd_rstr_temp_hyst_degC); if (ret) goto read_node_fail; for_each_child_of_node(node, child_node) { rails_cnt++; } if (rails_cnt == 0) goto read_node_fail; if (rails_cnt >= MAX_RAILS) { pr_err("Too many rails:%d.\n", rails_cnt); return -EFAULT; } rails = kzalloc(sizeof(struct rail) * rails_cnt, GFP_KERNEL); if (!rails) { pr_err("Fail to allocate memory for rails.\n"); return -ENOMEM; } i = 0; for_each_child_of_node(node, child_node) { key = "qcom,vdd-rstr-reg"; ret = of_property_read_string(child_node, key, &rails[i].name); if (ret) goto read_node_fail; key = "qcom,levels"; if (!of_get_property(child_node, key, &arr_size)) goto read_node_fail; rails[i].num_levels = arr_size/sizeof(__be32); if (rails[i].num_levels > sizeof(rails[i].levels)/sizeof(uint32_t)) { pr_err("Array size:%d too large for index:%d\n", rails[i].num_levels, i); return -EFAULT; } ret = of_property_read_u32_array(child_node, key, rails[i].levels, rails[i].num_levels); if (ret) goto read_node_fail; key = "qcom,freq-req"; rails[i].freq_req = of_property_read_bool(child_node, key); if (rails[i].freq_req) rails[i].min_level = 0; else { key = "qcom,min-level"; ret = of_property_read_u32(child_node, key, &rails[i].min_level); if (ret) goto read_node_fail; } rails[i].curr_level = -1; rails[i].reg = NULL; i++; } if (rails_cnt) { ret = 
vdd_restriction_reg_init(pdev); if (ret) { pr_err("Err regulator init. err:%d. KTM continues.\n", ret); goto read_node_fail; } ret = init_threshold(MSM_VDD_RESTRICTION, MONITOR_ALL_TSENS, data->vdd_rstr_temp_hyst_degC, data->vdd_rstr_temp_degC, vdd_restriction_notify); if (ret) { pr_err("Error in initializing thresholds. err:%d\n", ret); goto read_node_fail; } vdd_rstr_enabled = true; } read_node_fail: vdd_rstr_probed = true; if (ret) { dev_info(&pdev->dev, "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n", __func__, node->full_name, key, ret); kfree(rails); rails_cnt = 0; } if (ret == -EPROBE_DEFER) vdd_rstr_probed = false; return ret; } static void probe_sensor_info(struct device_node *node, struct msm_thermal_data *data, struct platform_device *pdev) { int err = 0; int i = 0; char *key = NULL; struct device_node *child_node = NULL; struct device_node *np = NULL; key = "qcom,disable-sensor-info"; if (of_property_read_bool(node, key)) { sensor_info_probed = true; return; } np = of_find_compatible_node(NULL, NULL, "qcom,sensor-information"); if (!np) { dev_info(&pdev->dev, "%s:unable to find DT for sensor-information.KTM continues\n", __func__); sensor_info_probed = true; return; } sensor_cnt = of_get_child_count(np); if (sensor_cnt == 0) { err = -ENODEV; goto read_node_fail; } sensors = devm_kzalloc(&pdev->dev, sizeof(struct msm_sensor_info) * sensor_cnt, GFP_KERNEL); if (!sensors) { pr_err("Fail to allocate memory for sensor_info.\n"); err = -ENOMEM; goto read_node_fail; } for_each_child_of_node(np, child_node) { key = "qcom,sensor-type"; err = of_property_read_string(child_node, key, &sensors[i].type); if (err) goto read_node_fail; key = "qcom,sensor-name"; err = of_property_read_string(child_node, key, &sensors[i].name); if (err) goto read_node_fail; key = "qcom,alias-name"; of_property_read_string(child_node, key, &sensors[i].alias); key = "qcom,scaling-factor"; err = of_property_read_u32(child_node, key, &sensors[i].scaling_factor); if (err) { 
sensors[i].scaling_factor = SENSOR_SCALING_FACTOR; err = 0; } i++; } read_node_fail: sensor_info_probed = true; if (err) { dev_info(&pdev->dev, "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n", __func__, np->full_name, key, err); devm_kfree(&pdev->dev, sensors); } } static int probe_ocr(struct device_node *node, struct msm_thermal_data *data, struct platform_device *pdev) { int ret = 0; int j = 0; char *key = NULL; if (ocr_probed) { pr_info("Nodes already probed\n"); goto read_ocr_exit; } ocr_rails = NULL; key = "qcom,disable-ocr"; if (of_property_read_bool(node, key)) { ocr_probed = true; ocr_enabled = false; ocr_rail_cnt = 0; goto read_ocr_exit; } key = "qcom,pmic-opt-curr-temp"; ret = of_property_read_u32(node, key, &data->ocr_temp_degC); if (ret) goto read_ocr_fail; key = "qcom,pmic-opt-curr-temp-hysteresis"; ret = of_property_read_u32(node, key, &data->ocr_temp_hyst_degC); if (ret) goto read_ocr_fail; key = "qcom,pmic-opt-curr-regs"; ocr_rail_cnt = of_property_count_strings(node, key); if (ocr_rail_cnt <= 0) { pr_err("Invalid ocr rail count. err:%d\n", ocr_rail_cnt); goto read_ocr_fail; } ocr_rails = kzalloc(sizeof(struct psm_rail) * ocr_rail_cnt, GFP_KERNEL); if (!ocr_rails) { pr_err("Fail to allocate memory for ocr rails\n"); ocr_rail_cnt = 0; return -ENOMEM; } for (j = 0; j < ocr_rail_cnt; j++) { ret = of_property_read_string_index(node, key, j, &ocr_rails[j].name); if (ret) goto read_ocr_fail; ocr_rails[j].phase_reg = NULL; ocr_rails[j].init = OPTIMUM_CURRENT_MAX; } key = "qcom,pmic-opt-curr-sensor-id"; ret = of_property_read_u32(node, key, &data->ocr_sensor_id); if (ret) { pr_info("ocr sensor is not configured, use all TSENS. err:%d\n", ret); data->ocr_sensor_id = MONITOR_ALL_TSENS; } ret = ocr_reg_init(pdev); if (ret) { if (ret == -EPROBE_DEFER) { ocr_reg_init_defer = true; pr_info("ocr reg init is defered\n"); } else { pr_err( "Failed to get regulators. KTM continues. 
err:%d\n", ret); goto read_ocr_fail; } } ret = init_threshold(MSM_OCR, data->ocr_sensor_id, data->ocr_temp_degC, data->ocr_temp_degC - data->ocr_temp_hyst_degC, ocr_notify); if (ret) goto read_ocr_fail; if (!ocr_reg_init_defer) ocr_enabled = true; ocr_nodes_called = false; /* * Vote for max optimum current by default until we have made * our first temp reading */ if (ocr_enabled) { ret = ocr_set_mode_all(OPTIMUM_CURRENT_MAX); if (ret) { pr_err("Set max optimum current failed. err:%d\n", ret); ocr_enabled = false; } } read_ocr_fail: ocr_probed = true; if (ret) { if (ret == -EPROBE_DEFER) { ret = 0; goto read_ocr_exit; } dev_err( &pdev->dev, "%s:Failed reading node=%s, key=%s err:%d. KTM continues\n", __func__, node->full_name, key, ret); kfree(ocr_rails); ocr_rails = NULL; ocr_rail_cnt = 0; } read_ocr_exit: return ret; } static int probe_psm(struct device_node *node, struct msm_thermal_data *data, struct platform_device *pdev) { int ret = 0; int j = 0; char *key = NULL; psm_rails = NULL; key = "qcom,disable-psm"; if (of_property_read_bool(node, key)) { psm_probed = true; psm_enabled = false; psm_rails_cnt = 0; return ret; } key = "qcom,pmic-sw-mode-temp"; ret = of_property_read_u32(node, key, &data->psm_temp_degC); if (ret) goto read_node_fail; key = "qcom,pmic-sw-mode-temp-hysteresis"; ret = of_property_read_u32(node, key, &data->psm_temp_hyst_degC); if (ret) goto read_node_fail; key = "qcom,pmic-sw-mode-regs"; psm_rails_cnt = of_property_count_strings(node, key); psm_rails = kzalloc(sizeof(struct psm_rail) * psm_rails_cnt, GFP_KERNEL); if (!psm_rails) { pr_err("Fail to allocate memory for psm rails\n"); psm_rails_cnt = 0; return -ENOMEM; } for (j = 0; j < psm_rails_cnt; j++) { ret = of_property_read_string_index(node, key, j, &psm_rails[j].name); if (ret) goto read_node_fail; } if (psm_rails_cnt) { ret = psm_reg_init(pdev); if (ret) { pr_err("Err regulator init. err:%d. 
KTM continues.\n", ret); goto read_node_fail; } psm_enabled = true; } read_node_fail: psm_probed = true; if (ret) { dev_info(&pdev->dev, "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n", __func__, node->full_name, key, ret); kfree(psm_rails); psm_rails_cnt = 0; } if (ret == -EPROBE_DEFER) psm_probed = false; return ret; } static int probe_cc(struct device_node *node, struct msm_thermal_data *data, struct platform_device *pdev) { char *key = NULL; uint32_t cpu_cnt = 0; int ret = 0; uint32_t cpu = 0; if (num_possible_cpus() > 1) { core_control_enabled = 1; hotplug_enabled = 1; } key = "qcom,core-limit-temp"; ret = of_property_read_u32(node, key, &data->core_limit_temp_degC); if (ret) goto read_node_fail; key = "qcom,core-temp-hysteresis"; ret = of_property_read_u32(node, key, &data->core_temp_hysteresis_degC); if (ret) goto read_node_fail; key = "qcom,core-control-mask"; ret = of_property_read_u32(node, key, &data->core_control_mask); if (ret) goto read_node_fail; key = "qcom,hotplug-temp"; ret = of_property_read_u32(node, key, &data->hotplug_temp_degC); if (ret) goto hotplug_node_fail; key = "qcom,hotplug-temp-hysteresis"; ret = of_property_read_u32(node, key, &data->hotplug_temp_hysteresis_degC); if (ret) goto hotplug_node_fail; key = "qcom,cpu-sensors"; cpu_cnt = of_property_count_strings(node, key); if (cpu_cnt < num_possible_cpus()) { pr_err("Wrong number of cpu sensors:%d\n", cpu_cnt); ret = -EINVAL; goto hotplug_node_fail; } for_each_possible_cpu(cpu) { ret = of_property_read_string_index(node, key, cpu, &cpus[cpu].sensor_type); if (ret) goto hotplug_node_fail; } read_node_fail: if (ret) { dev_info(&pdev->dev, "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n", KBUILD_MODNAME, node->full_name, key, ret); core_control_enabled = 0; } return ret; hotplug_node_fail: if (ret) { dev_info(&pdev->dev, "%s:Failed reading node=%s, key=%s. err=%d. 
KTM continues\n", KBUILD_MODNAME, node->full_name, key, ret); hotplug_enabled = 0; } return ret; } static int probe_gfx_phase_ctrl(struct device_node *node, struct msm_thermal_data *data, struct platform_device *pdev) { char *key = NULL; const char *tmp_str = NULL; int ret = 0; key = "qcom,disable-gfx-phase-ctrl"; if (of_property_read_bool(node, key)) { gfx_crit_phase_ctrl_enabled = false; gfx_warm_phase_ctrl_enabled = false; return ret; } key = "qcom,gfx-sensor-id"; ret = of_property_read_u32(node, key, &data->gfx_sensor); if (ret) goto probe_gfx_exit; key = "qcom,gfx-phase-resource-key"; ret = of_property_read_string(node, key, &tmp_str); if (ret) goto probe_gfx_exit; data->gfx_phase_request_key = msm_thermal_str_to_int(tmp_str); key = "qcom,gfx-phase-warm-temp"; ret = of_property_read_u32(node, key, &data->gfx_phase_warm_temp_degC); if (ret) { dev_info(&pdev->dev, "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n", KBUILD_MODNAME, node->full_name, key, ret); data->gfx_phase_warm_temp_degC = INT_MIN; goto probe_gfx_crit; } key = "qcom,gfx-phase-warm-temp-hyst"; ret = of_property_read_u32(node, key, &data->gfx_phase_warm_temp_hyst_degC); if (ret) { dev_info(&pdev->dev, "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n", KBUILD_MODNAME, node->full_name, key, ret); goto probe_gfx_crit; } ret = init_threshold(MSM_GFX_PHASE_CTRL_WARM, data->gfx_sensor, data->gfx_phase_warm_temp_degC, data->gfx_phase_warm_temp_degC - data->gfx_phase_warm_temp_hyst_degC, gfx_phase_ctrl_notify); if (ret) { pr_err("init WARM threshold failed. 
err:%d\n", ret); goto probe_gfx_crit; } gfx_warm_phase_ctrl_enabled = true; probe_gfx_crit: key = "qcom,gfx-phase-hot-crit-temp"; ret = of_property_read_u32(node, key, &data->gfx_phase_hot_temp_degC); if (ret) { data->gfx_phase_hot_temp_degC = INT_MAX; goto probe_gfx_exit; } key = "qcom,gfx-phase-hot-crit-temp-hyst"; ret = of_property_read_u32(node, key, &data->gfx_phase_hot_temp_hyst_degC); if (ret) goto probe_gfx_exit; ret = init_threshold(MSM_GFX_PHASE_CTRL_HOT, data->gfx_sensor, data->gfx_phase_hot_temp_degC, data->gfx_phase_hot_temp_degC - data->gfx_phase_hot_temp_hyst_degC, gfx_phase_ctrl_notify); if (ret) { pr_err("init HOT threshold failed. err:%d\n", ret); goto probe_gfx_exit; } gfx_crit_phase_ctrl_enabled = true; probe_gfx_exit: if (ret) { dev_info(&pdev->dev, "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n", KBUILD_MODNAME, node->full_name, key, ret); } return ret; } static int probe_cx_phase_ctrl(struct device_node *node, struct msm_thermal_data *data, struct platform_device *pdev) { char *key = NULL; const char *tmp_str; int ret = 0; key = "qcom,disable-cx-phase-ctrl"; if (of_property_read_bool(node, key)) { cx_phase_ctrl_enabled = false; return ret; } key = "qcom,rpm-phase-resource-type"; ret = of_property_read_string(node, key, &tmp_str); if (ret) goto probe_cx_exit; data->phase_rpm_resource_type = msm_thermal_str_to_int(tmp_str); key = "qcom,rpm-phase-resource-id"; ret = of_property_read_u32(node, key, &data->phase_rpm_resource_id); if (ret) goto probe_cx_exit; key = "qcom,cx-phase-resource-key"; ret = of_property_read_string(node, key, &tmp_str); if (ret) goto probe_cx_exit; data->cx_phase_request_key = msm_thermal_str_to_int(tmp_str); key = "qcom,cx-phase-hot-crit-temp"; ret = of_property_read_u32(node, key, &data->cx_phase_hot_temp_degC); if (ret) goto probe_cx_exit; key = "qcom,cx-phase-hot-crit-temp-hyst"; ret = of_property_read_u32(node, key, &data->cx_phase_hot_temp_hyst_degC); if (ret) goto probe_cx_exit; ret = 
init_threshold(MSM_CX_PHASE_CTRL_HOT, MONITOR_ALL_TSENS, data->cx_phase_hot_temp_degC, data->cx_phase_hot_temp_degC - data->cx_phase_hot_temp_hyst_degC, cx_phase_ctrl_notify); if (ret) { pr_err("init HOT threshold failed. err:%d\n", ret); goto probe_cx_exit; } cx_phase_ctrl_enabled = true; probe_cx_exit: if (ret) { dev_info(&pdev->dev, "%s:Failed reading node=%s, key=%s err=%d. KTM continues\n", KBUILD_MODNAME, node->full_name, key, ret); cx_phase_ctrl_enabled = false; } return ret; } static int probe_therm_reset(struct device_node *node, struct msm_thermal_data *data, struct platform_device *pdev) { char *key = NULL; int ret = 0; key = "qcom,therm-reset-temp"; ret = of_property_read_u32(node, key, &data->therm_reset_temp_degC); if (ret) goto PROBE_RESET_EXIT; ret = init_threshold(MSM_THERM_RESET, MONITOR_ALL_TSENS, data->therm_reset_temp_degC, data->therm_reset_temp_degC - 10, therm_reset_notify); if (ret) { pr_err("Therm reset data structure init failed\n"); goto PROBE_RESET_EXIT; } therm_reset_enabled = true; PROBE_RESET_EXIT: if (ret) { dev_info(&pdev->dev, "%s:Failed reading node=%s, key=%s err=%d. 
KTM continues\n", __func__, node->full_name, key, ret); therm_reset_enabled = false; } return ret; } static int probe_freq_mitigation(struct device_node *node, struct msm_thermal_data *data, struct platform_device *pdev) { char *key = NULL; int ret = 0; key = "qcom,freq-mitigation-temp"; ret = of_property_read_u32(node, key, &data->freq_mitig_temp_degc); if (ret) goto PROBE_FREQ_EXIT; key = "qcom,freq-mitigation-temp-hysteresis"; ret = of_property_read_u32(node, key, &data->freq_mitig_temp_hysteresis_degc); if (ret) goto PROBE_FREQ_EXIT; key = "qcom,freq-mitigation-value"; ret = of_property_read_u32(node, key, &data->freq_limit); if (ret) goto PROBE_FREQ_EXIT; key = "qcom,freq-mitigation-control-mask"; ret = of_property_read_u32(node, key, &data->freq_mitig_control_mask); if (ret) goto PROBE_FREQ_EXIT; freq_mitigation_enabled = 1; PROBE_FREQ_EXIT: if (ret) { dev_info(&pdev->dev, "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n", __func__, node->full_name, key, ret); freq_mitigation_enabled = 0; } return ret; } static int msm_thermal_dev_probe(struct platform_device *pdev) { int ret = 0; char *key = NULL; struct device_node *node = pdev->dev.of_node; struct msm_thermal_data data; memset(&data, 0, sizeof(struct msm_thermal_data)); data.pdev = pdev; ret = msm_thermal_pre_init(&pdev->dev); if (ret) { pr_err("thermal pre init failed. 
err:%d\n", ret); goto fail; } key = "qcom,sensor-id"; ret = of_property_read_u32(node, key, &data.sensor_id); if (ret) goto fail; key = "qcom,poll-ms"; ret = of_property_read_u32(node, key, &data.poll_ms); if (ret) goto fail; key = "qcom,limit-temp"; ret = of_property_read_u32(node, key, &data.limit_temp_degC); if (ret) goto fail; key = "qcom,temp-hysteresis"; ret = of_property_read_u32(node, key, &data.temp_hysteresis_degC); if (ret) goto fail; key = "qcom,freq-step"; ret = of_property_read_u32(node, key, &data.bootup_freq_step); if (ret) goto fail; key = "qcom,online-hotplug-core"; if (of_property_read_bool(node, key)) online_core = true; else online_core = false; key = "qcom,freq-control-mask"; ret = of_property_read_u32(node, key, &data.bootup_freq_control_mask); ret = probe_cc(node, &data, pdev); ret = probe_freq_mitigation(node, &data, pdev); ret = probe_cx_phase_ctrl(node, &data, pdev); ret = probe_gfx_phase_ctrl(node, &data, pdev); ret = probe_therm_reset(node, &data, pdev); ret = probe_vdd_mx(node, &data, pdev); if (ret == -EPROBE_DEFER) goto fail; /* * Probe optional properties below. Call probe_psm before * probe_vdd_rstr because rpm_regulator_get has to be called * before devm_regulator_get * probe_ocr should be called after probe_vdd_rstr to reuse the * regualtor handle. calling devm_regulator_get more than once * will fail. */ ret = probe_psm(node, &data, pdev); if (ret == -EPROBE_DEFER) goto fail; ret = probe_vdd_rstr(node, &data, pdev); if (ret == -EPROBE_DEFER) goto fail; probe_sensor_info(node, &data, pdev); ret = probe_ocr(node, &data, pdev); update_cpu_topology(&pdev->dev); //[BUGFIX]added By miao, bug 902619 #if defined(CONFIG_TCT_8X16_IDOL3) probe_boot_freq_limit(&pdev->dev); #endif /* * In case sysfs add nodes get called before probe function. 
* Need to make sure sysfs node is created again */ if (psm_nodes_called) { msm_thermal_add_psm_nodes(); psm_nodes_called = false; } if (vdd_rstr_nodes_called) { msm_thermal_add_vdd_rstr_nodes(); vdd_rstr_nodes_called = false; } if (sensor_info_nodes_called) { msm_thermal_add_sensor_info_nodes(); sensor_info_nodes_called = false; } if (ocr_nodes_called) { msm_thermal_add_ocr_nodes(); ocr_nodes_called = false; } if (cluster_info_nodes_called) { create_cpu_topology_sysfs(); cluster_info_nodes_called = false; } msm_thermal_ioctl_init(); ret = msm_thermal_init(&data); msm_thermal_probed = true; if (interrupt_mode_enable) { interrupt_mode_init(); interrupt_mode_enable = false; } return ret; fail: if (ret) pr_err("Failed reading node=%s, key=%s. err:%d\n", node->full_name, key, ret); return ret; } static int msm_thermal_dev_exit(struct platform_device *inp_dev) { int i = 0; msm_thermal_ioctl_cleanup(); if (thresh) { if (vdd_rstr_enabled) kfree(thresh[MSM_VDD_RESTRICTION].thresh_list); if (cx_phase_ctrl_enabled) kfree(thresh[MSM_CX_PHASE_CTRL_HOT].thresh_list); if (gfx_warm_phase_ctrl_enabled) kfree(thresh[MSM_GFX_PHASE_CTRL_WARM].thresh_list); if (gfx_crit_phase_ctrl_enabled) kfree(thresh[MSM_GFX_PHASE_CTRL_HOT].thresh_list); if (ocr_enabled) { for (i = 0; i < ocr_rail_cnt; i++) kfree(ocr_rails[i].attr_gp.attrs); kfree(ocr_rails); ocr_rails = NULL; kfree(thresh[MSM_OCR].thresh_list); } if (vdd_mx_enabled) { kfree(mx_kobj); kfree(mx_attr_group.attrs); kfree(thresh[MSM_VDD_MX_RESTRICTION].thresh_list); } kfree(thresh); thresh = NULL; } return 0; } static struct of_device_id msm_thermal_match_table[] = { {.compatible = "qcom,msm-thermal"}, {}, }; static struct platform_driver msm_thermal_device_driver = { .probe = msm_thermal_dev_probe, .driver = { .name = "msm-thermal", .owner = THIS_MODULE, .of_match_table = msm_thermal_match_table, }, .remove = msm_thermal_dev_exit, }; int __init msm_thermal_device_init(void) { return platform_driver_register(&msm_thermal_device_driver); 
} arch_initcall(msm_thermal_device_init); int __init msm_thermal_late_init(void) { if (num_possible_cpus() > 1) msm_thermal_add_cc_nodes(); msm_thermal_add_psm_nodes(); msm_thermal_add_vdd_rstr_nodes(); msm_thermal_add_sensor_info_nodes(); if (ocr_reg_init_defer) { if (!ocr_reg_init(msm_thermal_info.pdev)) { ocr_enabled = true; msm_thermal_add_ocr_nodes(); } } msm_thermal_add_mx_nodes(); interrupt_mode_init(); create_cpu_topology_sysfs(); //[BUGFIX]added By miao, bug 902619 #if defined(CONFIG_TCT_8X16_IDOL3) boot_freq_limit_sysfs_node(); #endif return 0; } late_initcall(msm_thermal_late_init);
TeamRegular/android_kernel_tcl_msm8916
drivers/thermal/msm_thermal.c
C
gpl-2.0
129,560
// initializeSearchBox // Description: used to set an initial value for the search // box, clear it when the user clicks into it and // reset the value if no text was entered function initializeSearchBox() { var inputPrompt = "Search people and the Web"; $("#q").val(inputPrompt); $("#q").focus(function(){ if ($(this).val() == inputPrompt) { $(this).val(""); } }); $("#q").blur(function(){ if ($(this).val() == "") { $(this).val(inputPrompt); } }); } //searchAction //Description: used to determine the form action, searching the web or directory function initializeSearchAction() { document.getElementById('search-button-people').onclick = function() { var form = document.getElementById('searchform'); form.action = 'http://www3.unca.edu/directory/'; form.method = 'POST' $('.cse').remove(); form.submit(); } } // initializeNavigationSecondary // Description: used to create expandable subnavigation menus function initializeNavigationSecondary () { //close open menus and change the trigger image if($.browser.msie && $.browser.version == "6.0") { return; } $('.navigation-secondary ul.menu li ul').hide(); $('.navigation-secondary ul.menu li.active-trail ul').show(); $('.navigation-secondary ul.menu li.active-trail ul li ul').hide(); $('.navigation-secondary ul.menu li.active-trail ul li.active-trail ul').show(); $(".navigation-secondary ul.menu li.expanded ul").before("<span class='trigger trigger-closed'/>"); $('.navigation-secondary ul.menu li.active-trail span').toggleClass('trigger-closed'); $('.navigation-secondary ul.menu li.active-trail ul li span').toggleClass('trigger-closed'); $('.navigation-secondary ul.menu li.active-trail ul li.active-trail span').toggleClass('trigger-closed'); $('.trigger').click(function() { $(this).toggleClass('trigger-closed'); $(this).siblings('ul').toggle(); return false; }); } function initializeSlideshow(width) { var currentPosition = 0; var slideWidth = width; var slides = $('.slide'); var numberOfSlides = slides.length; // Remove 
scrollbar in JS $('#slidesContainer').css('overflow', 'hidden'); // Wrap all .slides with #slideInner div slides .wrapAll('<div id="slideInner"></div>') // Float left to display horizontally, readjust .slides width .css({ 'float' : 'left', 'width' : slideWidth }); // Set #slideInner width equal to total width of all slides $('#slideInner').css('width', slideWidth * numberOfSlides); // Insert left and right arrow controls in the DOM $('#slideshow') .append('<span class="control" id="leftControl">Move left</span>') .append('<span class="control" id="rightControl">Move right</span>'); // Hide left arrow control on first load manageControls(currentPosition); // Create event listeners for .controls clicks $('.control').bind('click', function(){ // Determine new position currentPosition = ($(this).attr('id')=='rightControl') ? currentPosition+1 : currentPosition-1; // Hide / show controls manageControls(currentPosition); // Move slideInner using margin-left $('#slideInner').animate({ 'marginLeft' : slideWidth*(-currentPosition) }); }); // manageControls: Hides and shows controls depending on currentPosition function manageControls(position){ // Hide left arrow if position is first slide if(position==0){ $('#leftControl').hide() } else{ $('#leftControl').show() } // Hide right arrow if position is last slide if(position==numberOfSlides-1){ $('#rightControl').hide() } else{ $('#rightControl').show() } } }
shermang/unca
sites/all/themes/unca/unca_department/js/functions.js
JavaScript
gpl-2.0
3,690
{% extends "user_profile_base_extended.html" %} {% load i18n %} {% block title %}{% trans "Profile" %} | {{ block.super }}{% endblock %} {% block breadcrumbs %} {{ block.super }}<li>{% trans "Profile" %}</li> {% endblock %} {% block body_id %}profile{% endblock %} {% block section_description %}{% trans "Here you can change your personal details. This information will be used in the header of the translation files." %}{% endblock %} {% block section_body %} <form method="post" action=""> {% csrf_token %} <p> <label for="id_first_name">{% trans 'First Name' %}</label> {{ form.first_name }} {{ form.first_name.errors }} </p> <p> <label for="id_last_name">{% trans 'Last Name' %}</label> {{ form.last_name }} {{ form.last_name.errors }} </p> <p> <label for="id_email">{% trans 'Email address' %}</label> {{ form.email }} {{ form.email.errors }} </p> <p class="buttons"> <input type="submit" class="save" value="{% trans 'Save' %}" /> </p> </form> {% endblock %}
ttreeagency/PootleTypo3Org
pootle/templates/profiles/edit_personal.html
HTML
gpl-2.0
1,085
#ifndef __WHOWAS_H #define __WHOWAS_H #define WW_MAXCHANNELS 20 #define WW_DEFAULT_MAXENTRIES 1000 #define WW_MASKLEN (HOSTLEN + USERLEN + NICKLEN) #define WW_REASONLEN 512 typedef struct whowas { int type; time_t timestamp; nick nick; /* unlinked nick */ chanindex *channels[WW_MAXCHANNELS]; /* WHOWAS_QUIT or WHOWAS_KILL */ sstring *reason; /* WHOWAS_RENAME */ sstring *newnick; unsigned int marker; struct whowas *next; struct whowas *prev; } whowas; extern whowas *whowasrecs; extern int whowasmax; extern int whowasoffset; /* points to oldest record */ #define WHOWAS_UNUSED 0 #define WHOWAS_USED 1 #define WHOWAS_QUIT 2 #define WHOWAS_KILL 3 #define WHOWAS_RENAME 4 whowas *whowas_fromnick(nick *np, int standalone); nick *whowas_tonick(whowas *ww); void whowas_freenick(nick *np); whowas *whowas_chase(const char *target, int maxage); const char *whowas_format(whowas *ww); const char *whowas_formatchannels(whowas *ww); void whowas_clean(whowas *ww); void whowas_free(whowas *ww); unsigned int nextwhowasmarker(void); #endif /* __WHOWAS_H */
NikosPapakonstantinou/newserv
whowas/whowas.h
C
gpl-2.0
1,083
/* objdump.c -- dump information about an object file. Copyright (C) 1990-2020 Free Software Foundation, Inc. This file is part of GNU Binutils. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ /* Objdump overview. Objdump displays information about one or more object files, either on their own, or inside libraries. It is commonly used as a disassembler, but it can also display information about file headers, symbol tables, relocations, debugging directives and more. The flow of execution is as follows: 1. Command line arguments are checked for control switches and the information to be displayed is selected. 2. Any remaining arguments are assumed to be object files, and they are processed in order by display_bfd(). If the file is an archive each of its elements is processed in turn. 3. The file's target architecture and binary file format are determined by bfd_check_format(). If they are recognised, then dump_bfd() is called. 4. dump_bfd() in turn calls separate functions to display the requested item(s) of information(s). For example disassemble_data() is called if a disassembly has been requested. When disassembling the code loops through blocks of instructions bounded by symbols, calling disassemble_bytes() on each block. The actual disassembling is done by the libopcodes library, via a function pointer supplied by the disassembler() function. 
*/ #include "sysdep.h" #include "bfd.h" #include "elf-bfd.h" #include "coff-bfd.h" #include "progress.h" #include "bucomm.h" #include "elfcomm.h" #include "dwarf.h" #include "ctf-api.h" #include "getopt.h" #include "safe-ctype.h" #include "dis-asm.h" #include "libiberty.h" #include "demangle.h" #include "filenames.h" #include "debug.h" #include "budbg.h" #include "objdump.h" #ifdef HAVE_MMAP #include <sys/mman.h> #endif /* Internal headers for the ELF .stab-dump code - sorry. */ #define BYTES_IN_WORD 32 #include "aout/aout64.h" /* Exit status. */ static int exit_status = 0; static char *default_target = NULL; /* Default at runtime. */ /* The following variables are set based on arguments passed on the command line. */ static int show_version = 0; /* Show the version number. */ static int dump_section_contents; /* -s */ static int dump_section_headers; /* -h */ static bfd_boolean dump_file_header; /* -f */ static int dump_symtab; /* -t */ static int dump_dynamic_symtab; /* -T */ static int dump_reloc_info; /* -r */ static int dump_dynamic_reloc_info; /* -R */ static int dump_ar_hdrs; /* -a */ static int dump_private_headers; /* -p */ static char *dump_private_options; /* -P */ static int no_addresses; /* --no-addresses */ static int prefix_addresses; /* --prefix-addresses */ static int with_line_numbers; /* -l */ static bfd_boolean with_source_code; /* -S */ static int show_raw_insn; /* --show-raw-insn */ static int dump_dwarf_section_info; /* --dwarf */ static int dump_stab_section_info; /* --stabs */ static int dump_ctf_section_info; /* --ctf */ static char *dump_ctf_section_name; static char *dump_ctf_parent_name; /* --ctf-parent */ static int do_demangle; /* -C, --demangle */ static bfd_boolean disassemble; /* -d */ static bfd_boolean disassemble_all; /* -D */ static int disassemble_zeroes; /* --disassemble-zeroes */ static bfd_boolean formats_info; /* -i */ static int wide_output; /* -w */ static int insn_width; /* --insn-width */ static bfd_vma start_address = 
(bfd_vma) -1; /* --start-address */ static bfd_vma stop_address = (bfd_vma) -1; /* --stop-address */ static int dump_debugging; /* --debugging */ static int dump_debugging_tags; /* --debugging-tags */ static int suppress_bfd_header; static int dump_special_syms = 0; /* --special-syms */ static bfd_vma adjust_section_vma = 0; /* --adjust-vma */ static int file_start_context = 0; /* --file-start-context */ static bfd_boolean display_file_offsets;/* -F */ static const char *prefix; /* --prefix */ static int prefix_strip; /* --prefix-strip */ static size_t prefix_length; static bfd_boolean unwind_inlines; /* --inlines. */ static const char * disasm_sym; /* Disassembly start symbol. */ static const char * source_comment; /* --source_comment. */ static bfd_boolean visualize_jumps = FALSE; /* --visualize-jumps. */ static bfd_boolean color_output = FALSE; /* --visualize-jumps=color. */ static bfd_boolean extended_color_output = FALSE; /* --visualize-jumps=extended-color. */ static int demangle_flags = DMGL_ANSI | DMGL_PARAMS; /* A structure to record the sections mentioned in -j switches. */ struct only { const char * name; /* The name of the section. */ bfd_boolean seen; /* A flag to indicate that the section has been found in one or more input files. */ struct only * next; /* Pointer to the next structure in the list. */ }; /* Pointer to an array of 'only' structures. This pointer is NULL if the -j switch has not been used. */ static struct only * only_list = NULL; /* Variables for handling include file path table. */ static const char **include_paths; static int include_path_count; /* Extra info to pass to the section disassembler and address printing function. */ struct objdump_disasm_info { bfd * abfd; bfd_boolean require_sec; arelent ** dynrelbuf; long dynrelcount; disassembler_ftype disassemble_fn; arelent * reloc; const char * symbol; }; /* Architecture to disassemble for, or default if NULL. 
*/
static char *machine = NULL;

/* Target specific options to the disassembler.  */
static char *disassembler_options = NULL;

/* Endianness to disassemble for, or default if BFD_ENDIAN_UNKNOWN.  */
static enum bfd_endian endian = BFD_ENDIAN_UNKNOWN;

/* The symbol table.  */
static asymbol **syms;

/* Number of symbols in `syms'.  */
static long symcount = 0;

/* The sorted symbol table.  */
static asymbol **sorted_syms;

/* Number of symbols in `sorted_syms'.  */
static long sorted_symcount = 0;

/* The dynamic symbol table.  */
static asymbol **dynsyms;

/* The synthetic symbol table.  */
static asymbol *synthsyms;
static long synthcount = 0;

/* Number of symbols in `dynsyms'.  */
static long dynsymcount = 0;

/* Raw stabs debug data and the accompanying string table, when read.  */
static bfd_byte *stabs;
static bfd_size_type stab_size;

static bfd_byte *strtab;
static bfd_size_type stabstr_size;

/* Handlers for -P/--private.  */
static const struct objdump_private_desc * const objdump_private_vectors[] =
  {
    OBJDUMP_PRIVATE_VECTORS
    NULL
  };

/* The list of detected jumps inside a function.  */
static struct jump_info *detected_jumps = NULL;

/* Print the help text to STREAM and terminate with exit code STATUS.
   STATUS == 2 suppresses the list of optional switches; STATUS == 0
   additionally prints the bug-report address.  Never returns.  */
static void usage (FILE *, int) ATTRIBUTE_NORETURN;
static void
usage (FILE *stream, int status)
{
  fprintf (stream, _("Usage: %s <option(s)> <file(s)>\n"), program_name);
  fprintf (stream, _(" Display information from object <file(s)>.\n"));
  fprintf (stream, _(" At least one of the following switches must be given:\n"));
  fprintf (stream, _("\
 -a, --archive-headers Display archive header information\n\
 -f, --file-headers Display the contents of the overall file header\n\
 -p, --private-headers Display object format specific file header contents\n\
 -P, --private=OPT,OPT... Display object format specific contents\n\
 -h, --[section-]headers Display the contents of the section headers\n\
 -x, --all-headers Display the contents of all headers\n\
 -d, --disassemble Display assembler contents of executable sections\n\
 -D, --disassemble-all Display assembler contents of all sections\n\
 --disassemble=<sym> Display assembler contents from <sym>\n\
 -S, --source Intermix source code with disassembly\n\
 --source-comment[=<txt>] Prefix lines of source code with <txt>\n\
 -s, --full-contents Display the full contents of all sections requested\n\
 -g, --debugging Display debug information in object file\n\
 -e, --debugging-tags Display debug information using ctags style\n\
 -G, --stabs Display (in raw form) any STABS info in the file\n\
 -W[lLiaprmfFsoRtUuTgAckK] or\n\
 --dwarf[=rawline,=decodedline,=info,=abbrev,=pubnames,=aranges,=macro,=frames,\n\
 =frames-interp,=str,=loc,=Ranges,=pubtypes,\n\
 =gdb_index,=trace_info,=trace_abbrev,=trace_aranges,\n\
 =addr,=cu_index,=links,=follow-links]\n\
 Display DWARF info in the file\n\
 --ctf=SECTION Display CTF info from SECTION\n\
 -t, --syms Display the contents of the symbol table(s)\n\
 -T, --dynamic-syms Display the contents of the dynamic symbol table\n\
 -r, --reloc Display the relocation entries in the file\n\
 -R, --dynamic-reloc Display the dynamic relocation entries in the file\n\
 @<file> Read options from <file>\n\
 -v, --version Display this program's version number\n\
 -i, --info List object formats and architectures supported\n\
 -H, --help Display this information\n\
"));
  /* STATUS == 2 means a usage error: keep the output short and skip
     the optional-switch catalogue below.  */
  if (status != 2)
    {
      const struct objdump_private_desc * const *desc;

      fprintf (stream, _("\n The following switches are optional:\n"));
      fprintf (stream, _("\
 -b, --target=BFDNAME Specify the target object format as BFDNAME\n\
 -m, --architecture=MACHINE Specify the target architecture as MACHINE\n\
 -j, --section=NAME Only display information for section NAME\n\
 -M, --disassembler-options=OPT Pass text OPT on to the disassembler\n\
 -EB --endian=big Assume big endian format when disassembling\n\
 -EL --endian=little Assume little endian format when disassembling\n\
 --file-start-context Include context from start of file (with -S)\n\
 -I, --include=DIR Add DIR to search list for source files\n\
 -l, --line-numbers Include line numbers and filenames in output\n\
 -F, --file-offsets Include file offsets when displaying information\n\
 -C, --demangle[=STYLE] Decode mangled/processed symbol names\n\
 The STYLE, if specified, can be `auto', `gnu',\n\
 `lucid', `arm', `hp', `edg', `gnu-v3', `java'\n\
 or `gnat'\n\
 --recurse-limit Enable a limit on recursion whilst demangling. [Default]\n\
 --no-recurse-limit Disable a limit on recursion whilst demangling\n\
 -w, --wide Format output for more than 80 columns\n\
 -z, --disassemble-zeroes Do not skip blocks of zeroes when disassembling\n\
 --start-address=ADDR Only process data whose address is >= ADDR\n\
 --stop-address=ADDR Only process data whose address is < ADDR\n\
 --no-addresses Do not print address alongside disassembly\n\
 --prefix-addresses Print complete address alongside disassembly\n\
 --[no-]show-raw-insn Display hex alongside symbolic disassembly\n\
 --insn-width=WIDTH Display WIDTH bytes on a single line for -d\n\
 --adjust-vma=OFFSET Add OFFSET to all displayed section addresses\n\
 --special-syms Include special symbols in symbol dumps\n\
 --inlines Print all inlines for source line (with -l)\n\
 --prefix=PREFIX Add PREFIX to absolute paths for -S\n\
 --prefix-strip=LEVEL Strip initial directory names for -S\n"));
      fprintf (stream, _("\
 --dwarf-depth=N Do not display DIEs at depth N or greater\n\
 --dwarf-start=N Display DIEs starting with N, at the same depth\n\
 or deeper\n\
 --dwarf-check Make additional dwarf internal consistency checks.\
\n\
 --ctf-parent=SECTION Use SECTION as the CTF parent\n\
 --visualize-jumps Visualize jumps by drawing ASCII art lines\n\
 --visualize-jumps=color Use colors in the ASCII art\n\
 --visualize-jumps=extended-color Use extended 8-bit color codes\n\
 --visualize-jumps=off Disable jump visualization\n\n"));

      list_supported_targets (program_name, stream);
      list_supported_architectures (program_name, stream);

      disassembler_usage (stream);

      /* Give each -P/--private backend a chance to describe itself.  */
      if (objdump_private_vectors[0] != NULL)
	{
	  fprintf (stream,
		   _("\nOptions supported for -P/--private switch:\n"));
	  for (desc = objdump_private_vectors; *desc != NULL; desc++)
	    (*desc)->help (stream);
	}
    }
  if (REPORT_BUGS_TO[0] && status == 0)
    fprintf (stream, _("Report bugs to %s.\n"), REPORT_BUGS_TO);
  exit (status);
}

/* 150 isn't special; it's just an arbitrary non-ASCII char value.
   These are the getopt_long codes for long options that have no
   single-character equivalent.  */
enum option_values
  {
    OPTION_ENDIAN=150,
    OPTION_START_ADDRESS,
    OPTION_STOP_ADDRESS,
    OPTION_DWARF,
    OPTION_PREFIX,
    OPTION_PREFIX_STRIP,
    OPTION_INSN_WIDTH,
    OPTION_ADJUST_VMA,
    OPTION_DWARF_DEPTH,
    OPTION_DWARF_CHECK,
    OPTION_DWARF_START,
    OPTION_RECURSE_LIMIT,
    OPTION_NO_RECURSE_LIMIT,
    OPTION_INLINES,
    OPTION_SOURCE_COMMENT,
    OPTION_CTF,
    OPTION_CTF_PARENT,
    OPTION_VISUALIZE_JUMPS
  };

/* Long option table for getopt_long.  Entries with a non-NULL flag
   pointer set the pointed-to int directly instead of returning a code.  */
static struct option long_options[]=
{
  {"adjust-vma", required_argument, NULL, OPTION_ADJUST_VMA},
  {"all-headers", no_argument, NULL, 'x'},
  {"private-headers", no_argument, NULL, 'p'},
  {"private", required_argument, NULL, 'P'},
  {"architecture", required_argument, NULL, 'm'},
  {"archive-headers", no_argument, NULL, 'a'},
  {"debugging", no_argument, NULL, 'g'},
  {"debugging-tags", no_argument, NULL, 'e'},
  {"demangle", optional_argument, NULL, 'C'},
  {"disassemble", optional_argument, NULL, 'd'},
  {"disassemble-all", no_argument, NULL, 'D'},
  {"disassembler-options", required_argument, NULL, 'M'},
  {"disassemble-zeroes", no_argument, NULL, 'z'},
  {"dynamic-reloc", no_argument, NULL, 'R'},
  {"dynamic-syms", no_argument, NULL, 'T'},
  {"endian", required_argument, NULL, OPTION_ENDIAN},
  {"file-headers", no_argument, NULL, 'f'},
  {"file-offsets", no_argument, NULL, 'F'},
  {"file-start-context", no_argument, &file_start_context, 1},
  {"full-contents", no_argument, NULL, 's'},
  {"headers", no_argument, NULL, 'h'},
  {"help", no_argument, NULL, 'H'},
  {"info", no_argument, NULL, 'i'},
  {"line-numbers", no_argument, NULL, 'l'},
  {"no-show-raw-insn", no_argument, &show_raw_insn, -1},
  {"no-addresses", no_argument, &no_addresses, 1},
  {"prefix-addresses", no_argument, &prefix_addresses, 1},
  {"recurse-limit", no_argument, NULL, OPTION_RECURSE_LIMIT},
  {"recursion-limit", no_argument, NULL, OPTION_RECURSE_LIMIT},
  {"no-recurse-limit", no_argument, NULL, OPTION_NO_RECURSE_LIMIT},
  {"no-recursion-limit", no_argument, NULL, OPTION_NO_RECURSE_LIMIT},
  {"reloc", no_argument, NULL, 'r'},
  {"section", required_argument, NULL, 'j'},
  {"section-headers", no_argument, NULL, 'h'},
  {"show-raw-insn", no_argument, &show_raw_insn, 1},
  {"source", no_argument, NULL, 'S'},
  {"source-comment", optional_argument, NULL, OPTION_SOURCE_COMMENT},
  {"special-syms", no_argument, &dump_special_syms, 1},
  {"include", required_argument, NULL, 'I'},
  {"dwarf", optional_argument, NULL, OPTION_DWARF},
  {"ctf", required_argument, NULL, OPTION_CTF},
  {"ctf-parent", required_argument, NULL, OPTION_CTF_PARENT},
  {"stabs", no_argument, NULL, 'G'},
  {"start-address", required_argument, NULL, OPTION_START_ADDRESS},
  {"stop-address", required_argument, NULL, OPTION_STOP_ADDRESS},
  {"syms", no_argument, NULL, 't'},
  {"target", required_argument, NULL, 'b'},
  {"version", no_argument, NULL, 'V'},
  {"wide", no_argument, NULL, 'w'},
  {"prefix", required_argument, NULL, OPTION_PREFIX},
  {"prefix-strip", required_argument, NULL, OPTION_PREFIX_STRIP},
  {"insn-width", required_argument, NULL, OPTION_INSN_WIDTH},
  {"dwarf-depth", required_argument, 0, OPTION_DWARF_DEPTH},
  {"dwarf-start", required_argument, 0, OPTION_DWARF_START},
  {"dwarf-check", no_argument, 0, OPTION_DWARF_CHECK},
  {"inlines", no_argument, 0, OPTION_INLINES},
  {"visualize-jumps", optional_argument, 0, OPTION_VISUALIZE_JUMPS},
  {0, no_argument, 0, 0}
};

/* Report MSG via bfd_nonfatal and remember the failure in the global
   exit status, but keep processing further files.  */
static void
nonfatal (const char *msg)
{
  bfd_nonfatal (msg);
  exit_status = 1;
}

/* Returns a version of IN with any control characters
   replaced by escape sequences.
Uses a static buffer if necessary.

   The returned pointer is only valid until the next call (the static
   buffer is reused), so callers must consume it immediately.  */

static const char *
sanitize_string (const char * in)
{
  static char * buffer = NULL;
  static size_t buffer_len = 0;
  const char * original = in;
  char * out;

  /* Paranoia.  */
  if (in == NULL)
    return "";

  /* See if any conversion is necessary.  In the majority of cases
     it will not be needed.  */
  do
    {
      char c = *in++;

      if (c == 0)
	return original;

      if (ISCNTRL (c))
	break;
    }
  while (1);

  /* Copy the input, translating as needed.  Worst case every byte
     expands to two ('^' plus the shifted character), hence len * 2.  */
  in = original;
  if (buffer_len < (strlen (in) * 2))
    {
      free ((void *) buffer);
      buffer_len = strlen (in) * 2;
      buffer = xmalloc (buffer_len + 1);
    }

  out = buffer;
  do
    {
      char c = *in++;

      if (c == 0)
	break;

      if (!ISCNTRL (c))
	*out++ = c;
      else
	{
	  /* Caret notation: control char N prints as '^' followed by
	     the character 0x40 above it (e.g. 0x01 -> "^A").  */
	  *out++ = '^';
	  *out++ = c + 0x40;
	}
    }
  while (1);

  *out = 0;
  return buffer;
}

/* Returns TRUE if the specified section should be dumped.  An empty
   -j list means every section is wanted.  */

static bfd_boolean
process_section_p (asection * section)
{
  struct only * only;

  if (only_list == NULL)
    return TRUE;

  for (only = only_list; only; only = only->next)
    if (strcmp (only->name, section->name) == 0)
      {
	/* Record the match so free_only_list can warn about -j names
	   that never matched any section.  */
	only->seen = TRUE;
	return TRUE;
      }

  return FALSE;
}

/* Add an entry to the 'only' list.  NAME is kept by reference, not
   copied, so it must outlive the list (it comes from argv).  */

static void
add_only (char * name)
{
  struct only * only;

  /* First check to make sure that we do not
     already have an entry for this name.  */
  for (only = only_list; only; only = only->next)
    if (strcmp (only->name, name) == 0)
      return;

  only = xmalloc (sizeof * only);
  only->name = name;
  only->seen = FALSE;
  only->next = only_list;
  only_list = only;
}

/* Release the memory used by the 'only' list.
   PR 11225: Issue a warning message for unseen sections.
   Only do this if none of the sections were seen.  This is mainly to
   support tools like the GAS testsuite where an object file is dumped
   with a list of generic section names known to be present in a range
   of different file formats.
*/

static void
free_only_list (void)
{
  bfd_boolean at_least_one_seen = FALSE;
  struct only * only;
  struct only * next;

  if (only_list == NULL)
    return;

  /* First pass: did any -j name match at least one section?  */
  for (only = only_list; only; only = only->next)
    if (only->seen)
      {
	at_least_one_seen = TRUE;
	break;
      }

  /* Second pass: free the nodes, warning about every name only when
     nothing at all matched (see PR 11225 note above).  */
  for (only = only_list; only; only = next)
    {
      if (! at_least_one_seen)
	{
	  non_fatal (_("section '%s' mentioned in a -j option, "
		       "but not found in any input file"),
		     only->name);
	  exit_status = 1;
	}
      next = only->next;
      free (only);
    }
}

/* Section callback for dump_headers: print one row of the section
   table for SECTION.  DATA points to the column width for the name
   field, as precomputed by find_longest_section_name.  */

static void
dump_section_header (bfd *abfd, asection *section, void *data)
{
  char *comma = "";
  unsigned int opb = bfd_octets_per_byte (abfd, section);
  int longest_section_name = *((int *) data);

  /* Ignore linker created section.  See elfNN_ia64_object_p in
     bfd/elfxx-ia64.c.  */
  if (section->flags & SEC_LINKER_CREATED)
    return;

  /* PR 10413: Skip sections that we are ignoring.  */
  if (! process_section_p (section))
    return;

  printf ("%3d %-*s %08lx ", section->index, longest_section_name,
	  sanitize_string (bfd_section_name (section)),
	  (unsigned long) bfd_section_size (section) / opb);
  bfd_printf_vma (abfd, bfd_section_vma (section));
  printf (" ");
  bfd_printf_vma (abfd, section->lma);
  printf (" %08lx 2**%u", (unsigned long) section->filepos,
	  bfd_section_alignment (section));
  if (! wide_output)
    printf ("\n ");
  printf (" ");

  /* Print a flag name, prefixed with ", " after the first one.  */
#define PF(x, y) \
  if (section->flags & x) { printf ("%s%s", comma, y); comma = ", "; }

  PF (SEC_HAS_CONTENTS, "CONTENTS");
  PF (SEC_ALLOC, "ALLOC");
  PF (SEC_CONSTRUCTOR, "CONSTRUCTOR");
  PF (SEC_LOAD, "LOAD");
  PF (SEC_RELOC, "RELOC");
  PF (SEC_READONLY, "READONLY");
  PF (SEC_CODE, "CODE");
  PF (SEC_DATA, "DATA");
  PF (SEC_ROM, "ROM");
  PF (SEC_DEBUGGING, "DEBUGGING");
  PF (SEC_NEVER_LOAD, "NEVER_LOAD");
  PF (SEC_EXCLUDE, "EXCLUDE");
  PF (SEC_SORT_ENTRIES, "SORT_ENTRIES");

  /* Some flags are only meaningful for particular architectures or
     object-file flavours.  */
  if (bfd_get_arch (abfd) == bfd_arch_tic54x)
    {
      PF (SEC_TIC54X_BLOCK, "BLOCK");
      PF (SEC_TIC54X_CLINK, "CLINK");
    }
  PF (SEC_SMALL_DATA, "SMALL_DATA");
  if (bfd_get_flavour (abfd) == bfd_target_coff_flavour)
    {
      PF (SEC_COFF_SHARED, "SHARED");
      PF (SEC_COFF_NOREAD, "NOREAD");
    }
  else if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
    {
      PF (SEC_ELF_OCTETS, "OCTETS");
      PF (SEC_ELF_PURECODE, "PURECODE");
    }
  PF (SEC_THREAD_LOCAL, "THREAD_LOCAL");
  PF (SEC_GROUP, "GROUP");
  if (bfd_get_arch (abfd) == bfd_arch_mep)
    {
      PF (SEC_MEP_VLIW, "VLIW");
    }

  if ((section->flags & SEC_LINK_ONCE) != 0)
    {
      const char *ls;
      struct coff_comdat_info *comdat;

      switch (section->flags & SEC_LINK_DUPLICATES)
	{
	default:
	  abort ();

	case SEC_LINK_DUPLICATES_DISCARD:
	  ls = "LINK_ONCE_DISCARD";
	  break;

	case SEC_LINK_DUPLICATES_ONE_ONLY:
	  ls = "LINK_ONCE_ONE_ONLY";
	  break;

	case SEC_LINK_DUPLICATES_SAME_SIZE:
	  ls = "LINK_ONCE_SAME_SIZE";
	  break;

	case SEC_LINK_DUPLICATES_SAME_CONTENTS:
	  ls = "LINK_ONCE_SAME_CONTENTS";
	  break;
	}
      printf ("%s%s", comma, ls);

      comdat = bfd_coff_get_comdat_section (abfd, section);
      if (comdat != NULL)
	printf (" (COMDAT %s %ld)", comdat->name, comdat->symbol);

      comma = ", ";
    }

  printf ("\n");
#undef PF
}

/* Called on each SECTION in ABFD, update the int variable pointed to by
   DATA which contains the string length of the longest section name.
*/

static void
find_longest_section_name (bfd *abfd ATTRIBUTE_UNUSED,
			   asection *section, void *data)
{
  int *longest_so_far = (int *) data;
  const char *name;
  int len;

  /* Ignore linker created section.  */
  if (section->flags & SEC_LINKER_CREATED)
    return;

  /* Skip sections that we are ignoring.  */
  if (! process_section_p (section))
    return;

  name = bfd_section_name (section);
  len = (int) strlen (name);
  if (len > *longest_so_far)
    *longest_so_far = len;
}

/* Print the section table of ABFD, one row per section via
   dump_section_header.  */

static void
dump_headers (bfd *abfd)
{
  /* The default width of 13 is just an arbitrary choice.  */
  int max_section_name_length = 13;
  int bfd_vma_width;

#ifndef BFD64
  bfd_vma_width = 10;
#else
  /* With BFD64, non-ELF returns -1 and wants always 64 bit addresses.  */
  if (bfd_get_arch_size (abfd) == 32)
    bfd_vma_width = 10;
  else
    bfd_vma_width = 18;
#endif

  printf (_("Sections:\n"));

  /* In wide mode size the Name column to the longest actual name
     instead of the fixed default.  */
  if (wide_output)
    bfd_map_over_sections (abfd, find_longest_section_name,
			   &max_section_name_length);

  printf (_("Idx %-*s Size %-*s%-*sFile off Algn"),
	  max_section_name_length, "Name",
	  bfd_vma_width, "VMA", bfd_vma_width, "LMA");

  if (wide_output)
    printf (_(" Flags"));
  printf ("\n");

  bfd_map_over_sections (abfd, dump_section_header,
			 &max_section_name_length);
}

/* Read the static symbol table of ABFD into a freshly allocated array
   and set the global `symcount'.  Returns NULL (with symcount 0) when
   the file has no symbols or the table is obviously corrupt.  */

static asymbol **
slurp_symtab (bfd *abfd)
{
  asymbol **sy = NULL;
  long storage;

  if (!(bfd_get_file_flags (abfd) & HAS_SYMS))
    {
      symcount = 0;
      return NULL;
    }

  storage = bfd_get_symtab_upper_bound (abfd);
  if (storage < 0)
    {
      non_fatal (_("failed to read symbol table from: %s"),
		 bfd_get_filename (abfd));
      bfd_fatal (_("error message was"));
    }

  if (storage)
    {
      off_t filesize = bfd_get_file_size (abfd);

      /* qv PR 24707.  Sanity check: a symbol table cannot legitimately
	 be bigger than the file that contains it.  */
      if (filesize > 0
	  && filesize < storage
	  /* The MMO file format supports its own special compression
	     technique, so its sections can be larger than the file size.  */
	  && bfd_get_flavour (abfd) != bfd_target_mmo_flavour)
	{
	  bfd_nonfatal_message (bfd_get_filename (abfd), abfd, NULL,
				_("error: symbol table size (%#lx) is larger than filesize (%#lx)"),
				storage, (long) filesize);
	  exit_status = 1;
	  symcount = 0;
	  return NULL;
	}

      sy = (asymbol **) xmalloc (storage);
    }

  symcount = bfd_canonicalize_symtab (abfd, sy);
  if (symcount < 0)
    bfd_fatal (bfd_get_filename (abfd));

  return sy;
}

/* Read in the dynamic symbols.  Sets the global `dynsymcount'; fatal
   only when the object claims to be dynamic but cannot be read.  */

static asymbol **
slurp_dynamic_symtab (bfd *abfd)
{
  asymbol **sy = NULL;
  long storage;

  storage = bfd_get_dynamic_symtab_upper_bound (abfd);
  if (storage < 0)
    {
      if (!(bfd_get_file_flags (abfd) & DYNAMIC))
	{
	  non_fatal (_("%s: not a dynamic object"), bfd_get_filename (abfd));
	  exit_status = 1;
	  dynsymcount = 0;
	  return NULL;
	}

      bfd_fatal (bfd_get_filename (abfd));
    }

  if (storage)
    sy = (asymbol **) xmalloc (storage);

  dynsymcount = bfd_canonicalize_dynamic_symtab (abfd, sy);
  if (dynsymcount < 0)
    bfd_fatal (bfd_get_filename (abfd));

  return sy;
}

/* Some symbol names are significant and should be kept in the
   table of sorted symbol names, even if they are marked as
   debugging/section symbols.  */

static bfd_boolean
is_significant_symbol_name (const char * name)
{
  return strncmp (name, ".plt", 4) == 0 || strcmp (name, ".got") == 0;
}

/* Filter out (in place) symbols that are useless for disassembly.
   COUNT is the number of elements in SYMBOLS.
   Return the number of useful symbols.  */

static long
remove_useless_symbols (asymbol **symbols, long count)
{
  asymbol **in_ptr = symbols, **out_ptr = symbols;

  while (--count >= 0)
    {
      asymbol *sym = *in_ptr++;

      /* Drop nameless, debugging/section-only, undefined and common
	 symbols -- none of them labels an address in a dumped section.  */
      if (sym->name == NULL || sym->name[0] == '\0')
	continue;
      if ((sym->flags & (BSF_DEBUGGING | BSF_SECTION_SYM))
	  && ! is_significant_symbol_name (sym->name))
	continue;
      if (bfd_is_und_section (sym->section)
	  || bfd_is_com_section (sym->section))
	continue;

      *out_ptr++ = sym;
    }
  return out_ptr - symbols;
}

/* Section being disassembled; consulted by compare_symbols so symbols
   from it sort ahead of equal-valued symbols from other sections.  */
static const asection *compare_section;

/* Sort symbols into value order.
*/

static int
compare_symbols (const void *ap, const void *bp)
{
  const asymbol *a = * (const asymbol **) ap;
  const asymbol *b = * (const asymbol **) bp;
  const char *an;
  const char *bn;
  size_t anl;
  size_t bnl;
  bfd_boolean as, af, bs, bf;
  flagword aflags;
  flagword bflags;

  /* Primary key: symbol value (address).  */
  if (bfd_asymbol_value (a) > bfd_asymbol_value (b))
    return 1;
  else if (bfd_asymbol_value (a) < bfd_asymbol_value (b))
    return -1;

  /* Prefer symbols from the section currently being disassembled.
     Don't sort symbols from other sections by section, since there
     isn't much reason to prefer one section over another otherwise.
     See sym_ok comment for why we compare by section name.  */
  as = strcmp (compare_section->name, a->section->name) == 0;
  bs = strcmp (compare_section->name, b->section->name) == 0;
  if (as && !bs)
    return -1;
  if (!as && bs)
    return 1;

  an = bfd_asymbol_name (a);
  bn = bfd_asymbol_name (b);
  anl = strlen (an);
  bnl = strlen (bn);

  /* The symbols gnu_compiled and gcc2_compiled convey no real
     information, so put them after other symbols with the same value.  */
  af = (strstr (an, "gnu_compiled") != NULL
	|| strstr (an, "gcc2_compiled") != NULL);
  bf = (strstr (bn, "gnu_compiled") != NULL
	|| strstr (bn, "gcc2_compiled") != NULL);

  if (af && ! bf)
    return 1;
  if (! af && bf)
    return -1;

  /* We use a heuristic for the file name, to try to sort it after
     more useful symbols.  It may not work on non Unix systems, but it
     doesn't really matter; the only difference is precisely which
     symbol names get printed.  */

#define file_symbol(s, sn, snl)			\
  (((s)->flags & BSF_FILE) != 0			\
   || ((snl) > 2				\
       && (sn)[(snl) - 2] == '.'		\
       && ((sn)[(snl) - 1] == 'o'		\
	   || (sn)[(snl) - 1] == 'a')))

  af = file_symbol (a, an, anl);
  bf = file_symbol (b, bn, bnl);

  if (af && ! bf)
    return 1;
  if (! af && bf)
    return -1;

  /* Sort function and object symbols before global symbols before
     local symbols before section symbols before debugging symbols.  */

  aflags = a->flags;
  bflags = b->flags;

  if ((aflags & BSF_DEBUGGING) != (bflags & BSF_DEBUGGING))
    {
      if ((aflags & BSF_DEBUGGING) != 0)
	return 1;
      else
	return -1;
    }
  if ((aflags & BSF_SECTION_SYM) != (bflags & BSF_SECTION_SYM))
    {
      if ((aflags & BSF_SECTION_SYM) != 0)
	return 1;
      else
	return -1;
    }
  if ((aflags & BSF_FUNCTION) != (bflags & BSF_FUNCTION))
    {
      if ((aflags & BSF_FUNCTION) != 0)
	return -1;
      else
	return 1;
    }
  if ((aflags & BSF_OBJECT) != (bflags & BSF_OBJECT))
    {
      if ((aflags & BSF_OBJECT) != 0)
	return -1;
      else
	return 1;
    }
  if ((aflags & BSF_LOCAL) != (bflags & BSF_LOCAL))
    {
      if ((aflags & BSF_LOCAL) != 0)
	return 1;
      else
	return -1;
    }
  if ((aflags & BSF_GLOBAL) != (bflags & BSF_GLOBAL))
    {
      if ((aflags & BSF_GLOBAL) != 0)
	return -1;
      else
	return 1;
    }

  /* For ELF, prefer the symbol with the larger size -- it is more
     likely to be the "real" definition at this address.  */
  if (bfd_get_flavour (bfd_asymbol_bfd (a)) == bfd_target_elf_flavour
      && bfd_get_flavour (bfd_asymbol_bfd (b)) == bfd_target_elf_flavour)
    {
      bfd_vma asz, bsz;

      asz = 0;
      if ((a->flags & (BSF_SECTION_SYM | BSF_SYNTHETIC)) == 0)
	asz = ((elf_symbol_type *) a)->internal_elf_sym.st_size;
      bsz = 0;
      if ((b->flags & (BSF_SECTION_SYM | BSF_SYNTHETIC)) == 0)
	bsz = ((elf_symbol_type *) b)->internal_elf_sym.st_size;
      if (asz != bsz)
	return asz > bsz ? -1 : 1;
    }

  /* Symbols that start with '.' might be section names, so sort them
     after symbols that don't start with '.'.  */
  if (an[0] == '.' && bn[0] != '.')
    return 1;
  if (an[0] != '.' && bn[0] == '.')
    return -1;

  /* Finally, if we can't distinguish them in any other way, try to
     get consistent results by sorting the symbols by name.  */
  return strcmp (an, bn);
}

/* Sort relocs into address order.  */

static int
compare_relocs (const void *ap, const void *bp)
{
  const arelent *a = * (const arelent **) ap;
  const arelent *b = * (const arelent **) bp;

  if (a->address > b->address)
    return 1;
  else if (a->address < b->address)
    return -1;

  /* So that associated relocations tied to the same address show up
     in the correct order, we don't do any further sorting.  */
  if (a > b)
    return 1;
  else if (a < b)
    return -1;
  else
    return 0;
}

/* Print an address (VMA) to the output stream in INFO.
   If SKIP_ZEROES is TRUE, omit leading zeroes.  */

static void
objdump_print_value (bfd_vma vma, struct disassemble_info *inf,
		     bfd_boolean skip_zeroes)
{
  char buf[30];
  char *p;
  struct objdump_disasm_info *aux;

  aux = (struct objdump_disasm_info *) inf->application_data;
  bfd_sprintf_vma (aux->abfd, buf, vma);
  if (! skip_zeroes)
    p = buf;
  else
    {
      for (p = buf; *p == '0'; ++p)
	;
      /* Keep a single '0' if the value is all zeroes.  */
      if (*p == '\0')
	--p;
    }
  (*inf->fprintf_func) (inf->stream, "%s", p);
}

/* Print the name of a symbol.  Demangles and appends the version
   string ("@ver" for hidden, "@@ver" for default versions) when
   appropriate.  Output goes to INF, or to stdout when INF is NULL.  */

static void
objdump_print_symname (bfd *abfd, struct disassemble_info *inf,
		       asymbol *sym)
{
  char *alloc;
  const char *name, *version_string = NULL;
  bfd_boolean hidden = FALSE;

  alloc = NULL;
  name = bfd_asymbol_name (sym);
  if (do_demangle && name[0] != '\0')
    {
      /* Demangle the name.  */
      alloc = bfd_demangle (abfd, name, demangle_flags);
      if (alloc != NULL)
	name = alloc;
    }

  if ((sym->flags & (BSF_SECTION_SYM | BSF_SYNTHETIC)) == 0)
    version_string = bfd_get_symbol_version_string (abfd, sym, TRUE,
						    &hidden);

  if (bfd_is_und_section (bfd_asymbol_section (sym)))
    hidden = TRUE;

  name = sanitize_string (name);

  if (inf != NULL)
    {
      (*inf->fprintf_func) (inf->stream, "%s", name);
      if (version_string && *version_string != '\0')
	(*inf->fprintf_func) (inf->stream, hidden ? "@%s" : "@@%s",
			      version_string);
    }
  else
    {
      printf ("%s", name);
      if (version_string && *version_string != '\0')
	printf (hidden ? "@%s" : "@@%s", version_string);
    }

  if (alloc != NULL)
    free (alloc);
}

/* Return TRUE if the symbol at index PLACE in sorted_syms is usable
   for address VMA lookup; when WANT_SECTION, it must also belong to
   section SEC.  */

static inline bfd_boolean
sym_ok (bfd_boolean want_section,
	bfd * abfd ATTRIBUTE_UNUSED,
	long place,
	asection * sec,
	struct disassemble_info * inf)
{
  if (want_section)
    {
      /* NB: An object file can have different sections with the same
	 section name.  Compare compare section pointers if they have
	 the same owner.  */
      if (sorted_syms[place]->section->owner == sec->owner
	  && sorted_syms[place]->section != sec)
	return FALSE;

      /* Note - we cannot just compare section pointers because they could
	 be different, but the same...  Ie the symbol that we are trying to
	 find could have come from a separate debug info file.  Under such
	 circumstances the symbol will be associated with a section in the
	 debug info file, whilst the section we want is in a normal file.
	 So the section pointers will be different, but the section names
	 will be the same.  */
      if (strcmp (bfd_section_name (sorted_syms[place]->section),
		  bfd_section_name (sec)) != 0)
	return FALSE;
    }

  return inf->symbol_is_valid (sorted_syms[place], inf);
}

/* Locate a symbol given a bfd and a section (from INFO->application_data),
   and a VMA.  If INFO->application_data->require_sec is TRUE, then always
   require the symbol to be in the section.  Returns NULL if there is no
   suitable symbol.  If PLACE is not NULL, then *PLACE is set to the index
   of the symbol in sorted_syms.  */

static asymbol *
find_symbol_for_address (bfd_vma vma,
			 struct disassemble_info *inf,
			 long *place)
{
  /* @@ Would it speed things up to cache the last two symbols returned,
     and maybe their address ranges?  For many processors, only one memory
     operand can be present at a time, so the 2-entry cache wouldn't be
     constantly churned by code doing heavy memory accesses.  */

  /* Indices in `sorted_syms'.  */
  long min = 0;
  long max_count = sorted_symcount;
  long thisplace;
  struct objdump_disasm_info *aux;
  bfd *abfd;
  asection *sec;
  unsigned int opb;
  bfd_boolean want_section;
  long rel_count;

  if (sorted_symcount < 1)
    return NULL;

  aux = (struct objdump_disasm_info *) inf->application_data;
  abfd = aux->abfd;
  sec = inf->section;
  opb = inf->octets_per_byte;

  /* Perform a binary search looking for the closest symbol to the
     required value.  We are searching the range (min, max_count].  */
  while (min + 1 < max_count)
    {
      asymbol *sym;

      thisplace = (max_count + min) / 2;
      sym = sorted_syms[thisplace];

      if (bfd_asymbol_value (sym) > vma)
	max_count = thisplace;
      else if (bfd_asymbol_value (sym) < vma)
	min = thisplace;
      else
	{
	  min = thisplace;
	  break;
	}
    }

  /* The symbol we want is now in min, the low end of the range we
     were searching.  If there are several symbols with the same
     value, we want the first one.  */
  thisplace = min;
  while (thisplace > 0
	 && (bfd_asymbol_value (sorted_syms[thisplace])
	     == bfd_asymbol_value (sorted_syms[thisplace - 1])))
    --thisplace;

  /* Prefer a symbol in the current section if we have multple symbols
     with the same value, as can occur with overlays or zero size
     sections.  */
  min = thisplace;
  while (min < max_count
	 && (bfd_asymbol_value (sorted_syms[min])
	     == bfd_asymbol_value (sorted_syms[thisplace])))
    {
      if (sym_ok (TRUE, abfd, min, sec, inf))
	{
	  thisplace = min;

	  if (place != NULL)
	    *place = thisplace;

	  return sorted_syms[thisplace];
	}
      ++min;
    }

  /* If the file is relocatable, and the symbol could be from this
     section, prefer a symbol from this section over symbols from
     others, even if the other symbol's value might be closer.

     Note that this may be wrong for some symbol references if the
     sections have overlapping memory ranges, but in that case there's
     no way to tell what's desired without looking at the relocation
     table.

     Also give the target a chance to reject symbols.  */
  want_section = (aux->require_sec
		  || ((abfd->flags & HAS_RELOC) != 0
		      && vma >= bfd_section_vma (sec)
		      && vma < (bfd_section_vma (sec)
				+ bfd_section_size (sec) / opb)));

  if (! sym_ok (want_section, abfd, thisplace, sec, inf))
    {
      long i;
      long newplace = sorted_symcount;

      /* Scan backwards for an acceptable symbol at or below VMA.  */
      for (i = min - 1; i >= 0; i--)
	{
	  if (sym_ok (want_section, abfd, i, sec, inf))
	    {
	      if (newplace == sorted_symcount)
		newplace = i;

	      if (bfd_asymbol_value (sorted_syms[i])
		  != bfd_asymbol_value (sorted_syms[newplace]))
		break;

	      /* Remember this symbol and keep searching until we reach
		 an earlier address.  */
	      newplace = i;
	    }
	}

      if (newplace != sorted_symcount)
	thisplace = newplace;
      else
	{
	  /* We didn't find a good symbol with a smaller value.
	     Look for one with a larger value.  */
	  for (i = thisplace + 1; i < sorted_symcount; i++)
	    {
	      if (sym_ok (want_section, abfd, i, sec, inf))
		{
		  thisplace = i;
		  break;
		}
	    }
	}

      if (! sym_ok (want_section, abfd, thisplace, sec, inf))
	/* There is no suitable symbol.  */
	return NULL;
    }

  /* If we have not found an exact match for the specified address
     and we have dynamic relocations available, then we can produce
     a better result by matching a relocation to the address and
     using the symbol associated with that relocation.  */
  rel_count = aux->dynrelcount;
  if (!want_section
      && sorted_syms[thisplace]->value != vma
      && rel_count > 0
      && aux->dynrelbuf != NULL
      && aux->dynrelbuf[0]->address <= vma
      && aux->dynrelbuf[rel_count - 1]->address >= vma
      /* If we have matched a synthetic symbol, then stick with that.  */
      && (sorted_syms[thisplace]->flags & BSF_SYNTHETIC) == 0)
    {
      arelent **  rel_low;
      arelent **  rel_high;

      /* Binary search of the sorted dynamic relocs for one at VMA.  */
      rel_low = aux->dynrelbuf;
      rel_high = rel_low + rel_count - 1;
      while (rel_low <= rel_high)
	{
	  arelent **rel_mid = &rel_low[(rel_high - rel_low) / 2];
	  arelent * rel = *rel_mid;

	  if (rel->address == vma)
	    {
	      /* Absolute relocations do not provide a more helpful
		 symbolic address.  Find a non-absolute relocation
		 with the same address.  */
	      arelent **rel_vma = rel_mid;
	      for (rel_mid--;
		   rel_mid >= rel_low && rel_mid[0]->address == vma;
		   rel_mid--)
		rel_vma = rel_mid;

	      for (; rel_vma <= rel_high && rel_vma[0]->address == vma;
		   rel_vma++)
		{
		  rel = *rel_vma;
		  if (rel->sym_ptr_ptr != NULL
		      && ! bfd_is_abs_section ((* rel->sym_ptr_ptr)->section))
		    {
		      if (place != NULL)
			* place = thisplace;
		      return * rel->sym_ptr_ptr;
		    }
		}
	      break;
	    }

	  if (vma < rel->address)
	    rel_high = rel_mid;
	  else if (vma >= rel_mid[1]->address)
	    rel_low = rel_mid + 1;
	  else
	    break;
	}
    }

  if (place != NULL)
    *place = thisplace;

  return sorted_syms[thisplace];
}

/* Print an address and the offset to the nearest symbol.
*/

static void
objdump_print_addr_with_sym (bfd *abfd, asection *sec, asymbol *sym,
			     bfd_vma vma, struct disassemble_info *inf,
			     bfd_boolean skip_zeroes)
{
  if (!no_addresses)
    {
      objdump_print_value (vma, inf, skip_zeroes);
      (*inf->fprintf_func) (inf->stream, " ");
    }

  if (sym == NULL)
    {
      /* No symbol: express the address relative to the section,
	 e.g. "<.text+0x1234>".  */
      bfd_vma secaddr;

      (*inf->fprintf_func) (inf->stream, "<%s",
			    sanitize_string (bfd_section_name (sec)));
      secaddr = bfd_section_vma (sec);
      if (vma < secaddr)
	{
	  (*inf->fprintf_func) (inf->stream, "-0x");
	  objdump_print_value (secaddr - vma, inf, TRUE);
	}
      else if (vma > secaddr)
	{
	  (*inf->fprintf_func) (inf->stream, "+0x");
	  objdump_print_value (vma - secaddr, inf, TRUE);
	}
      (*inf->fprintf_func) (inf->stream, ">");
    }
  else
    {
      /* Express the address as "<symbol[+/-offset]>".  */
      (*inf->fprintf_func) (inf->stream, "<");

      objdump_print_symname (abfd, inf, sym);

      if (bfd_asymbol_value (sym) == vma)
	;
      /* Undefined symbols in an executables and dynamic objects do not have
	 a value associated with them, so it does not make sense to display
	 an offset relative to them.  Normally we would not be provided with
	 this kind of symbol, but the target backend might choose to do so,
	 and the code in find_symbol_for_address might return an as yet
	 unresolved symbol associated with a dynamic reloc.  */
      else if ((bfd_get_file_flags (abfd) & (EXEC_P | DYNAMIC))
	       && bfd_is_und_section (sym->section))
	;
      else if (bfd_asymbol_value (sym) > vma)
	{
	  (*inf->fprintf_func) (inf->stream, "-0x");
	  objdump_print_value (bfd_asymbol_value (sym) - vma, inf, TRUE);
	}
      else if (vma > bfd_asymbol_value (sym))
	{
	  (*inf->fprintf_func) (inf->stream, "+0x");
	  objdump_print_value (vma - bfd_asymbol_value (sym), inf, TRUE);
	}

      (*inf->fprintf_func) (inf->stream, ">");
    }

  if (display_file_offsets)
    inf->fprintf_func (inf->stream, _(" (File Offset: 0x%lx)"),
		       (long int)(sec->filepos + (vma - sec->vma)));
}

/* Print an address (VMA), symbolically if possible.
   If SKIP_ZEROES is TRUE, don't output leading zeroes.
*/ static void objdump_print_addr (bfd_vma vma, struct disassemble_info *inf, bfd_boolean skip_zeroes) { struct objdump_disasm_info *aux; asymbol *sym = NULL; bfd_boolean skip_find = FALSE; aux = (struct objdump_disasm_info *) inf->application_data; if (sorted_symcount < 1) { if (!no_addresses) { (*inf->fprintf_func) (inf->stream, "0x"); objdump_print_value (vma, inf, skip_zeroes); } if (display_file_offsets) inf->fprintf_func (inf->stream, _(" (File Offset: 0x%lx)"), (long int) (inf->section->filepos + (vma - inf->section->vma))); return; } if (aux->reloc != NULL && aux->reloc->sym_ptr_ptr != NULL && * aux->reloc->sym_ptr_ptr != NULL) { sym = * aux->reloc->sym_ptr_ptr; /* Adjust the vma to the reloc. */ vma += bfd_asymbol_value (sym); if (bfd_is_und_section (bfd_asymbol_section (sym))) skip_find = TRUE; } if (!skip_find) sym = find_symbol_for_address (vma, inf, NULL); objdump_print_addr_with_sym (aux->abfd, inf->section, sym, vma, inf, skip_zeroes); } /* Print VMA to INFO. This function is passed to the disassembler routine. */ static void objdump_print_address (bfd_vma vma, struct disassemble_info *inf) { objdump_print_addr (vma, inf, ! prefix_addresses); } /* Determine if the given address has a symbol associated with it. */ static int objdump_symbol_at_address (bfd_vma vma, struct disassemble_info * inf) { asymbol * sym; sym = find_symbol_for_address (vma, inf, NULL); return (sym != NULL && (bfd_asymbol_value (sym) == vma)); } /* Hold the last function name and the last line number we displayed in a disassembly. */ static char *prev_functionname; static unsigned int prev_line; static unsigned int prev_discriminator; /* We keep a list of all files that we have seen when doing a disassembly with source, so that we know how much of the file to display. This can be important for inlined functions. 
*/ struct print_file_list { struct print_file_list *next; const char *filename; const char *modname; const char *map; size_t mapsize; const char **linemap; unsigned maxline; unsigned last_line; unsigned max_printed; int first; }; static struct print_file_list *print_files; /* The number of preceding context lines to show when we start displaying a file for the first time. */ #define SHOW_PRECEDING_CONTEXT_LINES (5) /* Read a complete file into memory. */ static const char * slurp_file (const char *fn, size_t *size, struct stat *fst) { #ifdef HAVE_MMAP int ps = getpagesize (); size_t msize; #endif const char *map; int fd = open (fn, O_RDONLY | O_BINARY); if (fd < 0) return NULL; if (fstat (fd, fst) < 0) { close (fd); return NULL; } *size = fst->st_size; #ifdef HAVE_MMAP msize = (*size + ps - 1) & ~(ps - 1); map = mmap (NULL, msize, PROT_READ, MAP_SHARED, fd, 0); if (map != (char *) -1L) { close (fd); return map; } #endif map = (const char *) malloc (*size); if (!map || (size_t) read (fd, (char *) map, *size) != *size) { free ((void *) map); map = NULL; } close (fd); return map; } #define line_map_decrease 5 /* Precompute array of lines for a mapped file. */ static const char ** index_file (const char *map, size_t size, unsigned int *maxline) { const char *p, *lstart, *end; int chars_per_line = 45; /* First iteration will use 40. */ unsigned int lineno; const char **linemap = NULL; unsigned long line_map_size = 0; lineno = 0; lstart = map; end = map + size; for (p = map; p < end; p++) { if (*p == '\n') { if (p + 1 < end && p[1] == '\r') p++; } else if (*p == '\r') { if (p + 1 < end && p[1] == '\n') p++; } else continue; /* End of line found. 
*/ if (linemap == NULL || line_map_size < lineno + 1) { unsigned long newsize; chars_per_line -= line_map_decrease; if (chars_per_line <= 1) chars_per_line = 1; line_map_size = size / chars_per_line + 1; if (line_map_size < lineno + 1) line_map_size = lineno + 1; newsize = line_map_size * sizeof (char *); linemap = (const char **) xrealloc (linemap, newsize); } linemap[lineno++] = lstart; lstart = p + 1; } *maxline = lineno; return linemap; } /* Tries to open MODNAME, and if successful adds a node to print_files linked list and returns that node. Returns NULL on failure. */ static struct print_file_list * try_print_file_open (const char *origname, const char *modname, struct stat *fst) { struct print_file_list *p; p = (struct print_file_list *) xmalloc (sizeof (struct print_file_list)); p->map = slurp_file (modname, &p->mapsize, fst); if (p->map == NULL) { free (p); return NULL; } p->linemap = index_file (p->map, p->mapsize, &p->maxline); p->last_line = 0; p->max_printed = 0; p->filename = origname; p->modname = modname; p->next = print_files; p->first = 1; print_files = p; return p; } /* If the source file, as described in the symtab, is not found try to locate it in one of the paths specified with -I If found, add location to print_files linked list. */ static struct print_file_list * update_source_path (const char *filename, bfd *abfd) { struct print_file_list *p; const char *fname; struct stat fst; int i; p = try_print_file_open (filename, filename, &fst); if (p == NULL) { if (include_path_count == 0) return NULL; /* Get the name of the file. */ fname = lbasename (filename); /* If file exists under a new path, we need to add it to the list so that show_line knows about it. 
   */
      /* Search each -I include directory for the file's basename.  */
      for (i = 0; i < include_path_count; i++)
	{
	  char *modname = concat (include_paths[i], "/", fname,
				  (const char *) 0);

	  p = try_print_file_open (filename, modname, &fst);
	  if (p)
	    break;

	  free (modname);
	}
    }

  if (p != NULL)
    {
      long mtime = bfd_get_mtime (abfd);

      /* Warn when the source file has been edited since the object
	 file was built - the displayed lines may not match the code.  */
      if (fst.st_mtime > mtime)
	warn (_("source file %s is more recent than object file\n"),
	      filename);
    }

  return p;
}

/* Print a source file line.  LINENUM is 1-based; out-of-range line
   numbers are silently ignored.  Prefixes the line with
   source_comment (--source-comment) when set.  */

static void
print_line (struct print_file_list *p, unsigned int linenum)
{
  const char *l;
  size_t len;

  --linenum;
  if (linenum >= p->maxline)
    return;
  l = p->linemap [linenum];
  if (source_comment != NULL && strlen (l) > 0)
    printf ("%s", source_comment);
  len = strcspn (l, "\n\r");
  /* Test fwrite return value to quiet glibc warning.  */
  if (len == 0 || fwrite (l, len, 1, stdout) == 1)
    putchar ('\n');
}

/* Print a range of source code lines, START to END inclusive
   (1-based).  Does nothing if the file contents are unavailable.  */

static void
dump_lines (struct print_file_list *p, unsigned int start, unsigned int end)
{
  if (p->map == NULL)
    return;
  while (start <= end)
    {
      print_line (p, start);
      start++;
    }
}

/* Show the line number, or the source line, in a disassembly
   listing.  Uses and updates the file-level state prev_line,
   prev_functionname and prev_discriminator so that unchanged
   locations are not re-printed on every instruction.  */

static void
show_line (bfd *abfd, asection *section, bfd_vma addr_offset)
{
  const char *filename;
  const char *functionname;
  unsigned int linenumber;
  unsigned int discriminator;
  bfd_boolean reloc;
  char *path = NULL;	/* Relocated filename built for --prefix; freed
			   at the end of this function.  */

  if (! with_line_numbers && ! with_source_code)
    return;

  if (! bfd_find_nearest_line_discriminator (abfd, section, syms, addr_offset,
					     &filename, &functionname,
					     &linenumber, &discriminator))
    return;

  /* Treat empty strings from the debug info as "unknown".  */
  if (filename != NULL && *filename == '\0')
    filename = NULL;
  if (functionname != NULL && *functionname == '\0')
    functionname = NULL;

  if (filename
      && IS_ABSOLUTE_PATH (filename)
      && prefix)
    {
      char *path_up;
      const char *fname = filename;

      path = xmalloc (prefix_length + PATH_MAX + 1);

      if (prefix_length)
	memcpy (path, prefix, prefix_length);
      path_up = path + prefix_length;

      /* Build relocated filename, stripping off leading
	 directories from the initial filename if requested.  */
      if (prefix_strip > 0)
	{
	  int level = 0;
	  const char *s;

	  /* Skip selected directory levels.  */
	  for (s = fname + 1; *s != '\0' && level < prefix_strip; s++)
	    if (IS_DIR_SEPARATOR (*s))
	      {
		fname = s;
		level++;
	      }
	}

      /* Update complete filename.  */
      strncpy (path_up, fname, PATH_MAX);
      path_up[PATH_MAX] = '\0';

      filename = path;
      reloc = TRUE;
    }
  else
    reloc = FALSE;

  if (with_line_numbers)
    {
      /* Print a "function():" header whenever we enter a different
	 function from the one printed last.  */
      if (functionname != NULL
	  && (prev_functionname == NULL
	      || strcmp (functionname, prev_functionname) != 0))
	{
	  char *demangle_alloc = NULL;

	  if (do_demangle && functionname[0] != '\0')
	    {
	      /* Demangle the name.  */
	      demangle_alloc = bfd_demangle (abfd, functionname,
					     demangle_flags);
	    }

	  /* Demangling adds trailing parens, so don't print those.  */
	  if (demangle_alloc != NULL)
	    printf ("%s:\n", sanitize_string (demangle_alloc));
	  else
	    printf ("%s():\n", sanitize_string (functionname));

	  /* Force the file:line to be re-printed for the new function.  */
	  prev_line = -1;
	  free (demangle_alloc);
	}

      if (linenumber > 0
	  && (linenumber != prev_line
	      || discriminator != prev_discriminator))
	{
	  if (discriminator > 0)
	    printf ("%s:%u (discriminator %u)\n",
		    filename == NULL ? "???" : sanitize_string (filename),
		    linenumber, discriminator);
	  else
	    printf ("%s:%u\n",
		    filename == NULL ? "???" : sanitize_string (filename),
		    linenumber);
	}
      if (unwind_inlines)
	{
	  const char *filename2;
	  const char *functionname2;
	  unsigned line2;

	  /* Walk up the inlining chain reported by BFD.  */
	  while (bfd_find_inliner_info (abfd, &filename2, &functionname2,
					&line2))
	    {
	      printf ("inlined by %s:%u", sanitize_string (filename2), line2);
	      printf (" (%s)\n", sanitize_string (functionname2));
	    }
	}
    }

  if (with_source_code
      && filename != NULL
      && linenumber > 0)
    {
      struct print_file_list **pp, *p;
      unsigned l;

      /* Look for an already-cached copy of the source file.  */
      for (pp = &print_files; *pp != NULL; pp = &(*pp)->next)
	if (filename_cmp ((*pp)->filename, filename) == 0)
	  break;
      p = *pp;

      if (p == NULL)
	{
	  /* The cache stores the filename pointer, so make a durable
	     copy when it points at the function-local PATH buffer.  */
	  if (reloc)
	    filename = xstrdup (filename);
	  p = update_source_path (filename, abfd);
	}

      if (p != NULL && linenumber != p->last_line)
	{
	  if (file_start_context && p->first)
	    l = 1;
	  else
	    {
	      /* Start a few lines early for context, clamped so we never
		 underflow line 1 nor re-print lines already shown.  */
	      l = linenumber - SHOW_PRECEDING_CONTEXT_LINES;
	      if (l >= linenumber)
		l = 1;
	      if (p->max_printed >= l)
		{
		  if (p->max_printed < linenumber)
		    l = p->max_printed + 1;
		  else
		    l = linenumber;
		}
	    }
	  dump_lines (p, l, linenumber);
	  if (p->max_printed < linenumber)
	    p->max_printed = linenumber;
	  p->last_line = linenumber;
	  p->first = 0;
	}
    }

  /* Remember the current function name for the next call.  */
  if (functionname != NULL
      && (prev_functionname == NULL
	  || strcmp (functionname, prev_functionname) != 0))
    {
      if (prev_functionname != NULL)
	free (prev_functionname);
      prev_functionname = (char *) xmalloc (strlen (functionname) + 1);
      strcpy (prev_functionname, functionname);
    }

  if (linenumber > 0 && linenumber != prev_line)
    prev_line = linenumber;

  if (discriminator != prev_discriminator)
    prev_discriminator = discriminator;

  if (path)
    free (path);
}

/* Pseudo FILE object for strings: an in-memory, growable output
   buffer written via objdump_sprintf below.  */
typedef struct
{
  char *buffer;	/* xmalloc'd output text.  */
  size_t pos;	/* Number of characters written so far.  */
  size_t alloc;	/* Current capacity of BUFFER.  */
} SFILE;

/* sprintf to a "stream".  */

static int ATTRIBUTE_PRINTF_2
objdump_sprintf (SFILE *f, const char *format, ...)
{
  size_t n;
  va_list args;

  /* Retry the vsnprintf, doubling (plus N) the buffer, until the
     formatted text fits.  */
  while (1)
    {
      size_t space = f->alloc - f->pos;

      va_start (args, format);
      n = vsnprintf (f->buffer + f->pos, space, format, args);
      va_end (args);

      if (space > n)
	break;

      f->alloc = (f->alloc + n) * 2;
      f->buffer = (char *) xrealloc (f->buffer, f->alloc);
    }
  f->pos += n;

  return n;
}

/* Code for generating (colored) diagrams of control flow start and end
   points.  */

/* Structure used to store the properties of a jump.  */

struct jump_info
{
  /* The next jump, or NULL if this is the last object.  */
  struct jump_info *next;
  /* The previous jump, or NULL if this is the first object.  */
  struct jump_info *prev;
  /* The start addresses of the jump.  */
  struct
    {
      /* The list of start addresses.  */
      bfd_vma *addresses;
      /* The number of elements.  */
      size_t count;
      /* The maximum number of elements that fit into the array.  */
      size_t max_count;
    } start;
  /* The end address of the jump.  */
  bfd_vma end;
  /* The drawing level of the jump.  */
  int level;
};

/* Construct a jump object for a jump from start to end
   with the corresponding level.  */

static struct jump_info *
jump_info_new (bfd_vma start, bfd_vma end, int level)
{
  struct jump_info *result = xmalloc (sizeof (struct jump_info));

  result->next = NULL;
  result->prev = NULL;
  /* NOTE(review): the element type is bfd_vma but the allocation uses
     sizeof (bfd_vma *); the two are the same size on LP64 hosts but
     this looks like it should be sizeof (bfd_vma) - confirm.  */
  result->start.addresses = xmalloc (sizeof (bfd_vma *) * 2);
  result->start.addresses[0] = start;
  result->start.count = 1;
  result->start.max_count = 2;
  result->end = end;
  result->level = level;

  return result;
}

/* Free a jump object and return the next object
   or NULL if this was the last one.  */

static struct jump_info *
jump_info_free (struct jump_info *ji)
{
  struct jump_info *result = NULL;

  if (ji)
    {
      result = ji->next;
      if (ji->start.addresses)
	free (ji->start.addresses);
      free (ji);
    }

  return result;
}

/* Get the smallest value of all start and end addresses.  */

static bfd_vma
jump_info_min_address (const struct jump_info *ji)
{
  bfd_vma min_address = ji->end;
  size_t i;

  for (i = ji->start.count; i-- > 0;)
    if (ji->start.addresses[i] < min_address)
      min_address = ji->start.addresses[i];

  return min_address;
}

/* Get the largest value of all start and end addresses.  */

static bfd_vma
jump_info_max_address (const struct jump_info *ji)
{
  bfd_vma max_address = ji->end;
  size_t i;

  for (i = ji->start.count; i-- > 0;)
    if (ji->start.addresses[i] > max_address)
      max_address = ji->start.addresses[i];

  return max_address;
}

/* Get the target address of a jump.  */

static bfd_vma
jump_info_end_address (const struct jump_info *ji)
{
  return ji->end;
}

/* Test if an address is one of the start addresses of a jump.  */

static bfd_boolean
jump_info_is_start_address (const struct jump_info *ji, bfd_vma address)
{
  bfd_boolean result = FALSE;
  size_t i;

  for (i = ji->start.count; i-- > 0;)
    if (address == ji->start.addresses[i])
      {
	result = TRUE;
	break;
      }

  return result;
}

/* Test if an address is the target address of a jump.  */

static bfd_boolean
jump_info_is_end_address (const struct jump_info *ji, bfd_vma address)
{
  return (address == ji->end);
}

/* Get the difference between the smallest and largest address of a jump.  */

static bfd_vma
jump_info_size (const struct jump_info *ji)
{
  return jump_info_max_address (ji) - jump_info_min_address (ji);
}

/* Unlink a jump object from a list.  The node is left fully detached
   (both links NULL); *BASE is updated when NODE was the list head.  */

static void
jump_info_unlink (struct jump_info *node,
		  struct jump_info **base)
{
  if (node->next)
    node->next->prev = node->prev;
  if (node->prev)
    node->prev->next = node->next;
  else
    *base = node->next;
  node->next = NULL;
  node->prev = NULL;
}

/* Insert unlinked jump info node into a list, immediately before
   TARGET.  *BASE is updated when NODE becomes the new head.  */

static void
jump_info_insert (struct jump_info *node,
		  struct jump_info *target,
		  struct jump_info **base)
{
  node->next = target;
  node->prev = target->prev;
  target->prev = node;
  if (node->prev)
    node->prev->next = node;
  else
    *base = node;
}

/* Add unlinked node to the front of a list.  */

static void
jump_info_add_front (struct jump_info *node,
		     struct jump_info **base)
{
  node->next = *base;
  if (node->next)
    node->next->prev = node;
  node->prev = NULL;
  *base = node;
}

/* Move linked node to target position.  */

static void
jump_info_move_linked (struct jump_info *node,
		       struct jump_info *target,
		       struct jump_info **base)
{
  /* Unlink node.  */
  jump_info_unlink (node, base);
  /* Insert node at target position.  */
  jump_info_insert (node, target, base);
}

/* Test if two jumps intersect, i.e. their [min, max] address ranges
   overlap.  */

static bfd_boolean
jump_info_intersect (const struct jump_info *a,
		     const struct jump_info *b)
{
  return ((jump_info_max_address (a) >= jump_info_min_address (b))
	  && (jump_info_min_address (a) <= jump_info_max_address (b)));
}

/* Merge two compatible jump info objects.  Jumps with the same target
   address are folded into one node that carries all start addresses;
   the duplicate node is removed from the list and freed in place.  */

static void
jump_info_merge (struct jump_info **base)
{
  struct jump_info *a;

  for (a = *base; a; a = a->next)
    {
      struct jump_info *b;

      for (b = a->next; b; b = b->next)
	{
	  /* Merge both jumps into one.  */
	  if (a->end == b->end)
	    {
	      /* Reallocate addresses.  */
	      size_t needed_size = a->start.count + b->start.count;
	      size_t i;

	      if (needed_size > a->start.max_count)
		{
		  a->start.max_count += b->start.max_count;
		  /* NOTE(review): sizeof (bfd_vma *) used for a bfd_vma
		     array, mirroring jump_info_new - confirm.  */
		  a->start.addresses =
		    xrealloc (a->start.addresses,
			      a->start.max_count * sizeof (bfd_vma *));
		}

	      /* Append start addresses.  */
	      for (i = 0; i < b->start.count; ++i)
		a->start.addresses[a->start.count++] =
		  b->start.addresses[i];

	      /* Remove and delete jump.  B is stepped back to its
		 predecessor so the enclosing for-loop advance stays
		 valid after the deletion.  */
	      struct jump_info *tmp = b->prev;
	      jump_info_unlink (b, base);
	      jump_info_free (b);
	      b = tmp;
	    }
	}
    }
}

/* Sort jumps by their size and starting point using a stable
   minsort. This could be improved if sorting performance is
   an issue, for example by using mergesort.
   */

static void
jump_info_sort (struct jump_info **base)
{
  struct jump_info *current_element = *base;

  /* Selection sort on the linked list: for each position, find the
     smallest remaining jump (ties broken by lowest min address) and
     relink it in front of the current element.  */
  while (current_element)
    {
      struct jump_info *best_match = current_element;
      struct jump_info *runner = current_element->next;
      bfd_vma best_size = jump_info_size (best_match);

      while (runner)
	{
	  bfd_vma runner_size = jump_info_size (runner);

	  if ((runner_size < best_size)
	      || ((runner_size == best_size)
		  && (jump_info_min_address (runner)
		      < jump_info_min_address (best_match))))
	    {
	      best_match = runner;
	      best_size = runner_size;
	    }

	  runner = runner->next;
	}

      if (best_match == current_element)
	current_element = current_element->next;
      else
	jump_info_move_linked (best_match, current_element, base);
    }
}

/* Visualize all jumps at a given address.  Fills LINE_BUFFER with the
   ASCII-art column characters for ADDRESS (and the parallel
   COLOR_BUFFER with per-cell color codes).  Consumes the file-level
   detected_jumps list: jumps entirely below ADDRESS are unlinked and
   freed as they are passed.  */

static void
jump_info_visualize_address (bfd_vma address,
			     int max_level,
			     char *line_buffer,
			     uint8_t *color_buffer)
{
  struct jump_info *ji = detected_jumps;
  size_t len = (max_level + 1) * 3;	/* Three columns per level.  */

  /* Clear line buffer.  */
  memset (line_buffer, ' ', len);
  memset (color_buffer, 0, len);

  /* Iterate over jumps and add their ASCII art.  */
  while (ji)
    {
      /* Discard jumps that are never needed again.  */
      if (jump_info_max_address (ji) < address)
	{
	  struct jump_info *tmp = ji;

	  ji = ji->next;
	  jump_info_unlink (tmp, &detected_jumps);
	  jump_info_free (tmp);
	  continue;
	}

      /* This jump intersects with the current address.  */
      if (jump_info_min_address (ji) <= address)
	{
	  /* Hash target address to get an even
	     distribution between all values.  */
	  bfd_vma hash_address = jump_info_end_address (ji);
	  uint8_t color = iterative_hash_object (hash_address, 0);
	  /* Fetch line offset.  */
	  int offset = (max_level - ji->level) * 3;

	  /* Draw start line.  Only blank cells are overwritten so
	     that earlier (lower-level) jumps keep their artwork;
	     crossing arrows become 'X'.  */
	  if (jump_info_is_start_address (ji, address))
	    {
	      size_t i = offset + 1;

	      for (; i < len - 1; ++i)
		if (line_buffer[i] == ' ')
		  {
		    line_buffer[i] = '-';
		    color_buffer[i] = color;
		  }

	      if (line_buffer[i] == ' ')
		{
		  line_buffer[i] = '-';
		  color_buffer[i] = color;
		}
	      else if (line_buffer[i] == '>')
		{
		  line_buffer[i] = 'X';
		  color_buffer[i] = color;
		}

	      if (line_buffer[offset] == ' ')
		{
		  if (address <= ji->end)
		    line_buffer[offset] =
		      (jump_info_min_address (ji) == address) ? '/': '+';
		  else
		    line_buffer[offset] =
		      (jump_info_max_address (ji) == address) ? '\\': '+';
		  color_buffer[offset] = color;
		}
	    }
	  /* Draw jump target.  */
	  else if (jump_info_is_end_address (ji, address))
	    {
	      size_t i = offset + 1;

	      for (; i < len - 1; ++i)
		if (line_buffer[i] == ' ')
		  {
		    line_buffer[i] = '-';
		    color_buffer[i] = color;
		  }

	      if (line_buffer[i] == ' ')
		{
		  line_buffer[i] = '>';
		  color_buffer[i] = color;
		}
	      else if (line_buffer[i] == '-')
		{
		  line_buffer[i] = 'X';
		  color_buffer[i] = color;
		}

	      if (line_buffer[offset] == ' ')
		{
		  if (jump_info_min_address (ji) < address)
		    line_buffer[offset] =
		      (jump_info_max_address (ji) > address) ? '>' : '\\';
		  else
		    line_buffer[offset] = '/';
		  color_buffer[offset] = color;
		}
	    }
	  /* Draw intermediate line segment.  */
	  else if (line_buffer[offset] == ' ')
	    {
	      line_buffer[offset] = '|';
	      color_buffer[offset] = color;
	    }
	}

      ji = ji->next;
    }
}

/* Clone of disassemble_bytes to detect jumps inside a function.  */
/* FIXME: is this correct?  Can we strip it down even further?
   */

/* Dry-run the disassembler over [START_OFFSET, STOP_OFFSET) of the
   current section, discarding the printed text (it goes into a
   throw-away SFILE), and collect every intra-range branch/call the
   disassembler reports via the insn_info fields.  Returns the merged,
   sorted and level-grouped list of jumps for later visualisation.  */

static struct jump_info *
disassemble_jumps (struct disassemble_info * inf,
		   disassembler_ftype disassemble_fn,
		   bfd_vma start_offset,
		   bfd_vma stop_offset,
		   bfd_vma rel_offset,
		   arelent *** relppp,
		   arelent ** relppend)
{
  struct objdump_disasm_info *aux;
  struct jump_info *jumps = NULL;
  asection *section;
  bfd_vma addr_offset;
  unsigned int opb = inf->octets_per_byte;
  int octets = opb;
  SFILE sfile;

  aux = (struct objdump_disasm_info *) inf->application_data;
  section = inf->section;

  sfile.alloc = 120;
  sfile.buffer = (char *) xmalloc (sfile.alloc);
  sfile.pos = 0;

  /* Redirect the disassembler's output into the scratch SFILE so
     nothing reaches stdout during this pass.  */
  inf->insn_info_valid = 0;
  inf->fprintf_func = (fprintf_ftype) objdump_sprintf;
  inf->stream = &sfile;

  addr_offset = start_offset;
  while (addr_offset < stop_offset)
    {
      int previous_octets;

      /* Remember the length of the previous instruction.  */
      previous_octets = octets;
      octets = 0;

      sfile.pos = 0;
      inf->bytes_per_line = 0;
      inf->bytes_per_chunk = 0;
      inf->flags = ((disassemble_all ? DISASSEMBLE_DATA : 0)
		    | (wide_output ? WIDE_OUTPUT : 0));
      if (machine)
	inf->flags |= USER_SPECIFIED_MACHINE_TYPE;

      if (inf->disassembler_needs_relocs
	  && (bfd_get_file_flags (aux->abfd) & EXEC_P) == 0
	  && (bfd_get_file_flags (aux->abfd) & DYNAMIC) == 0
	  && *relppp < relppend)
	{
	  bfd_signed_vma distance_to_rel;

	  distance_to_rel = (**relppp)->address - (rel_offset + addr_offset);

	  /* Check to see if the current reloc is associated with
	     the instruction that we are about to disassemble.  */
	  if (distance_to_rel == 0
	      /* FIXME: This is wrong.  We are trying to catch
		 relocs that are addressed part way through the
		 current instruction, as might happen with a packed
		 VLIW instruction.  Unfortunately we do not know the
		 length of the current instruction since we have not
		 disassembled it yet.  Instead we take a guess based
		 upon the length of the previous instruction.  The
		 proper solution is to have a new target-specific
		 disassembler function which just returns the length
		 of an instruction at a given address without trying
		 to display its disassembly.  */
	      || (distance_to_rel > 0
		  && distance_to_rel < (bfd_signed_vma) (previous_octets/ opb)))
	    {
	      inf->flags |= INSN_HAS_RELOC;
	    }
	}

      if (! disassemble_all
	  && (section->flags & (SEC_CODE | SEC_HAS_CONTENTS))
	  == (SEC_CODE | SEC_HAS_CONTENTS))
	/* Set a stop_vma so that the disassembler will not read
	   beyond the next symbol.  We assume that symbols appear on
	   the boundaries between instructions.  We only do this when
	   disassembling code of course, and when -D is in effect.  */
	inf->stop_vma = section->vma + stop_offset;

      inf->stop_offset = stop_offset;

      /* Extract jump information.  */
      inf->insn_info_valid = 0;
      octets = (*disassemble_fn) (section->vma + addr_offset, inf);
      /* Test if a jump was detected.  Only branches/calls whose target
	 lies inside the disassembled range are recorded.  */
      if (inf->insn_info_valid
	  && ((inf->insn_type == dis_branch)
	      || (inf->insn_type == dis_condbranch)
	      || (inf->insn_type == dis_jsr)
	      || (inf->insn_type == dis_condjsr))
	  && (inf->target >= section->vma + start_offset)
	  && (inf->target < section->vma + stop_offset))
	{
	  struct jump_info *ji =
	    jump_info_new (section->vma + addr_offset, inf->target, -1);
	  jump_info_add_front (ji, &jumps);
	}

      inf->stop_vma = 0;

      addr_offset += octets / opb;
    }

  /* Restore normal stdout printing for the real disassembly pass.  */
  inf->fprintf_func = (fprintf_ftype) fprintf;
  inf->stream = stdout;

  free (sfile.buffer);

  /* Merge jumps.  */
  jump_info_merge (&jumps);
  /* Process jumps.  */
  jump_info_sort (&jumps);

  /* Group jumps by level.  */
  struct jump_info *last_jump = jumps;
  int max_level = -1;

  while (last_jump)
    {
      /* The last jump is part of the next group.  */
      struct jump_info *base = last_jump;
      /* Increment level.  */
      base->level = ++max_level;

      /* Find jumps that can be combined on the same
	 level, with the largest jumps tested first.
	 This has the advantage that large jumps are on
	 lower levels and do not intersect with small
	 jumps that get grouped on higher levels.  */
      struct jump_info *exchange_item = last_jump->next;
      struct jump_info *it = exchange_item;

      for (; it; it = it->next)
	{
	  /* Test if the jump intersects with any
	     jump from current group.  */
	  bfd_boolean ok = TRUE;
	  struct jump_info *it_collision;

	  for (it_collision = base;
	       it_collision != exchange_item;
	       it_collision = it_collision->next)
	    {
	      /* This jump intersects so we leave it out.  */
	      if (jump_info_intersect (it_collision, it))
		{
		  ok = FALSE;
		  break;
		}
	    }

	  /* Add jump to group.  */
	  if (ok)
	    {
	      /* Move current element to the front.  */
	      if (it != exchange_item)
		{
		  struct jump_info *save = it->prev;
		  jump_info_move_linked (it, exchange_item, &jumps);
		  last_jump = it;
		  it = save;
		}
	      else
		{
		  last_jump = exchange_item;
		  exchange_item = exchange_item->next;
		}
	      last_jump->level = max_level;
	    }
	}

      /* Move to next group.  */
      last_jump = exchange_item;
    }

  return jumps;
}

/* The number of zeroes we want to see before we start skipping them.
   The number is arbitrarily chosen.  */

#define DEFAULT_SKIP_ZEROES 8

/* The number of zeroes to skip at the end of a section.  If the
   number of zeroes at the end is between SKIP_ZEROES_AT_END and
   SKIP_ZEROES, they will be disassembled.  If there are fewer than
   SKIP_ZEROES_AT_END, they will be skipped.  This is a heuristic
   attempt to avoid disassembling zeroes inserted by section
   alignment.  */

#define DEFAULT_SKIP_ZEROES_AT_END 3

/* Dummy fprintf-style callback used to measure an instruction's
   length without producing any output.  */

static int
null_print (const void * stream ATTRIBUTE_UNUSED,
	    const char * format ATTRIBUTE_UNUSED, ...)
{
  return 1;
}

/* Print out jump visualization.  No-op when jump visualisation is
   disabled (LINE_BUFFER is NULL).  */

static void
print_jump_visualisation (bfd_vma addr, int max_level, char *line_buffer,
			  uint8_t *color_buffer)
{
  if (!line_buffer)
    return;

  jump_info_visualize_address (addr, max_level, line_buffer, color_buffer);

  size_t line_buffer_size = strlen (line_buffer);
  char last_color = 0;
  size_t i;

  /* Emit the art, switching terminal color escapes only when the
     per-cell color changes.  */
  for (i = 0; i <= line_buffer_size; ++i)
    {
      if (color_output)
	{
	  uint8_t color = (i < line_buffer_size) ? color_buffer[i]: 0;

	  if (color != last_color)
	    {
	      if (color)
		if (extended_color_output)
		  /* Use extended 8bit color, but
		     do not choose dark colors.  */
		  printf ("\033[38;5;%dm", 124 + (color % 108));
		else
		  /* Use simple terminal colors.
   */
		  printf ("\033[%dm", 31 + (color % 7));
	      else
		/* Clear color.  */
		printf ("\033[0m");
	      last_color = color;
	    }
	}
      putchar ((i < line_buffer_size) ? line_buffer[i]: ' ');
    }
}

/* Disassemble some data in memory between given values.  This is the
   main per-symbol-range loop: it prints addresses, raw bytes, the
   disassembled text (or a data dump when INSNS is false), interleaved
   source/line info and relocations, honouring the many global option
   flags (wide_output, prefix_addresses, show_raw_insn, ...).  */

static void
disassemble_bytes (struct disassemble_info * inf,
		   disassembler_ftype disassemble_fn,
		   bfd_boolean insns,
		   bfd_byte * data,
		   bfd_vma start_offset,
		   bfd_vma stop_offset,
		   bfd_vma rel_offset,
		   arelent *** relppp,
		   arelent ** relppend)
{
  struct objdump_disasm_info *aux;
  asection *section;
  unsigned int octets_per_line;
  unsigned int skip_addr_chars;
  bfd_vma addr_offset;
  unsigned int opb = inf->octets_per_byte;
  unsigned int skip_zeroes = inf->skip_zeroes;
  unsigned int skip_zeroes_at_end = inf->skip_zeroes_at_end;
  size_t octets;
  SFILE sfile;

  aux = (struct objdump_disasm_info *) inf->application_data;
  section = inf->section;

  /* Scratch buffer the disassembler prints into before we flush it.  */
  sfile.alloc = 120;
  sfile.buffer = (char *) xmalloc (sfile.alloc);
  sfile.pos = 0;

  if (insn_width)
    octets_per_line = insn_width;
  else if (insns)
    octets_per_line = 4;
  else
    octets_per_line = 16;

  /* Figure out how many characters to skip at the start of an
     address, to make the disassembly look nicer.  We discard leading
     zeroes in chunks of 4, ensuring that there is always a leading
     zero remaining.  */
  skip_addr_chars = 0;
  if (!no_addresses && !prefix_addresses)
    {
      char buf[30];

      bfd_sprintf_vma (aux->abfd, buf, section->vma + section->size / opb);

      while (buf[skip_addr_chars] == '0')
	++skip_addr_chars;

      /* Don't discard zeros on overflow.  */
      if (buf[skip_addr_chars] == '\0' && section->vma != 0)
	skip_addr_chars = 0;

      if (skip_addr_chars != 0)
	skip_addr_chars = (skip_addr_chars - 1) & -4;
    }

  inf->insn_info_valid = 0;

  /* Determine maximum level. */
  uint8_t *color_buffer = NULL;
  char *line_buffer = NULL;
  int max_level = -1;

  /* Some jumps were detected.  */
  if (detected_jumps)
    {
      struct jump_info *ji;

      /* Find maximum jump level.  */
      for (ji = detected_jumps; ji; ji = ji->next)
	{
	  if (ji->level > max_level)
	    max_level = ji->level;
	}

      /* Allocate buffers.  */
      size_t len = (max_level + 1) * 3 + 1;
      line_buffer = xmalloc (len);
      line_buffer[len - 1] = 0;
      color_buffer = xmalloc (len);
      color_buffer[len - 1] = 0;
    }

  addr_offset = start_offset;
  while (addr_offset < stop_offset)
    {
      bfd_boolean need_nl = FALSE;

      octets = 0;

      /* Make sure we don't use relocs from previous instructions.  */
      aux->reloc = NULL;

      /* If we see more than SKIP_ZEROES octets of zeroes, we just
	 print `...'.  */
      if (! disassemble_zeroes)
	for (; addr_offset * opb + octets < stop_offset * opb; octets++)
	  if (data[addr_offset * opb + octets] != 0)
	    break;
      if (! disassemble_zeroes
	  && (inf->insn_info_valid == 0
	      || inf->branch_delay_insns == 0)
	  && (octets >= skip_zeroes
	      || (addr_offset * opb + octets == stop_offset * opb
		  && octets < skip_zeroes_at_end)))
	{
	  /* If there are more nonzero octets to follow, we only skip
	     zeroes in multiples of 4, to try to avoid running over
	     the start of an instruction which happens to start with
	     zero.  */
	  if (addr_offset * opb + octets != stop_offset * opb)
	    octets &= ~3;

	  /* If we are going to display more data, and we are
	     displaying file offsets, then tell the user how many
	     zeroes we skip and the file offset from where we resume
	     dumping.  */
	  if (display_file_offsets
	      && addr_offset + octets / opb < stop_offset)
	    printf (_("\t... (skipping %lu zeroes, "
		      "resuming at file offset: 0x%lx)\n"),
		    (unsigned long) (octets / opb),
		    (unsigned long) (section->filepos
				     + addr_offset + octets / opb));
	  else
	    printf ("\t...\n");
	}
      else
	{
	  char buf[50];
	  unsigned int bpc = 0;
	  unsigned int pb = 0;

	  if (with_line_numbers || with_source_code)
	    show_line (aux->abfd, section, addr_offset);

	  if (no_addresses)
	    printf ("\t");
	  else if (!prefix_addresses)
	    {
	      char *s;

	      /* Blank out the leading zeroes computed above, keeping
		 at least one digit.  */
	      bfd_sprintf_vma (aux->abfd, buf, section->vma + addr_offset);
	      for (s = buf + skip_addr_chars; *s == '0'; s++)
		*s = ' ';
	      if (*s == '\0')
		*--s = '0';
	      printf ("%s:\t", buf + skip_addr_chars);
	    }
	  else
	    {
	      aux->require_sec = TRUE;
	      objdump_print_address (section->vma + addr_offset, inf);
	      aux->require_sec = FALSE;
	      putchar (' ');
	    }

	  print_jump_visualisation (section->vma + addr_offset,
				    max_level, line_buffer,
				    color_buffer);

	  if (insns)
	    {
	      int insn_size;

	      sfile.pos = 0;
	      inf->fprintf_func = (fprintf_ftype) objdump_sprintf;
	      inf->stream = &sfile;
	      inf->bytes_per_line = 0;
	      inf->bytes_per_chunk = 0;
	      inf->flags = ((disassemble_all ? DISASSEMBLE_DATA : 0)
			    | (wide_output ? WIDE_OUTPUT : 0));
	      if (machine)
		inf->flags |= USER_SPECIFIED_MACHINE_TYPE;

	      if (inf->disassembler_needs_relocs
		  && (bfd_get_file_flags (aux->abfd) & EXEC_P) == 0
		  && (bfd_get_file_flags (aux->abfd) & DYNAMIC) == 0
		  && *relppp < relppend)
		{
		  bfd_signed_vma distance_to_rel;
		  int max_reloc_offset
		    = aux->abfd->arch_info->max_reloc_offset_into_insn;

		  distance_to_rel = ((**relppp)->address - rel_offset
				     - addr_offset);

		  insn_size = 0;
		  if (distance_to_rel > 0
		      && (max_reloc_offset < 0
			  || distance_to_rel <= max_reloc_offset))
		    {
		      /* This reloc *might* apply to the current insn,
			 starting somewhere inside it.  Discover the length
			 of the current insn so that the check below will
			 work.  */
		      if (insn_width)
			insn_size = insn_width;
		      else
			{
			  /* We find the length by calling the dissassembler
			     function with a dummy print handler.  This
			     should work unless the disassembler is not
			     expecting to be called multiple times for the
			     same address.

			     This does mean disassembling the
			     instruction twice, but we only do this when
			     there is a high probability that there is a
			     reloc that will affect the instruction.  */
			  inf->fprintf_func = (fprintf_ftype) null_print;
			  insn_size = disassemble_fn (section->vma
						      + addr_offset, inf);
			  inf->fprintf_func = (fprintf_ftype) objdump_sprintf;
			}
		    }

		  /* Check to see if the current reloc is associated with
		     the instruction that we are about to disassemble.  */
		  if (distance_to_rel == 0
		      || (distance_to_rel > 0
			  && distance_to_rel < insn_size / (int) opb))
		    {
		      inf->flags |= INSN_HAS_RELOC;
		      aux->reloc = **relppp;
		    }
		}

	      if (! disassemble_all
		  && ((section->flags & (SEC_CODE | SEC_HAS_CONTENTS))
		      == (SEC_CODE | SEC_HAS_CONTENTS)))
		/* Set a stop_vma so that the disassembler will not read
		   beyond the next symbol.  We assume that symbols appear on
		   the boundaries between instructions.  We only do this when
		   disassembling code of course, and when -D is in effect.  */
		inf->stop_vma = section->vma + stop_offset;

	      inf->stop_offset = stop_offset;
	      insn_size = (*disassemble_fn) (section->vma + addr_offset, inf);
	      octets = insn_size;

	      inf->stop_vma = 0;
	      inf->fprintf_func = (fprintf_ftype) fprintf;
	      inf->stream = stdout;
	      if (insn_width == 0 && inf->bytes_per_line != 0)
		octets_per_line = inf->bytes_per_line;
	      /* A result smaller than one byte means the disassembler
		 failed; flush any pending text and stop this range.  */
	      if (insn_size < (int) opb)
		{
		  if (sfile.pos)
		    printf ("%s\n", sfile.buffer);
		  if (insn_size >= 0)
		    {
		      non_fatal (_("disassemble_fn returned length %d"),
				 insn_size);
		      exit_status = 1;
		    }
		  break;
		}
	    }
	  else
	    {
	      bfd_vma j;

	      /* Data dump mode: build a printable-ASCII rendition of
		 the bytes on this line.  */
	      octets = octets_per_line;
	      if (addr_offset + octets / opb > stop_offset)
		octets = (stop_offset - addr_offset) * opb;

	      for (j = addr_offset * opb; j < addr_offset * opb + octets; ++j)
		{
		  if (ISPRINT (data[j]))
		    buf[j - addr_offset * opb] = data[j];
		  else
		    buf[j - addr_offset * opb] = '.';
		}
	      buf[j - addr_offset * opb] = '\0';
	    }

	  if (prefix_addresses
	      ? show_raw_insn > 0
	      : show_raw_insn >= 0)
	    {
	      bfd_vma j;

	      /* If ! prefix_addresses and ! wide_output, we print
		 octets_per_line octets per line.  */
	      pb = octets;
	      if (pb > octets_per_line && ! prefix_addresses && ! wide_output)
		pb = octets_per_line;

	      if (inf->bytes_per_chunk)
		bpc = inf->bytes_per_chunk;
	      else
		bpc = 1;

	      for (j = addr_offset * opb; j < addr_offset * opb + pb; j += bpc)
		{
		  /* PR 21580: Check for a buffer ending early.  */
		  if (j + bpc <= stop_offset * opb)
		    {
		      unsigned int k;

		      if (inf->display_endian == BFD_ENDIAN_LITTLE)
			{
			  for (k = bpc; k-- != 0; )
			    printf ("%02x", (unsigned) data[j + k]);
			}
		      else
			{
			  for (k = 0; k < bpc; k++)
			    printf ("%02x", (unsigned) data[j + k]);
			}
		    }
		  putchar (' ');
		}

	      /* Pad short lines so the text column stays aligned.  */
	      for (; pb < octets_per_line; pb += bpc)
		{
		  unsigned int k;

		  for (k = 0; k < bpc; k++)
		    printf ("  ");
		  putchar (' ');
		}

	      /* Separate raw data from instruction by extra space.  */
	      if (insns)
		putchar ('\t');
	      else
		printf (" ");
	    }

	  if (! insns)
	    printf ("%s", buf);
	  else if (sfile.pos)
	    printf ("%s", sfile.buffer);

	  if (prefix_addresses
	      ? show_raw_insn > 0
	      : show_raw_insn >= 0)
	    {
	      /* Emit continuation lines for raw bytes that did not fit
		 on the first line of this instruction.  */
	      while (pb < octets)
		{
		  bfd_vma j;
		  char *s;

		  putchar ('\n');
		  j = addr_offset * opb + pb;

		  if (no_addresses)
		    printf ("\t");
		  else
		    {
		      bfd_sprintf_vma (aux->abfd, buf, section->vma + j / opb);
		      for (s = buf + skip_addr_chars; *s == '0'; s++)
			*s = ' ';
		      if (*s == '\0')
			*--s = '0';
		      printf ("%s:\t", buf + skip_addr_chars);
		    }

		  print_jump_visualisation (section->vma + j / opb,
					    max_level, line_buffer,
					    color_buffer);

		  pb += octets_per_line;
		  if (pb > octets)
		    pb = octets;
		  for (; j < addr_offset * opb + pb; j += bpc)
		    {
		      /* PR 21619: Check for a buffer ending early.  */
		      if (j + bpc <= stop_offset * opb)
			{
			  unsigned int k;

			  if (inf->display_endian == BFD_ENDIAN_LITTLE)
			    {
			      for (k = bpc; k-- != 0; )
				printf ("%02x", (unsigned) data[j + k]);
			    }
			  else
			    {
			      for (k = 0; k < bpc; k++)
				printf ("%02x", (unsigned) data[j + k]);
			    }
			}
		      putchar (' ');
		    }
		}
	    }

	  if (!wide_output)
	    putchar ('\n');
	  else
	    need_nl = TRUE;
	}

      /* Print (and consume) any relocations that apply inside the
	 address range just displayed.  */
      while ((*relppp) < relppend
	     && (**relppp)->address < rel_offset + addr_offset + octets / opb)
	{
	  if (dump_reloc_info || dump_dynamic_reloc_info)
	    {
	      arelent *q;

	      q = **relppp;

	      if (wide_output)
		putchar ('\t');
	      else
		printf ("\t\t\t");

	      if (!no_addresses)
		{
		  objdump_print_value (section->vma - rel_offset + q->address,
				       inf, TRUE);
		  printf (": ");
		}

	      if (q->howto == NULL)
		printf ("*unknown*\t");
	      else if (q->howto->name)
		printf ("%s\t", q->howto->name);
	      else
		printf ("%d\t", q->howto->type);

	      if (q->sym_ptr_ptr == NULL || *q->sym_ptr_ptr == NULL)
		printf ("*unknown*");
	      else
		{
		  const char *sym_name;

		  sym_name = bfd_asymbol_name (*q->sym_ptr_ptr);
		  if (sym_name != NULL && *sym_name != '\0')
		    objdump_print_symname (aux->abfd, inf, *q->sym_ptr_ptr);
		  else
		    {
		      /* Nameless symbol: fall back to its section name.  */
		      asection *sym_sec;

		      sym_sec = bfd_asymbol_section (*q->sym_ptr_ptr);
		      sym_name = bfd_section_name (sym_sec);
		      if (sym_name == NULL || *sym_name == '\0')
			sym_name = "*unknown*";
		      printf ("%s", sanitize_string (sym_name));
		    }
		}

	      if (q->addend)
		{
		  bfd_signed_vma addend = q->addend;

		  if (addend < 0)
		    {
		      printf ("-0x");
		      addend = -addend;
		    }
		  else
		    printf ("+0x");
		  objdump_print_value (addend, inf, TRUE);
		}

	      printf ("\n");
	      need_nl = FALSE;
	    }
	  ++(*relppp);
	}

      if (need_nl)
	printf ("\n");

      addr_offset += octets / opb;
    }

  free (sfile.buffer);
  free (line_buffer);
  free (color_buffer);
}

/* Disassemble one section of ABFD.  (Function body continues beyond
   this chunk.)  */

static void
disassemble_section (bfd *abfd, asection *section, void *inf)
{
  const struct elf_backend_data * bed;
  bfd_vma sign_adjust = 0;
  struct disassemble_info * pinfo = (struct disassemble_info *) inf;
  struct objdump_disasm_info * paux;
  unsigned int opb = pinfo->octets_per_byte;
  bfd_byte * data = NULL;
  bfd_size_type datasize = 0;
  arelent ** rel_pp = NULL;
arelent ** rel_ppstart = NULL; arelent ** rel_ppend; bfd_vma stop_offset; asymbol * sym = NULL; long place = 0; long rel_count; bfd_vma rel_offset; unsigned long addr_offset; bfd_boolean do_print; enum loop_control { stop_offset_reached, function_sym, next_sym } loop_until; /* Sections that do not contain machine code are not normally disassembled. */ if (! disassemble_all && only_list == NULL && ((section->flags & (SEC_CODE | SEC_HAS_CONTENTS)) != (SEC_CODE | SEC_HAS_CONTENTS))) return; if (! process_section_p (section)) return; datasize = bfd_section_size (section); if (datasize == 0) return; if (start_address == (bfd_vma) -1 || start_address < section->vma) addr_offset = 0; else addr_offset = start_address - section->vma; if (stop_address == (bfd_vma) -1) stop_offset = datasize / opb; else { if (stop_address < section->vma) stop_offset = 0; else stop_offset = stop_address - section->vma; if (stop_offset > datasize / opb) stop_offset = datasize / opb; } if (addr_offset >= stop_offset) return; /* Decide which set of relocs to use. Load them if necessary. */ paux = (struct objdump_disasm_info *) pinfo->application_data; if (paux->dynrelbuf && dump_dynamic_reloc_info) { rel_pp = paux->dynrelbuf; rel_count = paux->dynrelcount; /* Dynamic reloc addresses are absolute, non-dynamic are section relative. REL_OFFSET specifies the reloc address corresponding to the start of this section. */ rel_offset = section->vma; } else { rel_count = 0; rel_pp = NULL; rel_offset = 0; if ((section->flags & SEC_RELOC) != 0 && (dump_reloc_info || pinfo->disassembler_needs_relocs)) { long relsize; relsize = bfd_get_reloc_upper_bound (abfd, section); if (relsize < 0) bfd_fatal (bfd_get_filename (abfd)); if (relsize > 0) { rel_ppstart = rel_pp = (arelent **) xmalloc (relsize); rel_count = bfd_canonicalize_reloc (abfd, section, rel_pp, syms); if (rel_count < 0) bfd_fatal (bfd_get_filename (abfd)); /* Sort the relocs by address. 
*/ qsort (rel_pp, rel_count, sizeof (arelent *), compare_relocs); } } } rel_ppend = rel_pp + rel_count; if (!bfd_malloc_and_get_section (abfd, section, &data)) { non_fatal (_("Reading section %s failed because: %s"), section->name, bfd_errmsg (bfd_get_error ())); return; } pinfo->buffer = data; pinfo->buffer_vma = section->vma; pinfo->buffer_length = datasize; pinfo->section = section; /* Sort the symbols into value and section order. */ compare_section = section; if (sorted_symcount > 1) qsort (sorted_syms, sorted_symcount, sizeof (asymbol *), compare_symbols); /* Skip over the relocs belonging to addresses below the start address. */ while (rel_pp < rel_ppend && (*rel_pp)->address < rel_offset + addr_offset) ++rel_pp; printf (_("\nDisassembly of section %s:\n"), sanitize_string (section->name)); /* Find the nearest symbol forwards from our current position. */ paux->require_sec = TRUE; sym = (asymbol *) find_symbol_for_address (section->vma + addr_offset, (struct disassemble_info *) inf, &place); paux->require_sec = FALSE; /* PR 9774: If the target used signed addresses then we must make sure that we sign extend the value that we calculate for 'addr' in the loop below. */ if (bfd_get_flavour (abfd) == bfd_target_elf_flavour && (bed = get_elf_backend_data (abfd)) != NULL && bed->sign_extend_vma) sign_adjust = (bfd_vma) 1 << (bed->s->arch_size - 1); /* Disassemble a block of instructions up to the address associated with the symbol we have just found. Then print the symbol and find the next symbol on. Repeat until we have disassembled the entire section or we have reached the end of the address range we are interested in. 
*/ do_print = paux->symbol == NULL; loop_until = stop_offset_reached; while (addr_offset < stop_offset) { bfd_vma addr; asymbol *nextsym; bfd_vma nextstop_offset; bfd_boolean insns; addr = section->vma + addr_offset; addr = ((addr & ((sign_adjust << 1) - 1)) ^ sign_adjust) - sign_adjust; if (sym != NULL && bfd_asymbol_value (sym) <= addr) { int x; for (x = place; (x < sorted_symcount && (bfd_asymbol_value (sorted_syms[x]) <= addr)); ++x) continue; pinfo->symbols = sorted_syms + place; pinfo->num_symbols = x - place; pinfo->symtab_pos = place; } else { pinfo->symbols = NULL; pinfo->num_symbols = 0; pinfo->symtab_pos = -1; } /* If we are only disassembling from a specific symbol, check to see if we should start or stop displaying. */ if (sym && paux->symbol) { if (do_print) { /* See if we should stop printing. */ switch (loop_until) { case function_sym: if (sym->flags & BSF_FUNCTION) do_print = FALSE; break; case stop_offset_reached: /* Handled by the while loop. */ break; case next_sym: /* FIXME: There is an implicit assumption here that the name of sym is different from paux->symbol. */ if (! bfd_is_local_label (abfd, sym)) do_print = FALSE; break; } } else { const char * name = bfd_asymbol_name (sym); char * alloc = NULL; if (do_demangle && name[0] != '\0') { /* Demangle the name. */ alloc = bfd_demangle (abfd, name, demangle_flags); if (alloc != NULL) name = alloc; } /* We are not currently printing. Check to see if the current symbol matches the requested symbol. */ if (streq (name, paux->symbol)) { do_print = TRUE; if (sym->flags & BSF_FUNCTION) { if (bfd_get_flavour (abfd) == bfd_target_elf_flavour && ((elf_symbol_type *) sym)->internal_elf_sym.st_size > 0) { /* Sym is a function symbol with a size associated with it. Turn on automatic disassembly for the next VALUE bytes. 
*/ stop_offset = addr_offset + ((elf_symbol_type *) sym)->internal_elf_sym.st_size; loop_until = stop_offset_reached; } else { /* Otherwise we need to tell the loop heuristic to loop until the next function symbol is encountered. */ loop_until = function_sym; } } else { /* Otherwise loop until the next symbol is encountered. */ loop_until = next_sym; } } free (alloc); } } if (! prefix_addresses && do_print) { pinfo->fprintf_func (pinfo->stream, "\n"); objdump_print_addr_with_sym (abfd, section, sym, addr, pinfo, FALSE); pinfo->fprintf_func (pinfo->stream, ":\n"); } if (sym != NULL && bfd_asymbol_value (sym) > addr) nextsym = sym; else if (sym == NULL) nextsym = NULL; else { #define is_valid_next_sym(SYM) \ (strcmp (bfd_section_name ((SYM)->section), bfd_section_name (section)) == 0 \ && (bfd_asymbol_value (SYM) > bfd_asymbol_value (sym)) \ && pinfo->symbol_is_valid (SYM, pinfo)) /* Search forward for the next appropriate symbol in SECTION. Note that all the symbols are sorted together into one big array, and that some sections may have overlapping addresses. */ while (place < sorted_symcount && ! is_valid_next_sym (sorted_syms [place])) ++place; if (place >= sorted_symcount) nextsym = NULL; else nextsym = sorted_syms[place]; } if (sym != NULL && bfd_asymbol_value (sym) > addr) nextstop_offset = bfd_asymbol_value (sym) - section->vma; else if (nextsym == NULL) nextstop_offset = stop_offset; else nextstop_offset = bfd_asymbol_value (nextsym) - section->vma; if (nextstop_offset > stop_offset || nextstop_offset <= addr_offset) nextstop_offset = stop_offset; /* If a symbol is explicitly marked as being an object rather than a function, just dump the bytes without disassembling them. 
*/ if (disassemble_all || sym == NULL || sym->section != section || bfd_asymbol_value (sym) > addr || ((sym->flags & BSF_OBJECT) == 0 && (strstr (bfd_asymbol_name (sym), "gnu_compiled") == NULL) && (strstr (bfd_asymbol_name (sym), "gcc2_compiled") == NULL)) || (sym->flags & BSF_FUNCTION) != 0) insns = TRUE; else insns = FALSE; if (do_print) { /* Resolve symbol name. */ if (visualize_jumps && abfd && sym && sym->name) { struct disassemble_info di; SFILE sf; sf.alloc = strlen (sym->name) + 40; sf.buffer = (char*) xmalloc (sf.alloc); sf.pos = 0; di.fprintf_func = (fprintf_ftype) objdump_sprintf; di.stream = &sf; objdump_print_symname (abfd, &di, sym); /* Fetch jump information. */ detected_jumps = disassemble_jumps (pinfo, paux->disassemble_fn, addr_offset, nextstop_offset, rel_offset, &rel_pp, rel_ppend); /* Free symbol name. */ free (sf.buffer); } /* Add jumps to output. */ disassemble_bytes (pinfo, paux->disassemble_fn, insns, data, addr_offset, nextstop_offset, rel_offset, &rel_pp, rel_ppend); /* Free jumps. */ while (detected_jumps) { detected_jumps = jump_info_free (detected_jumps); } } addr_offset = nextstop_offset; sym = nextsym; } free (data); if (rel_ppstart != NULL) free (rel_ppstart); } /* Disassemble the contents of an object file. */ static void disassemble_data (bfd *abfd) { struct disassemble_info disasm_info; struct objdump_disasm_info aux; long i; print_files = NULL; prev_functionname = NULL; prev_line = -1; prev_discriminator = 0; /* We make a copy of syms to sort. We don't want to sort syms because that will screw up the relocs. */ sorted_symcount = symcount ? symcount : dynsymcount; sorted_syms = (asymbol **) xmalloc ((sorted_symcount + synthcount) * sizeof (asymbol *)); if (sorted_symcount != 0) { memcpy (sorted_syms, symcount ? 
	      syms : dynsyms, sorted_symcount * sizeof (asymbol *));

      sorted_symcount = remove_useless_symbols (sorted_syms, sorted_symcount);
    }

  /* Append any synthesized symbols to the sorted array.  */
  for (i = 0; i < synthcount; ++i)
    {
      sorted_syms[sorted_symcount] = synthsyms + i;
      ++sorted_symcount;
    }

  init_disassemble_info (&disasm_info, stdout, (fprintf_ftype) fprintf);

  disasm_info.application_data = (void *) &aux;
  aux.abfd = abfd;
  aux.require_sec = FALSE;
  aux.dynrelbuf = NULL;
  aux.dynrelcount = 0;
  aux.reloc = NULL;
  aux.symbol = disasm_sym;

  disasm_info.print_address_func = objdump_print_address;
  disasm_info.symbol_at_address_func = objdump_symbol_at_address;

  if (machine != NULL)
    {
      /* Honour a user-supplied -m/--architecture override.  */
      const bfd_arch_info_type *inf = bfd_scan_arch (machine);

      if (inf == NULL)
	fatal (_("can't use supplied machine %s"),
	       machine);

      abfd->arch_info = inf;
    }

  if (endian != BFD_ENDIAN_UNKNOWN)
    {
      /* Replace the target vector with a copy whose byte order has been
	 overridden by the user's --endian choice.  */
      struct bfd_target *xvec;

      xvec = (struct bfd_target *) xmalloc (sizeof (struct bfd_target));
      memcpy (xvec, abfd->xvec, sizeof (struct bfd_target));
      xvec->byteorder = endian;
      abfd->xvec = xvec;
    }

  /* Use libopcodes to locate a suitable disassembler.  */
  aux.disassemble_fn = disassembler (bfd_get_arch (abfd),
				     bfd_big_endian (abfd),
				     bfd_get_mach (abfd), abfd);
  if (!aux.disassemble_fn)
    {
      non_fatal (_("can't disassemble for architecture %s\n"),
		 bfd_printable_arch_mach (bfd_get_arch (abfd), 0));
      exit_status = 1;
      return;
    }

  disasm_info.flavour = bfd_get_flavour (abfd);
  disasm_info.arch = bfd_get_arch (abfd);
  disasm_info.mach = bfd_get_mach (abfd);
  disasm_info.disassembler_options = disassembler_options;
  disasm_info.octets_per_byte = bfd_octets_per_byte (abfd, NULL);
  disasm_info.skip_zeroes = DEFAULT_SKIP_ZEROES;
  disasm_info.skip_zeroes_at_end = DEFAULT_SKIP_ZEROES_AT_END;
  disasm_info.disassembler_needs_relocs = FALSE;

  if (bfd_big_endian (abfd))
    disasm_info.display_endian = disasm_info.endian = BFD_ENDIAN_BIG;
  else if (bfd_little_endian (abfd))
    disasm_info.display_endian = disasm_info.endian = BFD_ENDIAN_LITTLE;
  else
    /* ??? Aborting here seems too drastic.  We could default to big or little
       instead.  */
    disasm_info.endian = BFD_ENDIAN_UNKNOWN;

  /* Allow the target to customize the info structure.  */
  disassemble_init_for_target (& disasm_info);

  /* Pre-load the dynamic relocs as we may need them during the disassembly.  */
  {
    long relsize = bfd_get_dynamic_reloc_upper_bound (abfd);

    if (relsize < 0 && dump_dynamic_reloc_info)
      bfd_fatal (bfd_get_filename (abfd));

    if (relsize > 0)
      {
	aux.dynrelbuf = (arelent **) xmalloc (relsize);
	aux.dynrelcount = bfd_canonicalize_dynamic_reloc (abfd,
							  aux.dynrelbuf,
							  dynsyms);
	if (aux.dynrelcount < 0)
	  bfd_fatal (bfd_get_filename (abfd));

	/* Sort the relocs by address.  */
	qsort (aux.dynrelbuf, aux.dynrelcount, sizeof (arelent *),
	       compare_relocs);
      }
  }
  disasm_info.symtab = sorted_syms;
  disasm_info.symtab_size = sorted_symcount;

  bfd_map_over_sections (abfd, disassemble_section, & disasm_info);

  if (aux.dynrelbuf != NULL)
    free (aux.dynrelbuf);
  free (sorted_syms);
  disassemble_free_target (&disasm_info);
}

/* Load the contents of debug section SEC from the bfd FILE into the
   debug_displays slot for DEBUG, applying relocations for relocatable
   files when the display handler asks for them.  Returns TRUE on
   success.  */

static bfd_boolean
load_specific_debug_section (enum dwarf_section_display_enum debug,
			     asection *sec, void *file)
{
  struct dwarf_section *section = &debug_displays [debug].section;
  bfd *abfd = (bfd *) file;
  bfd_byte *contents;
  bfd_size_type amt;
  size_t alloced;

  if (section->start != NULL)
    {
      /* If it is already loaded, do nothing.  */
      if (streq (section->filename, bfd_get_filename (abfd)))
	return TRUE;
      free (section->start);
    }

  section->filename = bfd_get_filename (abfd);
  section->reloc_info = NULL;
  section->num_relocs = 0;
  section->address = bfd_section_vma (sec);
  section->user_data = sec;
  section->size = bfd_section_size (sec);
  /* PR 24360: On 32-bit hosts sizeof (size_t) < sizeof (bfd_size_type).  */
  alloced = amt = section->size + 1;
  if (alloced != amt || alloced == 0)
    {
      /* The +1 overflowed (or the size truncated on this host) - treat
	 the section size as invalid.  */
      section->start = NULL;
      free_debug_section (debug);
      printf (_("\nSection '%s' has an invalid size: %#llx.\n"),
	      sanitize_string (section->name),
	      (unsigned long long) section->size);
      return FALSE;
    }
  section->start = contents = malloc (alloced);
  if (section->start == NULL
      || !bfd_get_full_section_contents (abfd, sec, &contents))
    {
      free_debug_section (debug);
      printf (_("\nCan't get contents for section '%s'.\n"),
	      sanitize_string (section->name));
      return FALSE;
    }
  /* Ensure any string section has a terminating NUL.  */
  section->start[section->size] = 0;

  if ((abfd->flags & (EXEC_P | DYNAMIC)) == 0
      && debug_displays [debug].relocate)
    {
      long reloc_size;
      bfd_boolean ret;

      bfd_cache_section_contents (sec, section->start);

      ret = bfd_simple_get_relocated_section_contents (abfd,
						       sec,
						       section->start,
						       syms) != NULL;

      if (! ret)
	{
	  free_debug_section (debug);
	  printf (_("\nCan't get contents for section '%s'.\n"),
		  sanitize_string (section->name));
	  return FALSE;
	}

      reloc_size = bfd_get_reloc_upper_bound (abfd, sec);
      if (reloc_size > 0)
	{
	  unsigned long reloc_count;
	  arelent **relocs;

	  relocs = (arelent **) xmalloc (reloc_size);

	  reloc_count = bfd_canonicalize_reloc (abfd, sec, relocs, NULL);
	  if (reloc_count == 0)
	    free (relocs);
	  else
	    {
	      section->reloc_info = relocs;
	      section->num_relocs = reloc_count;
	    }
	}
    }

  return TRUE;
}

/* Return TRUE if DSEC has a relocation recorded at OFFSET.  */

bfd_boolean
reloc_at (struct dwarf_section * dsec, dwarf_vma offset)
{
  arelent ** relocs;
  arelent * rp;

  if (dsec == NULL || dsec->reloc_info == NULL)
    return FALSE;

  relocs = (arelent **) dsec->reloc_info;

  for (; (rp = * relocs) != NULL; ++ relocs)
    if (rp->address == offset)
      return TRUE;

  return FALSE;
}

/* Locate the debug section for DEBUG in the bfd FILE (trying the
   uncompressed name first, then the compressed name) and load it.
   Returns TRUE on success.  */

bfd_boolean
load_debug_section (enum dwarf_section_display_enum debug, void *file)
{
  struct dwarf_section *section = &debug_displays [debug].section;
  bfd *abfd = (bfd *) file;
  asection *sec;

  /* If it is already loaded, do nothing.  */
  if (section->start != NULL)
    {
      if (streq (section->filename, bfd_get_filename (abfd)))
	return TRUE;
    }

  /* Locate the debug section.  */
  sec = bfd_get_section_by_name (abfd, section->uncompressed_name);
  if (sec != NULL)
    section->name = section->uncompressed_name;
  else
    {
      sec = bfd_get_section_by_name (abfd, section->compressed_name);
      if (sec != NULL)
	section->name = section->compressed_name;
    }
  if (sec == NULL)
    return FALSE;

  return load_specific_debug_section (debug, sec, file);
}

/* Release the cached contents for the debug section DEBUG.  */

void
free_debug_section (enum dwarf_section_display_enum debug)
{
  struct dwarf_section *section = &debug_displays [debug].section;

  if (section->start == NULL)
    return;

  /* PR 17512: file: 0f67f69d.  */
  if (section->user_data != NULL)
    {
      asection * sec = (asection *) section->user_data;

      /* If we are freeing contents that are also pointed to by the BFD
	 library's section structure then make sure to update those pointers
	 too.  Otherwise, the next time we try to load data for this section
	 we can end up using a stale pointer.  */
      if (section->start == sec->contents)
	{
	  sec->contents = NULL;
	  sec->flags &= ~ SEC_IN_MEMORY;
	  sec->compress_status = COMPRESS_SECTION_NONE;
	}
    }

  free ((char *) section->start);
  section->start = NULL;
  section->address = 0;
  section->size = 0;
}

/* Close a bfd previously opened with open_debug_file.  */

void
close_debug_file (void * file)
{
  bfd * abfd = (bfd *) file;

  bfd_close (abfd);
}

/* Open PATHNAME as a bfd object file.  Returns NULL on failure.  */

void *
open_debug_file (const char * pathname)
{
  bfd * data;

  data = bfd_openr (pathname, NULL);
  if (data == NULL)
    return NULL;

  if (! bfd_check_format (data, bfd_object))
    return NULL;

  return data;
}

#if HAVE_LIBDEBUGINFOD
/* Return a hex string representation of the build-id.
*/

unsigned char *
get_build_id (void * data)
{
  unsigned i;
  char * build_id_str;
  bfd * abfd = (bfd *) data;
  const struct bfd_build_id * build_id;

  build_id = abfd->build_id;
  if (build_id == NULL)
    return NULL;

  /* Two hex digits per byte, plus a terminating NUL.  */
  build_id_str = malloc (build_id->size * 2 + 1);
  if (build_id_str == NULL)
    return NULL;

  for (i = 0; i < build_id->size; i++)
    sprintf (build_id_str + (i * 2), "%02x", build_id->data[i]);
  build_id_str[build_id->size * 2] = '\0';

  return (unsigned char *)build_id_str;
}
#endif /* HAVE_LIBDEBUGINFOD */

/* If SECTION is one of the enabled DWARF debug displays, load its
   contents and run the matching display routine.  Called via
   bfd_map_over_sections.  */

static void
dump_dwarf_section (bfd *abfd, asection *section,
		    void *arg ATTRIBUTE_UNUSED)
{
  const char *name = bfd_section_name (section);
  const char *match;
  int i;

  /* Treat gcc's linkonce debug-info sections as .debug_info.  */
  if (CONST_STRNEQ (name, ".gnu.linkonce.wi."))
    match = ".debug_info";
  else
    match = name;

  for (i = 0; i < max; i++)
    if ((strcmp (debug_displays [i].section.uncompressed_name, match) == 0
	 || strcmp (debug_displays [i].section.compressed_name, match) == 0)
	&& debug_displays [i].enabled != NULL
	&& *debug_displays [i].enabled)
      {
	struct dwarf_section *sec = &debug_displays [i].section;

	if (strcmp (sec->uncompressed_name, match) == 0)
	  sec->name = sec->uncompressed_name;
	else
	  sec->name = sec->compressed_name;
	if (load_specific_debug_section ((enum dwarf_section_display_enum) i,
					 section, abfd))
	  {
	    debug_displays [i].display (sec, abfd);

	    /* .debug_info and .debug_abbrev stay cached as other
	       displays may refer back to them.  */
	    if (i != info && i != abbrev)
	      free_debug_section ((enum dwarf_section_display_enum) i);
	  }
	break;
      }
}

/* Dump the dwarf debugging information.  */

static void
dump_dwarf (bfd *abfd)
{
  /* The byte_get pointer should have been set at the start of dump_bfd().
   */
  if (byte_get == NULL)
    {
      warn (_("File %s does not contain any dwarf debug information\n"),
	    bfd_get_filename (abfd));
      return;
    }

  switch (bfd_get_arch (abfd))
    {
    case bfd_arch_s12z:
      /* S12Z has a 24 bit address space.  But the only known
	 producer of dwarf_info encodes addresses into 32 bits.  */
      eh_addr_size = 4;
      break;

    default:
      eh_addr_size = bfd_arch_bits_per_address (abfd) / 8;
      break;
    }

  init_dwarf_regnames_by_bfd_arch_and_mach (bfd_get_arch (abfd),
					    bfd_get_mach (abfd));

  bfd_map_over_sections (abfd, dump_dwarf_section, NULL);
}

/* Read ABFD's stabs section STABSECT_NAME, and return a pointer to
   it.  Return NULL on failure.   */

static bfd_byte *
read_section_stabs (bfd *abfd, const char *sect_name,
		    bfd_size_type *size_ptr,
		    bfd_size_type *entsize_ptr)
{
  asection *stabsect;
  bfd_byte *contents;

  stabsect = bfd_get_section_by_name (abfd, sect_name);
  if (stabsect == NULL)
    {
      printf (_("No %s section present\n\n"),
	      sanitize_string (sect_name));
      return FALSE;
    }

  if (!bfd_malloc_and_get_section (abfd, stabsect, &contents))
    {
      non_fatal (_("reading %s section of %s failed: %s"),
		 sect_name, bfd_get_filename (abfd),
		 bfd_errmsg (bfd_get_error ()));
      exit_status = 1;
      free (contents);
      return NULL;
    }

  *size_ptr = bfd_section_size (stabsect);
  if (entsize_ptr)
    *entsize_ptr = stabsect->entsize;

  return contents;
}

/* Stabs entries use a 12 byte format:
     4 byte string table index
     1 byte stab type
     1 byte stab other field
     2 byte stab desc field
     4 byte stab value
   FIXME: This will have to change for a 64 bit object format.  */

#define STRDXOFF  (0)
#define TYPEOFF   (4)
#define OTHEROFF  (5)
#define DESCOFF   (6)
#define VALOFF    (8)
#define STABSIZE (12)

/* Print ABFD's stabs section STABSECT_NAME (in `stabs'),
   using string table section STRSECT_NAME (in `strtab').  */

static void
print_section_stabs (bfd *abfd,
		     const char *stabsect_name,
		     unsigned *string_offset_ptr)
{
  int i;
  unsigned file_string_table_offset = 0;
  unsigned next_file_string_table_offset = *string_offset_ptr;
  bfd_byte *stabp, *stabs_end;

  stabp = stabs;
  stabs_end = stabp + stab_size;

  printf (_("Contents of %s section:\n\n"), sanitize_string (stabsect_name));
  printf ("Symnum n_type n_othr n_desc n_value  n_strx String\n");

  /* Loop through all symbols and print them.

     We start the index at -1 because there is a dummy symbol on
     the front of stabs-in-{coff,elf} sections that supplies sizes.  */
  for (i = -1; stabp <= stabs_end - STABSIZE; stabp += STABSIZE, i++)
    {
      const char *name;
      unsigned long strx;
      unsigned char type, other;
      unsigned short desc;
      bfd_vma value;

      strx = bfd_h_get_32 (abfd, stabp + STRDXOFF);
      type = bfd_h_get_8 (abfd, stabp + TYPEOFF);
      other = bfd_h_get_8 (abfd, stabp + OTHEROFF);
      desc = bfd_h_get_16 (abfd, stabp + DESCOFF);
      value = bfd_h_get_32 (abfd, stabp + VALOFF);

      printf ("\n%-6d ", i);
      /* Either print the stab name, or, if unnamed, print its number
	 again (makes consistent formatting for tools like awk).  */
      name = bfd_get_stab_name (type);
      if (name != NULL)
	printf ("%-6s", sanitize_string (name));
      else if (type == N_UNDF)
	printf ("HdrSym");
      else
	printf ("%-6d", type);
      printf (" %-6d %-6d ", other, desc);
      bfd_printf_vma (abfd, value);
      printf (" %-6lu", strx);

      /* Symbols with type == 0 (N_UNDF) specify the length of the
	 string table associated with this file.  We use that info
	 to know how to relocate the *next* file's string table indices.  */
      if (type == N_UNDF)
	{
	  file_string_table_offset = next_file_string_table_offset;
	  next_file_string_table_offset += value;
	}
      else
	{
	  bfd_size_type amt = strx + file_string_table_offset;

	  /* Using the (possibly updated) string table offset, print the
	     string (if any) associated with this symbol.  */
	  if (amt < stabstr_size)
	    /* PR 17512: file: 079-79389-0.001:0.1.
	       FIXME: May need to sanitize this string before displaying.  */
	    printf (" %.*s", (int)(stabstr_size - amt), strtab + amt);
	  else
	    printf (" *");
	}
    }
  printf ("\n\n");
  *string_offset_ptr = next_file_string_table_offset;
}

/* Names of the stab section/string-table pair currently being sought,
   plus the running string table offset across matching sections.  */
typedef struct
{
  const char * section_name;
  const char * string_section_name;
  unsigned string_offset;
}
stab_section_names;

/* Print the stabs in SECTION if its name matches the sought prefix.
   Called via bfd_map_over_sections; NAMES is a stab_section_names.  */

static void
find_stabs_section (bfd *abfd, asection *section, void *names)
{
  int len;
  stab_section_names * sought = (stab_section_names *) names;

  /* Check for section names for which stabsect_name is a prefix, to
     handle .stab.N, etc.  */
  len = strlen (sought->section_name);

  /* If the prefix matches, and the files section name ends with a
     nul or a digit, then we match.  I.e., we want either an exact match
     or a section followed by a number.  */
  if (strncmp (sought->section_name, section->name, len) == 0
      && (section->name[len] == 0
	  || (section->name[len] == '.' && ISDIGIT (section->name[len + 1]))))
    {
      if (strtab == NULL)
	strtab = read_section_stabs (abfd, sought->string_section_name,
				     &stabstr_size, NULL);

      if (strtab)
	{
	  stabs = read_section_stabs (abfd, section->name, &stab_size, NULL);
	  if (stabs)
	    print_section_stabs (abfd, section->name, &sought->string_offset);
	}
    }
}

/* Dump every stab section whose name starts with STABSECT_NAME, using
   the string table section STRSECT_NAME.  */

static void
dump_stabs_section (bfd *abfd, char *stabsect_name, char *strsect_name)
{
  stab_section_names s;

  s.section_name = stabsect_name;
  s.string_section_name = strsect_name;
  s.string_offset = 0;

  bfd_map_over_sections (abfd, find_stabs_section, & s);

  free (strtab);
  strtab = NULL;
}

/* Dump any sections containing stabs debugging information.  */

static void
dump_stabs (bfd *abfd)
{
  dump_stabs_section (abfd, ".stab", ".stabstr");
  dump_stabs_section (abfd, ".stab.excl", ".stab.exclstr");
  dump_stabs_section (abfd, ".stab.index", ".stab.indexstr");

  /* For Darwin.
*/
  dump_stabs_section (abfd, "LC_SYMTAB.stabs", "LC_SYMTAB.stabstr");

  dump_stabs_section (abfd, "$GDB_SYMBOLS$", "$GDB_STRINGS$");
}

/* Print ABFD's architecture, flags and start address.  */

static void
dump_bfd_header (bfd *abfd)
{
  char *comma = "";

  printf (_("architecture: %s, "),
	  bfd_printable_arch_mach (bfd_get_arch (abfd),
				   bfd_get_mach (abfd)));
  printf (_("flags 0x%08x:\n"), abfd->flags & ~BFD_FLAGS_FOR_BFD_USE_MASK);

#define PF(x, y)    if (abfd->flags & x) {printf ("%s%s", comma, y); comma=", ";}
  PF (HAS_RELOC, "HAS_RELOC");
  PF (EXEC_P, "EXEC_P");
  PF (HAS_LINENO, "HAS_LINENO");
  PF (HAS_DEBUG, "HAS_DEBUG");
  PF (HAS_SYMS, "HAS_SYMS");
  PF (HAS_LOCALS, "HAS_LOCALS");
  PF (DYNAMIC, "DYNAMIC");
  PF (WP_TEXT, "WP_TEXT");
  PF (D_PAGED, "D_PAGED");
  PF (BFD_IS_RELAXABLE, "BFD_IS_RELAXABLE");
  printf (_("\nstart address 0x"));
  bfd_printf_vma (abfd, abfd->start_address);
  printf ("\n");
}

/* Formatting callback function passed to ctf_dump.  Returns either
   the pointer it is passed, or a pointer to newly-allocated storage,
   in which case dump_ctf() will free it when it no longer needs it.  */

static char *
dump_ctf_indent_lines (ctf_sect_names_t sect ATTRIBUTE_UNUSED,
		       char *s, void *arg)
{
  const char *blanks = arg;
  char *new_s;

  if (asprintf (&new_s, "%s%s", blanks, s) < 0)
    return s;
  return new_s;
}

/* Make a ctfsect suitable for ctf_bfdopen_ctfsect().  */

static ctf_sect_t
make_ctfsect (const char *name, bfd_byte *data,
	      bfd_size_type size)
{
  ctf_sect_t ctfsect;

  ctfsect.cts_name = name;
  ctfsect.cts_entsize = 1;
  ctfsect.cts_size = size;
  ctfsect.cts_data = data;

  return ctfsect;
}

/* Dump one CTF archive member.  */

static int
dump_ctf_archive_member (ctf_file_t *ctf, const char *name, void *arg)
{
  ctf_file_t *parent = (ctf_file_t *) arg;
  const char *things[] = {"Header", "Labels", "Data objects",
			  "Function objects", "Variables", "Types", "Strings",
			  ""};
  const char **thing;
  size_t i;

  /* Only print out the name of non-default-named archive members.
     The name .ctf appears everywhere, even for things that aren't
     really archives, so printing it out is liable to be confusing.

     The parent, if there is one, is the default-owned archive member:
     avoid importing it into itself.  (This does no harm, but looks
     confusing.)  */
  if (strcmp (name, ".ctf") != 0)
    {
      printf (_("\nCTF archive member: %s:\n"), sanitize_string (name));
      ctf_import (ctf, parent);
    }

  /* Walk the dump categories in order: the loop index I doubles as the
     ctf_dump section selector; the empty string terminates THINGS.  */
  for (i = 0, thing = things; *thing[0]; thing++, i++)
    {
      ctf_dump_state_t *s = NULL;
      char *item;

      printf ("\n  %s:\n", *thing);
      while ((item = ctf_dump (ctf, &s, i, dump_ctf_indent_lines,
			       (void *) "    ")) != NULL)
	{
	  printf ("%s\n", item);
	  free (item);
	}

      if (ctf_errno (ctf))
	{
	  non_fatal (_("Iteration failed: %s, %s\n"), *thing,
		     ctf_errmsg (ctf_errno (ctf)));
	  break;
	}
    }
  return 0;
}

/* Dump the CTF debugging information.  */

static void
dump_ctf (bfd *abfd, const char *sect_name, const char *parent_name)
{
  ctf_archive_t *ctfa, *parenta = NULL, *lookparent;
  bfd_byte *ctfdata, *parentdata = NULL;
  bfd_size_type ctfsize, parentsize;
  ctf_sect_t ctfsect;
  ctf_file_t *parent = NULL;
  int err;

  if ((ctfdata = read_section_stabs (abfd, sect_name, &ctfsize, NULL)) == NULL)
      bfd_fatal (bfd_get_filename (abfd));

  if (parent_name
      && (parentdata = read_section_stabs (abfd, parent_name,
					   &parentsize, NULL)) == NULL)
      bfd_fatal (bfd_get_filename (abfd));

  /* Load the CTF file and dump it.  */
  ctfsect = make_ctfsect (sect_name, ctfdata, ctfsize);
  if ((ctfa = ctf_bfdopen_ctfsect (abfd, &ctfsect, &err)) == NULL)
    {
      non_fatal (_("CTF open failure: %s\n"), ctf_errmsg (err));
      bfd_fatal (bfd_get_filename (abfd));
    }

  if (parentdata)
    {
      ctfsect = make_ctfsect (parent_name, parentdata, parentsize);
      if ((parenta = ctf_bfdopen_ctfsect (abfd, &ctfsect, &err)) == NULL)
	{
	  non_fatal (_("CTF open failure: %s\n"), ctf_errmsg (err));
	  bfd_fatal (bfd_get_filename (abfd));
	}

      lookparent = parenta;
    }
  else
    lookparent = ctfa;

  /* Assume that the applicable parent archive member is the default one.
     (This is what all known implementations are expected to do, if they
     put CTFs and their parents in archives together.)  */
  if ((parent = ctf_arc_open_by_name (lookparent, NULL, &err)) == NULL)
    {
      non_fatal (_("CTF open failure: %s\n"), ctf_errmsg (err));
      bfd_fatal (bfd_get_filename (abfd));
    }

  printf (_("Contents of CTF section %s:\n"), sanitize_string (sect_name));

  ctf_archive_iter (ctfa, dump_ctf_archive_member, parent);
  ctf_file_close (parent);
  ctf_close (ctfa);
  ctf_close (parenta);
  free (parentdata);
  free (ctfdata);
}

/* Print the target-specific ("private") header data for ABFD.  */

static void
dump_bfd_private_header (bfd *abfd)
{
  if (!bfd_print_private_bfd_data (abfd, stdout))
    non_fatal (_("warning: private headers incomplete: %s"),
	       bfd_errmsg (bfd_get_error ()));
}

/* Run the -P/--private dumps selected in dump_private_options (a
   comma-separated list) using the first private-dump descriptor whose
   filter accepts ABFD.  */

static void
dump_target_specific (bfd *abfd)
{
  const struct objdump_private_desc * const *desc;
  struct objdump_private_option *opt;
  char *e, *b;

  /* Find the desc.  */
  for (desc = objdump_private_vectors; *desc != NULL; desc++)
    if ((*desc)->filter (abfd))
      break;

  if (*desc == NULL)
    {
      non_fatal (_("option -P/--private not supported by this file"));
      return;
    }

  /* Clear all options.  */
  for (opt = (*desc)->options; opt->name; opt++)
    opt->selected = FALSE;

  /* Decode options.  */
  b = dump_private_options;
  do
    {
      e = strchr (b, ',');

      if (e)
	*e = 0;

      for (opt = (*desc)->options; opt->name; opt++)
	if (strcmp (opt->name, b) == 0)
	  {
	    opt->selected = TRUE;
	    break;
	  }
      if (opt->name == NULL)
	non_fatal (_("target specific dump '%s' not supported"), b);

      if (e)
	{
	  /* Restore the comma we overwrote and move on to the next
	     option name.  */
	  *e = ',';
	  b = e + 1;
	}
    }
  while (e != NULL);

  /* Dump.  */
  (*desc)->dump (abfd);
}

/* Display a section in hexadecimal format with associated characters.
   Each line prefixed by the zero padded address.  */

static void
dump_section (bfd *abfd, asection *section,
	      void *dummy ATTRIBUTE_UNUSED)
{
  bfd_byte *data = NULL;
  bfd_size_type datasize;
  bfd_vma addr_offset;
  bfd_vma start_offset;
  bfd_vma stop_offset;
  unsigned int opb = bfd_octets_per_byte (abfd, section);
  /* Bytes per line.  */
  const int onaline = 16;
  char buf[64];
  int count;
  int width;

  if ((section->flags & SEC_HAS_CONTENTS) == 0)
    return;

  if (! process_section_p (section))
    return;

  if ((datasize = bfd_section_size (section)) == 0)
    return;

  /* Compute the address range to display.  */
  if (start_address == (bfd_vma) -1
      || start_address < section->vma)
    start_offset = 0;
  else
    start_offset = start_address - section->vma;

  if (stop_address == (bfd_vma) -1)
    stop_offset = datasize / opb;
  else
    {
      if (stop_address < section->vma)
	stop_offset = 0;
      else
	stop_offset = stop_address - section->vma;

      if (stop_offset > datasize / opb)
	stop_offset = datasize / opb;
    }

  if (start_offset >= stop_offset)
    return;

  printf (_("Contents of section %s:"), sanitize_string (section->name));
  if (display_file_offsets)
    printf (_("  (Starting at file offset: 0x%lx)"),
	    (unsigned long) (section->filepos + start_offset));
  printf ("\n");

  if (!bfd_get_full_section_contents (abfd, section, &data))
    {
      non_fatal (_("Reading section %s failed because: %s"),
		 section->name, bfd_errmsg (bfd_get_error ()));
      return;
    }

  /* Work out the width of the address column: at least 4 digits, or
     enough for the first and last addresses with leading zeros
     stripped.  */
  width = 4;

  bfd_sprintf_vma (abfd, buf, start_offset + section->vma);
  if (strlen (buf) >= sizeof (buf))
    abort ();

  count = 0;
  while (buf[count] == '0' && buf[count+1] != '\0')
    count++;
  count = strlen (buf) - count;
  if (count > width)
    width = count;

  bfd_sprintf_vma (abfd, buf, stop_offset + section->vma - 1);
  if (strlen (buf) >= sizeof (buf))
    abort ();

  count = 0;
  while (buf[count] == '0' && buf[count+1] != '\0')
    count++;
  count = strlen (buf) - count;
  if (count > width)
    width = count;

  for (addr_offset = start_offset;
       addr_offset < stop_offset; addr_offset += onaline / opb)
    {
      bfd_size_type j;

      bfd_sprintf_vma (abfd, buf, (addr_offset + section->vma));
      count = strlen (buf);
      if ((size_t) count >= sizeof (buf))
	abort ();

      putchar (' ');
      while (count < width)
	{
	  putchar ('0');
	  count++;
	}
      fputs (buf + count - width, stdout);
      putchar (' ');

      for (j = addr_offset * opb;
	   j < addr_offset * opb + onaline; j++)
	{
	  if (j < stop_offset * opb)
	    printf ("%02x", (unsigned) (data[j]));
	  else
	    printf ("  ");
	  if ((j & 3) == 3)
	    printf (" ");
	}
      printf (" ");

      /* ASCII column: printable bytes as-is, everything else as '.'.  */
      for (j = addr_offset * opb;
	   j < addr_offset * opb + onaline; j++)
	{
	  if (j >= stop_offset * opb)
	    printf (" ");
	  else
	    printf ("%c", ISPRINT (data[j]) ? data[j] : '.');
	}
      putchar ('\n');
    }
  free (data);
}

/* Actually display the various requested regions.  */

static void
dump_data (bfd *abfd)
{
  bfd_map_over_sections (abfd, dump_section, NULL);
}

/* Should perhaps share code and display with nm?  */

static void
dump_symbols (bfd *abfd ATTRIBUTE_UNUSED, bfd_boolean dynamic)
{
  asymbol **current;
  long max_count;
  long count;

  if (dynamic)
    {
      current = dynsyms;
      max_count = dynsymcount;
      printf ("DYNAMIC SYMBOL TABLE:\n");
    }
  else
    {
      current = syms;
      max_count = symcount;
      printf ("SYMBOL TABLE:\n");
    }

  if (max_count == 0)
    printf (_("no symbols\n"));

  for (count = 0; count < max_count; count++)
    {
      bfd *cur_bfd;

      if (*current == NULL)
	printf (_("no information for symbol number %ld\n"), count);

      else if ((cur_bfd = bfd_asymbol_bfd (*current)) == NULL)
	printf (_("could not determine the type of symbol number %ld\n"),
		count);

      else if (process_section_p ((* current)->section)
	       && (dump_special_syms
		   || !bfd_is_target_special_symbol (cur_bfd, *current)))
	{
	  const char *name = (*current)->name;

	  if (do_demangle && name != NULL && *name != '\0')
	    {
	      char *alloc;

	      /* If we want to demangle the name, we demangle it
		 here, and temporarily clobber it while calling
		 bfd_print_symbol.  FIXME: This is a gross hack.
*/ alloc = bfd_demangle (cur_bfd, name, demangle_flags); if (alloc != NULL) (*current)->name = alloc; bfd_print_symbol (cur_bfd, stdout, *current, bfd_print_symbol_all); if (alloc != NULL) { (*current)->name = name; free (alloc); } } else bfd_print_symbol (cur_bfd, stdout, *current, bfd_print_symbol_all); printf ("\n"); } current++; } printf ("\n\n"); } static void dump_reloc_set (bfd *abfd, asection *sec, arelent **relpp, long relcount) { arelent **p; char *last_filename, *last_functionname; unsigned int last_line; unsigned int last_discriminator; /* Get column headers lined up reasonably. */ { static int width; if (width == 0) { char buf[30]; bfd_sprintf_vma (abfd, buf, (bfd_vma) -1); width = strlen (buf) - 7; } printf ("OFFSET %*s TYPE %*s VALUE \n", width, "", 12, ""); } last_filename = NULL; last_functionname = NULL; last_line = 0; last_discriminator = 0; for (p = relpp; relcount && *p != NULL; p++, relcount--) { arelent *q = *p; const char *filename, *functionname; unsigned int linenumber; unsigned int discriminator; const char *sym_name; const char *section_name; bfd_vma addend2 = 0; if (start_address != (bfd_vma) -1 && q->address < start_address) continue; if (stop_address != (bfd_vma) -1 && q->address > stop_address) continue; if (with_line_numbers && sec != NULL && bfd_find_nearest_line_discriminator (abfd, sec, syms, q->address, &filename, &functionname, &linenumber, &discriminator)) { if (functionname != NULL && (last_functionname == NULL || strcmp (functionname, last_functionname) != 0)) { printf ("%s():\n", sanitize_string (functionname)); if (last_functionname != NULL) free (last_functionname); last_functionname = xstrdup (functionname); } if (linenumber > 0 && (linenumber != last_line || (filename != NULL && last_filename != NULL && filename_cmp (filename, last_filename) != 0) || (discriminator != last_discriminator))) { if (discriminator > 0) printf ("%s:%u\n", filename == NULL ? "???" 
: sanitize_string (filename), linenumber); else printf ("%s:%u (discriminator %u)\n", filename == NULL ? "???" : sanitize_string (filename), linenumber, discriminator); last_line = linenumber; last_discriminator = discriminator; if (last_filename != NULL) free (last_filename); if (filename == NULL) last_filename = NULL; else last_filename = xstrdup (filename); } } if (q->sym_ptr_ptr && *q->sym_ptr_ptr) { sym_name = (*(q->sym_ptr_ptr))->name; section_name = (*(q->sym_ptr_ptr))->section->name; } else { sym_name = NULL; section_name = NULL; } bfd_printf_vma (abfd, q->address); if (q->howto == NULL) printf (" *unknown* "); else if (q->howto->name) { const char *name = q->howto->name; /* R_SPARC_OLO10 relocations contain two addends. But because 'arelent' lacks enough storage to store them both, the 64-bit ELF Sparc backend records this as two relocations. One R_SPARC_LO10 and one R_SPARC_13, both pointing to the same address. This is merely so that we have some place to store both addend fields. Undo this transformation, otherwise the output will be confusing. 
*/ if (abfd->xvec->flavour == bfd_target_elf_flavour && elf_tdata (abfd)->elf_header->e_machine == EM_SPARCV9 && relcount > 1 && !strcmp (q->howto->name, "R_SPARC_LO10")) { arelent *q2 = *(p + 1); if (q2 != NULL && q2->howto && q->address == q2->address && !strcmp (q2->howto->name, "R_SPARC_13")) { name = "R_SPARC_OLO10"; addend2 = q2->addend; p++; } } printf (" %-16s ", name); } else printf (" %-16d ", q->howto->type); if (sym_name) { objdump_print_symname (abfd, NULL, *q->sym_ptr_ptr); } else { if (section_name == NULL) section_name = "*unknown*"; printf ("[%s]", sanitize_string (section_name)); } if (q->addend) { bfd_signed_vma addend = q->addend; if (addend < 0) { printf ("-0x"); addend = -addend; } else printf ("+0x"); bfd_printf_vma (abfd, addend); } if (addend2) { printf ("+0x"); bfd_printf_vma (abfd, addend2); } printf ("\n"); } if (last_filename != NULL) free (last_filename); if (last_functionname != NULL) free (last_functionname); } static void dump_relocs_in_section (bfd *abfd, asection *section, void *dummy ATTRIBUTE_UNUSED) { arelent **relpp = NULL; long relcount; long relsize; if ( bfd_is_abs_section (section) || bfd_is_und_section (section) || bfd_is_com_section (section) || (! 
process_section_p (section)) || ((section->flags & SEC_RELOC) == 0)) return; printf ("RELOCATION RECORDS FOR [%s]:", sanitize_string (section->name)); relsize = bfd_get_reloc_upper_bound (abfd, section); if (relsize == 0) { printf (" (none)\n\n"); return; } if (relsize < 0) relcount = relsize; else { relpp = (arelent **) xmalloc (relsize); relcount = bfd_canonicalize_reloc (abfd, section, relpp, syms); } if (relcount < 0) { printf ("\n"); non_fatal (_("failed to read relocs in: %s"), sanitize_string (bfd_get_filename (abfd))); bfd_fatal (_("error message was")); } else if (relcount == 0) printf (" (none)\n\n"); else { printf ("\n"); dump_reloc_set (abfd, section, relpp, relcount); printf ("\n\n"); } free (relpp); } static void dump_relocs (bfd *abfd) { bfd_map_over_sections (abfd, dump_relocs_in_section, NULL); } static void dump_dynamic_relocs (bfd *abfd) { long relsize; arelent **relpp; long relcount; relsize = bfd_get_dynamic_reloc_upper_bound (abfd); if (relsize < 0) bfd_fatal (bfd_get_filename (abfd)); printf ("DYNAMIC RELOCATION RECORDS"); if (relsize == 0) printf (" (none)\n\n"); else { relpp = (arelent **) xmalloc (relsize); relcount = bfd_canonicalize_dynamic_reloc (abfd, relpp, dynsyms); if (relcount < 0) bfd_fatal (bfd_get_filename (abfd)); else if (relcount == 0) printf (" (none)\n\n"); else { printf ("\n"); dump_reloc_set (abfd, NULL, relpp, relcount); printf ("\n\n"); } free (relpp); } } /* Creates a table of paths, to search for source files. 
*/ static void add_include_path (const char *path) { if (path[0] == 0) return; include_path_count++; include_paths = (const char **) xrealloc (include_paths, include_path_count * sizeof (*include_paths)); #ifdef HAVE_DOS_BASED_FILE_SYSTEM if (path[1] == ':' && path[2] == 0) path = concat (path, ".", (const char *) 0); #endif include_paths[include_path_count - 1] = path; } static void adjust_addresses (bfd *abfd ATTRIBUTE_UNUSED, asection *section, void *arg) { if ((section->flags & SEC_DEBUGGING) == 0) { bfd_boolean *has_reloc_p = (bfd_boolean *) arg; section->vma += adjust_section_vma; if (*has_reloc_p) section->lma += adjust_section_vma; } } /* Return the sign-extended form of an ARCH_SIZE sized VMA. */ static bfd_vma sign_extend_address (bfd *abfd ATTRIBUTE_UNUSED, bfd_vma vma, unsigned arch_size) { bfd_vma mask; mask = (bfd_vma) 1 << (arch_size - 1); return (((vma & ((mask << 1) - 1)) ^ mask) - mask); } /* Dump selected contents of ABFD. */ static void dump_bfd (bfd *abfd, bfd_boolean is_mainfile) { const struct elf_backend_data * bed; if (bfd_big_endian (abfd)) byte_get = byte_get_big_endian; else if (bfd_little_endian (abfd)) byte_get = byte_get_little_endian; else byte_get = NULL; /* Load any separate debug information files. We do this now and without checking do_follow_links because separate debug info files may contain symbol tables that we will need when displaying information about the main file. Any memory allocated by load_separate_debug_files will be released when we call free_debug_memory below. The test on is_mainfile is there because the chain of separate debug info files is a global variable shared by all invocations of dump_bfd. */ if (is_mainfile) { load_separate_debug_files (abfd, bfd_get_filename (abfd)); /* If asked to do so, recursively dump the separate files. 
*/ if (do_follow_links) { separate_info * i; for (i = first_separate_info; i != NULL; i = i->next) dump_bfd (i->handle, FALSE); } } /* Adjust user-specified start and stop limits for targets that use signed addresses. */ if (bfd_get_flavour (abfd) == bfd_target_elf_flavour && (bed = get_elf_backend_data (abfd)) != NULL && bed->sign_extend_vma) { start_address = sign_extend_address (abfd, start_address, bed->s->arch_size); stop_address = sign_extend_address (abfd, stop_address, bed->s->arch_size); } /* If we are adjusting section VMA's, change them all now. Changing the BFD information is a hack. However, we must do it, or bfd_find_nearest_line will not do the right thing. */ if (adjust_section_vma != 0) { bfd_boolean has_reloc = (abfd->flags & HAS_RELOC); bfd_map_over_sections (abfd, adjust_addresses, &has_reloc); } if (! dump_debugging_tags && ! suppress_bfd_header) printf (_("\n%s: file format %s\n"), sanitize_string (bfd_get_filename (abfd)), abfd->xvec->name); if (dump_ar_hdrs) print_arelt_descr (stdout, abfd, TRUE, FALSE); if (dump_file_header) dump_bfd_header (abfd); if (dump_private_headers) dump_bfd_private_header (abfd); if (dump_private_options != NULL) dump_target_specific (abfd); if (! dump_debugging_tags && ! suppress_bfd_header) putchar ('\n'); if (dump_symtab || dump_reloc_info || disassemble || dump_debugging || dump_dwarf_section_info) { syms = slurp_symtab (abfd); /* If following links, load any symbol tables from the linked files as well. 
*/ if (do_follow_links && is_mainfile) { separate_info * i; for (i = first_separate_info; i != NULL; i = i->next) { asymbol ** extra_syms; long old_symcount = symcount; extra_syms = slurp_symtab (i->handle); if (extra_syms) { if (old_symcount == 0) { syms = extra_syms; } else { syms = xrealloc (syms, (symcount + old_symcount) * sizeof (asymbol *)); memcpy (syms + old_symcount, extra_syms, symcount * sizeof (asymbol *)); } } symcount += old_symcount; } } } if (dump_section_headers) dump_headers (abfd); if (dump_dynamic_symtab || dump_dynamic_reloc_info || (disassemble && bfd_get_dynamic_symtab_upper_bound (abfd) > 0)) dynsyms = slurp_dynamic_symtab (abfd); if (disassemble) { synthcount = bfd_get_synthetic_symtab (abfd, symcount, syms, dynsymcount, dynsyms, &synthsyms); if (synthcount < 0) synthcount = 0; } if (dump_symtab) dump_symbols (abfd, FALSE); if (dump_dynamic_symtab) dump_symbols (abfd, TRUE); if (dump_dwarf_section_info) dump_dwarf (abfd); if (dump_ctf_section_info) dump_ctf (abfd, dump_ctf_section_name, dump_ctf_parent_name); if (dump_stab_section_info) dump_stabs (abfd); if (dump_reloc_info && ! disassemble) dump_relocs (abfd); if (dump_dynamic_reloc_info && ! disassemble) dump_dynamic_relocs (abfd); if (dump_section_contents) dump_data (abfd); if (disassemble) disassemble_data (abfd); if (dump_debugging) { void *dhandle; dhandle = read_debugging_info (abfd, syms, symcount, TRUE); if (dhandle != NULL) { if (!print_debugging_info (stdout, dhandle, abfd, syms, bfd_demangle, dump_debugging_tags ? TRUE : FALSE)) { non_fatal (_("%s: printing debugging information failed"), bfd_get_filename (abfd)); exit_status = 1; } free (dhandle); } /* PR 6483: If there was no STABS debug info in the file, try DWARF instead. */ else if (! 
dump_dwarf_section_info) { dwarf_select_sections_all (); dump_dwarf (abfd); } } if (syms) { free (syms); syms = NULL; } if (dynsyms) { free (dynsyms); dynsyms = NULL; } if (synthsyms) { free (synthsyms); synthsyms = NULL; } symcount = 0; dynsymcount = 0; synthcount = 0; if (is_mainfile) free_debug_memory (); } static void display_object_bfd (bfd *abfd) { char **matching; if (bfd_check_format_matches (abfd, bfd_object, &matching)) { dump_bfd (abfd, TRUE); return; } if (bfd_get_error () == bfd_error_file_ambiguously_recognized) { nonfatal (bfd_get_filename (abfd)); list_matching_formats (matching); free (matching); return; } if (bfd_get_error () != bfd_error_file_not_recognized) { nonfatal (bfd_get_filename (abfd)); return; } if (bfd_check_format_matches (abfd, bfd_core, &matching)) { dump_bfd (abfd, TRUE); return; } nonfatal (bfd_get_filename (abfd)); if (bfd_get_error () == bfd_error_file_ambiguously_recognized) { list_matching_formats (matching); free (matching); } } static void display_any_bfd (bfd *file, int level) { /* Decompress sections unless dumping the section contents. */ if (!dump_section_contents) file->flags |= BFD_DECOMPRESS; /* If the file is an archive, process all of its elements. */ if (bfd_check_format (file, bfd_archive)) { bfd *arfile = NULL; bfd *last_arfile = NULL; if (level == 0) printf (_("In archive %s:\n"), sanitize_string (bfd_get_filename (file))); else if (level > 100) { /* Prevent corrupted files from spinning us into an infinite loop. 100 is an arbitrary heuristic. 
*/ fatal (_("Archive nesting is too deep")); return; } else printf (_("In nested archive %s:\n"), sanitize_string (bfd_get_filename (file))); for (;;) { bfd_set_error (bfd_error_no_error); arfile = bfd_openr_next_archived_file (file, arfile); if (arfile == NULL) { if (bfd_get_error () != bfd_error_no_more_archived_files) nonfatal (bfd_get_filename (file)); break; } display_any_bfd (arfile, level + 1); if (last_arfile != NULL) { bfd_close (last_arfile); /* PR 17512: file: ac585d01. */ if (arfile == last_arfile) { last_arfile = NULL; break; } } last_arfile = arfile; } if (last_arfile != NULL) bfd_close (last_arfile); } else display_object_bfd (file); } static void display_file (char *filename, char *target, bfd_boolean last_file) { bfd *file; if (get_file_size (filename) < 1) { exit_status = 1; return; } file = bfd_openr (filename, target); if (file == NULL) { nonfatal (filename); return; } display_any_bfd (file, 0); /* This is an optimization to improve the speed of objdump, especially when dumping a file with lots of associated debug informatiom. Calling bfd_close on such a file can take a non-trivial amount of time as there are lots of lists to walk and buffers to free. This is only really necessary however if we are about to load another file and we need the memory back. Otherwise, if we are about to exit, then we can save (a lot of) time by only doing a quick close, and allowing the OS to reclaim the memory for us. */ if (! 
last_file) bfd_close (file); else bfd_close_all_done (file); } int main (int argc, char **argv) { int c; char *target = default_target; bfd_boolean seenflag = FALSE; #if defined (HAVE_SETLOCALE) #if defined (HAVE_LC_MESSAGES) setlocale (LC_MESSAGES, ""); #endif setlocale (LC_CTYPE, ""); #endif bindtextdomain (PACKAGE, LOCALEDIR); textdomain (PACKAGE); program_name = *argv; xmalloc_set_program_name (program_name); bfd_set_error_program_name (program_name); START_PROGRESS (program_name, 0); expandargv (&argc, &argv); if (bfd_init () != BFD_INIT_MAGIC) fatal (_("fatal error: libbfd ABI mismatch")); set_default_bfd_target (); while ((c = getopt_long (argc, argv, "pP:ib:m:M:VvCdDlfFaHhrRtTxsSI:j:wE:zgeGW::", long_options, (int *) 0)) != EOF) { switch (c) { case 0: break; /* We've been given a long option. */ case 'm': machine = optarg; break; case 'M': { char *options; if (disassembler_options) /* Ignore potential memory leak for now. */ options = concat (disassembler_options, ",", optarg, (const char *) NULL); else options = optarg; disassembler_options = remove_whitespace_and_extra_commas (options); } break; case 'j': add_only (optarg); break; case 'F': display_file_offsets = TRUE; break; case 'l': with_line_numbers = TRUE; break; case 'b': target = optarg; break; case 'C': do_demangle = TRUE; if (optarg != NULL) { enum demangling_styles style; style = cplus_demangle_name_to_style (optarg); if (style == unknown_demangling) fatal (_("unknown demangling style `%s'"), optarg); cplus_demangle_set_style (style); } break; case OPTION_RECURSE_LIMIT: demangle_flags &= ~ DMGL_NO_RECURSE_LIMIT; break; case OPTION_NO_RECURSE_LIMIT: demangle_flags |= DMGL_NO_RECURSE_LIMIT; break; case 'w': do_wide = wide_output = TRUE; break; case OPTION_ADJUST_VMA: adjust_section_vma = parse_vma (optarg, "--adjust-vma"); break; case OPTION_START_ADDRESS: start_address = parse_vma (optarg, "--start-address"); if ((stop_address != (bfd_vma) -1) && stop_address <= start_address) fatal (_("error: 
the start address should be before the end address")); break; case OPTION_STOP_ADDRESS: stop_address = parse_vma (optarg, "--stop-address"); if ((start_address != (bfd_vma) -1) && stop_address <= start_address) fatal (_("error: the stop address should be after the start address")); break; case OPTION_PREFIX: prefix = optarg; prefix_length = strlen (prefix); /* Remove an unnecessary trailing '/' */ while (IS_DIR_SEPARATOR (prefix[prefix_length - 1])) prefix_length--; break; case OPTION_PREFIX_STRIP: prefix_strip = atoi (optarg); if (prefix_strip < 0) fatal (_("error: prefix strip must be non-negative")); break; case OPTION_INSN_WIDTH: insn_width = strtoul (optarg, NULL, 0); if (insn_width <= 0) fatal (_("error: instruction width must be positive")); break; case OPTION_INLINES: unwind_inlines = TRUE; break; case OPTION_VISUALIZE_JUMPS: visualize_jumps = TRUE; color_output = FALSE; extended_color_output = FALSE; if (optarg != NULL) { if (streq (optarg, "color")) color_output = TRUE; else if (streq (optarg, "extended-color")) { color_output = TRUE; extended_color_output = TRUE; } else if (streq (optarg, "off")) visualize_jumps = FALSE; else nonfatal (_("unrecognized argument to --visualize-option")); } break; case 'E': if (strcmp (optarg, "B") == 0) endian = BFD_ENDIAN_BIG; else if (strcmp (optarg, "L") == 0) endian = BFD_ENDIAN_LITTLE; else { nonfatal (_("unrecognized -E option")); usage (stderr, 1); } break; case OPTION_ENDIAN: if (strncmp (optarg, "big", strlen (optarg)) == 0) endian = BFD_ENDIAN_BIG; else if (strncmp (optarg, "little", strlen (optarg)) == 0) endian = BFD_ENDIAN_LITTLE; else { non_fatal (_("unrecognized --endian type `%s'"), optarg); exit_status = 1; usage (stderr, 1); } break; case 'f': dump_file_header = TRUE; seenflag = TRUE; break; case 'i': formats_info = TRUE; seenflag = TRUE; break; case 'I': add_include_path (optarg); break; case 'p': dump_private_headers = TRUE; seenflag = TRUE; break; case 'P': dump_private_options = optarg; seenflag = 
TRUE; break; case 'x': dump_private_headers = TRUE; dump_symtab = TRUE; dump_reloc_info = TRUE; dump_file_header = TRUE; dump_ar_hdrs = TRUE; dump_section_headers = TRUE; seenflag = TRUE; break; case 't': dump_symtab = TRUE; seenflag = TRUE; break; case 'T': dump_dynamic_symtab = TRUE; seenflag = TRUE; break; case 'd': disassemble = TRUE; seenflag = TRUE; disasm_sym = optarg; break; case 'z': disassemble_zeroes = TRUE; break; case 'D': disassemble = TRUE; disassemble_all = TRUE; seenflag = TRUE; break; case 'S': disassemble = TRUE; with_source_code = TRUE; seenflag = TRUE; break; case OPTION_SOURCE_COMMENT: disassemble = TRUE; with_source_code = TRUE; seenflag = TRUE; if (optarg) source_comment = xstrdup (sanitize_string (optarg)); else source_comment = xstrdup ("# "); break; case 'g': dump_debugging = 1; seenflag = TRUE; break; case 'e': dump_debugging = 1; dump_debugging_tags = 1; do_demangle = TRUE; seenflag = TRUE; break; case 'W': dump_dwarf_section_info = TRUE; seenflag = TRUE; if (optarg) dwarf_select_sections_by_letters (optarg); else dwarf_select_sections_all (); break; case OPTION_DWARF: dump_dwarf_section_info = TRUE; seenflag = TRUE; if (optarg) dwarf_select_sections_by_names (optarg); else dwarf_select_sections_all (); break; case OPTION_DWARF_DEPTH: { char *cp; dwarf_cutoff_level = strtoul (optarg, & cp, 0); } break; case OPTION_DWARF_START: { char *cp; dwarf_start_die = strtoul (optarg, & cp, 0); suppress_bfd_header = 1; } break; case OPTION_DWARF_CHECK: dwarf_check = TRUE; break; case OPTION_CTF: dump_ctf_section_info = TRUE; dump_ctf_section_name = xstrdup (optarg); seenflag = TRUE; break; case OPTION_CTF_PARENT: dump_ctf_parent_name = xstrdup (optarg); break; case 'G': dump_stab_section_info = TRUE; seenflag = TRUE; break; case 's': dump_section_contents = TRUE; seenflag = TRUE; break; case 'r': dump_reloc_info = TRUE; seenflag = TRUE; break; case 'R': dump_dynamic_reloc_info = TRUE; seenflag = TRUE; break; case 'a': dump_ar_hdrs = TRUE; seenflag 
= TRUE; break; case 'h': dump_section_headers = TRUE; seenflag = TRUE; break; case 'v': case 'V': show_version = TRUE; seenflag = TRUE; break; case 'H': usage (stdout, 0); /* No need to set seenflag or to break - usage() does not return. */ default: usage (stderr, 1); } } if (show_version) print_version ("objdump"); if (!seenflag) usage (stderr, 2); if (formats_info) exit_status = display_info (); else { if (optind == argc) display_file ("a.out", target, TRUE); else for (; optind < argc;) { display_file (argv[optind], target, optind == argc - 1); optind++; } } free_only_list (); free (dump_ctf_section_name); free (dump_ctf_parent_name); free ((void *) source_comment); END_PROGRESS (program_name); return exit_status; }
mattstock/binutils-bexkat1
binutils/objdump.c
C
gpl-2.0
144,000
.identificacao { width:500px; } .detalhamento { width:500px; height:200px; } .genero { color:#6e6e6e; } .arquivo { width:500px; } .aviso { color:#f00; text-align:justify; } .info { width:780px; padding:10px; margin-top:10px; font-size:10px; background:#f0f0f0; } .info input { font-size:10px; background:#f0f0f0; border:0px; } .info span { display:block; }
tassiocaique/gtransp
admin/editar_despesa.css
CSS
gpl-2.0
417
/* Copyright 2013 David Axmark Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifndef _OPENGLES_H_ #define _OPENGLES_H_ namespace Base { struct SubView { int x, y, w, h; void* data; // hold handle for instance. }; bool subViewOpen(int left, int top, int width, int height, SubView& out); bool subViewClose(const SubView& sv); bool openGLInit(const SubView& subView); bool openGLClose(const SubView& subView); bool openGLSwap(const SubView& subView); bool openGLProcessEvents(const SubView &subView); } #endif // _OPENGLES_H_
MoSync/MoSync
runtimes/cpp/platforms/sdl/OpenGLES.h
C
gpl-2.0
1,011
/* pwsafe.c * * Password Safe cracker * Copyright (C) 2013 Dhiru Kholia <dhiru at openwall.com> * * hashkill - a hash cracking tool * Copyright (C) 2010 Milen Rangelov <gat3way@gat3way.eu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <alloca.h> #include <sys/types.h> #include <openssl/sha.h> #include <fcntl.h> #include <assert.h> #include <stdint.h> #include <stdlib.h> #include <stdio.h> #include <stdlib.h> #include <limits.h> #include <errno.h> #include <string.h> #include "plugin.h" #include "err.h" #include "hashinterface.h" int vectorsize; static struct custom_salt { int version; unsigned int iterations; unsigned char salt[32]; unsigned char hash[32]; } cs; char *hash_plugin_summary(void) { return ("pwsafe \t\tPassword Safe passphrase plugin"); } char *hash_plugin_detailed(void) { return ("pwsafe - Password Safe passphrase plugin\n" "------------------------------------------------\n" "Use this module to crack Password Safe pwsafe files\n" "Input should be a Password Safe pwsafe (specified with -f)\n" "\nAuthor: Dhiru Kholia <dhiru at openwall.com>\n"); } static char *magic = "PWS3"; /* helper functions for byte order conversions, header values are stored * in little-endian byte order */ static uint32_t fget32(FILE * fp) { uint32_t v = fgetc(fp); v |= fgetc(fp) << 
8; v |= fgetc(fp) << 16; v |= fgetc(fp) << 24; return v; } hash_stat hash_plugin_parse_hash(char *hashline, char *filename) { FILE *fp; int count; unsigned char buf[32]; if (!(fp = fopen(filename, "rb"))) { //fprintf(stderr, "! %s: %s\n", filename, strerror(errno)); return hash_err; } count = fread(buf, 4, 1, fp); if (count != 1) goto bail; if(memcmp(buf, magic, 4)) { //fprintf(stderr, "%s : Couldn't find PWS3 magic string. Is this a Password Safe file?\n", filename); goto bail; } count = fread(buf, 32, 1, fp); if (count != 1) goto bail; cs.iterations = fget32(fp); memcpy(cs.salt, buf, 32); count = fread(buf, 32, 1, fp); if (count != 1) goto bail; //assert(count == 1); memcpy(cs.hash, buf, 32); fclose(fp); (void) hash_add_username(filename); (void) hash_add_hash("Password Safe pwsafe file \0", 0); (void) hash_add_salt("123"); (void) hash_add_salt2(" "); return hash_ok; bail: fclose(fp); return hash_err; } hash_stat hash_plugin_check_hash(const char *hash, const char *password[VECTORSIZE], const char *salt, char *salt2[VECTORSIZE], const char *username, int *num, int threadid) { char *buf[VECTORSIZE]; char *buf2[VECTORSIZE]; int lens[VECTORSIZE]; int lens2[VECTORSIZE]; int a; for (a = 0; a < vectorsize; a++) { buf[a] = alloca(32); buf2[a] = alloca(128); lens[a]=strlen(password[a]); memcpy(buf2[a],password[a],lens[a]); memcpy(buf2[a]+lens[a],cs.salt,32); lens[a]+=32; lens2[a]=32; } hash_sha256_unicode((const char**)buf2, buf, lens); for (a=0;a<=cs.iterations;a++) { hash_sha256_unicode((const char**)buf, buf, lens2); } for (a = 0; a < vectorsize; a++) { if (!memcmp(buf[a], cs.hash, 32)) { *num = a; return hash_ok; } } return hash_err; } int hash_plugin_hash_length(void) { return 16; } int hash_plugin_is_raw(void) { return 0; } int hash_plugin_is_special(void) { return 1; } void get_vector_size(int size) { vectorsize = size; } int get_salt_size(void) { return 4; }
gat3way/hashkill
src/plugins/pwsafe.c
C
gpl-2.0
4,086
/* * Copyright (C) 2010 Google Inc. All rights reserved. * Copyright (C) 2016 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include "BaseCheckableInputType.h" namespace WebCore { class RadioInputType final : public BaseCheckableInputType { public: explicit RadioInputType(HTMLInputElement& element) : BaseCheckableInputType(element) { } private: const AtomicString& formControlType() const override; bool valueMissing(const String&) const override; String valueMissingText() const override; void handleClickEvent(MouseEvent&) override; void handleKeydownEvent(KeyboardEvent&) override; void handleKeyupEvent(KeyboardEvent&) override; bool isKeyboardFocusable(KeyboardEvent&) const override; bool shouldSendChangeEventAfterCheckedChanged() override; void willDispatchClick(InputElementClickState&) override; void didDispatchClick(Event*, const InputElementClickState&) override; bool isRadioButton() const override; bool matchesIndeterminatePseudoClass() const override; }; } // namespace WebCore
Debian/openjfx
modules/web/src/main/native/Source/WebCore/html/RadioInputType.h
C
gpl-2.0
2,561
/* =========================================================== * JFreeChart : a free chart library for the Java(tm) platform * =========================================================== * * (C) Copyright 2000-2007, by Object Refinery Limited and Contributors. * * Project Info: http://www.jfree.org/jfreechart/index.html * * This library is free software; you can redistribute it and/or modify it * under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public * License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. * * [Java is a trademark or registered trademark of Sun Microsystems, Inc. * in the United States and other countries.] * * -------------------- * AxisChangeEvent.java * -------------------- * (C) Copyright 2000-2007, by Object Refinery Limited. * * Original Author: David Gilbert (for Object Refinery Limited); * Contributor(s): -; * * Changes (from 24-Aug-2001) * -------------------------- * 24-Aug-2001 : Added standard source header. Fixed DOS encoding problem (DG); * 07-Nov-2001 : Updated header (DG); * 14-Oct-2002 : Now extends EventListener (DG); * */ package org.jfree.chart.event; import java.util.EventListener; /** * The interface that must be supported by classes that wish to receive * notification of changes to an axis. * <P> * The Plot class implements this interface, and automatically registers with * its axes (if any). Any axis changes are passed on by the plot as a plot * change event. 
This is part of the notification mechanism that ensures that * charts are redrawn whenever changes are made to any chart component. * */ public interface AxisChangeListener extends EventListener { /** * Receives notification of an axis change event. * * @param event the event. */ public void axisChanged(AxisChangeEvent event); }
SpoonLabs/astor
examples/chart_11/source/org/jfree/chart/event/AxisChangeListener.java
Java
gpl-2.0
2,464
/***************************************************************************** Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA *****************************************************************************/ /**************************************************//** @file os/os0sync.cc The interface to the operating system synchronization primitives. 
Created 9/6/1995 Heikki Tuuri *******************************************************/ #include "os0sync.h" #include "sync0rw.h" #ifdef UNIV_NONINL #include "os0sync.ic" #endif #ifdef __WIN__ #include <windows.h> #endif #include "ut0mem.h" #include "srv0start.h" #include "srv0srv.h" /* Type definition for an operating system mutex struct */ struct os_mutex_t{ os_event_t event; /*!< Used by sync0arr.cc for queing threads */ void* handle; /*!< OS handle to mutex */ ulint count; /*!< we use this counter to check that the same thread does not recursively lock the mutex: we do not assume that the OS mutex supports recursive locking, though NT seems to do that */ UT_LIST_NODE_T(os_mutex_t) os_mutex_list; /* list of all 'slow' OS mutexes created */ }; /** Mutex protecting counts and the lists of OS mutexes and events */ UNIV_INTERN os_ib_mutex_t os_sync_mutex; /** TRUE if os_sync_mutex has been initialized */ static ibool os_sync_mutex_inited = FALSE; /** TRUE when os_sync_free() is being executed */ static ibool os_sync_free_called = FALSE; UNIV_INTERN os_event_support_t* os_support = NULL; /** This is incremented by 1 in os_thread_create and decremented by 1 in os_thread_exit */ UNIV_INTERN ulint os_thread_count = 0; /** The list of all events created */ static UT_LIST_BASE_NODE_T(os_event_wrapper_struct_t) os_event_list; /** The list of all OS 'slow' mutexes */ static UT_LIST_BASE_NODE_T(os_mutex_t) os_mutex_list; UNIV_INTERN ulint os_event_count = 0; UNIV_INTERN ulint os_mutex_count = 0; UNIV_INTERN ulint os_fast_mutex_count = 0; /* The number of microsecnds in a second. */ static const ulint MICROSECS_IN_A_SECOND = 1000000; #ifdef UNIV_PFS_MUTEX UNIV_INTERN mysql_pfs_key_t event_os_mutex_key; UNIV_INTERN mysql_pfs_key_t os_mutex_key; #endif /* Because a mutex is embedded inside an event and there is an event embedded inside a mutex, on free, this generates a recursive call. 
This version of the free event function doesn't acquire the global lock */
static void os_event_free_internal(os_event_t event);

/* On Windows (Vista and later), load function pointers for condition
variable handling. Those functions are not available in prior versions,
so we have to use them via runtime loading, as long as we support XP. */
static void os_cond_module_init(void);

#ifdef __WIN__
/* Prototypes and function pointers for condition variable functions,
resolved at runtime from kernel32 by os_cond_module_init() */
typedef VOID (WINAPI* InitializeConditionVariableProc)
             (PCONDITION_VARIABLE ConditionVariable);
static InitializeConditionVariableProc initialize_condition_variable;

typedef BOOL (WINAPI* SleepConditionVariableCSProc)
             (PCONDITION_VARIABLE ConditionVariable,
              PCRITICAL_SECTION CriticalSection,
              DWORD dwMilliseconds);
static SleepConditionVariableCSProc sleep_condition_variable;

typedef VOID (WINAPI* WakeAllConditionVariableProc)
             (PCONDITION_VARIABLE ConditionVariable);
static WakeAllConditionVariableProc wake_all_condition_variable;

typedef VOID (WINAPI* WakeConditionVariableProc)
             (PCONDITION_VARIABLE ConditionVariable);
static WakeConditionVariableProc wake_condition_variable;
#endif

/*********************************************************//**
Initialize condition variable */
UNIV_INLINE
void
os_cond_init(
/*=========*/
        os_cond_t*      cond)   /*!< in: condition variable. */
{
        ut_a(cond);

#ifdef __WIN__
        ut_a(initialize_condition_variable != NULL);
        initialize_condition_variable(cond);
#else
        ut_a(pthread_cond_init(cond, NULL) == 0);
#endif
}

/*********************************************************//**
Do a timed wait on condition variable. The caller must hold fast_mutex
on entry; it is atomically released during the wait and re-acquired
before return.
@return TRUE if timed out, FALSE otherwise */
UNIV_INLINE
ibool
os_cond_wait_timed(
/*===============*/
        os_cond_t*      cond,           /*!< in: condition variable. */
        os_fast_mutex_t* fast_mutex,    /*!< in: fast mutex */
#ifndef __WIN__
        const struct timespec* abstime  /*!< in: timeout */
#else
        DWORD           time_in_ms      /*!< in: timeout in milliseconds*/
#endif /* !__WIN__ */
)
{
        fast_mutex_t*   mutex = &fast_mutex->mutex;
#ifdef __WIN__
        BOOL    ret;
        DWORD   err;

        ut_a(sleep_condition_variable != NULL);

        ret = sleep_condition_variable(cond, mutex, time_in_ms);

        if (!ret) {
                err = GetLastError();
                /* From http://msdn.microsoft.com/en-us/library/ms686301%28VS.85%29.aspx,
                "Condition variables are subject to spurious wakeups
                (those not associated with an explicit wake) and stolen wakeups
                (another thread manages to run before the woken thread)."
                Check for both types of timeouts.
                Conditions are checked by the caller.*/
                if ((err == WAIT_TIMEOUT) || (err == ERROR_TIMEOUT)) {
                        return(TRUE);
                }
        }

        ut_a(ret);

        return(FALSE);
#else
        int     ret;

        ret = pthread_cond_timedwait(cond, mutex, abstime);

        switch (ret) {
        case 0:
        case ETIMEDOUT:
        /* We play it safe by checking for EINTR even though
        according to the POSIX documentation it can't return EINTR. */
        case EINTR:
                break;

        default:
                /* Any other return value is a programming error
                (e.g. EINVAL): dump the arguments and abort. */
                fprintf(stderr, " InnoDB: pthread_cond_timedwait() returned: "
                                "%d: abstime={%lu,%lu}\n",
                                ret, (ulong) abstime->tv_sec,
                                (ulong) abstime->tv_nsec);
                ut_error;
        }

        return(ret == ETIMEDOUT);
#endif
}

/*********************************************************//**
Wait on condition variable. The caller must hold fast_mutex. */
UNIV_INLINE
void
os_cond_wait(
/*=========*/
        os_cond_t*      cond,   /*!< in: condition variable. */
        os_fast_mutex_t* fast_mutex)/*!< in: fast mutex */
{
        fast_mutex_t*   mutex = &fast_mutex->mutex;
        ut_a(cond);
        ut_a(mutex);

#ifdef __WIN__
        ut_a(sleep_condition_variable != NULL);
        ut_a(sleep_condition_variable(cond, mutex, INFINITE));
#else
        ut_a(pthread_cond_wait(cond, mutex) == 0);
#endif
}

/*********************************************************//**
Wakes all threads waiting for condition variable */
UNIV_INLINE
void
os_cond_broadcast(
/*==============*/
        os_cond_t*      cond)   /*!< in: condition variable. */
{
        ut_a(cond);

#ifdef __WIN__
        ut_a(wake_all_condition_variable != NULL);
        wake_all_condition_variable(cond);
#else
        ut_a(pthread_cond_broadcast(cond) == 0);
#endif
}

/*********************************************************//**
Wakes one thread waiting for condition variable */
UNIV_INLINE
void
os_cond_signal(
/*==========*/
        os_cond_t*      cond)   /*!< in: condition variable. */
{
        ut_a(cond);

#ifdef __WIN__
        ut_a(wake_condition_variable != NULL);
        wake_condition_variable(cond);
#else
        ut_a(pthread_cond_signal(cond) == 0);
#endif
}

/*********************************************************//**
Destroys condition variable */
UNIV_INLINE
void
os_cond_destroy(
/*============*/
        os_cond_t*      cond)   /*!< in: condition variable. */
{
#ifdef __WIN__
        /* Do nothing: Windows condition variables need no cleanup */
#else
        ut_a(pthread_cond_destroy(cond) == 0);
#endif
}

/*********************************************************//**
On Windows (Vista and later), load function pointers for condition variable
handling. Those functions are not available in prior versions, so we have to
use them via runtime loading, as long as we support XP.
 */
static
void
os_cond_module_init(void)
/*=====================*/
{
#ifdef __WIN__
        HMODULE         h_dll;

        /* Nothing to resolve when falling back to Windows events */
        if (!srv_use_native_conditions)
                return;

        h_dll = GetModuleHandle("kernel32");

        initialize_condition_variable = (InitializeConditionVariableProc)
                         GetProcAddress(h_dll, "InitializeConditionVariable");
        sleep_condition_variable = (SleepConditionVariableCSProc)
                          GetProcAddress(h_dll, "SleepConditionVariableCS");
        wake_all_condition_variable = (WakeAllConditionVariableProc)
                             GetProcAddress(h_dll, "WakeAllConditionVariable");
        wake_condition_variable = (WakeConditionVariableProc)
                         GetProcAddress(h_dll, "WakeConditionVariable");

        /* When using native condition variables, check function pointers */
        ut_a(initialize_condition_variable);
        ut_a(sleep_condition_variable);
        ut_a(wake_all_condition_variable);
        ut_a(wake_condition_variable);
#endif
}

/*********************************************************//**
Initializes global event and OS 'slow' mutex lists, creates the global
os_sync_mutex and fills the shared event-support pool. Must be called
before any other function in this module (but see os_event_create2(),
which tolerates being called earlier during startup). */
UNIV_INTERN
void
os_sync_init(void)
/*==============*/
{
        UT_LIST_INIT(os_event_list);
        UT_LIST_INIT(os_mutex_list);

        os_sync_mutex = NULL;
        os_sync_mutex_inited = FALSE;

        /* Now for Windows only */
        os_cond_module_init();

        os_sync_mutex = os_mutex_create();

        /* Allocate and initialize the shared pool of mutex/condvar
        pairs that os_event_create2() hands out round-robin */
        unsigned int i = 0;
        os_support = (os_event_support_t*)ut_malloc(
                sizeof(os_event_support_t) * srv_sync_pool_size);
        for(; i < srv_sync_pool_size; ++i) {
#ifndef PFS_SKIP_EVENT_MUTEX
                os_fast_mutex_init(event_os_mutex_key,
                                   &os_support[i].os_mutex);
#else
                os_fast_mutex_init(PFS_NOT_INSTRUMENTED,
                                   &os_support[i].os_mutex);
#endif
                ut_a(0 == pthread_cond_init(&os_support[i].cond_var, NULL));
        }
        os_sync_mutex_inited = TRUE;
}

/*********************************************************//**
Frees created events and OS 'slow' mutexes.
 */
UNIV_INTERN
void
os_sync_free(void)
/*==============*/
{
        os_event_t      event;
        os_ib_mutex_t   mutex;

        /* While this flag is set, os_mutex_free() skips freeing the
        event embedded in each mutex (it is freed via the event list) */
        os_sync_free_called = TRUE;
        event = UT_LIST_GET_FIRST(os_event_list);

        while (event) {

                os_event_free(event);

                event = UT_LIST_GET_FIRST(os_event_list);
        }

        mutex = UT_LIST_GET_FIRST(os_mutex_list);

        while (mutex) {

                if (mutex == os_sync_mutex) {
                        /* Set the flag to FALSE so that we do not try to
                        reserve os_sync_mutex any more in remaining freeing
                        operations in shutdown */
                        os_sync_mutex_inited = FALSE;
                }

                os_mutex_free(mutex);

                mutex = UT_LIST_GET_FIRST(os_mutex_list);
        }
        os_sync_free_called = FALSE;

        /* This is the code that should run to clean up
        int i = 0;
        for(; i < srv_sync_pool_size; ++i) {
                os_fast_mutex_free(&os_support[i].os_mutex);
                ut_a(0 == pthread_cond_destroy(&os_support[i].cond_var));
        }
        ...but we do this instead, since we don't clean up the events pool,
        and so should not remove the pthread data they all point to: */
        os_fast_mutex_count -= srv_sync_pool_size;

        if (UNIV_UNLIKELY(os_sync_mutex_inited)) {
                os_mutex_enter(os_sync_mutex);
        }

        /* Account for the events, and their fast_mutexes,
         * within the (never de-initialized) rw_lock structs */
        os_event_count -= rw_lock_count*2;
        rw_lock_count = 0;

        if (UNIV_UNLIKELY(os_sync_mutex_inited)) {
                os_mutex_exit(os_sync_mutex);
        }
}

/*********************************************************//**
Creates an event semaphore, i.e., a semaphore which may just have two
states: signaled and nonsignaled. The created event is manual reset: it
must be reset explicitly by calling sync_os_reset_event.
@return	the event handle */
UNIV_INTERN
os_event_t
os_event_create(void)
/*==================*/
{
        os_event_t event = (os_event_t)ut_malloc(
                sizeof(struct os_event_wrapper_struct));
        os_event_create2(&event->ev);

        /*Note: this overwrites the "sup" element, that was pointed to a
        shared sync data pool element by os_event_create2, with a
        dynamically-allocated set of data exclusively for this event. */
        event->ev.sup = (os_event_support_t*)ut_malloc(
                sizeof(os_event_support_t));
#ifndef PFS_SKIP_EVENT_MUTEX
        os_fast_mutex_init(event_os_mutex_key, &(event->ev.sup->os_mutex));
#else
        os_fast_mutex_init(PFS_NOT_INSTRUMENTED, &(event->ev.sup->os_mutex));
#endif
        ut_a(0 == pthread_cond_init(&(event->ev.sup->cond_var), NULL));

        if (os_sync_mutex != NULL) {
                os_mutex_enter(os_sync_mutex);
        }

        /* Put to the list of events */
        UT_LIST_ADD_FIRST(os_event_list, os_event_list, event);

        if (os_sync_mutex != NULL) {
                os_mutex_exit(os_sync_mutex);
        }

        return(event);
}

/*********************************************************//**
Initializes a pre-allocated event struct; its sync support data comes
from the shared os_support pool (round-robin by creation count). */
UNIV_INTERN
void
os_event_create2(
/*=============*/
        os_event_struct_t*      event)/*!< in: pre-allocated struct, or NULL */
{
#ifdef __WIN__
        if(!srv_use_native_conditions) {
                event = static_cast<os_event_t>(ut_malloc(sizeof(*event)));

                event->handle = CreateEvent(NULL, TRUE, FALSE, NULL);
                if (!event->handle) {
                        fprintf(stderr,
                                "InnoDB: Could not create a Windows event"
                                " semaphore; Windows error %lu\n",
                                (ulong) GetLastError());
                }
        } else /* Windows with condition variables */
#endif
        {
                /* We return this value in os_event_reset(), which can then be
                be used to pass to the os_event_wait_low(). The value of zero
                is reserved in os_event_wait_low() for the case when the
                caller does not want to pass any signal_count value. To
                distinguish between the two cases we initialize signal_count
                to 1 here. */
                event->stats = 1;
        }

        /* The os_sync_mutex can be NULL because during startup an event
        can be created [ because it's embedded in the mutex/rwlock ] before
        this module has been initialized */
        if (os_sync_mutex != NULL) {
                os_mutex_enter(os_sync_mutex);
        }

        /* Note: this sets the "sup" element to point to a shared sync data
        pool element. This will be overwritten if this was called by
        os_event_create. */
        event->sup = os_support + (os_event_count % srv_sync_pool_size);
        os_event_count++;

        if (os_sync_mutex != NULL) {
                os_mutex_exit(os_sync_mutex);
        }
}

/**********************************************************//**
Sets an event semaphore to the signaled state: lets waiting threads
proceed. */
UNIV_INTERN
void
os_event_set2(
/*==========*/
        os_event_struct_t*      event)  /*!< in: event to set */
{
        ut_a(event);

#ifdef __WIN__
        if (!srv_use_native_conditions) {
                ut_a(SetEvent(event->handle));
                return;
        }
#endif

        os_fast_mutex_lock(&(event->sup->os_mutex));

        if (IS_SET(event)) {
                /* Do nothing: already signaled */
        } else {
                INC_SIGNAL_COUNT(event);
                SET_IS_SET(event);
                os_cond_broadcast(&(event->sup->cond_var));
        }

        os_fast_mutex_unlock(&(event->sup->os_mutex));
}

/**********************************************************//**
Resets an event semaphore to the nonsignaled state. Waiting threads will
stop to wait for the event.
The return value should be passed to os_even_wait_low() if it is desired
that this thread should not wait in case of an intervening call to
os_event_set() between this os_event_reset() and the
os_event_wait_low() call. See comments for os_event_wait_low().
@return	current signal_count. */
UNIV_INTERN
ib_int64_t
os_event_reset2(
/*===========*/
        os_event_struct_t*      event)  /*!< in: event to reset */
{
        ib_int64_t      ret = 0;

        ut_a(event);

#ifdef __WIN__
        if(!srv_use_native_conditions) {
                ut_a(ResetEvent(event->handle));
                return(0);
        }
#endif

        os_fast_mutex_lock(&(event->sup->os_mutex));

        if (!IS_SET(event)) {
                /* Do nothing: already nonsignaled */
        } else {
                CLEAR_IS_SET(event);
        }
        ret = SIGNAL_COUNT(event);

        os_fast_mutex_unlock(&(event->sup->os_mutex));
        return(ret);
}

/**********************************************************//**
Frees an event object, without acquiring the global lock.
 */
static
void
os_event_free_internal(
/*===================*/
        os_event_t      event)  /*!< in: event to free */
{
#ifdef __WIN__
        if(!srv_use_native_conditions) {
                ut_a(event);
                ut_a(CloseHandle(event->handle));
        } else
#endif
        {
                ut_a(event);

                /* This is to avoid freeing the mutex twice */
                os_fast_mutex_free(&(event->ev.sup->os_mutex));

                os_cond_destroy(&(event->ev.sup->cond_var));
        }

        /* Remove from the list of events; caller already holds (or does
        not need) os_sync_mutex — see os_mutex_free() */
        UT_LIST_REMOVE(os_event_list, os_event_list, event);

        os_event_count--;
        ut_free(event->ev.sup);
        ut_free(event);
}

/**********************************************************//**
Frees an event object created by os_event_create() (i.e. one with its
own dynamically-allocated support data). */
UNIV_INTERN
void
os_event_free(
/*==========*/
        os_event_t      event)  /*!< in: event to free */
{
        /* Free the private mutex/condvar pair before releasing the
        wrapper itself */
        os_fast_mutex_free(&(event->ev.sup->os_mutex));
        ut_a(0 == pthread_cond_destroy(&(event->ev.sup->cond_var)));
        ut_free(event->ev.sup);

        os_event_free2(&event->ev);

        os_mutex_enter(os_sync_mutex);
        UT_LIST_REMOVE(os_event_list, os_event_list, event);
        os_mutex_exit(os_sync_mutex);

        ut_free(event);
}

/**********************************************************//**
Cleans up an event object initialized by os_event_create2(); the shared
pool element it points to is not freed, only unlinked. */
UNIV_INTERN
void
os_event_free2(
/*===========*/
        os_event_struct_t*      event)  /*!< in: event to free */
{
        ut_a(event);
#ifdef __WIN__
        if(!srv_use_native_conditions){
                ut_a(CloseHandle(event->handle));
        } else /*Windows with condition variables */
#endif
        {
                event->sup = NULL;
        }

        /* Remove from the list of events */
        os_mutex_enter(os_sync_mutex);

        os_event_count--;

        os_mutex_exit(os_sync_mutex);
}

/**********************************************************//**
Waits for an event object until it is in the signaled state.

Typically, if the event has been signalled after the os_event_reset()
we'll return immediately because event->is_set == TRUE.
There are, however, situations (e.g.: sync_array code) where we may
lose this information.
For example:

thread A calls os_event_reset()
thread B calls os_event_set()   [event->is_set == TRUE]
thread C calls os_event_reset() [event->is_set == FALSE]
thread A calls os_event_wait()  [infinite wait!]
thread C calls os_event_wait()  [infinite wait!]

Where such a scenario is possible, to avoid infinite wait, the value
returned by os_event_reset() should be passed in as
reset_sig_count. */
UNIV_INTERN
void
os_event_wait_low2(
/*==============*/
        os_event_struct_t*      event,          /*!< in: event to wait */
        ib_int64_t      reset_sig_count)/*!< in: zero or the value
                                        returned by previous call of
                                        os_event_reset(). */
{
#ifdef __WIN__
        if(!srv_use_native_conditions) {
                DWORD   err;

                ut_a(event);

                UT_NOT_USED(reset_sig_count);

                /* Specify an infinite wait */
                err = WaitForSingleObject(event->handle, INFINITE);

                ut_a(err == WAIT_OBJECT_0);
                return;
        }
#endif

        os_fast_mutex_lock(&event->sup->os_mutex);

        if (!reset_sig_count) {
                reset_sig_count = SIGNAL_COUNT(event);
        }

        /* Also return when the signal count has moved on from
        reset_sig_count: a set+reset happened since our reset */
        while (!IS_SET(event) && SIGNAL_COUNT(event) == reset_sig_count) {
                os_cond_wait(&(event->sup->cond_var),
                             &(event->sup->os_mutex));

                /* Solaris manual said that spurious wakeups may occur: we
                have to check if the event really has been signaled after
                we came here to wait */
        }

        os_fast_mutex_unlock(&event->sup->os_mutex);
}

/**********************************************************//**
Waits for an event object until it is in the signaled state or
a timeout is exceeded.
@return	0 if success, OS_SYNC_TIME_EXCEEDED if timeout was exceeded */
UNIV_INTERN
ulint
os_event_wait_time_low2(
/*===================*/
        os_event_struct_t*      event,                  /*!< in: event to wait */
        ulint           time_in_usec,           /*!< in: timeout in
                                                microseconds, or
                                                OS_SYNC_INFINITE_TIME */
        ib_int64_t      reset_sig_count)        /*!< in: zero or the value
                                                returned by previous call of
                                                os_event_reset(). */
{
        ibool           timed_out = FALSE;

#ifdef __WIN__
        DWORD           time_in_ms;

        if (!srv_use_native_conditions) {
                DWORD   err;

                ut_a(event);

                if (time_in_usec != OS_SYNC_INFINITE_TIME) {
                        time_in_ms = time_in_usec / 1000;
                        err = WaitForSingleObject(event->handle, time_in_ms);
                } else {
                        err = WaitForSingleObject(event->handle, INFINITE);
                }

                if (err == WAIT_OBJECT_0) {
                        return(0);
                } else if ((err == WAIT_TIMEOUT) || (err == ERROR_TIMEOUT)) {
                        return(OS_SYNC_TIME_EXCEEDED);
                }

                ut_error;
                /* Dummy value to eliminate compiler warning. */
                return(42);
        } else {
                ut_a(sleep_condition_variable != NULL);

                if (time_in_usec != OS_SYNC_INFINITE_TIME) {
                        time_in_ms = time_in_usec / 1000;
                } else {
                        time_in_ms = INFINITE;
                }
        }
#else
        struct timespec abstime;

        if (time_in_usec != OS_SYNC_INFINITE_TIME) {
                struct timeval  tv;
                int             ret;
                ulint           sec;
                ulint           usec;

                /* Compute the absolute deadline = now + time_in_usec,
                normalizing the microsecond overflow into seconds */
                ret = ut_usectime(&sec, &usec);
                ut_a(ret == 0);

                tv.tv_sec = sec;
                tv.tv_usec = usec;

                tv.tv_usec += time_in_usec;

                if ((ulint) tv.tv_usec >= MICROSECS_IN_A_SECOND) {
                        tv.tv_sec += time_in_usec / MICROSECS_IN_A_SECOND;
                        tv.tv_usec %= MICROSECS_IN_A_SECOND;
                }

                abstime.tv_sec  = tv.tv_sec;
                abstime.tv_nsec = tv.tv_usec * 1000;
        } else {
                /* "Infinite" wait: deadline as far in the future as the
                types allow */
                abstime.tv_nsec = 999999999;
                abstime.tv_sec = (time_t) ULINT_MAX;
        }

        ut_a(abstime.tv_nsec <= 999999999);
#endif /* __WIN__ */

        os_fast_mutex_lock(&event->sup->os_mutex);

        if (!reset_sig_count) {
                reset_sig_count = SIGNAL_COUNT(event);
        }

        do {
                if (IS_SET(event)
                    || SIGNAL_COUNT(event) != reset_sig_count) {

                        break;
                }

                timed_out = os_cond_wait_timed(
                        &event->sup->cond_var, &event->sup->os_mutex,
#ifndef __WIN__
                        &abstime
#else
                        time_in_ms
#endif /* !__WIN__ */
                );

        } while (!timed_out);

        os_fast_mutex_unlock(&event->sup->os_mutex);

        return(timed_out ? OS_SYNC_TIME_EXCEEDED : 0);
}

/*********************************************************//**
Creates an operating system mutex semaphore. Because these are slow, the
mutex semaphore of InnoDB itself (ib_mutex_t) should be used where possible.
@return	the mutex handle */
UNIV_INTERN
os_ib_mutex_t
os_mutex_create(void)
/*=================*/
{
        os_fast_mutex_t*        mutex;
        os_ib_mutex_t           mutex_str;

        /* The slow mutex is layered on a fast mutex plus an event used
        by sync0arr for queueing */
        mutex = static_cast<os_fast_mutex_t*>(
                ut_malloc(sizeof(os_fast_mutex_t)));

        os_fast_mutex_init(os_mutex_key, mutex);

        mutex_str = static_cast<os_ib_mutex_t>(ut_malloc(sizeof *mutex_str));

        mutex_str->handle = mutex;
        mutex_str->count = 0;
        mutex_str->event = os_event_create();

        if (UNIV_LIKELY(os_sync_mutex_inited)) {
                /* When creating os_sync_mutex itself we cannot reserve it */
                os_mutex_enter(os_sync_mutex);
        }

        UT_LIST_ADD_FIRST(os_mutex_list, os_mutex_list, mutex_str);

        os_mutex_count++;

        if (UNIV_LIKELY(os_sync_mutex_inited)) {
                os_mutex_exit(os_sync_mutex);
        }

        return(mutex_str);
}

/**********************************************************//**
Acquires ownership of a mutex semaphore. The assertion on count == 1
catches recursive locking, which these OS mutexes do not support. */
UNIV_INTERN
void
os_mutex_enter(
/*===========*/
        os_ib_mutex_t   mutex)  /*!< in: mutex to acquire */
{
        os_fast_mutex_lock(static_cast<os_fast_mutex_t*>(mutex->handle));

        (mutex->count)++;

        ut_a(mutex->count == 1);
}

/**********************************************************//**
Releases ownership of a mutex. Must be held by the caller (count == 1). */
UNIV_INTERN
void
os_mutex_exit(
/*==========*/
        os_ib_mutex_t   mutex)  /*!< in: mutex to release */
{
        ut_a(mutex);

        ut_a(mutex->count == 1);

        (mutex->count)--;
        os_fast_mutex_unlock(static_cast<os_fast_mutex_t*>(mutex->handle));
}

/**********************************************************//**
Frees a mutex object.
 */
UNIV_INTERN
void
os_mutex_free(
/*==========*/
        os_ib_mutex_t   mutex)  /*!< in: mutex to free */
{
        ut_a(mutex);

        /* During os_sync_free() the embedded event is freed via the
        global event list instead, to avoid a double free */
        if (UNIV_LIKELY(!os_sync_free_called)) {
                os_event_free_internal(mutex->event);
        }

        if (UNIV_LIKELY(os_sync_mutex_inited)) {
                os_mutex_enter(os_sync_mutex);
        }

        UT_LIST_REMOVE(os_mutex_list, os_mutex_list, mutex);

        os_mutex_count--;

        if (UNIV_LIKELY(os_sync_mutex_inited)) {
                os_mutex_exit(os_sync_mutex);
        }

        os_fast_mutex_free(static_cast<os_fast_mutex_t*>(mutex->handle));
        ut_free(mutex->handle);
        ut_free(mutex);
}

/*********************************************************//**
Initializes an operating system fast mutex semaphore. */
UNIV_INTERN
void
os_fast_mutex_init_func(
/*====================*/
        fast_mutex_t*           fast_mutex)     /*!< in: fast mutex */
{
#ifdef __WIN__
        ut_a(fast_mutex);

        InitializeCriticalSection((LPCRITICAL_SECTION) fast_mutex);
#else
        ut_a(0 == pthread_mutex_init(fast_mutex, MY_MUTEX_INIT_FAST));
#endif
        if (UNIV_LIKELY(os_sync_mutex_inited)) {
                /* When creating os_sync_mutex itself (in Unix) we cannot
                reserve it */
                os_mutex_enter(os_sync_mutex);
        }

        os_fast_mutex_count++;

        if (UNIV_LIKELY(os_sync_mutex_inited)) {
                os_mutex_exit(os_sync_mutex);
        }
}

/**********************************************************//**
Acquires ownership of a fast mutex. */
UNIV_INTERN
void
os_fast_mutex_lock_func(
/*====================*/
        fast_mutex_t*           fast_mutex)     /*!< in: mutex to acquire */
{
#ifdef __WIN__
        EnterCriticalSection((LPCRITICAL_SECTION) fast_mutex);
#else
        pthread_mutex_lock(fast_mutex);
#endif
}

/**********************************************************//**
Releases ownership of a fast mutex. */
UNIV_INTERN
void
os_fast_mutex_unlock_func(
/*======================*/
        fast_mutex_t*           fast_mutex)     /*!< in: mutex to release */
{
#ifdef __WIN__
        LeaveCriticalSection(fast_mutex);
#else
        pthread_mutex_unlock(fast_mutex);
#endif
}

/**********************************************************//**
Frees a mutex object.
*/ UNIV_INTERN void os_fast_mutex_free_func( /*====================*/ fast_mutex_t* fast_mutex) /*!< in: mutex to free */ { #ifdef __WIN__ ut_a(fast_mutex); DeleteCriticalSection((LPCRITICAL_SECTION) fast_mutex); #else int ret; ret = pthread_mutex_destroy(fast_mutex); if (UNIV_UNLIKELY(ret != 0)) { ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: error: return value %lu when calling\n" "InnoDB: pthread_mutex_destroy().\n", (ulint) ret); fprintf(stderr, "InnoDB: Byte contents of the pthread mutex at %p:\n", (void*) fast_mutex); ut_print_buf(stderr, fast_mutex, sizeof(os_fast_mutex_t)); putc('\n', stderr); } #endif if (UNIV_LIKELY(os_sync_mutex_inited)) { /* When freeing the last mutexes, we have already freed os_sync_mutex */ os_mutex_enter(os_sync_mutex); } ut_ad(os_fast_mutex_count > 0); os_fast_mutex_count--; if (UNIV_LIKELY(os_sync_mutex_inited)) { os_mutex_exit(os_sync_mutex); } }
oscar810429/mysql-5.6_facebook
storage/innobase/os/os0sync.cc
C++
gpl-2.0
25,773
cmd_drivers/media/common/tuners/tea5761.ko := /home/gjdlfg/kernel/prebuilt/linux-x86/toolchain/arm-eabi-4.4.0/bin/arm-eabi-ld -EL -r -T /home/gjdlfg/kernel/omap/scripts/module-common.lds --build-id -o drivers/media/common/tuners/tea5761.ko drivers/media/common/tuners/tea5761.o drivers/media/common/tuners/tea5761.mod.o
jdlfg/Mecha-kernel
drivers/media/common/tuners/.tea5761.ko.cmd
Batchfile
gpl-2.0
321
<?php
/**
 * @version   $Id: default.php 39490 2011-07-05 06:50:31Z btowles $
 * @author    RocketTheme http://www.rockettheme.com
 * @copyright Copyright (C) 2007 - 2013 RocketTheme, LLC
 * @license   http://www.gnu.org/licenses/gpl-2.0.html GNU/GPLv2 only
 *
 * Four-column grid layout for a RokGallery gallery view. Renders the
 * optional page heading, the gallery header, one row partial per image,
 * and the pagination partial. All state travels on $that (the view).
 */
?>
<?php if ($that->show_page_heading): ?>
	<h1><?php echo $that->page_heading; ?></h1>
<?php endif; ?>
<div class="rg-grid-view-container<?php echo $that->pageclass_sfx; ?>">
	<?php /* Shared gallery header partial, resolved via the template context */ ?>
	<?php echo RokCommon_Composite::get($that->context)->load('header.php', array('that' => $that));?>
	<div class="rg-grid-view rg-col4">
		<?php foreach ($that->images as $that->image):
			/* Each row partial reads $that->image / $that->slice;
			   item_number tracks the running position in the grid */
			$that->slice = $that->slices[$that->image->id];
			echo RokCommon_Composite::get($that->context)->load('default_row.php', array('that' => $that));
			$that->item_number++;
		endforeach; ?>
	</div>
</div>
<?php echo RokCommon_Composite::get($that->context)->load('pagination.php', array('that' => $that));?>
smetal/gvdd
components/com_rokgallery/templates/gallery/grid-4col/default.php
PHP
gpl-2.0
1,003
/* Touch_synaptics.c * * Copyright (C) 2012 LGE. * * Author: yehan.ahn@lge.com, hyesung.shin@lge.com * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/err.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/gpio.h> #include <linux/async.h> #include <linux/input/lge_touch_core.h> #include <linux/input/touch_synaptics.h> #if defined(CONFIG_MACH_APQ8064_GK_KR) || defined(CONFIG_MACH_APQ8064_GKATT) || defined(CONFIG_MACH_APQ8064_GKGLOBAL) #include "SynaImage_for_GK.h" #elif defined(CONFIG_MACH_APQ8064_GVDCM) #include "SynaImage_for_DCM.h" #elif defined(CONFIG_MACH_APQ8064_J1D) || defined(CONFIG_MACH_APQ8064_J1KD) #include "SynaImage_for_GJ.h" #else #include "SynaImage.h" #define G_ONLY #endif #include <linux/regulator/machine.h> /* */ #define RMI_DEVICE_CONTROL 0x01 #define TOUCHPAD_SENSORS 0x11 #define CAPACITIVE_BUTTON_SENSORS 0x1A #define GPIO_LEDS 0x30 #define LEDS 0x31 #define ANALOG_CONTROL 0x54 #define TIMER 0x32 #define FLASH_MEMORY_MANAGEMENT 0x34 #define AUXILIARY_ADC 0x36 /* */ /* */ #define MANUFACTURER_ID_REG (ts->common_fc.dsc.query_base) /* */ #define FW_REVISION_REG (ts->common_fc.dsc.query_base+3) /* */ #define PRODUCT_ID_REG (ts->common_fc.dsc.query_base+11) /* */ #define DEVICE_COMMAND_REG (ts->common_fc.dsc.command_base) #define DEVICE_CONTROL_REG (ts->common_fc.dsc.control_base) /* */ #define DEVICE_CONTROL_NORMAL_OP 0x00 /* */ #define DEVICE_CONTROL_SLEEP 0x01 /* */ #define DEVICE_CONTROL_SPECIFIC 0x02 /* */ #define DEVICE_CONTROL_NOSLEEP 0x04 #define 
DEVICE_CONTROL_CONFIGURED 0x80 #ifdef CUST_G_TOUCH #define DEVICE_CHARGER_CONNECTED 0x20 #endif #define INTERRUPT_ENABLE_REG (ts->common_fc.dsc.control_base+1) /* */ #define DEVICE_STATUS_REG (ts->common_fc.dsc.data_base) /* */ #define DEVICE_FAILURE_MASK 0x03 #define DEVICE_CRC_ERROR_MASK 0x04 #define DEVICE_STATUS_FLASH_PROG 0x40 #define DEVICE_STATUS_UNCONFIGURED 0x80 #define INTERRUPT_STATUS_REG (ts->common_fc.dsc.data_base+1) /* */ #ifdef CUST_G_TOUCH // #else #define INTERRUPT_MASK_FLASH 0x01 #define INTERRUPT_MASK_ABS0 0x04 #define INTERRUPT_MASK_BUTTON 0x10 #endif /* */ #define FINGER_COMMAND_REG (ts->finger_fc.dsc.command_base) #define FINGER_STATE_REG (ts->finger_fc.dsc.data_base) /* */ #define FINGER_DATA_REG_START (ts->finger_fc.dsc.data_base+3) /* */ #define FINGER_STATE_MASK 0x03 #define REG_X_POSITION 0 #define REG_Y_POSITION 1 #define REG_YX_POSITION 2 #define REG_WY_WX 3 #define REG_Z 4 #define TWO_D_EXTEND_STATUS (ts->finger_fc.dsc.data_base+53) #define TWO_D_REPORTING_MODE (ts->finger_fc.dsc.control_base+0) /* */ #ifdef CUST_G_TOUCH #define REPORT_BEYOND_CLIP 0x80 #endif #define REPORT_MODE_CONTINUOUS 0x00 #define REPORT_MODE_REDUCED 0x01 #define ABS_FILTER 0x08 #define PALM_DETECT_REG (ts->finger_fc.dsc.control_base+1) /* */ #define DELTA_X_THRESH_REG (ts->finger_fc.dsc.control_base+2) /* */ #define DELTA_Y_THRESH_REG (ts->finger_fc.dsc.control_base+3) /* */ #define SENSOR_MAX_X_POS (ts->finger_fc.dsc.control_base+6) /* */ #define SENSOR_MAX_Y_POS (ts->finger_fc.dsc.control_base+8) /* */ /* */ #define BUTTON_COMMAND_REG (ts->button_fc.dsc.command_base) #define BUTTON_DATA_REG (ts->button_fc.dsc.data_base) /* */ #define MAX_NUM_OF_BUTTON 4 /* */ #define ANALOG_COMMAND_REG (ts->analog_fc.dsc.command_base) #define FORCE_UPDATE 0x04 #define ANALOG_CONTROL_REG (ts->analog_fc.dsc.control_base) #define FORCE_FAST_RELAXATION 0x04 #define FAST_RELAXATION_RATE (ts->analog_fc.dsc.control_base+16) /* */ #define FLASH_CONFIG_ID_REG 
(ts->flash_fc.dsc.control_base) /* */ #define FLASH_CONTROL_REG (ts->flash_fc.dsc.data_base+18) #define FLASH_STATUS_MASK 0xF0 /* */ #define COMMON_PAGE (ts->common_fc.function_page) #define FINGER_PAGE (ts->finger_fc.function_page) #define BUTTON_PAGE (ts->button_fc.function_page) #define ANALOG_PAGE (ts->analog_fc.function_page) #define FLASH_PAGE (ts->flash_fc.function_page) #define DEFAULT_PAGE 0x00 #ifdef CUST_G_TOUCH #define SMALL_OBJECT_DETECTION_TUNNING_REG (ts->finger_fc.dsc.control_base+45) // #define SMALL_OBJECT_DETECTION 0x04 #endif /* */ #define TS_SNTS_GET_X_POSITION(_high_reg, _low_reg) \ ( ((u16)((_high_reg << 4) & 0x0FF0) | (u16)(_low_reg&0x0F))) #define TS_SNTS_GET_Y_POSITION(_high_reg, _low_reg) \ ( ((u16)((_high_reg << 4) & 0x0FF0) | (u16)((_low_reg >> 4) & 0x0F))) #define TS_SNTS_GET_WIDTH_MAJOR(_width) \ ((((_width & 0xF0) >> 4) - (_width & 0x0F)) > 0) ? (_width & 0xF0) >> 4 : _width & 0x0F #define TS_SNTS_GET_WIDTH_MINOR(_width) \ ((((_width & 0xF0) >> 4) - (_width & 0x0F)) > 0) ? _width & 0x0F : (_width & 0xF0) >> 4 #define TS_SNTS_GET_ORIENTATION(_width) \ ((((_width & 0xF0) >> 4) - (_width & 0x0F)) > 0) ? 
0 : 1 #define TS_SNTS_GET_PRESSURE(_pressure) \ _pressure /* */ #define GET_BIT_MASK(_finger_status_reg) \ (_finger_status_reg[2] & 0x04)<<7 | (_finger_status_reg[2] & 0x01)<<8 | \ (_finger_status_reg[1] & 0x40)<<1 | (_finger_status_reg[1] & 0x10)<<2 | \ (_finger_status_reg[1] & 0x04)<<3 | (_finger_status_reg[1] & 0x01)<<4 | \ (_finger_status_reg[0] & 0x40)>>3 | (_finger_status_reg[0] & 0x10)>>2 | \ (_finger_status_reg[0] & 0x04)>>1 | (_finger_status_reg[0] & 0x01) #define GET_INDEX_FROM_MASK(_index, _bit_mask, _max_finger) \ for(; !((_bit_mask>>_index)&0x01) && _index <= _max_finger; _index++); \ if (_index <= _max_finger) _bit_mask &= ~(_bit_mask & (1<<(_index))); #ifdef CUST_G_TOUCH u8 pressure_zero = 0; extern int ts_charger_plug; extern int ts_charger_type; extern int cur_hopping_idx; int cns_en = 0; u8 hopping = 0; #endif /* */ int synaptics_ts_page_data_read(struct i2c_client *client, u8 page, u8 reg, int size, u8 *data) { if (unlikely(touch_i2c_write_byte(client, PAGE_SELECT_REG, page) < 0)) { TOUCH_ERR_MSG("PAGE_SELECT_REG write fail\n"); return -EIO; } if (unlikely(touch_i2c_read(client, reg, size, data) < 0)) { TOUCH_ERR_MSG("[%dP:%d]register read fail\n", page, reg); return -EIO; } if (unlikely(touch_i2c_write_byte(client, PAGE_SELECT_REG, DEFAULT_PAGE) < 0)) { TOUCH_ERR_MSG("PAGE_SELECT_REG write fail\n"); return -EIO; } return 0; } int synaptics_ts_page_data_write(struct i2c_client *client, u8 page, u8 reg, int size, u8 *data) { if (unlikely(touch_i2c_write_byte(client, PAGE_SELECT_REG, page) < 0)) { TOUCH_ERR_MSG("PAGE_SELECT_REG write fail\n"); return -EIO; } if (unlikely(touch_i2c_write(client, reg, size, data) < 0)) { TOUCH_ERR_MSG("[%dP:%d]register read fail\n", page, reg); return -EIO; } if (unlikely(touch_i2c_write_byte(client, PAGE_SELECT_REG, DEFAULT_PAGE) < 0)) { TOUCH_ERR_MSG("PAGE_SELECT_REG write fail\n"); return -EIO; } return 0; } int synaptics_ts_page_data_write_byte(struct i2c_client *client, u8 page, u8 reg, u8 data) { if 
(unlikely(touch_i2c_write_byte(client, PAGE_SELECT_REG, page) < 0)) { TOUCH_ERR_MSG("PAGE_SELECT_REG write fail\n"); return -EIO; } if (unlikely(touch_i2c_write_byte(client, reg, data) < 0)) { TOUCH_ERR_MSG("[%dP:%d]register write fail\n", page, reg); return -EIO; } if (unlikely(touch_i2c_write_byte(client, PAGE_SELECT_REG, DEFAULT_PAGE) < 0)) { TOUCH_ERR_MSG("PAGE_SELECT_REG write fail\n"); return -EIO; } return 0; } int synaptics_ts_get_data(struct i2c_client *client, struct touch_data* data) { struct synaptics_ts_data* ts = (struct synaptics_ts_data*)get_touch_handle(client); u16 touch_finger_bit_mask=0; u8 finger_index=0; u8 index=0; u8 buf=0; u8 cnt; u8 buf2=0; u16 alpha = 0; u8 cns = 0; u16 im = 0; u16 vm = 0; u16 aim = 0; #if defined(CONFIG_MACH_APQ8064_GK_KR) || defined(CONFIG_MACH_APQ8064_GKATT) || defined(CONFIG_MACH_APQ8064_GKGLOBAL) int z_30_cnt = 0; /* */ #endif #ifdef G_ONLY hopping = 0; #endif data->total_num = 0; #ifdef CUST_G_TOUCH pressure_zero = 0; #endif if (unlikely(touch_debug_mask & DEBUG_TRACE)) TOUCH_DEBUG_MSG("\n"); if (unlikely(touch_i2c_read(client, DEVICE_STATUS_REG, sizeof(ts->ts_data.interrupt_status_reg), &ts->ts_data.device_status_reg) < 0)) { TOUCH_ERR_MSG("DEVICE_STATUS_REG read fail\n"); goto err_synaptics_getdata; } /* */ if ((ts->ts_data.device_status_reg & DEVICE_FAILURE_MASK)== DEVICE_FAILURE_MASK) { TOUCH_ERR_MSG("ESD damage occured. Reset Touch IC\n"); goto err_synaptics_device_damage; } /* */ if (((ts->ts_data.device_status_reg & DEVICE_STATUS_UNCONFIGURED) >> 7) == 1) { TOUCH_ERR_MSG("Touch IC resetted internally. 
Reconfigure register setting\n"); goto err_synaptics_device_damage; } if (unlikely(touch_i2c_read(client, INTERRUPT_STATUS_REG, sizeof(ts->ts_data.interrupt_status_reg), &ts->ts_data.interrupt_status_reg) < 0)) { TOUCH_ERR_MSG("INTERRUPT_STATUS_REG read fail\n"); goto err_synaptics_getdata; } if (unlikely(touch_debug_mask & DEBUG_GET_DATA)) TOUCH_INFO_MSG("Interrupt_status : 0x%x\n", ts->ts_data.interrupt_status_reg); #ifdef CUST_G_TOUCH // #else /* */ if (ts->ts_data.interrupt_status_reg == 0) { TOUCH_ERR_MSG("Interrupt_status reg is 0. Something is wrong in IC\n"); goto err_synaptics_device_damage; } #endif /* */ if (unlikely(ts->ts_data.interrupt_status_reg & ts->interrupt_mask.flash)){ TOUCH_ERR_MSG("Impossible Interrupt\n"); goto err_synaptics_device_damage; } #ifdef CUST_G_TOUCH if ( ts->ts_data.interrupt_status_reg == 0x08 || ts->ts_data.interrupt_status_reg == 0x00 ) { TOUCH_ERR_MSG("Ignore interrupt. interrupt status reg = 0x%x\n", ts->ts_data.interrupt_status_reg); goto ignore_interrupt; } #endif /* */ if (likely(ts->ts_data.interrupt_status_reg & ts->interrupt_mask.abs)) { if (unlikely(touch_i2c_read(client, FINGER_STATE_REG, sizeof(ts->ts_data.finger.finger_status_reg), ts->ts_data.finger.finger_status_reg) < 0)) { TOUCH_ERR_MSG("FINGER_STATE_REG read fail\n"); goto err_synaptics_getdata; } touch_finger_bit_mask = GET_BIT_MASK(ts->ts_data.finger.finger_status_reg); if (unlikely(touch_debug_mask & DEBUG_GET_DATA)) { TOUCH_INFO_MSG("Finger_status : 0x%x, 0x%x, 0x%x\n", ts->ts_data.finger.finger_status_reg[0], ts->ts_data.finger.finger_status_reg[1], ts->ts_data.finger.finger_status_reg[2]); TOUCH_INFO_MSG("Touch_bit_mask: 0x%x\n", touch_finger_bit_mask); } while(touch_finger_bit_mask) { GET_INDEX_FROM_MASK(finger_index, touch_finger_bit_mask, MAX_NUM_OF_FINGERS) if (unlikely(touch_i2c_read(ts->client, FINGER_DATA_REG_START + (NUM_OF_EACH_FINGER_DATA_REG * finger_index), NUM_OF_EACH_FINGER_DATA_REG, ts->ts_data.finger.finger_reg[finger_index]) < 0)) { 
TOUCH_ERR_MSG("FINGER_DATA_REG read fail\n"); goto err_synaptics_getdata; } data->curr_data[finger_index].id = finger_index; data->curr_data[finger_index].x_position = TS_SNTS_GET_X_POSITION(ts->ts_data.finger.finger_reg[finger_index][REG_X_POSITION], ts->ts_data.finger.finger_reg[finger_index][REG_YX_POSITION]); data->curr_data[finger_index].y_position = TS_SNTS_GET_Y_POSITION(ts->ts_data.finger.finger_reg[finger_index][REG_Y_POSITION], ts->ts_data.finger.finger_reg[finger_index][REG_YX_POSITION]); data->curr_data[finger_index].width_major = TS_SNTS_GET_WIDTH_MAJOR(ts->ts_data.finger.finger_reg[finger_index][REG_WY_WX]); data->curr_data[finger_index].width_minor = TS_SNTS_GET_WIDTH_MINOR(ts->ts_data.finger.finger_reg[finger_index][REG_WY_WX]); data->curr_data[finger_index].width_orientation = TS_SNTS_GET_ORIENTATION(ts->ts_data.finger.finger_reg[finger_index][REG_WY_WX]); data->curr_data[finger_index].pressure = TS_SNTS_GET_PRESSURE(ts->ts_data.finger.finger_reg[finger_index][REG_Z]); data->curr_data[finger_index].status = FINGER_PRESSED; #ifdef CUST_G_TOUCH if(ts->pdata->role->ghost_detection_enable) { if(data->curr_data[finger_index].pressure == 0) pressure_zero = 1; } #endif #if defined(CONFIG_MACH_APQ8064_GK_KR) || defined(CONFIG_MACH_APQ8064_GKATT) || defined(CONFIG_MACH_APQ8064_GKGLOBAL) if(data->curr_data[finger_index].pressure == 30) { z_30_cnt++; /* */ } #endif if (unlikely(touch_debug_mask & DEBUG_GET_DATA)) TOUCH_INFO_MSG("<%d> pos(%4d,%4d) w_m[%2d] w_n[%2d] w_o[%2d] p[%2d]\n", finger_index, data->curr_data[finger_index].x_position, data->curr_data[finger_index].y_position, data->curr_data[finger_index].width_major, data->curr_data[finger_index].width_minor, data->curr_data[finger_index].width_orientation, data->curr_data[finger_index].pressure); index++; } data->total_num = index; #if defined(CONFIG_MACH_APQ8064_GK_KR) || defined(CONFIG_MACH_APQ8064_GKATT) || defined(CONFIG_MACH_APQ8064_GKGLOBAL) z_30_num = z_30_cnt; /* */ #endif if 
(unlikely(touch_debug_mask & DEBUG_GET_DATA)) TOUCH_INFO_MSG("Total_num: %d\n", data->total_num); } /* */ if (unlikely(ts->button_fc.dsc.id != 0)) { if (likely(ts->ts_data.interrupt_status_reg & ts->interrupt_mask.button)) { if (unlikely(synaptics_ts_page_data_read(client, BUTTON_PAGE, BUTTON_DATA_REG, sizeof(ts->ts_data.button_data_reg), &ts->ts_data.button_data_reg) < 0)) { TOUCH_ERR_MSG("BUTTON_DATA_REG read fail\n"); goto err_synaptics_getdata; } if (unlikely(touch_debug_mask & DEBUG_BUTTON)) TOUCH_DEBUG_MSG("Button register: 0x%x\n", ts->ts_data.button_data_reg); if (ts->ts_data.button_data_reg) { /* */ for (cnt = 0; cnt < ts->pdata->caps->number_of_button; cnt++) { #ifdef CUST_G_TOUCH if(ts->ic_panel_type == G_IC3203_G2) { if ((ts->ts_data.button_data_reg >> (cnt << 1)) & 0x3) { ts->ts_data.button.key_code = ts->pdata->caps->button_name[cnt]; data->curr_button.key_code = ts->ts_data.button.key_code; data->curr_button.state = 1; break; } } else { if ((ts->ts_data.button_data_reg >> cnt) & 0x1) { ts->ts_data.button.key_code = ts->pdata->caps->button_name[cnt]; data->curr_button.key_code = ts->ts_data.button.key_code; data->curr_button.state = 1; break; } } #endif } }else { /* */ data->curr_button.key_code = ts->ts_data.button.key_code; data->curr_button.state = 0; } } } /* */ if (unlikely(touch_i2c_read(client, TWO_D_EXTEND_STATUS, 1, &buf) < 0)){ TOUCH_ERR_MSG("TWO_D_EXTEND_STATUS read fail\n"); goto err_synaptics_getdata; } data->palm = buf & 0x2; if( (ts_charger_plug == 1 && (data->prev_total_num != data->total_num)) || (touch_debug_mask & DEBUG_NOISE) ) { if (unlikely(synaptics_ts_page_data_read(client, ANALOG_PAGE, 0x0e, 1, &buf) < 0)) { TOUCH_ERR_MSG("Alpha REG read fail\n"); goto err_synaptics_getdata; } if (unlikely(synaptics_ts_page_data_read(client, ANALOG_PAGE, 0x0f, 1, &buf2) < 0)) { TOUCH_ERR_MSG("Alpha REG read fail\n"); goto err_synaptics_getdata; } alpha = (buf2<<8)|buf; if (unlikely(synaptics_ts_page_data_read(client, ANALOG_PAGE, 0x0D, 1, 
&cns) < 0)) { TOUCH_ERR_MSG("Current Noise State REG read fail\n"); goto err_synaptics_getdata; } if(ts_charger_plug && cns >= 1) { cns_en = 1; #ifdef G_ONLY if(cur_hopping_idx != 4){ buf = 0x84; synaptics_ts_page_data_write(client, 0x01, 0x04, 1, &buf); cur_hopping_idx = 4; hopping = 1; TOUCH_INFO_MSG("cur_hopping_idx [ %s ] = %x %x \n", __func__, buf, hopping); } else { hopping = 0; } #endif } if (unlikely(synaptics_ts_page_data_read(client, ANALOG_PAGE, 0x05, 1, &buf) < 0)) { TOUCH_ERR_MSG("Interference Metric REG read fail\n"); goto err_synaptics_getdata; } if (unlikely(synaptics_ts_page_data_read(client, ANALOG_PAGE, 0x06, 1, &buf2) < 0)) { TOUCH_ERR_MSG("Interference Metric REG read fail\n"); goto err_synaptics_getdata; } im = (buf2<<8)|buf; if (unlikely(synaptics_ts_page_data_read(client, ANALOG_PAGE, 0x09, 1, &buf) < 0)) { TOUCH_ERR_MSG("Variance Metric REG read fail\n"); goto err_synaptics_getdata; } if (unlikely(synaptics_ts_page_data_read(client, ANALOG_PAGE, 0x0a, 1, &buf2) < 0)) { TOUCH_ERR_MSG("Variance Metric REG read fail\n"); goto err_synaptics_getdata; } vm = (buf2<<8)|buf; if (unlikely(synaptics_ts_page_data_read(client, ANALOG_PAGE, 0x0b, 1, &buf) < 0)) { TOUCH_ERR_MSG("Averaged IM REG read fail\n"); goto err_synaptics_getdata; } if (unlikely(synaptics_ts_page_data_read(client, ANALOG_PAGE, 0x0c, 1, &buf2) < 0)) { TOUCH_ERR_MSG("Averaged IM REG read fail\n"); goto err_synaptics_getdata; } aim = (buf2<<8)|buf; TOUCH_INFO_MSG(" A[%5d] CNS[%d] IM[%5d] VM[%5d] AIM[%5d]\n", alpha, cns, im, vm, aim); } return 0; err_synaptics_device_damage: err_synaptics_getdata: return -EIO; #ifdef CUST_G_TOUCH ignore_interrupt: return -IGNORE_INTERRUPT; #endif } static int read_page_description_table(struct i2c_client* client) { struct synaptics_ts_data* ts = (struct synaptics_ts_data*)get_touch_handle(client); struct function_descriptor buffer; unsigned short u_address = 0; unsigned short page_num = 0; if (touch_debug_mask & DEBUG_TRACE) TOUCH_DEBUG_MSG("\n"); 
memset(&buffer, 0x0, sizeof(struct function_descriptor)); memset(&ts->common_fc, 0x0, sizeof(struct ts_ic_function)); memset(&ts->finger_fc, 0x0, sizeof(struct ts_ic_function)); memset(&ts->button_fc, 0x0, sizeof(struct ts_ic_function)); memset(&ts->analog_fc, 0x0, sizeof(struct ts_ic_function)); memset(&ts->flash_fc, 0x0, sizeof(struct ts_ic_function)); for(page_num = 0; page_num < PAGE_MAX_NUM; page_num++) { if (unlikely(touch_i2c_write_byte(client, PAGE_SELECT_REG, page_num) < 0)) { TOUCH_ERR_MSG("PAGE_SELECT_REG write fail\n"); return -EIO; } for(u_address = DESCRIPTION_TABLE_START; u_address > 10; u_address -= sizeof(struct function_descriptor)) { if (unlikely(touch_i2c_read(client, u_address, sizeof(buffer), (unsigned char *)&buffer) < 0)) { TOUCH_ERR_MSG("RMI4 Function Descriptor read fail\n"); return -EIO; } if (buffer.id == 0) break; switch (buffer.id) { case RMI_DEVICE_CONTROL: ts->common_fc.dsc = buffer; ts->common_fc.function_page = page_num; break; case TOUCHPAD_SENSORS: ts->finger_fc.dsc = buffer; ts->finger_fc.function_page = page_num; break; case CAPACITIVE_BUTTON_SENSORS: ts->button_fc.dsc = buffer; ts->button_fc.function_page = page_num; break; case ANALOG_CONTROL: ts->analog_fc.dsc = buffer; ts->analog_fc.function_page = page_num; break; case FLASH_MEMORY_MANAGEMENT: ts->flash_fc.dsc = buffer; ts->flash_fc.function_page = page_num; default: break; } } } if (unlikely(touch_i2c_write_byte(client, PAGE_SELECT_REG, 0x00) < 0)) { TOUCH_ERR_MSG("PAGE_SELECT_REG write fail\n"); return -EIO; } /* */ ts->interrupt_mask.flash = 0x1; ts->interrupt_mask.status = 0x2; #ifdef CUST_G_TOUCH ts->interrupt_mask.abs = 0x4; ts->interrupt_mask.button = 0x20; #endif if(ts->common_fc.dsc.id == 0 || ts->finger_fc.dsc.id == 0 || ts->analog_fc.dsc.id == 0 || ts->flash_fc.dsc.id == 0){ TOUCH_ERR_MSG("common/finger/analog/flash are not initiailized\n"); return -EPERM; } if (touch_debug_mask & DEBUG_BASE_INFO) TOUCH_INFO_MSG("common[%dP:0x%02x] finger[%dP:0x%02x] 
button[%dP:0x%02x] analog[%dP:0x%02x] flash[%dP:0x%02x]\n", ts->common_fc.function_page, ts->common_fc.dsc.id, ts->finger_fc.function_page, ts->finger_fc.dsc.id, ts->button_fc.function_page, ts->button_fc.dsc.id, ts->analog_fc.function_page, ts->analog_fc.dsc.id, ts->flash_fc.function_page, ts->flash_fc.dsc.id); return 0; } int get_ic_info(struct synaptics_ts_data* ts, struct touch_fw_info* fw_info) { #if defined(ARRAYED_TOUCH_FW_BIN) int cnt; #endif u8 device_status = 0; u8 flash_control = 0; read_page_description_table(ts->client); memset(&ts->fw_info, 0, sizeof(struct synaptics_ts_fw_info)); if (unlikely(touch_i2c_read(ts->client, FW_REVISION_REG, sizeof(ts->fw_info.fw_rev), &ts->fw_info.fw_rev) < 0)) { TOUCH_ERR_MSG("FW_REVISION_REG read fail\n"); return -EIO; } if (unlikely(touch_i2c_read(ts->client, MANUFACTURER_ID_REG, sizeof(ts->fw_info.manufacturer_id), &ts->fw_info.manufacturer_id) < 0)) { TOUCH_ERR_MSG("MANUFACTURER_ID_REG read fail\n"); return -EIO; } /* */ if (unlikely(touch_i2c_read(ts->client, PRODUCT_ID_REG, sizeof(ts->fw_info.product_id) - 1, ts->fw_info.product_id) < 0)) { TOUCH_ERR_MSG("PRODUCT_ID_REG read fail\n"); return -EIO; } if (unlikely(touch_i2c_read(ts->client, FLASH_CONFIG_ID_REG, sizeof(ts->fw_info.config_id) - 1, ts->fw_info.config_id) < 0)) { TOUCH_ERR_MSG("FLASH_CONFIG_ID_REG read fail\n"); return -EIO; } snprintf(fw_info->ic_fw_identifier, sizeof(fw_info->ic_fw_identifier), "%s - %d", ts->fw_info.product_id, ts->fw_info.manufacturer_id); snprintf(fw_info->ic_fw_version, sizeof(fw_info->ic_fw_version), "%s", ts->fw_info.config_id); #ifdef CUST_G_TOUCH if(!strncmp(ts->fw_info.product_id, "DS4 R3.0", 8)) { // if(!strncmp(fw_info->ic_fw_version, "0000", 4) || !strncmp(fw_info->ic_fw_version, "S001", 4)) { ts->ic_panel_type = G_IC7020_GFF; TOUCH_INFO_MSG("IC is 7020, panel is GFF."); } else { if( fw_info->ic_fw_version[0] == 'E' && (int)simple_strtol(&fw_info->ic_fw_version[1], NULL, 10) < 14) { ts->ic_panel_type = G_IC7020_G2; 
TOUCH_INFO_MSG("IC is 7020, panel is G2."); } else if( (fw_info->ic_fw_version[0] == 'E' && (int)simple_strtol(&fw_info->ic_fw_version[1], NULL, 10) >= 14 && (int)simple_strtol(&fw_info->ic_fw_version[1], NULL, 10) < 27) || fw_info->ic_fw_version[0] == 'T') { ts->ic_panel_type = G_IC3203_G2; TOUCH_INFO_MSG("IC is 3203, panel is G2."); } else { ts->ic_panel_type = UNKNOWN; TOUCH_INFO_MSG("UNKNOWN OLD PANEL"); } } } else if(!strncmp(ts->fw_info.product_id, "TM2000", 6)) { // ts->ic_panel_type = G_IC7020_G2_LGIT; TOUCH_INFO_MSG("IC is 7020, H pattern, panel is G2. LGIT"); if((fw_info->ic_fw_version[0] == 'E') && ((int)simple_strtol(&fw_info->ic_fw_version[1], NULL, 10) >= 40)) { ts->interrupt_mask.button = 0x10; } } else if(!strncmp(ts->fw_info.product_id, "TM2369", 6)) { // ts->ic_panel_type = G_IC7020_G2_TPK; TOUCH_INFO_MSG("IC is 7020, H pattern, panel is G2. TPK"); ts->interrupt_mask.button = 0x10; } else if(!strncmp(ts->fw_info.product_id, "TM2372", 6)) { // ts->ic_panel_type = GJ_IC7020_GFF_H_PTN; TOUCH_INFO_MSG("IC is 7020, H pattern, panel is GFF."); if((fw_info->ic_fw_version[0] == 'E') && ((int)simple_strtol(&fw_info->ic_fw_version[1], NULL, 10) >= 2)) { ts->interrupt_mask.button = 0x10; } } else if(!strncmp(ts->fw_info.product_id, "PLG124", 6)) { // ts->ic_panel_type = GK_IC7020_G1F; TOUCH_INFO_MSG("IC is 7020, H pattern, panel is G1F."); ts->interrupt_mask.button = 0x10; } else if(!strncmp(ts->fw_info.product_id, "PLG192", 6)) { // ts->ic_panel_type = GK_IC7020_GFF_SUNTEL; TOUCH_INFO_MSG("IC is 7020, H pattern, panel is GFF. SUNTEL"); ts->interrupt_mask.button = 0x10; } else if(!strncmp(ts->fw_info.product_id, "PLG193", 6)) { // ts->ic_panel_type = GK_IC7020_GFF_LGIT; TOUCH_INFO_MSG("IC is 7020, H pattern, panel is GFF. LGIT"); ts->interrupt_mask.button = 0x10; } else if(!strncmp(ts->fw_info.product_id, "PLG207", 6)) { // ts->ic_panel_type = GK_IC7020_GFF_LGIT_HYBRID; TOUCH_INFO_MSG("IC is 7020, H pattern, panel is GFF. 
LGIT"); ts->interrupt_mask.button = 0x10; } else if(!strncmp(ts->fw_info.product_id, "PLG121", 6)) { // ts->ic_panel_type = GV_IC7020_G2_H_PTN_LGIT; TOUCH_INFO_MSG("IC is 7020, H pattern, panel is G2. LGIT"); ts->interrupt_mask.button = 0x10; } else if(!strncmp(ts->fw_info.product_id, "PLG184", 6)) { // ts->ic_panel_type = GV_IC7020_G2_H_PTN_TPK; TOUCH_INFO_MSG("IC is 7020, H pattern, panel is G2. TPK"); ts->interrupt_mask.button = 0x10; } else if(!strncmp(ts->fw_info.product_id, "S7020", 5)) { // TOUCH_INFO_MSG("UNKNOWN PANEL, Product id is S7020."); ts->interrupt_mask.button = 0x10; } else { TOUCH_INFO_MSG("UNKNOWN PANEL"); } #endif #if defined(ARRAYED_TOUCH_FW_BIN) for (cnt = 0; cnt < sizeof(SynaFirmware)/sizeof(SynaFirmware[0]); cnt++) { strncpy(ts->fw_info.fw_image_product_id, &SynaFirmware[cnt][16], 10); if (!(strncmp(ts->fw_info.product_id , ts->fw_info.fw_image_product_id, 10))) break; } strncpy(ts->fw_info.image_config_id, &SynaFirmware[cnt][0xb100],4); ts->fw_info.fw_start = (unsigned char *)&SynaFirmware[cnt][0]; ts->fw_info.fw_size = sizeof(SynaFirmware[0]); #else #ifdef G_ONLY switch(ts->ic_panel_type){ case G_IC7020_GFF: case G_IC7020_G2: case G_IC3203_G2: case G_IC7020_G2_LGIT: memcpy(&SynaFirmware[0], &SynaFirmware_TM2000[0], sizeof(SynaFirmware)); break; case G_IC7020_G2_TPK: memcpy(&SynaFirmware[0], &SynaFirmware_TM2369[0], sizeof(SynaFirmware)); break; default: TOUCH_ERR_MSG("UNKNOWN PANEL. 
SynaImage set error"); break; } #endif #if defined(CONFIG_MACH_APQ8064_GK_KR) || defined(CONFIG_MACH_APQ8064_GKATT) || defined(CONFIG_MACH_APQ8064_GKGLOBAL) switch(ts->ic_panel_type){ case GK_IC7020_G1F: memcpy(&SynaFirmware[0], &SynaFirmware_PLG124[0], sizeof(SynaFirmware)); break; case GK_IC7020_GFF_SUNTEL: memcpy(&SynaFirmware[0], &SynaFirmware_PLG192[0], sizeof(SynaFirmware)); break; case GK_IC7020_GFF_LGIT: memcpy(&SynaFirmware[0], &SynaFirmware_PLG193[0], sizeof(SynaFirmware)); break; case GK_IC7020_GFF_LGIT_HYBRID: memcpy(&SynaFirmware[0], &SynaFirmware_PLG207[0], sizeof(SynaFirmware)); break; default: TOUCH_ERR_MSG("UNKNOWN PANEL(GK). SynaImage set error"); break; } #elif defined(CONFIG_MACH_APQ8064_GVDCM) switch(ts->ic_panel_type){ case GV_IC7020_G2_H_PTN_LGIT: memcpy(&SynaFirmware[0], &SynaFirmware_PLG121[0], sizeof(SynaFirmware)); break; case GV_IC7020_G2_H_PTN_TPK: memcpy(&SynaFirmware[0], &SynaFirmware_PLG184[0], sizeof(SynaFirmware)); break; default: TOUCH_ERR_MSG("UNKNOWN PANEL(GV). 
SynaImage set error"); break; } #endif strncpy(ts->fw_info.fw_image_product_id, &SynaFirmware[16], 10); strncpy(ts->fw_info.image_config_id, &SynaFirmware[0xb100],4); #ifdef CUST_G_TOUCH strncpy(fw_info->syna_img_fw_version, &SynaFirmware[0xb100], 4); strncpy(fw_info->syna_img_fw_product_id, &SynaFirmware[0x0040], 6); strncpy(ts->fw_info.syna_img_product_id, &SynaFirmware[0x0040], 6); strncpy(ts->fw_info.syna_img_fw_ver, &SynaFirmware[0xb100],4); #endif ts->fw_info.fw_start = (unsigned char *)&SynaFirmware[0]; ts->fw_info.fw_size = sizeof(SynaFirmware); #endif ts->fw_info.fw_image_rev = ts->fw_info.fw_start[31]; if (unlikely(touch_i2c_read(ts->client, FLASH_CONTROL_REG, sizeof(flash_control), &flash_control) < 0)) { TOUCH_ERR_MSG("FLASH_CONTROL_REG read fail\n"); return -EIO; } if (unlikely(touch_i2c_read(ts->client, DEVICE_STATUS_REG, sizeof(device_status), &device_status) < 0)) { TOUCH_ERR_MSG("DEVICE_STATUS_REG read fail\n"); return -EIO; } /* */ if(device_status & DEVICE_STATUS_FLASH_PROG || (device_status & DEVICE_CRC_ERROR_MASK) != 0 || (flash_control & FLASH_STATUS_MASK) != 0) { TOUCH_ERR_MSG("Firmware has a unknown-problem, so it needs firmware-upgrade.\n"); TOUCH_ERR_MSG("FLASH_CONTROL[%x] DEVICE_STATUS_REG[%x]\n", (u32)flash_control, (u32)device_status); TOUCH_ERR_MSG("FW-upgrade Force Rework.\n"); /* */ ts->fw_info.fw_rev = 0; snprintf(ts->fw_info.config_id, sizeof(ts->fw_info.config_id), "ERR"); #ifdef CUST_G_TOUCH fw_info->fw_upgrade.fw_force_rework = true; #endif } return 0; } int synaptics_ts_init(struct i2c_client* client, struct touch_fw_info* fw_info) { struct synaptics_ts_data* ts = (struct synaptics_ts_data*)get_touch_handle(client); u8 buf = 0; if (touch_debug_mask & DEBUG_TRACE) TOUCH_DEBUG_MSG("\n"); if (!ts->is_probed) if (unlikely(get_ic_info(ts, fw_info) < 0)) return -EIO; #ifdef CUST_G_TOUCH if(ts_charger_plug==0){ if (unlikely(touch_i2c_write_byte(client, DEVICE_CONTROL_REG, DEVICE_CONTROL_NOSLEEP | DEVICE_CONTROL_CONFIGURED) < 0)) { 
TOUCH_ERR_MSG("DEVICE_CONTROL_REG write fail\n"); return -EIO; } } else if(ts_charger_plug==1){ if (unlikely(touch_i2c_write_byte(client, DEVICE_CONTROL_REG, DEVICE_CONTROL_NOSLEEP | DEVICE_CONTROL_CONFIGURED | DEVICE_CHARGER_CONNECTED) < 0)) { TOUCH_ERR_MSG("DEVICE_CONTROL_REG write fail\n"); return -EIO; } #ifdef G_ONLY if (unlikely(synaptics_ts_page_data_read(client, 0x01, 0x04, 1, &buf) < 0)) { TOUCH_ERR_MSG("Current Hopping Index read fail\n"); return -EIO; } if(buf == 3) cur_hopping_idx = 3; else cur_hopping_idx = 4; TOUCH_INFO_MSG("cur_hopping_idx [ %s ] = %x\n", __func__, buf); switch(ts_charger_type) { case 0: case 1: if(cns_en && cur_hopping_idx != 4){ buf = 0x84; synaptics_ts_page_data_write(client, 0x01, 0x04, 1, &buf); cur_hopping_idx = 4; TOUCH_INFO_MSG("cur_hopping_idx [ %s ] = %x\n", __func__, buf); } break; default: break; } #endif } if (unlikely(touch_i2c_read(client, DEVICE_CONTROL_REG, 1, &buf) < 0)) { TOUCH_ERR_MSG("DEVICE_CONTROL_REG read fail\n"); return -EIO; } TOUCH_INFO_MSG("DEVICE CONTROL_REG = %x\n", buf); #else if (unlikely(touch_i2c_write_byte(client, DEVICE_CONTROL_REG, DEVICE_CONTROL_NOSLEEP | DEVICE_CONTROL_CONFIGURED) < 0)) { TOUCH_ERR_MSG("DEVICE_CONTROL_REG write fail\n"); return -EIO; } #endif if (unlikely(touch_i2c_read(client, INTERRUPT_ENABLE_REG, 1, &buf) < 0)) { TOUCH_ERR_MSG("INTERRUPT_ENABLE_REG read fail\n"); return -EIO; } if (unlikely(touch_i2c_write_byte(client, INTERRUPT_ENABLE_REG, buf | ts->interrupt_mask.abs | ts->interrupt_mask.button) < 0)) { TOUCH_ERR_MSG("INTERRUPT_ENABLE_REG write fail\n"); return -EIO; } if(ts->pdata->role->report_mode == CONTINUOUS_REPORT_MODE) { #ifdef CUST_G_TOUCH if (unlikely(touch_i2c_write_byte(client, TWO_D_REPORTING_MODE, REPORT_BEYOND_CLIP | ABS_FILTER | REPORT_MODE_CONTINUOUS) < 0)) { TOUCH_ERR_MSG("TWO_D_REPORTING_MODE write fail\n"); return -EIO; } #else if (unlikely(touch_i2c_write_byte(client, TWO_D_REPORTING_MODE, REPORT_MODE_CONTINUOUS) < 0)) { 
TOUCH_ERR_MSG("TWO_D_REPORTING_MODE write fail\n"); return -EIO; } #endif } else { /* */ #ifdef CUST_G_TOUCH if (unlikely(touch_i2c_write_byte(client, TWO_D_REPORTING_MODE, REPORT_BEYOND_CLIP | ABS_FILTER | REPORT_MODE_REDUCED) < 0)) { TOUCH_ERR_MSG("TWO_D_REPORTING_MODE write fail\n"); return -EIO; } #else if (unlikely(touch_i2c_write_byte(client, TWO_D_REPORTING_MODE, REPORT_MODE_REDUCED) < 0)) { TOUCH_ERR_MSG("TWO_D_REPORTING_MODE write fail\n"); return -EIO; } #endif if (unlikely(touch_i2c_write_byte(client, DELTA_X_THRESH_REG, ts->pdata->role->delta_pos_threshold) < 0)) { TOUCH_ERR_MSG("DELTA_X_THRESH_REG write fail\n"); return -EIO; } if (unlikely(touch_i2c_write_byte(client, DELTA_Y_THRESH_REG, ts->pdata->role->delta_pos_threshold) < 0)) { TOUCH_ERR_MSG("DELTA_Y_THRESH_REG write fail\n"); return -EIO; } } #ifdef CUST_G_TOUCH if (unlikely(touch_i2c_read(client, SMALL_OBJECT_DETECTION_TUNNING_REG, 1, &buf) < 0)) { TOUCH_ERR_MSG("SMALL_OBJECT_DETECTION_TUNNING_REG read fail\n"); return -EIO; } if(buf & SMALL_OBJECT_DETECTION) { TOUCH_INFO_MSG("Stylus Pen is Enabled\n"); ts->pdata->role->pen_enable = 1; } #endif if (unlikely(touch_i2c_read(client, INTERRUPT_STATUS_REG, 1, &buf) < 0)) { TOUCH_ERR_MSG("INTERRUPT_STATUS_REG read fail\n"); return -EIO; // } if (unlikely(touch_i2c_read(client, FINGER_STATE_REG, sizeof(ts->ts_data.finger.finger_status_reg), ts->ts_data.finger.finger_status_reg) < 0)) { TOUCH_ERR_MSG("FINGER_STATE_REG read fail\n"); return -EIO; // } ts->is_probed = 1; return 0; } int synaptics_ts_power(struct i2c_client* client, int power_ctrl) { struct synaptics_ts_data* ts = (struct synaptics_ts_data*)get_touch_handle(client); if (touch_debug_mask & DEBUG_TRACE) TOUCH_DEBUG_MSG("\n"); switch (power_ctrl) { case POWER_OFF: if (ts->pdata->pwr->use_regulator) { regulator_disable(ts->regulator_vio); regulator_disable(ts->regulator_vdd); } else ts->pdata->pwr->power(0); #ifdef CUST_G_TOUCH if (ts->pdata->reset_pin > 0) { 
gpio_set_value(ts->pdata->reset_pin, 0); } #endif break; case POWER_ON: #ifdef CUST_G_TOUCH if (ts->pdata->reset_pin > 0) { gpio_set_value(ts->pdata->reset_pin, 1); } #endif if (ts->pdata->pwr->use_regulator) { regulator_enable(ts->regulator_vdd); regulator_enable(ts->regulator_vio); } else ts->pdata->pwr->power(1); #ifdef CUST_G_TOUCH if (ts->pdata->reset_pin > 0) { gpio_set_value(ts->pdata->reset_pin, 0); msleep(ts->pdata->role->reset_delay); gpio_set_value(ts->pdata->reset_pin, 1); } #else /* */ if (ts->pdata->reset_pin > 0) { msleep(10); gpio_set_value(ts->pdata->reset_pin, 0); msleep(ts->pdata->role->reset_delay); gpio_set_value(ts->pdata->reset_pin, 1); } #endif break; case POWER_SLEEP: if (unlikely(touch_i2c_write_byte(client, DEVICE_CONTROL_REG, DEVICE_CONTROL_SLEEP | DEVICE_CONTROL_CONFIGURED) < 0)) { TOUCH_ERR_MSG("DEVICE_CONTROL_REG write fail\n"); return -EIO; } break; case POWER_WAKE: if (unlikely(touch_i2c_write_byte(client, DEVICE_CONTROL_REG, DEVICE_CONTROL_NORMAL_OP | DEVICE_CONTROL_CONFIGURED) < 0)) { TOUCH_ERR_MSG("DEVICE_CONTROL_REG write fail\n"); return -EIO; } break; default: return -EIO; break; } return 0; } int synaptics_ts_probe(struct i2c_client* client) { struct synaptics_ts_data* ts; int ret = 0; if (touch_debug_mask & DEBUG_TRACE) TOUCH_DEBUG_MSG("\n"); ts = kzalloc(sizeof(struct synaptics_ts_data), GFP_KERNEL); if (!ts) { TOUCH_ERR_MSG("Can not allocate memory\n"); ret = -ENOMEM; goto err_alloc_data_failed; } set_touch_handle(client, ts); ts->client = client; ts->pdata = client->dev.platform_data; if (ts->pdata->pwr->use_regulator) { ts->regulator_vdd = regulator_get_exclusive(NULL, ts->pdata->pwr->vdd); if (IS_ERR(ts->regulator_vdd)) { TOUCH_ERR_MSG("FAIL: regulator_get_vdd - %s\n", ts->pdata->pwr->vdd); ret = -EPERM; goto err_get_vdd_failed; } ts->regulator_vio = regulator_get_exclusive(NULL, ts->pdata->pwr->vio); if (IS_ERR(ts->regulator_vio)) { TOUCH_ERR_MSG("FAIL: regulator_get_vio - %s\n", ts->pdata->pwr->vio); ret = -EPERM; 
goto err_get_vio_failed; } if (ts->pdata->pwr->vdd_voltage > 0) { ret = regulator_set_voltage(ts->regulator_vdd, ts->pdata->pwr->vdd_voltage, ts->pdata->pwr->vdd_voltage); if (ret < 0) TOUCH_ERR_MSG("FAIL: VDD voltage setting - (%duV)\n", ts->pdata->pwr->vdd_voltage); } if (ts->pdata->pwr->vio_voltage > 0) { ret = regulator_set_voltage(ts->regulator_vio, ts->pdata->pwr->vio_voltage, ts->pdata->pwr->vio_voltage); if (ret < 0) TOUCH_ERR_MSG("FAIL: VIO voltage setting - (%duV)\n",ts->pdata->pwr->vio_voltage); } } return ret; err_get_vio_failed: if (ts->pdata->pwr->use_regulator) { regulator_put(ts->regulator_vdd); } err_get_vdd_failed: err_alloc_data_failed: kfree(ts); return ret; } #ifdef CUST_G_TOUCH int synaptics_ts_resolution(struct i2c_client* client) { struct synaptics_ts_data* ts = (struct synaptics_ts_data*)get_touch_handle(client); u8 resolution[2] = {0}; if(ts->pdata->role->key_type == TOUCH_HARD_KEY) { if (unlikely(touch_i2c_read(ts->client, SENSOR_MAX_X_POS, sizeof(resolution), resolution) < 0)) { TOUCH_ERR_MSG("SENSOR_MAX_X read fail\n"); return -EIO; // } TOUCH_INFO_MSG("SENSOR_MAX_X=%d", (int)(resolution[1] << 8 | resolution[0])); ts->pdata->caps->x_max = (int)(resolution[1] << 8 | resolution[0]); if (unlikely(touch_i2c_read(ts->client, SENSOR_MAX_Y_POS, sizeof(resolution), resolution) < 0)) { TOUCH_ERR_MSG("SENSOR_MAX_Y read fail\n"); return -EIO; // } TOUCH_INFO_MSG("SENSOR_MAX_Y=%d", (int)(resolution[1] << 8 | resolution[0])); ts->pdata->caps->y_max = (int)(resolution[1] << 8 | resolution[0]); } return 0; } #endif void synaptics_ts_remove(struct i2c_client* client) { struct synaptics_ts_data* ts = (struct synaptics_ts_data*)get_touch_handle(client); if (touch_debug_mask & DEBUG_TRACE) TOUCH_DEBUG_MSG("\n"); if (ts->pdata->pwr->use_regulator) { regulator_put(ts->regulator_vio); regulator_put(ts->regulator_vdd); } kfree(ts); } int synaptics_ts_fw_upgrade(struct i2c_client* client, struct touch_fw_info* fw_info) { struct synaptics_ts_data* ts = (struct 
synaptics_ts_data*)get_touch_handle(client); int ret = 0; ts->is_probed = 0; ret = FirmwareUpgrade(ts, fw_info->fw_upgrade.fw_path); /* */ if (ret >= 0) get_ic_info(ts, fw_info); return ret; } #ifdef CUST_G_TOUCH int synaptics_ts_ic_ctrl(struct i2c_client *client, u8 code, u32 value) #else int synaptics_ts_ic_ctrl(struct i2c_client *client, u8 code, u16 value) #endif { struct synaptics_ts_data* ts = (struct synaptics_ts_data*)get_touch_handle(client); u8 buf = 0; switch (code) { case IC_CTRL_BASELINE: switch (value) { case BASELINE_OPEN: #ifdef CUST_G_TOUCH break; #endif if (unlikely(synaptics_ts_page_data_write_byte(client, ANALOG_PAGE, ANALOG_CONTROL_REG, FORCE_FAST_RELAXATION) < 0)) { TOUCH_ERR_MSG("ANALOG_CONTROL_REG write fail\n"); return -EIO; } msleep(10); if (unlikely(synaptics_ts_page_data_write_byte(client, ANALOG_PAGE, ANALOG_COMMAND_REG, FORCE_UPDATE) < 0)) { TOUCH_ERR_MSG("ANALOG_COMMAND_REG write fail\n"); return -EIO; } if (unlikely(touch_debug_mask & DEBUG_GHOST)) TOUCH_INFO_MSG("BASELINE_OPEN\n"); break; case BASELINE_FIX: #ifdef CUST_G_TOUCH break; #endif if (unlikely(synaptics_ts_page_data_write_byte(client, ANALOG_PAGE, ANALOG_CONTROL_REG, 0x00) < 0)) { TOUCH_ERR_MSG("ANALOG_CONTROL_REG write fail\n"); return -EIO; } msleep(10); if (unlikely(synaptics_ts_page_data_write_byte(client, ANALOG_PAGE, ANALOG_COMMAND_REG, FORCE_UPDATE) < 0)) { TOUCH_ERR_MSG("ANALOG_COMMAND_REG write fail\n"); return -EIO; } if (unlikely(touch_debug_mask & DEBUG_GHOST)) TOUCH_INFO_MSG("BASELINE_FIX\n"); break; case BASELINE_REBASE: /* */ if (likely(ts->finger_fc.dsc.id != 0)) { if (unlikely(touch_i2c_write_byte(client, FINGER_COMMAND_REG, 0x1) < 0)) { TOUCH_ERR_MSG("finger baseline reset command write fail\n"); return -EIO; } } break; default: break; } break; case IC_CTRL_READ: #ifdef CUST_G_TOUCH if (unlikely(touch_i2c_write_byte(client, PAGE_SELECT_REG, ((value & 0xFF00) >> 8)) < 0)) { TOUCH_ERR_MSG("PAGE_SELECT_REG write fail\n"); return -EIO; } if 
(touch_i2c_read(client, (value & 0xFF), 1, &buf) < 0) { TOUCH_ERR_MSG("IC register read fail\n"); return -EIO; } if (unlikely(touch_i2c_write_byte(client, PAGE_SELECT_REG, 0x00) < 0)) { TOUCH_ERR_MSG("PAGE_SELECT_REG write fail\n"); return -EIO; } #else if (touch_i2c_read(client, value, 1, &buf) < 0) { TOUCH_ERR_MSG("IC register read fail\n"); return -EIO; } #endif break; case IC_CTRL_WRITE: #ifdef CUST_G_TOUCH if (unlikely(touch_i2c_write_byte(client, PAGE_SELECT_REG, ((value & 0xFF0000) >> 16)) < 0)) { TOUCH_ERR_MSG("PAGE_SELECT_REG write fail\n"); return -EIO; } if (touch_i2c_write_byte(client, ((value & 0xFF00) >> 8), (value & 0xFF)) < 0) { TOUCH_ERR_MSG("IC register write fail\n"); return -EIO; } if (unlikely(touch_i2c_write_byte(client, PAGE_SELECT_REG, 0x00) < 0)) { TOUCH_ERR_MSG("PAGE_SELECT_REG write fail\n"); return -EIO; } #else if (touch_i2c_write_byte(client, ((value & 0xFF00) >> 8), (value & 0xFF)) < 0) { TOUCH_ERR_MSG("IC register write fail\n"); return -EIO; } #endif break; case IC_CTRL_RESET_CMD: if (unlikely(touch_i2c_write_byte(client, DEVICE_COMMAND_REG, 0x1) < 0)) { TOUCH_ERR_MSG("IC Reset command write fail\n"); return -EIO; } break; case IC_CTRL_REPORT_MODE: switch (value) { case 0: // #ifdef CUST_G_TOUCH if (unlikely(touch_i2c_write_byte(client, TWO_D_REPORTING_MODE, REPORT_BEYOND_CLIP | ABS_FILTER | REPORT_MODE_CONTINUOUS) < 0)) { TOUCH_ERR_MSG("TWO_D_REPORTING_MODE write fail\n"); return -EIO; } #else if (unlikely(touch_i2c_write_byte(client, TWO_D_REPORTING_MODE, REPORT_MODE_CONTINUOUS) < 0)) { TOUCH_ERR_MSG("TWO_D_REPORTING_MODE write fail\n"); return -EIO; } #endif break; case 1: // #ifdef CUST_G_TOUCH if (unlikely(touch_i2c_write_byte(client, TWO_D_REPORTING_MODE, REPORT_BEYOND_CLIP | ABS_FILTER | REPORT_MODE_REDUCED) < 0)) { TOUCH_ERR_MSG("TWO_D_REPORTING_MODE write fail\n"); return -EIO; } #else if (unlikely(touch_i2c_write_byte(client, TWO_D_REPORTING_MODE, REPORT_MODE_REDUCED) < 0)) { TOUCH_ERR_MSG("TWO_D_REPORTING_MODE write 
fail\n"); return -EIO; } #endif default: break; } break; default: break; } return buf; } struct touch_device_driver synaptics_ts_driver = { .probe = synaptics_ts_probe, #ifdef CUST_G_TOUCH .resolution = synaptics_ts_resolution, #endif .remove = synaptics_ts_remove, .init = synaptics_ts_init, .data = synaptics_ts_get_data, .power = synaptics_ts_power, .fw_upgrade = synaptics_ts_fw_upgrade, .ic_ctrl = synaptics_ts_ic_ctrl, }; static void async_touch_init(void *data, async_cookie_t cookie) { if (touch_debug_mask & DEBUG_TRACE) TOUCH_DEBUG_MSG("\n"); touch_driver_register(&synaptics_ts_driver); } static int __devinit touch_init(void) { if (touch_debug_mask & DEBUG_TRACE) TOUCH_DEBUG_MSG("\n"); async_schedule(async_touch_init, NULL); return 0; } static void __exit touch_exit(void) { if (touch_debug_mask & DEBUG_TRACE) TOUCH_DEBUG_MSG("\n"); touch_driver_unregister(); } module_init(touch_init); module_exit(touch_exit); MODULE_AUTHOR("yehan.ahn@lge.com, hyesung.shin@lge.com"); MODULE_DESCRIPTION("LGE Touch Driver"); MODULE_LICENSE("GPL");
aicjofs/android_kernel_lge_v500
drivers/input/touchscreen/touch_synaptics.c
C
gpl-2.0
45,288
/* * Copyright (C) 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef OESTextureHalfFloatLinear_h #define OESTextureHalfFloatLinear_h #include "WebGLExtension.h" #include <wtf/PassOwnPtr.h> namespace WebCore { class OESTextureHalfFloatLinear : public WebGLExtension { public: static OwnPtr<OESTextureHalfFloatLinear> create(WebGLRenderingContext*); virtual ~OESTextureHalfFloatLinear(); virtual ExtensionName getName() const override; private: OESTextureHalfFloatLinear(WebGLRenderingContext*); }; } // namespace WebCore #endif // OESTextureHalfFloatLinear_h
loveyoupeng/rt
modules/web/src/main/native/Source/WebCore/html/canvas/OESTextureHalfFloatLinear.h
C
gpl-2.0
1,876
<h3 class="evento"><?php echo __('Notas')?></h3> <?php if (!$pager->getNbResults()): ?> <blockquote class="notice"><p> <?php echo __('No hay notas') ?> </p></blockquote> <?php else: ?> <?php include_partial('notas_list', array('pager' => $pager , 'labels' => $labels)) ?> <?php endif; ?> <?php //include_partial('notas_actions' ) ?>
Esleelkartea/legedia-ESLE
legedia2/apps/frontend/modules/panel/templates/_notas.php
PHP
gpl-2.0
334
/* * Copyright (c) 2003, 2007-11 Matteo Frigo * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /* This file was automatically generated --- DO NOT EDIT */ /* Generated on Wed Jul 27 06:19:01 EDT 2011 */ #include "codelet-rdft.h" #ifdef HAVE_FMA /* Generated by: ../../../genfft/gen_r2cb.native -fma -reorder-insns -schedule-for-pipeline -compact -variables 4 -pipeline-latency 4 -sign 1 -n 2 -name r2cbIII_2 -dft-III -include r2cbIII.h */ /* * This function contains 0 FP additions, 2 FP multiplications, * (or, 0 additions, 2 multiplications, 0 fused multiply/add), * 4 stack variables, 1 constants, and 4 memory accesses */ #include "r2cbIII.h" static void r2cbIII_2(R *R0, R *R1, R *Cr, R *Ci, stride rs, stride csr, stride csi, INT v, INT ivs, INT ovs) { DK(KP2_000000000, +2.000000000000000000000000000000000000000000000); { INT i; for (i = v; i > 0; i = i - 1, R0 = R0 + ovs, R1 = R1 + ovs, Cr = Cr + ivs, Ci = Ci + ivs, MAKE_VOLATILE_STRIDE(rs), MAKE_VOLATILE_STRIDE(csr), MAKE_VOLATILE_STRIDE(csi)) { E T1, T2; T1 = Cr[0]; T2 = Ci[0]; R0[0] = KP2_000000000 * T1; R1[0] = -(KP2_000000000 * T2); } } } static const kr2c_desc desc = { 2, "r2cbIII_2", {0, 2, 0, 0}, &GENUS }; void X(codelet_r2cbIII_2) (planner *p) { X(kr2c_register) (p, r2cbIII_2, &desc); } #else /* 
HAVE_FMA */ /* Generated by: ../../../genfft/gen_r2cb.native -compact -variables 4 -pipeline-latency 4 -sign 1 -n 2 -name r2cbIII_2 -dft-III -include r2cbIII.h */ /* * This function contains 0 FP additions, 2 FP multiplications, * (or, 0 additions, 2 multiplications, 0 fused multiply/add), * 4 stack variables, 1 constants, and 4 memory accesses */ #include "r2cbIII.h" static void r2cbIII_2(R *R0, R *R1, R *Cr, R *Ci, stride rs, stride csr, stride csi, INT v, INT ivs, INT ovs) { DK(KP2_000000000, +2.000000000000000000000000000000000000000000000); { INT i; for (i = v; i > 0; i = i - 1, R0 = R0 + ovs, R1 = R1 + ovs, Cr = Cr + ivs, Ci = Ci + ivs, MAKE_VOLATILE_STRIDE(rs), MAKE_VOLATILE_STRIDE(csr), MAKE_VOLATILE_STRIDE(csi)) { E T1, T2; T1 = Cr[0]; T2 = Ci[0]; R0[0] = KP2_000000000 * T1; R1[0] = -(KP2_000000000 * T2); } } } static const kr2c_desc desc = { 2, "r2cbIII_2", {0, 2, 0, 0}, &GENUS }; void X(codelet_r2cbIII_2) (planner *p) { X(kr2c_register) (p, r2cbIII_2, &desc); } #endif /* HAVE_FMA */
dstuck/tinker_integrated_PIMC
tinker/fftw/rdft/scalar/r2cb/r2cbIII_2.c
C
gpl-2.0
3,192
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (c) 2021 SUSE LLC */ #include <stdlib.h> #include <stdio.h> #include "tst_test.h" static char *only_mount_v1; static char *no_cleanup; static struct tst_option opts[] = { {"v", &only_mount_v1, "-v\tOnly try to mount CGroups V1"}, {"n", &no_cleanup, "-n\tLeave CGroups created by test"}, {NULL, NULL, NULL}, }; static struct tst_cg_opts cgopts; static struct tst_cg_group *cg_child; static void do_test(void) { char buf[BUFSIZ]; size_t mem; if (!TST_CG_VER_IS_V1(tst_cg, "memory")) SAFE_CG_PRINT(tst_cg, "cgroup.subtree_control", "+memory"); if (!TST_CG_VER_IS_V1(tst_cg, "cpuset")) SAFE_CG_PRINT(tst_cg, "cgroup.subtree_control", "+cpuset"); cg_child = tst_cg_group_mk(tst_cg, "child"); if (!SAFE_FORK()) { SAFE_CG_PRINTF(cg_child, "cgroup.procs", "%d", getpid()); SAFE_CG_SCANF(cg_child, "memory.current", "%zu", &mem); tst_res(TPASS, "child/memory.current = %zu", mem); SAFE_CG_PRINTF(cg_child, "memory.max", "%zu", (1UL << 24) - 1); SAFE_CG_PRINTF(cg_child, "memory.swap.max", "%zu", 1UL << 31); SAFE_CG_READ(cg_child, "cpuset.mems", buf, sizeof(buf)); tst_res(TPASS, "child/cpuset.mems = %s", buf); SAFE_CG_PRINT(cg_child, "cpuset.mems", buf); exit(0); } SAFE_CG_PRINTF(tst_cg, "memory.max", "%zu", (1UL << 24) - 1); SAFE_CG_PRINTF(cg_child, "cgroup.procs", "%d", getpid()); SAFE_CG_SCANF(tst_cg, "memory.current", "%zu", &mem); tst_res(TPASS, "memory.current = %zu", mem); tst_reap_children(); SAFE_CG_PRINTF(tst_cg_drain, "cgroup.procs", "%d", getpid()); cg_child = tst_cg_group_rm(cg_child); } static void setup(void) { cgopts.needs_ver = !!only_mount_v1 ? 
TST_CG_V1 : 0; tst_cg_scan(); tst_cg_print_config(); tst_cg_require("memory", &cgopts); tst_cg_require("cpuset", &cgopts); tst_cg_init(); } static void cleanup(void) { if (cg_child) { SAFE_CG_PRINTF(tst_cg_drain, "cgroup.procs", "%d", getpid()); cg_child = tst_cg_group_rm(cg_child); } if (!no_cleanup) tst_cg_cleanup(); } static struct tst_test test = { .test_all = do_test, .setup = setup, .cleanup = cleanup, .options = opts, .forks_child = 1, };
linux-test-project/ltp
lib/newlib_tests/tst_cgroup02.c
C
gpl-2.0
2,169
/* * Exercise Classic API Workspace Wrappers * * COPYRIGHT (c) 1989-2009. * On-Line Applications Research Corporation (OAR). * * The license and distribution terms for this file may be * found in the file LICENSE in this distribution or at * http://www.rtems.com/license/LICENSE. * * $Id: init.c,v 1.4 2009/12/08 17:52:56 joel Exp $ */ #include <tmacros.h> rtems_task Init( rtems_task_argument argument ) { void *p1; bool retbool; Heap_Information_block info; puts( "\n\n*** TEST WORKSPACE CLASSIC API ***" ); puts( "rtems_workspace_get_information - null pointer" ); retbool = rtems_workspace_get_information( NULL ); rtems_test_assert( retbool == false ); puts( "rtems_workspace_get_information - OK" ); retbool = rtems_workspace_get_information( &info ); rtems_test_assert( retbool == true ); puts( "rtems_workspace_allocate - null pointer" ); retbool = rtems_workspace_allocate( 42, NULL ); rtems_test_assert( retbool == false ); puts( "rtems_workspace_allocate - 0 bytes" ); retbool = rtems_workspace_allocate( 0, &p1 ); rtems_test_assert( retbool == false ); puts( "rtems_workspace_allocate - too many bytes" ); retbool = rtems_workspace_allocate( info.Free.largest * 2, &p1 ); rtems_test_assert( retbool == false ); puts( "rtems_workspace_allocate - 42 bytes" ); retbool = rtems_workspace_allocate( 42, &p1 ); rtems_test_assert( retbool == true ); rtems_test_assert( p1 != NULL ); puts( "rtems_workspace_free - NULL" ); retbool = rtems_workspace_free( NULL ); rtems_test_assert( retbool == false ); puts( "rtems_workspace_free - previous pointer to 42 bytes" ); retbool = rtems_workspace_free( p1 ); rtems_test_assert( retbool == true ); puts( "*** END OF TEST WORKSPACE CLASSIC API ***" ); rtems_test_exit( 0 ); } /* configuration information */ #define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER #define CONFIGURE_APPLICATION_DOES_NOT_NEED_CLOCK_DRIVER #define CONFIGURE_RTEMS_INIT_TASKS_TABLE #define CONFIGURE_MAXIMUM_TASKS 1 #define CONFIGURE_INIT #include <rtems/confdefs.h>
yunusdawji/rtems-at91sam9g20ek
testsuites/sptests/spwkspace/init.c
C
gpl-2.0
2,138
/* * Implementation of the kernel access vector cache (AVC). * * Authors: Stephen Smalley, <sds@epoch.ncsc.mil> * James Morris <jmorris@redhat.com> * * Update: KaiGai, Kohei <kaigai@ak.jp.nec.com> * Replaced the avc_lock spinlock by RCU. * * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, * as published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/dcache.h> #include <linux/init.h> #include <linux/skbuff.h> #include <linux/percpu.h> #include <linux/list.h> #include <net/sock.h> #include <linux/un.h> #include <net/af_unix.h> #include <linux/ip.h> #include <linux/audit.h> #include <linux/ipv6.h> #include <net/ipv6.h> #include "avc.h" #include "avc_ss.h" #include "classmap.h" #define AVC_CACHE_SLOTS 512 #define AVC_DEF_CACHE_THRESHOLD 512 #define AVC_CACHE_RECLAIM 16 #ifdef CONFIG_SECURITY_SELINUX_AVC_STATS #define avc_cache_stats_incr(field) this_cpu_inc(avc_cache_stats.field) #else #define avc_cache_stats_incr(field) do {} while (0) #endif struct avc_entry { u32 ssid; u32 tsid; u16 tclass; struct av_decision avd; struct avc_xperms_node *xp_node; }; struct avc_node { struct avc_entry ae; struct hlist_node list; /* anchored in avc_cache->slots[i] */ struct rcu_head rhead; }; struct avc_xperms_decision_node { struct extended_perms_decision xpd; struct list_head xpd_list; /* list of extended_perms_decision */ }; struct avc_xperms_node { struct extended_perms xp; struct list_head xpd_head; /* list head of extended_perms_decision */ }; struct avc_cache { struct hlist_head slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */ spinlock_t slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */ atomic_t lru_hint; /* LRU hint for reclaim scan */ atomic_t active_nodes; u32 latest_notif; /* latest 
revocation notification */ }; struct avc_callback_node { int (*callback) (u32 event); u32 events; struct avc_callback_node *next; }; /* Exported via selinufs */ unsigned int avc_cache_threshold = AVC_DEF_CACHE_THRESHOLD; #ifdef CONFIG_SECURITY_SELINUX_AVC_STATS DEFINE_PER_CPU(struct avc_cache_stats, avc_cache_stats) = { 0 }; #endif static struct avc_cache avc_cache; static struct avc_callback_node *avc_callbacks; static struct kmem_cache *avc_node_cachep; static struct kmem_cache *avc_xperms_data_cachep; static struct kmem_cache *avc_xperms_decision_cachep; static struct kmem_cache *avc_xperms_cachep; static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass) { return (ssid ^ (tsid<<2) ^ (tclass<<4)) & (AVC_CACHE_SLOTS - 1); } /** * avc_dump_av - Display an access vector in human-readable form. * @tclass: target security class * @av: access vector */ static void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av) { const char **perms; int i, perm; if (av == 0) { audit_log_format(ab, " null"); return; } perms = secclass_map[tclass-1].perms; audit_log_format(ab, " {"); i = 0; perm = 1; while (i < (sizeof(av) * 8)) { if ((perm & av) && perms[i]) { audit_log_format(ab, " %s", perms[i]); av &= ~perm; } i++; perm <<= 1; } if (av) audit_log_format(ab, " 0x%x", av); audit_log_format(ab, " }"); } /** * avc_dump_query - Display a SID pair and a class in human-readable form. 
* @ssid: source security identifier * @tsid: target security identifier * @tclass: target security class */ static void avc_dump_query(struct audit_buffer *ab, u32 ssid, u32 tsid, u16 tclass) { int rc; char *scontext; u32 scontext_len; rc = security_sid_to_context(ssid, &scontext, &scontext_len); if (rc) audit_log_format(ab, "ssid=%d", ssid); else { audit_log_format(ab, "scontext=%s", scontext); kfree(scontext); } rc = security_sid_to_context(tsid, &scontext, &scontext_len); if (rc) audit_log_format(ab, " tsid=%d", tsid); else { audit_log_format(ab, " tcontext=%s", scontext); kfree(scontext); } BUG_ON(tclass >= ARRAY_SIZE(secclass_map)); audit_log_format(ab, " tclass=%s", secclass_map[tclass-1].name); } /** * avc_init - Initialize the AVC. * * Initialize the access vector cache. */ void __init avc_init(void) { int i; for (i = 0; i < AVC_CACHE_SLOTS; i++) { INIT_HLIST_HEAD(&avc_cache.slots[i]); spin_lock_init(&avc_cache.slots_lock[i]); } atomic_set(&avc_cache.active_nodes, 0); atomic_set(&avc_cache.lru_hint, 0); avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node), 0, SLAB_PANIC, NULL); avc_xperms_cachep = kmem_cache_create("avc_xperms_node", sizeof(struct avc_xperms_node), 0, SLAB_PANIC, NULL); avc_xperms_decision_cachep = kmem_cache_create( "avc_xperms_decision_node", sizeof(struct avc_xperms_decision_node), 0, SLAB_PANIC, NULL); avc_xperms_data_cachep = kmem_cache_create("avc_xperms_data", sizeof(struct extended_perms_data), 0, SLAB_PANIC, NULL); audit_log(current->audit_context, GFP_KERNEL, AUDIT_KERNEL, "AVC INITIALIZED\n"); } int avc_get_hash_stats(char *page) { int i, chain_len, max_chain_len, slots_used; struct avc_node *node; struct hlist_head *head; rcu_read_lock(); slots_used = 0; max_chain_len = 0; for (i = 0; i < AVC_CACHE_SLOTS; i++) { head = &avc_cache.slots[i]; if (!hlist_empty(head)) { slots_used++; chain_len = 0; hlist_for_each_entry_rcu(node, head, list) chain_len++; if (chain_len > max_chain_len) max_chain_len = chain_len; } } 
rcu_read_unlock(); return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n" "longest chain: %d\n", atomic_read(&avc_cache.active_nodes), slots_used, AVC_CACHE_SLOTS, max_chain_len); } /* * using a linked list for extended_perms_decision lookup because the list is * always small. i.e. less than 5, typically 1 */ static struct extended_perms_decision *avc_xperms_decision_lookup(u8 driver, struct avc_xperms_node *xp_node) { struct avc_xperms_decision_node *xpd_node; list_for_each_entry(xpd_node, &xp_node->xpd_head, xpd_list) { if (xpd_node->xpd.driver == driver) return &xpd_node->xpd; } return NULL; } static inline unsigned int avc_xperms_has_perm(struct extended_perms_decision *xpd, u8 perm, u8 which) { unsigned int rc = 0; if ((which == XPERMS_ALLOWED) && (xpd->used & XPERMS_ALLOWED)) rc = security_xperm_test(xpd->allowed->p, perm); else if ((which == XPERMS_AUDITALLOW) && (xpd->used & XPERMS_AUDITALLOW)) rc = security_xperm_test(xpd->auditallow->p, perm); else if ((which == XPERMS_DONTAUDIT) && (xpd->used & XPERMS_DONTAUDIT)) rc = security_xperm_test(xpd->dontaudit->p, perm); return rc; } static void avc_xperms_allow_perm(struct avc_xperms_node *xp_node, u8 driver, u8 perm) { struct extended_perms_decision *xpd; security_xperm_set(xp_node->xp.drivers.p, driver); xpd = avc_xperms_decision_lookup(driver, xp_node); if (xpd && xpd->allowed) security_xperm_set(xpd->allowed->p, perm); } static void avc_xperms_decision_free(struct avc_xperms_decision_node *xpd_node) { struct extended_perms_decision *xpd; xpd = &xpd_node->xpd; if (xpd->allowed) kmem_cache_free(avc_xperms_data_cachep, xpd->allowed); if (xpd->auditallow) kmem_cache_free(avc_xperms_data_cachep, xpd->auditallow); if (xpd->dontaudit) kmem_cache_free(avc_xperms_data_cachep, xpd->dontaudit); kmem_cache_free(avc_xperms_decision_cachep, xpd_node); } static void avc_xperms_free(struct avc_xperms_node *xp_node) { struct avc_xperms_decision_node *xpd_node, *tmp; if (!xp_node) return; 
list_for_each_entry_safe(xpd_node, tmp, &xp_node->xpd_head, xpd_list) { list_del(&xpd_node->xpd_list); avc_xperms_decision_free(xpd_node); } kmem_cache_free(avc_xperms_cachep, xp_node); } static void avc_copy_xperms_decision(struct extended_perms_decision *dest, struct extended_perms_decision *src) { dest->driver = src->driver; dest->used = src->used; if (dest->used & XPERMS_ALLOWED) memcpy(dest->allowed->p, src->allowed->p, sizeof(src->allowed->p)); if (dest->used & XPERMS_AUDITALLOW) memcpy(dest->auditallow->p, src->auditallow->p, sizeof(src->auditallow->p)); if (dest->used & XPERMS_DONTAUDIT) memcpy(dest->dontaudit->p, src->dontaudit->p, sizeof(src->dontaudit->p)); } /* * similar to avc_copy_xperms_decision, but only copy decision * information relevant to this perm */ static inline void avc_quick_copy_xperms_decision(u8 perm, struct extended_perms_decision *dest, struct extended_perms_decision *src) { /* * compute index of the u32 of the 256 bits (8 u32s) that contain this * command permission */ u8 i = perm >> 5; dest->used = src->used; if (dest->used & XPERMS_ALLOWED) dest->allowed->p[i] = src->allowed->p[i]; if (dest->used & XPERMS_AUDITALLOW) dest->auditallow->p[i] = src->auditallow->p[i]; if (dest->used & XPERMS_DONTAUDIT) dest->dontaudit->p[i] = src->dontaudit->p[i]; } static struct avc_xperms_decision_node *avc_xperms_decision_alloc(u8 which) { struct avc_xperms_decision_node *xpd_node; struct extended_perms_decision *xpd; xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep, GFP_ATOMIC | __GFP_NOMEMALLOC); if (!xpd_node) return NULL; xpd = &xpd_node->xpd; if (which & XPERMS_ALLOWED) { xpd->allowed = kmem_cache_zalloc(avc_xperms_data_cachep, GFP_ATOMIC | __GFP_NOMEMALLOC); if (!xpd->allowed) goto error; } if (which & XPERMS_AUDITALLOW) { xpd->auditallow = kmem_cache_zalloc(avc_xperms_data_cachep, GFP_ATOMIC | __GFP_NOMEMALLOC); if (!xpd->auditallow) goto error; } if (which & XPERMS_DONTAUDIT) { xpd->dontaudit = 
kmem_cache_zalloc(avc_xperms_data_cachep, GFP_ATOMIC | __GFP_NOMEMALLOC); if (!xpd->dontaudit) goto error; } return xpd_node; error: avc_xperms_decision_free(xpd_node); return NULL; } static int avc_add_xperms_decision(struct avc_node *node, struct extended_perms_decision *src) { struct avc_xperms_decision_node *dest_xpd; node->ae.xp_node->xp.len++; dest_xpd = avc_xperms_decision_alloc(src->used); if (!dest_xpd) return -ENOMEM; avc_copy_xperms_decision(&dest_xpd->xpd, src); list_add(&dest_xpd->xpd_list, &node->ae.xp_node->xpd_head); return 0; } static struct avc_xperms_node *avc_xperms_alloc(void) { struct avc_xperms_node *xp_node; xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_ATOMIC|__GFP_NOMEMALLOC); if (!xp_node) return xp_node; INIT_LIST_HEAD(&xp_node->xpd_head); return xp_node; } static int avc_xperms_populate(struct avc_node *node, struct avc_xperms_node *src) { struct avc_xperms_node *dest; struct avc_xperms_decision_node *dest_xpd; struct avc_xperms_decision_node *src_xpd; if (src->xp.len == 0) return 0; dest = avc_xperms_alloc(); if (!dest) return -ENOMEM; memcpy(dest->xp.drivers.p, src->xp.drivers.p, sizeof(dest->xp.drivers.p)); dest->xp.len = src->xp.len; /* for each source xpd allocate a destination xpd and copy */ list_for_each_entry(src_xpd, &src->xpd_head, xpd_list) { dest_xpd = avc_xperms_decision_alloc(src_xpd->xpd.used); if (!dest_xpd) goto error; avc_copy_xperms_decision(&dest_xpd->xpd, &src_xpd->xpd); list_add(&dest_xpd->xpd_list, &dest->xpd_head); } node->ae.xp_node = dest; return 0; error: avc_xperms_free(dest); return -ENOMEM; } static inline u32 avc_xperms_audit_required(u32 requested, struct av_decision *avd, struct extended_perms_decision *xpd, u8 perm, int result, u32 *deniedp) { u32 denied, audited; denied = requested & ~avd->allowed; if (unlikely(denied)) { audited = denied & avd->auditdeny; if (audited && xpd) { if (avc_xperms_has_perm(xpd, perm, XPERMS_DONTAUDIT)) audited &= ~requested; } } else if (result) { audited = denied = 
requested; } else { audited = requested & avd->auditallow; if (audited && xpd) { if (!avc_xperms_has_perm(xpd, perm, XPERMS_AUDITALLOW)) audited &= ~requested; } } *deniedp = denied; return audited; } static inline int avc_xperms_audit(u32 ssid, u32 tsid, u16 tclass, u32 requested, struct av_decision *avd, struct extended_perms_decision *xpd, u8 perm, int result, struct common_audit_data *ad) { u32 audited, denied; audited = avc_xperms_audit_required( requested, avd, xpd, perm, result, &denied); if (likely(!audited)) return 0; return slow_avc_audit(ssid, tsid, tclass, requested, audited, denied, result, ad, 0); } static void avc_node_free(struct rcu_head *rhead) { struct avc_node *node = container_of(rhead, struct avc_node, rhead); avc_xperms_free(node->ae.xp_node); kmem_cache_free(avc_node_cachep, node); avc_cache_stats_incr(frees); } static void avc_node_delete(struct avc_node *node) { hlist_del_rcu(&node->list); call_rcu(&node->rhead, avc_node_free); atomic_dec(&avc_cache.active_nodes); } static void avc_node_kill(struct avc_node *node) { avc_xperms_free(node->ae.xp_node); kmem_cache_free(avc_node_cachep, node); avc_cache_stats_incr(frees); atomic_dec(&avc_cache.active_nodes); } static void avc_node_replace(struct avc_node *new, struct avc_node *old) { hlist_replace_rcu(&old->list, &new->list); call_rcu(&old->rhead, avc_node_free); atomic_dec(&avc_cache.active_nodes); } static inline int avc_reclaim_node(void) { struct avc_node *node; int hvalue, try, ecx; unsigned long flags; struct hlist_head *head; spinlock_t *lock; for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) { hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1); head = &avc_cache.slots[hvalue]; lock = &avc_cache.slots_lock[hvalue]; if (!spin_trylock_irqsave(lock, flags)) continue; rcu_read_lock(); hlist_for_each_entry(node, head, list) { avc_node_delete(node); avc_cache_stats_incr(reclaims); ecx++; if (ecx >= AVC_CACHE_RECLAIM) { rcu_read_unlock(); spin_unlock_irqrestore(lock, 
flags); goto out; } } rcu_read_unlock(); spin_unlock_irqrestore(lock, flags); } out: return ecx; } static struct avc_node *avc_alloc_node(void) { struct avc_node *node; node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC|__GFP_NOMEMALLOC); if (!node) goto out; INIT_HLIST_NODE(&node->list); avc_cache_stats_incr(allocations); if (atomic_inc_return(&avc_cache.active_nodes) > avc_cache_threshold) avc_reclaim_node(); out: return node; } static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd) { node->ae.ssid = ssid; node->ae.tsid = tsid; node->ae.tclass = tclass; memcpy(&node->ae.avd, avd, sizeof(node->ae.avd)); } static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass) { struct avc_node *node, *ret = NULL; int hvalue; struct hlist_head *head; hvalue = avc_hash(ssid, tsid, tclass); head = &avc_cache.slots[hvalue]; hlist_for_each_entry_rcu(node, head, list) { if (ssid == node->ae.ssid && tclass == node->ae.tclass && tsid == node->ae.tsid) { ret = node; break; } } return ret; } /** * avc_lookup - Look up an AVC entry. * @ssid: source security identifier * @tsid: target security identifier * @tclass: target security class * * Look up an AVC entry that is valid for the * (@ssid, @tsid), interpreting the permissions * based on @tclass. If a valid AVC entry exists, * then this function returns the avc_node. * Otherwise, this function returns NULL. 
*/ static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass) { struct avc_node *node; avc_cache_stats_incr(lookups); node = avc_search_node(ssid, tsid, tclass); if (node) return node; avc_cache_stats_incr(misses); return NULL; } static int avc_latest_notif_update(int seqno, int is_insert) { int ret = 0; static DEFINE_SPINLOCK(notif_lock); unsigned long flag; spin_lock_irqsave(&notif_lock, flag); if (is_insert) { if (seqno < avc_cache.latest_notif) { printk(KERN_WARNING "SELinux: avc: seqno %d < latest_notif %d\n", seqno, avc_cache.latest_notif); ret = -EAGAIN; } } else { if (seqno > avc_cache.latest_notif) avc_cache.latest_notif = seqno; } spin_unlock_irqrestore(&notif_lock, flag); return ret; } /** * avc_insert - Insert an AVC entry. * @ssid: source security identifier * @tsid: target security identifier * @tclass: target security class * @avd: resulting av decision * @xp_node: resulting extended permissions * * Insert an AVC entry for the SID pair * (@ssid, @tsid) and class @tclass. * The access vectors and the sequence number are * normally provided by the security server in * response to a security_compute_av() call. If the * sequence number @avd->seqno is not less than the latest * revocation notification, then the function copies * the access vectors into a cache entry, returns * avc_node inserted. Otherwise, this function returns NULL. 
*/ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd, struct avc_xperms_node *xp_node) { struct avc_node *pos, *node = NULL; int hvalue; unsigned long flag; if (avc_latest_notif_update(avd->seqno, 1)) goto out; node = avc_alloc_node(); if (node) { struct hlist_head *head; spinlock_t *lock; int rc = 0; hvalue = avc_hash(ssid, tsid, tclass); avc_node_populate(node, ssid, tsid, tclass, avd); rc = avc_xperms_populate(node, xp_node); if (rc) { kmem_cache_free(avc_node_cachep, node); return NULL; } head = &avc_cache.slots[hvalue]; lock = &avc_cache.slots_lock[hvalue]; spin_lock_irqsave(lock, flag); hlist_for_each_entry(pos, head, list) { if (pos->ae.ssid == ssid && pos->ae.tsid == tsid && pos->ae.tclass == tclass) { avc_node_replace(node, pos); goto found; } } hlist_add_head_rcu(&node->list, head); found: spin_unlock_irqrestore(lock, flag); } out: return node; } /** * avc_audit_pre_callback - SELinux specific information * will be called by generic audit code * @ab: the audit buffer * @a: audit_data */ static void avc_audit_pre_callback(struct audit_buffer *ab, void *a) { struct common_audit_data *ad = a; audit_log_format(ab, "avc: %s ", ad->selinux_audit_data->denied ? "denied" : "granted"); avc_dump_av(ab, ad->selinux_audit_data->tclass, ad->selinux_audit_data->audited); audit_log_format(ab, " for "); } /** * avc_audit_post_callback - SELinux specific information * will be called by generic audit code * @ab: the audit buffer * @a: audit_data */ static void avc_audit_post_callback(struct audit_buffer *ab, void *a) { struct common_audit_data *ad = a; audit_log_format(ab, " "); avc_dump_query(ab, ad->selinux_audit_data->ssid, ad->selinux_audit_data->tsid, ad->selinux_audit_data->tclass); if (ad->selinux_audit_data->denied) { struct task_struct *tsk = current; audit_log_format(ab, " permissive=%u", ad->selinux_audit_data->result ? 
0 : 1); if (strncmp(tsk->comm, "kworker", 7) == 0) { dump_stack(); } } } /* This is the slow part of avc audit with big stack footprint */ noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass, u32 requested, u32 audited, u32 denied, int result, struct common_audit_data *a, unsigned flags) { struct common_audit_data stack_data; struct selinux_audit_data sad; if (!a) { a = &stack_data; a->type = LSM_AUDIT_DATA_NONE; } /* * When in a RCU walk do the audit on the RCU retry. This is because * the collection of the dname in an inode audit message is not RCU * safe. Note this may drop some audits when the situation changes * during retry. However this is logically just as if the operation * happened a little later. */ if ((a->type == LSM_AUDIT_DATA_INODE) && (flags & MAY_NOT_BLOCK)) return -ECHILD; sad.tclass = tclass; sad.requested = requested; sad.ssid = ssid; sad.tsid = tsid; sad.audited = audited; sad.denied = denied; sad.result = result; a->selinux_audit_data = &sad; common_lsm_audit(a, avc_audit_pre_callback, avc_audit_post_callback); return 0; } /** * avc_add_callback - Register a callback for security events. * @callback: callback function * @events: security events * * Register a callback function for events in the set @events. * Returns %0 on success or -%ENOMEM if insufficient memory * exists to add the callback. 
*/ int __init avc_add_callback(int (*callback)(u32 event), u32 events) { struct avc_callback_node *c; int rc = 0; c = kmalloc(sizeof(*c), GFP_KERNEL); if (!c) { rc = -ENOMEM; goto out; } c->callback = callback; c->events = events; c->next = avc_callbacks; avc_callbacks = c; out: return rc; } static inline int avc_sidcmp(u32 x, u32 y) { return (x == y || x == SECSID_WILD || y == SECSID_WILD); } /** * avc_update_node Update an AVC entry * @event : Updating event * @perms : Permission mask bits * @ssid,@tsid,@tclass : identifier of an AVC entry * @seqno : sequence number when decision was made * @xpd: extended_perms_decision to be added to the node * * if a valid AVC entry doesn't exist,this function returns -ENOENT. * if kmalloc() called internal returns NULL, this function returns -ENOMEM. * otherwise, this function updates the AVC entry. The original AVC-entry object * will release later by RCU. */ static int avc_update_node(u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid, u32 tsid, u16 tclass, u32 seqno, struct extended_perms_decision *xpd, u32 flags) { int hvalue, rc = 0; unsigned long flag; struct avc_node *pos, *node, *orig = NULL; struct hlist_head *head; spinlock_t *lock; node = avc_alloc_node(); if (!node) { rc = -ENOMEM; goto out; } /* Lock the target slot */ hvalue = avc_hash(ssid, tsid, tclass); head = &avc_cache.slots[hvalue]; lock = &avc_cache.slots_lock[hvalue]; spin_lock_irqsave(lock, flag); hlist_for_each_entry(pos, head, list) { if (ssid == pos->ae.ssid && tsid == pos->ae.tsid && tclass == pos->ae.tclass && seqno == pos->ae.avd.seqno){ orig = pos; break; } } if (!orig) { rc = -ENOENT; avc_node_kill(node); goto out_unlock; } /* * Copy and replace original node. 
*/ avc_node_populate(node, ssid, tsid, tclass, &orig->ae.avd); if (orig->ae.xp_node) { rc = avc_xperms_populate(node, orig->ae.xp_node); if (rc) { kmem_cache_free(avc_node_cachep, node); goto out_unlock; } } switch (event) { case AVC_CALLBACK_GRANT: node->ae.avd.allowed |= perms; if (node->ae.xp_node && (flags & AVC_EXTENDED_PERMS)) avc_xperms_allow_perm(node->ae.xp_node, driver, xperm); break; case AVC_CALLBACK_TRY_REVOKE: case AVC_CALLBACK_REVOKE: node->ae.avd.allowed &= ~perms; break; case AVC_CALLBACK_AUDITALLOW_ENABLE: node->ae.avd.auditallow |= perms; break; case AVC_CALLBACK_AUDITALLOW_DISABLE: node->ae.avd.auditallow &= ~perms; break; case AVC_CALLBACK_AUDITDENY_ENABLE: node->ae.avd.auditdeny |= perms; break; case AVC_CALLBACK_AUDITDENY_DISABLE: node->ae.avd.auditdeny &= ~perms; break; case AVC_CALLBACK_ADD_XPERMS: avc_add_xperms_decision(node, xpd); break; } avc_node_replace(node, orig); out_unlock: spin_unlock_irqrestore(lock, flag); out: return rc; } /** * avc_flush - Flush the cache */ static void avc_flush(void) { struct hlist_head *head; struct avc_node *node; spinlock_t *lock; unsigned long flag; int i; for (i = 0; i < AVC_CACHE_SLOTS; i++) { head = &avc_cache.slots[i]; lock = &avc_cache.slots_lock[i]; spin_lock_irqsave(lock, flag); /* * With preemptable RCU, the outer spinlock does not * prevent RCU grace periods from ending. */ rcu_read_lock(); hlist_for_each_entry(node, head, list) avc_node_delete(node); rcu_read_unlock(); spin_unlock_irqrestore(lock, flag); } } /** * avc_ss_reset - Flush the cache and revalidate migrated permissions. 
* @seqno: policy sequence number */ int avc_ss_reset(u32 seqno) { struct avc_callback_node *c; int rc = 0, tmprc; avc_flush(); for (c = avc_callbacks; c; c = c->next) { if (c->events & AVC_CALLBACK_RESET) { tmprc = c->callback(AVC_CALLBACK_RESET); /* save the first error encountered for the return value and continue processing the callbacks */ if (!rc) rc = tmprc; } } avc_latest_notif_update(seqno, 0); return rc; } /* * Slow-path helper function for avc_has_perm_noaudit, * when the avc_node lookup fails. We get called with * the RCU read lock held, and need to return with it * still held, but drop if for the security compute. * * Don't inline this, since it's the slow-path and just * results in a bigger stack frame. */ static noinline struct avc_node *avc_compute_av(u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd, struct avc_xperms_node *xp_node) { rcu_read_unlock(); INIT_LIST_HEAD(&xp_node->xpd_head); security_compute_av(ssid, tsid, tclass, avd, &xp_node->xp); rcu_read_lock(); return avc_insert(ssid, tsid, tclass, avd, xp_node); } static noinline int avc_denied(u32 ssid, u32 tsid, u16 tclass, u32 requested, u8 driver, u8 xperm, unsigned flags, struct av_decision *avd) { if (flags & AVC_STRICT) return -EACCES; if (selinux_enforcing && !(avd->flags & AVD_FLAGS_PERMISSIVE)) return -EACCES; avc_update_node(AVC_CALLBACK_GRANT, requested, driver, xperm, ssid, tsid, tclass, avd->seqno, NULL, flags); return 0; } /* * The avc extended permissions logic adds an additional 256 bits of * permissions to an avc node when extended permissions for that node are * specified in the avtab. If the additional 256 permissions is not adequate, * as-is the case with ioctls, then multiple may be chained together and the * driver field is used to specify which set contains the permission. 
 */
int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested,
			   u8 driver, u8 xperm, struct common_audit_data *ad)
{
	struct avc_node *node;
	struct av_decision avd;
	u32 denied;
	struct extended_perms_decision local_xpd;
	struct extended_perms_decision *xpd = NULL;
	struct extended_perms_data allowed;
	struct extended_perms_data auditallow;
	struct extended_perms_data dontaudit;
	struct avc_xperms_node local_xp_node;
	struct avc_xperms_node *xp_node;
	int rc = 0, rc2;

	xp_node = &local_xp_node;
	BUG_ON(!requested);

	rcu_read_lock();

	/* Cache miss: compute a fresh decision; hit: copy the cached one. */
	node = avc_lookup(ssid, tsid, tclass);
	if (unlikely(!node)) {
		node = avc_compute_av(ssid, tsid, tclass, &avd, xp_node);
	} else {
		memcpy(&avd, &node->ae.avd, sizeof(avd));
		xp_node = node->ae.xp_node;
	}

	/* if extended permissions are not defined, only consider av_decision */
	if (!xp_node || !xp_node->xp.len)
		goto decision;

	local_xpd.allowed = &allowed;
	local_xpd.auditallow = &auditallow;
	local_xpd.dontaudit = &dontaudit;

	xpd = avc_xperms_decision_lookup(driver, xp_node);
	if (unlikely(!xpd)) {
		/*
		 * Compute the extended_perms_decision only if the driver
		 * is flagged
		 */
		if (!security_xperm_test(xp_node->xp.drivers.p, driver)) {
			avd.allowed &= ~requested;
			goto decision;
		}
		/*
		 * Drop the RCU read lock around the (potentially sleeping)
		 * security-server call, then re-take it before touching the
		 * cache again.
		 */
		rcu_read_unlock();
		security_compute_xperms_decision(ssid, tsid, tclass, driver,
						 &local_xpd);
		rcu_read_lock();
		avc_update_node(AVC_CALLBACK_ADD_XPERMS, requested, driver, xperm,
				ssid, tsid, tclass, avd.seqno, &local_xpd, 0);
	} else {
		avc_quick_copy_xperms_decision(xperm, &local_xpd, xpd);
	}
	xpd = &local_xpd;

	/* Mask out the request unless the specific xperm is allowed. */
	if (!avc_xperms_has_perm(xpd, xperm, XPERMS_ALLOWED))
		avd.allowed &= ~requested;

decision:
	denied = requested & ~(avd.allowed);
	if (unlikely(denied))
		rc = avc_denied(ssid, tsid, tclass, requested, driver, xperm,
				AVC_EXTENDED_PERMS, &avd);

	rcu_read_unlock();

	rc2 = avc_xperms_audit(ssid, tsid, tclass, requested,
			&avd, xpd, xperm, rc, ad);
	if (rc2)
		return rc2;
	return rc;
}

/**
 * avc_has_perm_noaudit - Check permissions but perform no auditing.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @requested: requested permissions, interpreted based on @tclass
 * @flags: AVC_STRICT or 0
 * @avd: access vector decisions
 *
 * Check the AVC to determine whether the @requested permissions are granted
 * for the SID pair (@ssid, @tsid), interpreting the permissions
 * based on @tclass, and call the security server on a cache miss to obtain
 * a new decision and add it to the cache.  Return a copy of the decisions
 * in @avd.  Return %0 if all @requested permissions are granted,
 * -%EACCES if any permissions are denied, or another -errno upon
 * other errors.  This function is typically called by avc_has_perm(),
 * but may also be called directly to separate permission checking from
 * auditing, e.g. in cases where a lock must be held for the check but
 * should be released for the auditing.
 */
inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
				u16 tclass, u32 requested,
				unsigned flags,
				struct av_decision *avd)
{
	struct avc_node *node;
	struct avc_xperms_node xp_node;
	int rc = 0;
	u32 denied;

	BUG_ON(!requested);

	rcu_read_lock();

	node = avc_lookup(ssid, tsid, tclass);
	if (unlikely(!node))
		node = avc_compute_av(ssid, tsid, tclass, avd, &xp_node);
	else
		memcpy(avd, &node->ae.avd, sizeof(*avd));

	denied = requested & ~(avd->allowed);
	if (unlikely(denied))
		rc = avc_denied(ssid, tsid, tclass, requested, 0, 0, flags, avd);

	rcu_read_unlock();
	return rc;
}

/**
 * avc_has_perm - Check permissions and perform any appropriate auditing.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @requested: requested permissions, interpreted based on @tclass
 * @auditdata: auxiliary audit data
 * @flags: VFS walk flags
 *
 * Check the AVC to determine whether the @requested permissions are granted
 * for the SID pair (@ssid, @tsid), interpreting the permissions
 * based on @tclass, and call the security server on a cache miss to obtain
 * a new decision and add it to the cache.  Audit the granting or denial of
 * permissions in accordance with the policy.  Return %0 if all @requested
 * permissions are granted, -%EACCES if any permissions are denied, or
 * another -errno upon other errors.
 */
int avc_has_perm_flags(u32 ssid, u32 tsid, u16 tclass,
		       u32 requested, struct common_audit_data *auditdata,
		       unsigned flags)
{
	struct av_decision avd;
	int rc, rc2;

	rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);

	rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata,
			flags);
	if (rc2)
		return rc2;
	return rc;
}

/* Returns the sequence number of the most recent policy change notification. */
u32 avc_policy_seqno(void)
{
	return avc_cache.latest_notif;
}

void avc_disable(void)
{
	/*
	 * If you are looking at this because you have realized that we are
	 * not destroying the avc_node_cachep it might be easy to fix, but
	 * I don't know the memory barrier semantics well enough to know.  It's
	 * possible that some other task dereferenced security_ops when
	 * it still pointed to selinux operations.  If that is the case it's
	 * possible that it is about to use the avc and is about to need the
	 * avc_node_cachep.  I know I could wrap the security.c security_ops call
	 * in an rcu_lock, but seriously, it's not worth it.  Instead I just flush
	 * the cache and get that memory back.
	 */
	if (avc_node_cachep) {
		avc_flush();
		/* kmem_cache_destroy(avc_node_cachep); */
	}
}
thicklizard/m9-patches
security/selinux/avc.c
C
gpl-2.0
31,378
<?php
/**
 * Manages the plugin's temporary working directory ("tmp" under the
 * configured base path): creation, permission checks and cleanup.
 */
class temporaryModelBup extends modelBup {
    /** @var string Base path (no trailing slash) that contains the tmp dir. */
    private $basePath;

    /**
     * Sets the base path; any trailing slash is stripped.
     *
     * @param string $basePath
     */
    public function setBasePath($basePath) {
        $this->basePath = untrailingslashit($basePath);
    }

    /** @return string Base path without trailing slash. */
    public function getBasePath() {
        return $this->basePath;
    }

    /** @return string Full path of the temporary directory (no trailing slash). */
    public function getPath() {
        return $this->basePath . DIRECTORY_SEPARATOR . 'tmp';
    }

    /** @return bool True if the temporary directory exists. */
    public function exists() {
        $path = $this->getPath();
        return is_dir($path) && file_exists($path);
    }

    /**
     * @param string|false $path Optional path to test instead of the tmp dir.
     * @return bool True if the tmp dir exists and $path is writable.
     */
    public function isWritable($path = false) {
        if (!$path) {
            $path = $this->getPath();
        }
        return $this->exists() && is_writable($path);
    }

    /**
     * Creates the temporary directory (recursively) and shields it from
     * direct web access with an .htaccess and a dummy index.php.
     *
     * @param string|false $path Optional directory to create instead.
     * @return bool True on success, false if mkdir() failed.
     */
    public function create($path = false) {
        if (!$path) {
            $path = $this->getPath() . DIRECTORY_SEPARATOR;
        }
        if (@mkdir($path, 0775, true)) {
            $htaccess = $path . '.htaccess';
            $indexphp = $path . 'index.php';
            @file_put_contents($htaccess, 'DENY FROM ALL', FILE_APPEND);
            @file_put_contents($indexphp, '<?php die("Hacking attempt");');
            return true;
        }
        return false;
    }

    /**
     * Removes every file in the temporary directory.
     *
     * @return bool
     */
    public function clearAll() {
        return $this->clearByPattern('*');
    }

    /**
     * Removes all files in the tmp dir that match a glob pattern.
     *
     * @param string $pattern glob() pattern, relative to the tmp dir
     * @return bool True on success (including when nothing matched).
     */
    public function clearByPattern($pattern) {
        $files = glob($this->getPath() . DIRECTORY_SEPARATOR . $pattern);

        // BUGFIX: glob() returns FALSE on failure and an array (possibly
        // empty) otherwise. The previous check `!$files || is_array($files)`
        // was true for every successful call, so the delete loop below was
        // unreachable and a bogus error was reported instead.
        if ($files === false) {
            $this->pushError(
                sprintf(
                    __('Failed to clear temporary folder by pattern "%s"', BUP_LANG_CODE),
                    htmlspecialchars($pattern)
                )
            );
            return false;
        }

        foreach ($files as $file) {
            @unlink($file);
        }

        return true;
    }
}
VitaliyProdan/pro-massage
wp-content/plugins/backup-by-supsystic/modules/warehouse/models/temporary.php
PHP
gpl-2.0
1,793
# coding: utf-8
"""
Utilities for database insertion
"""

import errno
import json
import os
import shutil
import stat

import gridfs
import paramiko
import pymongo

from monty.json import MSONable


class MongoDatabase(MSONable):
    """
    MongoDB database class for access, insertion, update, ... in a MongoDB database
    """

    def __init__(self, host, port, database, username, password, collection,
                 gridfs_collection=None):
        """
        Args:
            host: MongoDB host name or IP.
            port: MongoDB port.
            database: database name.
            username: user to authenticate as (falsy to skip authentication).
            password: password for `username`.
            collection: name of the working collection.
            gridfs_collection: optional GridFS collection name; when None no
                GridFS handle is created and gridfs_msonables cannot be used.
        """
        self._host = host
        self._port = port
        self._database = database
        self._username = username
        self._password = password
        self._collection = collection
        self._gridfs_collection = gridfs_collection
        self._connect()

    def _connect(self):
        """Open the MongoDB connection and set up collection/GridFS handles."""
        self.server = pymongo.MongoClient(host=self._host, port=self._port)
        self.database = self.server[self._database]
        if self._username:
            self.database.authenticate(name=self._username, password=self._password)
        self.collection = self.database[self._collection]
        if self._gridfs_collection is not None:
            self.gridfs = gridfs.GridFS(self.database, collection=self._gridfs_collection)
        else:
            self.gridfs = None

    def insert_entry(self, entry, gridfs_msonables=None):
        """Insert `entry`; MSONable values in `gridfs_msonables` are stored in
        GridFS and replaced in the entry by their GridFS file ids."""
        if gridfs_msonables is not None:
            if self.gridfs is None:
                raise ValueError('gridfs_msonables provided but no '
                                 'gridfs_collection was configured')
            for entry_value, msonable_object in gridfs_msonables.items():
                dict_str = json.dumps(msonable_object.as_dict())
                file_obj = self.gridfs.put(dict_str, encoding='utf-8')
                entry[entry_value] = file_obj
        self.collection.insert(entry)

    def get_entry(self, criteria):
        """Return the single entry matching `criteria`.

        Raises:
            ValueError: if zero or more than one entry matches.
        """
        count = self.collection.find(criteria).count()
        if count == 0:
            raise ValueError("No entry found with criteria ...")
        elif count > 1:
            raise ValueError("Multiple entries ({:d}) found with criteria ...".format(count))
        return self.collection.find_one(criteria)

    def save_entry(self, entry):
        """Persist an entry that was previously fetched (must carry an _id)."""
        if '_id' not in entry:
            raise ValueError('Entry should contain "_id" field to be saved')
        self.collection.save(entry)

    def update_entry(self, query, entry_update, gridfs_msonables=None):
        """Update the single entry matching `query` with `entry_update`.

        New GridFS objects replace the old references; the previous reference
        is kept under a "<key>_backup_N" field (at most 10 backups per key).
        """
        count = self.collection.find(query).count()
        if count != 1:
            raise RuntimeError("Number of entries != 1, found : {:d}".format(count))
        entry = self.collection.find_one(query)
        entry.update(entry_update)
        if gridfs_msonables is not None:
            for entry_value, msonable_object in gridfs_msonables.items():
                if entry_value in entry:
                    # Find the first free backup slot for the old reference.
                    backup_current_entry_value = str(entry_value)
                    backup_number = 1
                    while True:
                        if backup_number > 10:
                            raise ValueError('Too many backups (10) for object with entry name '
                                             '"{}"'.format(entry_value))
                        if backup_current_entry_value in entry:
                            backup_current_entry_value = '{}_backup_{:d}'.format(entry_value,
                                                                                 backup_number)
                            backup_number += 1
                            continue
                        entry[backup_current_entry_value] = entry[entry_value]
                        break
                dict_str = json.dumps(msonable_object.as_dict())
                file_obj = self.gridfs.put(dict_str, encoding='utf-8')
                entry[entry_value] = file_obj
        self.collection.save(entry)

    def as_dict(self):
        """
        Json-serializable dict representation of a MongoDatabase
        """
        dd = {"@module": self.__class__.__module__,
              "@class": self.__class__.__name__,
              "host": self._host,
              "port": self._port,
              "database": self._database,
              "username": self._username,
              "password": self._password,
              "collection": self._collection,
              "gridfs_collection": self._gridfs_collection}
        return dd

    @classmethod
    def from_dict(cls, d):
        return cls(host=d['host'], port=d['port'], database=d['database'],
                   username=d['username'], password=d['password'],
                   collection=d['collection'], gridfs_collection=d['gridfs_collection'])


class StorageServer(MSONable):
    """
    Storage server class for moving files to/from a given server
    """
    REMOTE_SERVER = 'REMOTE_SERVER'
    LOCAL_SERVER = 'LOCAL_SERVER'

    def __init__(self, hostname, port=22, username=None, password=None,
                 server_type=REMOTE_SERVER):
        """
        Args:
            hostname: host to connect to (ignored for LOCAL_SERVER).
            port: SSH port.
            username: SSH user name.
            password: SSH password.
            server_type: REMOTE_SERVER (SFTP over SSH) or LOCAL_SERVER
                (plain filesystem copies).
        """
        self.hostname = hostname
        self.port = port
        self.username = username
        self.password = password
        self.server_type = server_type
        # self.connect()

    def connect(self):
        """Open the SSH/SFTP session (no-op for LOCAL_SERVER)."""
        if self.server_type == self.REMOTE_SERVER:
            self.ssh_client = paramiko.SSHClient()
            self.ssh_client.load_system_host_keys()
            self.ssh_client.connect(hostname=self.hostname, port=self.port,
                                    username=self.username, password=self.password)
            self.sftp_client = self.ssh_client.open_sftp()

    def disconnect(self):
        """Close the SSH/SFTP session (no-op for LOCAL_SERVER)."""
        if self.server_type == self.REMOTE_SERVER:
            self.sftp_client.close()
            self.ssh_client.close()

    def remotepath_exists(self, path):
        """Return True if `path` exists on the remote server."""
        try:
            self.sftp_client.stat(path)
        except IOError as e:
            # BUGFIX: exceptions are not indexable on Python 3 (the old
            # ``e[0] == 2`` raised TypeError); use the errno attribute.
            if e.errno == errno.ENOENT:
                return False
            raise
        else:
            return True

    def remote_makedirs(self, path):
        """Recursively create `path` on the remote server (like os.makedirs)."""
        head, tail = os.path.split(path)
        if not tail:
            head, tail = os.path.split(head)
        if head and tail and not self.remotepath_exists(path=head):
            self.remote_makedirs(head)
            if tail == '.':
                return
        self.sftp_client.mkdir(path=path)

    def put(self, localpath, remotepath, overwrite=False, makedirs=True):
        """Copy a local file to the server.

        Args:
            localpath: existing local file.
            remotepath: destination path (must be a file path).
            overwrite: allow replacing an existing destination.
            makedirs: create missing remote parent directories.
        """
        if self.server_type == self.REMOTE_SERVER:
            self.connect()
            if not os.path.exists(localpath):
                raise IOError('Local path "{}" does not exist'.format(localpath))
            if not overwrite and self.remotepath_exists(remotepath):
                raise IOError('Remote path "{}" exists'.format(remotepath))
            rdirname, rfilename = os.path.split(remotepath)
            if not rfilename or rfilename in ['.', '..']:
                raise IOError('Remote path "{}" is not a valid filepath'.format(remotepath))
            if not self.remotepath_exists(rdirname):
                if makedirs:
                    self.remote_makedirs(rdirname)
                else:
                    raise IOError('Directory of remote path "{}" does not exists and '
                                  '"makedirs" is set to False'.format(remotepath))
            sftp_stat = self.sftp_client.put(localpath=localpath, remotepath=remotepath)
            self.disconnect()
            return sftp_stat
        elif self.server_type == self.LOCAL_SERVER:
            if not os.path.exists(localpath):
                raise IOError('Source path "{}" does not exist'.format(localpath))
            if os.path.exists(remotepath) and not overwrite:
                raise IOError('Dest path "{}" exists'.format(remotepath))
            if not os.path.isfile(localpath):
                raise NotImplementedError('Only files can be copied in LOCAL_SERVER mode.')
            shutil.copyfile(src=localpath, dst=remotepath)
        else:
            raise ValueError('Server type "{}" is not allowed'.format(self.server_type))

    def get(self, remotepath, localpath=None, overwrite=False, makedirs=True):
        """Copy a file from the server to the local machine.

        Args:
            remotepath: existing source file on the server.
            localpath: destination; defaults to the basename of `remotepath`.
            overwrite: allow replacing an existing local file.
            makedirs: accepted for symmetry with put() (currently unused).
        """
        # BUGFIX: default/expand the local path for both server types - the
        # original handled localpath=None only in the REMOTE branch, so the
        # LOCAL branch crashed on os.path.exists(None).
        if localpath is None:
            localpath = os.path.basename(remotepath)
        localpath = os.path.expanduser(localpath)
        if self.server_type == self.REMOTE_SERVER:
            self.connect()
            if not self.remotepath_exists(remotepath):
                raise IOError('Remote path "{}" does not exist'.format(remotepath))
            if not overwrite and os.path.exists(localpath):
                raise IOError('Local path "{}" exists'.format(localpath))
            # Check if the remotepath is a regular file (right now, this is the
            # only option that is implemented, directories should be
            # implemented, symbolic links should be handled in some way).
            remotepath_stat = self.sftp_client.stat(remotepath)
            if stat.S_ISREG(remotepath_stat.st_mode):
                sftp_stat = self.sftp_client.get(remotepath, localpath)
            else:
                raise NotImplementedError('Remote path "{}" is not a regular file'.format(remotepath))
            self.disconnect()
            return sftp_stat
        elif self.server_type == self.LOCAL_SERVER:
            if not os.path.exists(remotepath):
                raise IOError('Source path "{}" does not exist'.format(remotepath))
            if os.path.exists(localpath) and not overwrite:
                raise IOError('Dest path "{}" exists'.format(localpath))
            if not os.path.isfile(remotepath):
                raise NotImplementedError('Only files can be copied in LOCAL_SERVER mode.')
            shutil.copyfile(src=remotepath, dst=localpath)
        else:
            raise ValueError('Server type "{}" is not allowed'.format(self.server_type))

    def as_dict(self):
        """
        Json-serializable dict representation of a StorageServer
        """
        dd = {"@module": self.__class__.__module__,
              "@class": self.__class__.__name__,
              "hostname": self.hostname,
              "port": self.port,
              "username": self.username,
              "password": self.password,
              "server_type": self.server_type}
        return dd

    @classmethod
    def from_dict(cls, d):
        return cls(hostname=d['hostname'], port=d['port'], username=d['username'],
                   password=d['password'],
                   server_type=d['server_type'] if 'server_type' in d else cls.REMOTE_SERVER)
davidwaroquiers/abiflows
abiflows/fireworks/utils/databases.py
Python
gpl-2.0
10,111
<?php
/**
 * ---------------------------------------------------------------------
 * GLPI - Gestionnaire Libre de Parc Informatique
 * Copyright (C) 2015-2021 Teclib' and contributors.
 *
 * http://glpi-project.org
 *
 * based on GLPI - Gestionnaire Libre de Parc Informatique
 * Copyright (C) 2003-2014 by the INDEPNET Development Team.
 *
 * ---------------------------------------------------------------------
 *
 * LICENSE
 *
 * This file is part of GLPI.
 *
 * GLPI is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * GLPI is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GLPI. If not, see <http://www.gnu.org/licenses/>.
 * ---------------------------------------------------------------------
 */

if (!defined('GLPI_ROOT')) {
   die("Sorry. You can't access this file directly");
}

/**
 * Class IPAddress_IPNetwork : Connection between IPAddress and IPNetwork
 *
 * @since 0.84
**/
class IPAddress_IPNetwork extends CommonDBRelation {

   // From CommonDBRelation
   static public $itemtype_1 = 'IPAddress';
   static public $items_id_1 = 'ipaddresses_id';

   static public $itemtype_2 = 'IPNetwork';
   static public $items_id_2 = 'ipnetworks_id';


   /**
    * Update IPNetwork's dependency: rebuild all links between the given
    * network and the IP addresses it contains.
    *
    * @param $network IPNetwork object
   **/
   static function linkIPAddressFromIPNetwork(IPNetwork $network) {
      global $DB;

      $linkObject    = new self();
      $linkTable     = $linkObject->getTable();
      $ipnetworks_id = $network->getID();

      // First, remove all links of the current Network
      $iterator = $DB->request([
         'SELECT' => 'id',
         'FROM'   => $linkTable,
         'WHERE'  => ['ipnetworks_id' => $ipnetworks_id]
      ]);
      while ($link = $iterator->next()) {
         $linkObject->delete(['id' => $link['id']]);
      }

      // Then, look each IP address contained inside current Network
      $iterator = $DB->request([
         'SELECT' => [
            new \QueryExpression($DB->quoteValue($ipnetworks_id) . ' AS ' . $DB->quoteName('ipnetworks_id')),
            'id AS ipaddresses_id'
         ],
         'FROM'   => 'glpi_ipaddresses',
         'WHERE'  => $network->getCriteriaForMatchingElement('glpi_ipaddresses', 'binary', 'version'),
         'GROUP'  => 'id'
      ]);
      while ($link = $iterator->next()) {
         $linkObject->add($link);
      }
   }


   /**
    * Link a (new) IP address to every network that contains it.
    *
    * @param $ipaddress IPAddress object
   **/
   static function addIPAddress(IPAddress $ipaddress) {

      $linkObject = new self();
      $input      = ['ipaddresses_id' => $ipaddress->getID()];

      // Beware that invalid IPaddresses don't have any valid address !
      $entity         = $ipaddress->getEntityID();
      $ipnetworks_ids = IPNetwork::searchNetworksContainingIP($ipaddress, $entity);

      // FIX: reuse the first lookup result instead of issuing the same
      // (potentially expensive) network search query a second time.
      if ($ipnetworks_ids !== false) {
         foreach ($ipnetworks_ids as $ipnetworks_id) {
            $input['ipnetworks_id'] = $ipnetworks_id;
            $linkObject->add($input);
         }
      }
   }
}
stonebuzz/glpi
inc/ipaddress_ipnetwork.class.php
PHP
gpl-2.0
3,521
<?php
XxxInstaller::Install('XwwForms');

#
# XwwForms - Xoo World of Wiki - Forms
#
# Part of Xoo (c) 1997-2008 [[w:en:User:Zocky]], mitko.si
# GPL3 applies
#
#
# Wikivariables and parser functions for dealing with forms
#
############################################################################

class XwwForms extends Xxx {

   /**
    * Parser-function handler that renders an HTML form control.
    *
    * Positional argument 1 is the control name (also its default id) and its
    * value; further arguments are either '#'-prefixed named options
    * (style/class/id/rows/cols) or entries of the option list used by the
    * select/radio variants.
    */
   function fl_input(&$parser, &$F, &$A) {
      $args  = new XxxArgs($F, $A);
      $name  = $args->getKey(1);
      $id    = $name;
      $value = $args->cropExpandValue(1);
      $style = '';
      $class = '';
      $cols  = 40;
      $rows  = 25;
      $options = array();

      foreach ($args->args as $i) {
         if ($args->isNamed($i) && $this->removePrefix($args->getKey($i), '#')) {
            switch ($args->getKey($i)) {
               case 'style': $style = 'style="' . $args->trimExpandValue($i) . '"'; break;
               case 'class': $class = 'class="' . $args->trimExpandValue($i) . '"'; break;
               case 'id':    $id    = $args->trimExpandValue($i); break;
               case 'rows':  $rows  = $args->trimExpandValue($i); break;
               case 'cols':  $cols  = $args->trimExpandValue($i); break;
            }
         } else {
            if ($args->isNamed($i)) {
               $options[$args->getKey($i)] = $args->cropExpandValue($i);
            } else {
               $options[$args->cropExpandValue($i)] = $args->cropExpandValue($i);
            }
         }
      }

      // NOTE(review): $command is never assigned in this method, so every
      // case below is currently dead code and the function produces no
      // output; presumably the control kind should come from an argument or
      // the registered parser-function name -- TODO confirm intended source.
      // Likewise, $r is built but never returned or emitted -- TODO confirm.
      switch ($command) {
         case 'hidden':
            // FIX: a "hidden" control must be type="hidden" (was type="text").
            $r = "<input type=\"hidden\" $style $class name=\"$name\" id=\"$id\" value=\"$value\" />";
            break;
         case 'text';
            $r = "<input type=\"text\" name=\"$name\" id=\"$id\" value=\"$value\"/>";
            break;
         case 'select':
            $r = "<select $style $class name=\"$name\" id=\"$id\" size=\"$rows\">";
            foreach ($options as $k => $v) {
               $r .= "<option value=\"$k\"" . ($k == $value ? 'selected' : '') . ">$v</option>";
            }
            $r .= "</select>";
            break;
         case 'radio':
            $r = "<div $style $class id=\"$id\" >";
            foreach ($options as $k => $v) {
               // FIX: the class ternary was embedded inside the string
               // literal, so the raw "( ... ? ... : ... )" text was emitted
               // into the HTML instead of being evaluated.
               $r .= "<label for=\"$id-$v\"" . ($k == $value ? ' class="opened"' : '')
                   . "><input name=\"$name\" id=\"$id-$v\" type=\"radio\" value=\"$k\""
                   . ($k == $value ? 'checked' : '') . "> $v</label>";
            }
            $r .= "</div>";
            break;
         case 'textarea':
            // FIX: opening tag was misspelled "<textearea", which browsers
            // ignore, leaving the closing </textarea> unmatched.
            $r = "<textarea $style $class name=\"$name\" id=\"$id\" cols=\"$cols\" rows=\"$rows\">$value</textarea>";
            break;
      }
   }
}
wiki-data/wiki-data
extensions/Xoo/Xww/Forms.php
PHP
gpl-2.0
2,274
//=============================================================================
//  MuseScore
//  Music Composition & Notation
//
//  Copyright (C) 2002-2011 Werner Schweer
//
//  This program is free software; you can redistribute it and/or modify
//  it under the terms of the GNU General Public License version 2
//  as published by the Free Software Foundation and appearing in
//  the file LICENCE.GPL
//=============================================================================

#include "dynamic.h"
#include "xml.h"
#include "score.h"
#include "measure.h"
#include "system.h"
#include "segment.h"
#include "utils.h"
#include "style.h"
#include "mscore.h"
#include "chord.h"
#include "undo.h"
#include "sym.h"

namespace Ms {

//-----------------------------------------------------------------------------
//   Dyn
//    see: http://en.wikipedia.org/wiki/File:Dynamic's_Note_Velocity.svg
//-----------------------------------------------------------------------------

struct Dyn {
      int velocity;      ///< associated midi velocity (0-127, -1 = none)
      bool accent;       ///< if true add velocity to current chord velocity
      const char* tag;   // name of dynamics, eg. "fff"
      const char* text;  // utf8 text of dynamic
      };

// Table indexed by Dynamic::Type; order must match that enum.
// variant with ligatures, works for both emmentaler and bravura:

static Dyn dynList[] = {
      // dynamic:
      {  -1,  true,  "other-dynamics", "" },
      {   1,  false, "pppppp", "<sym>dynamicPiano</sym><sym>dynamicPiano</sym><sym>dynamicPiano</sym><sym>dynamicPiano</sym><sym>dynamicPiano</sym><sym>dynamicPiano</sym>" },
      {   5,  false, "ppppp",  "<sym>dynamicPiano</sym><sym>dynamicPiano</sym><sym>dynamicPiano</sym><sym>dynamicPiano</sym><sym>dynamicPiano</sym>" },
      {  10,  false, "pppp",   "<sym>dynamicPiano</sym><sym>dynamicPiano</sym><sym>dynamicPiano</sym><sym>dynamicPiano</sym>" },
      {  16,  false, "ppp",    "<sym>dynamicPiano</sym><sym>dynamicPiano</sym><sym>dynamicPiano</sym>" },
      {  33,  false, "pp",     "<sym>dynamicPiano</sym><sym>dynamicPiano</sym>" },
      {  49,  false, "p",      "<sym>dynamicPiano</sym>" },
      {  64,  false, "mp",     "<sym>dynamicMezzo</sym><sym>dynamicPiano</sym>" },
      {  80,  false, "mf",     "<sym>dynamicMezzo</sym><sym>dynamicForte</sym>" },
      {  96,  false, "f",      "<sym>dynamicForte</sym>" },
      { 112,  false, "ff",     "<sym>dynamicForte</sym><sym>dynamicForte</sym>" },
      { 126,  false, "fff",    "<sym>dynamicForte</sym><sym>dynamicForte</sym><sym>dynamicForte</sym>" },
      { 127,  false, "ffff",   "<sym>dynamicForte</sym><sym>dynamicForte</sym><sym>dynamicForte</sym><sym>dynamicForte</sym>" },
      { 127,  false, "fffff",  "<sym>dynamicForte</sym><sym>dynamicForte</sym><sym>dynamicForte</sym><sym>dynamicForte</sym><sym>dynamicForte</sym>" },
      { 127,  false, "ffffff", "<sym>dynamicForte</sym><sym>dynamicForte</sym><sym>dynamicForte</sym><sym>dynamicForte</sym><sym>dynamicForte</sym><sym>dynamicForte</sym>" },

      // accents:
      { 0, true, "fp",   "<sym>dynamicForte</sym><sym>dynamicPiano</sym>"},
      { 0, true, "sf",   "<sym>dynamicSforzando</sym><sym>dynamicForte</sym>"},
      { 0, true, "sfz",  "<sym>dynamicSforzando</sym><sym>dynamicForte</sym><sym>dynamicZ</sym>"},
      { 0, true, "sff",  "<sym>dynamicSforzando</sym><sym>dynamicForte</sym><sym>dynamicForte</sym>"},
      { 0, true, "sffz", "<sym>dynamicSforzando</sym><sym>dynamicForte</sym><sym>dynamicForte</sym><sym>dynamicZ</sym>"},
      { 0, true, "sfp",  "<sym>dynamicSforzando</sym><sym>dynamicForte</sym><sym>dynamicPiano</sym>"},
      { 0, true, "sfpp", "<sym>dynamicSforzando</sym><sym>dynamicForte</sym><sym>dynamicPiano</sym><sym>dynamicPiano</sym>"},
      { 0, true, "rfz",  "<sym>dynamicRinforzando</sym><sym>dynamicForte</sym><sym>dynamicZ</sym>"},
      { 0, true, "rf",   "<sym>dynamicRinforzando</sym><sym>dynamicForte</sym>"},
      { 0, true, "fz",   "<sym>dynamicForte</sym><sym>dynamicZ</sym>"},
      { 0, true, "m",    "<sym>dynamicMezzo</sym>"},
      { 0, true, "r",    "<sym>dynamicRinforzando</sym>"},
      { 0, true, "s",    "<sym>dynamicSforzando</sym>"},
      { 0, true, "z",    "<sym>dynamicZ</sym>"},
      { 0, true, "n",    "<sym>dynamicNiente</sym>" }
      };

//---------------------------------------------------------
//   Dynamic
//---------------------------------------------------------

Dynamic::Dynamic(Score* s)
   : Text(s)
      {
      setFlags(ElementFlag::MOVABLE | ElementFlag::SELECTABLE | ElementFlag::ON_STAFF);
      _velocity    = -1;               // -1: derive velocity from dynList
      _dynRange    = Range::PART;
      setTextStyleType(TextStyleType::DYNAMICS);
      _dynamicType = Type::OTHER;
      }

Dynamic::Dynamic(const Dynamic& d)
   : Text(d)
      {
      _dynamicType = d._dynamicType;
      _velocity    = d._velocity;
      _dynRange    = d._dynRange;
      }

//---------------------------------------------------------
//   velocity
//    returns the explicit velocity if one was set (>0),
//    otherwise the table default for the dynamic type
//---------------------------------------------------------

int Dynamic::velocity() const
      {
      return _velocity <= 0 ? dynList[int(dynamicType())].velocity : _velocity;
      }

//---------------------------------------------------------
//   write
//---------------------------------------------------------

void Dynamic::write(Xml& xml) const
      {
      if (!xml.canWrite(this))
            return;
      xml.stag("Dynamic");
      xml.tag("subtype", dynamicTypeName());
      writeProperty(xml, P_ID::VELOCITY);
      writeProperty(xml, P_ID::DYNAMIC_RANGE);
      // only write the text body for free-form ("other") dynamics
      Text::writeProperties(xml, dynamicType() == Type::OTHER);
      xml.etag();
      }

//---------------------------------------------------------
//   read
//---------------------------------------------------------

void Dynamic::read(XmlReader& e)
      {
      while (e.readNextStartElement()) {
            const QStringRef& tag = e.name();
            if (tag == "subtype")
                  setDynamicType(e.readElementText());
            else if (tag == "velocity")
                  _velocity = e.readInt();
            else if (tag == "dynType")
                  _dynRange = Range(e.readInt());
            else if (!Text::readProperties(e))
                  e.unknown();
            }
      if (textStyleType() == TextStyleType::DEFAULT)
            setTextStyleType(TextStyleType::DYNAMICS);
      }

//---------------------------------------------------------
//   layout
//---------------------------------------------------------

void Dynamic::layout()
      {
      if (autoplace())
            setUserOff(QPointF());
      QPointF p(textStyle().offset(spatium()));
      if (placeAbove())
            p.ry() = staff()->height() - p.ry() + lineHeight();
      setPos(p);
      Text::layout1();

      // horizontally center the dynamic on the first chord/rest found in
      // this segment among the voices of our staff
      Segment* s = segment();
      if (s) {
            int t = track() & ~0x3;    // first track of this staff
            for (int voice = 0; voice < VOICES; ++voice) {
                  Element* e = s->element(t + voice);
                  if (!e)
                        continue;
                  if (e->isChord()) {
                        Chord* c = toChord(e);
                        qreal noteHeadWidth = score()->noteHeadWidth() * c->mag();
                        if (c->stem() && !c->up())    // stem down
                              rxpos() += noteHeadWidth * .25;  // center on stem + optical correction
                        else
                              rxpos() += noteHeadWidth * .5;   // center on notehead
                        }
                  else
                        rxpos() += e->width() * .5;
                  break;
                  }
            }
      adjustReadPos();
      }

//-------------------------------------------------------------------
//   doAutoplace
//
//    Move Dynamic up or down to avoid collisions with other elements.
//-------------------------------------------------------------------

void Dynamic::doAutoplace()
      {
      Segment* s = segment();
      if (!(s && autoplace()))
            return;

      qreal minDistance = score()->styleP(StyleIdx::dynamicsMinDistance);
      Shape s1          = s->staffShape(staffIdx()).translated(s->pos());
      Shape s2          = shape().translated(s->pos());

      if (placeAbove()) {
            qreal d = s2.minVerticalDistance(s1);
            if (d > -minDistance)
                  rUserYoffset() = -d - minDistance;
            }
      else {
            qreal d = s1.minVerticalDistance(s2);
            if (d > -minDistance)
                  rUserYoffset() = d + minDistance;
            }
      }

//---------------------------------------------------------
//   setDynamicType
//    map a tag (or its rendered text) to a table entry;
//    unknown tags become Type::OTHER with the raw text
//---------------------------------------------------------

void Dynamic::setDynamicType(const QString& tag)
      {
      int n = sizeof(dynList)/sizeof(*dynList);
      for (int i = 0; i < n; ++i) {
            if (dynList[i].tag == tag || dynList[i].text == tag) {
                  setDynamicType(Type(i));
                  setXmlText(QString::fromUtf8(dynList[i].text));
                  return;
                  }
            }
      qDebug("setDynamicType: other <%s>", qPrintable(tag));
      setDynamicType(Type::OTHER);
      setXmlText(tag);
      }

//---------------------------------------------------------
//   dynamicTypeName
//---------------------------------------------------------

QString Dynamic::dynamicTypeName() const
      {
      return dynList[int(dynamicType())].tag;
      }

//---------------------------------------------------------
//   startEdit
//---------------------------------------------------------

void Dynamic::startEdit(MuseScoreView* v, const QPointF& p)
      {
      Text::startEdit(v, p);
      }

//---------------------------------------------------------
//   endEdit
//    if the user changed the text away from the canonical
//    rendering, demote the type to OTHER
//---------------------------------------------------------

void Dynamic::endEdit()
      {
      Text::endEdit();
      if (xmlText() != QString::fromUtf8(dynList[int(_dynamicType)].text))
            _dynamicType = Type::OTHER;
      }

//---------------------------------------------------------
//   reset
//---------------------------------------------------------

void Dynamic::reset()
      {
      Text::reset();
      }

//---------------------------------------------------------
//   drag
//---------------------------------------------------------

QRectF Dynamic::drag(EditData* ed)
      {
      QRectF f = Element::drag(ed);

      //
      // move anchor
      //
      // unless Shift+Ctrl are held, re-anchor to the segment/staff
      // under the drag position, preserving the visual position
      Qt::KeyboardModifiers km = qApp->keyboardModifiers();
      if (km != (Qt::ShiftModifier | Qt::ControlModifier)) {
            int si = staffIdx();
            Segment* seg = segment();
            score()->dragPosition(ed->pos, &si, &seg);
            if (seg != segment() || staffIdx() != si) {
                  QPointF pos1(canvasPos());
                  score()->undo(new ChangeParent(this, seg, si));
                  setUserOff(QPointF());
                  layout();
                  QPointF pos2(canvasPos());
                  setUserOff(pos1 - pos2);
                  ed->startMove = pos2;
                  }
            }
      return f;
      }

//---------------------------------------------------------
//   undoSetDynRange
//---------------------------------------------------------

void Dynamic::undoSetDynRange(Range v)
      {
      undoChangeProperty(P_ID::DYNAMIC_RANGE, int(v));
      }

//---------------------------------------------------------
//   getProperty
//---------------------------------------------------------

QVariant Dynamic::getProperty(P_ID propertyId) const
      {
      switch (propertyId) {
            case P_ID::DYNAMIC_RANGE:
                  return int(_dynRange);
            case P_ID::VELOCITY:
                  return velocity();
            case P_ID::SUBTYPE:
                  return int(_dynamicType);
            default:
                  return Text::getProperty(propertyId);
            }
      }

//---------------------------------------------------------
//   setProperty
//---------------------------------------------------------

bool Dynamic::setProperty(P_ID propertyId, const QVariant& v)
      {
      switch (propertyId) {
            case P_ID::DYNAMIC_RANGE:
                  _dynRange = Range(v.toInt());
                  break;
            case P_ID::VELOCITY:
                  _velocity = v.toInt();
                  break;
            case P_ID::SUBTYPE:
                  _dynamicType = Type(v.toInt());
                  break;
            default:
                  if (!Text::setProperty(propertyId, v))
                        return false;
                  break;
            }
      triggerLayout();
      return true;
      }

//---------------------------------------------------------
//   propertyDefault
//---------------------------------------------------------

QVariant Dynamic::propertyDefault(P_ID id) const
      {
      switch(id) {
            case P_ID::TEXT_STYLE_TYPE:
                  return int(TextStyleType::DYNAMICS);
            case P_ID::DYNAMIC_RANGE:
                  return int(Range::PART);
            case P_ID::VELOCITY:
                  return -1;
            default:
                  return Text::propertyDefault(id);
            }
      }

//---------------------------------------------------------
//   accessibleInfo
//---------------------------------------------------------

QString Dynamic::accessibleInfo() const
      {
      return QString("%1: %2").arg(Element::accessibleInfo()).arg(this->dynamicTypeName());
      }

}
Red54/MuseScore
libmscore/dynamic.cpp
C++
gpl-2.0
13,542
@echo off
rem Run the "unc" helper once for each HC12 ISR source file.
for %%f in (ISR_GCC_HC12B.c ISR_GCC_HC12Dx128.c) do call unc %%f
brahimalaya/K-OS--Driver-Kit
InstallISR/gcc/hc12/proc.bat
Batchfile
gpl-2.0
64
<?php
/**
 * The Sidebar containing the main widget areas.
 *
 * Prints every widget assigned to the 'sidebar' widget area, followed by a
 * clearing element so floated widgets do not affect the following layout.
 *
 * @package dino
 */
?>
<div id="sidebar">
	<?php /* Render all widgets registered for the 'sidebar' area. */ dynamic_sidebar('sidebar');?>
	<div class="clear"></div>
</div>
rayhan-mursalin/ecnh
wp-content/themes/food-cook/sidebar.php
PHP
gpl-2.0
188
/* ============================================================================== This file is part of the JUCE library. Copyright (c) 2017 - ROLI Ltd. JUCE is an open source library subject to commercial or open-source licensing. By using JUCE, you agree to the terms of both the JUCE 5 End-User License Agreement and JUCE 5 Privacy Policy (both updated and effective as of the 27th April 2017). End User License Agreement: www.juce.com/juce-5-licence Privacy Policy: www.juce.com/juce-5-privacy-policy Or: You may also use this code under the terms of the GPL v3 (see www.gnu.org/licenses). JUCE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY, AND ALL WARRANTIES, WHETHER EXPRESSED OR IMPLIED, INCLUDING MERCHANTABILITY AND FITNESS FOR PURPOSE, ARE DISCLAIMED. ============================================================================== */ namespace juce { //============================================================================== /** A base class for buttons. This contains all the logic for button behaviours such as enabling/disabling, responding to shortcut keystrokes, auto-repeating when held down, toggle-buttons and radio groups, etc. @see TextButton, DrawableButton, ToggleButton @tags{GUI} */ class JUCE_API Button : public Component, public SettableTooltipClient { protected: //============================================================================== /** Creates a button. @param buttonName the text to put in the button (the component's name is also initially set to this string, but these can be changed later using the setName() and setButtonText() methods) */ explicit Button (const String& buttonName); public: /** Destructor. */ ~Button() override; //============================================================================== /** Changes the button's text. @see getButtonText */ void setButtonText (const String& newText); /** Returns the text displayed in the button. 
@see setButtonText */ const String& getButtonText() const { return text; } //============================================================================== /** Returns true if the button is currently being held down. @see isOver */ bool isDown() const noexcept; /** Returns true if the mouse is currently over the button. This will be also be true if the button is being held down. @see isDown */ bool isOver() const noexcept; //============================================================================== /** A button has an on/off state associated with it, and this changes that. By default buttons are 'off' and for simple buttons that you click to perform an action you won't change this. Toggle buttons, however will want to change their state when turned on or off. @param shouldBeOn whether to set the button's toggle state to be on or off. If it's a member of a button group, this will always try to turn it on, and to turn off any other buttons in the group @param notification determines the behaviour if the value changes - this can invoke a synchronous call to clicked(), but sendNotificationAsync is not supported @see getToggleState, setRadioGroupId */ void setToggleState (bool shouldBeOn, NotificationType notification); /** Returns true if the button is 'on'. By default buttons are 'off' and for simple buttons that you click to perform an action you won't change this. Toggle buttons, however will want to change their state when turned on or off. @see setToggleState */ bool getToggleState() const noexcept { return isOn.getValue(); } /** Returns the Value object that represents the button's toggle state. You can use this Value object to connect the button's state to external values or setters, either by taking a copy of the Value, or by using Value::referTo() to make it point to your own Value object. 
@see getToggleState, Value */ Value& getToggleStateValue() noexcept { return isOn; } /** This tells the button to automatically flip the toggle state when the button is clicked. If set to true, then before the clicked() callback occurs, the toggle-state of the button is flipped. */ void setClickingTogglesState (bool shouldAutoToggleOnClick) noexcept; /** Returns true if this button is set to be an automatic toggle-button. This returns the last value that was passed to setClickingTogglesState(). */ bool getClickingTogglesState() const noexcept; //============================================================================== /** Enables the button to act as a member of a mutually-exclusive group of 'radio buttons'. If the group ID is set to a non-zero number, then this button will act as part of a group of buttons with the same ID, only one of which can be 'on' at the same time. Note that when it's part of a group, clicking a toggle-button that's 'on' won't turn it off. To find other buttons with the same ID, this button will search through its sibling components for ToggleButtons, so all the buttons for a particular group must be placed inside the same parent component. Set the group ID back to zero if you want it to act as a normal toggle button again. The notification argument lets you specify how other buttons should react to being turned on or off in response to this call. @see getRadioGroupId */ void setRadioGroupId (int newGroupId, NotificationType notification = sendNotification); /** Returns the ID of the group to which this button belongs. (See setRadioGroupId() for an explanation of this). */ int getRadioGroupId() const noexcept { return radioGroupId; } //============================================================================== /** Used to receive callbacks when a button is clicked. @see Button::addListener, Button::removeListener */ class JUCE_API Listener { public: /** Destructor. 
*/ virtual ~Listener() = default; /** Called when the button is clicked. */ virtual void buttonClicked (Button*) = 0; /** Called when the button's state changes. */ virtual void buttonStateChanged (Button*) {} }; /** Registers a listener to receive events when this button's state changes. If the listener is already registered, this will not register it again. @see removeListener */ void addListener (Listener* newListener); /** Removes a previously-registered button listener @see addListener */ void removeListener (Listener* listener); //============================================================================== /** You can assign a lambda to this callback object to have it called when the button is clicked. */ std::function<void()> onClick; /** You can assign a lambda to this callback object to have it called when the button's state changes. */ std::function<void()> onStateChange; //============================================================================== /** Causes the button to act as if it's been clicked. This will asynchronously make the button draw itself going down and up, and will then call back the clicked() method as if mouse was clicked on it. @see clicked */ virtual void triggerClick(); //============================================================================== /** Sets a command ID for this button to automatically invoke when it's clicked. When the button is pressed, it will use the given manager to trigger the command ID. Obviously be careful that the ApplicationCommandManager doesn't get deleted before this button is. To disable the command triggering, call this method and pass nullptr as the command manager. If generateTooltip is true, then the button's tooltip will be automatically generated based on the name of this command and its current shortcut key. 
@see addShortcut, getCommandID */ void setCommandToTrigger (ApplicationCommandManager* commandManagerToUse, CommandID commandID, bool generateTooltip); /** Returns the command ID that was set by setCommandToTrigger(). */ CommandID getCommandID() const noexcept { return commandID; } //============================================================================== /** Assigns a shortcut key to trigger the button. The button registers itself with its top-level parent component for keypresses. Note that a different way of linking buttons to keypresses is by using the setCommandToTrigger() method to invoke a command. @see clearShortcuts */ void addShortcut (const KeyPress&); /** Removes all key shortcuts that had been set for this button. @see addShortcut */ void clearShortcuts(); /** Returns true if the given keypress is a shortcut for this button. @see addShortcut */ bool isRegisteredForShortcut (const KeyPress&) const; //============================================================================== /** Sets an auto-repeat speed for the button when it is held down. (Auto-repeat is disabled by default). @param initialDelayInMillisecs how long to wait after the mouse is pressed before triggering the next click. If this is zero, auto-repeat is disabled @param repeatDelayInMillisecs the frequently subsequent repeated clicks should be triggered @param minimumDelayInMillisecs if this is greater than 0, the auto-repeat speed will get faster, the longer the button is held down, up to the minimum interval specified here */ void setRepeatSpeed (int initialDelayInMillisecs, int repeatDelayInMillisecs, int minimumDelayInMillisecs = -1) noexcept; /** Sets whether the button click should happen when the mouse is pressed or released. By default the button is only considered to have been clicked when the mouse is released, but setting this to true will make it call the clicked() method as soon as the button is pressed. 
This is useful if the button is being used to show a pop-up menu, as it allows the click to be used as a drag onto the menu. */ void setTriggeredOnMouseDown (bool isTriggeredOnMouseDown) noexcept; /** Returns whether the button click happens when the mouse is pressed or released. @see setTriggeredOnMouseDown */ bool getTriggeredOnMouseDown() const noexcept; /** Returns the number of milliseconds since the last time the button went into the 'down' state. */ uint32 getMillisecondsSinceButtonDown() const noexcept; //============================================================================== /** Sets the tooltip for this button. @see TooltipClient, TooltipWindow */ void setTooltip (const String& newTooltip) override; //============================================================================== /** A combination of these flags are used by setConnectedEdges(). */ enum ConnectedEdgeFlags { ConnectedOnLeft = 1, ConnectedOnRight = 2, ConnectedOnTop = 4, ConnectedOnBottom = 8 }; /** Hints about which edges of the button might be connected to adjoining buttons. The value passed in is a bitwise combination of any of the values in the ConnectedEdgeFlags enum. E.g. if you are placing two buttons adjacent to each other, you could use this to indicate which edges are touching, and the LookAndFeel might choose to drawn them without rounded corners on the edges that connect. It's only a hint, so the LookAndFeel can choose to ignore it if it's not relevant for this type of button. */ void setConnectedEdges (int connectedEdgeFlags); /** Returns the set of flags passed into setConnectedEdges(). */ int getConnectedEdgeFlags() const noexcept { return connectedEdgeFlags; } /** Indicates whether the button adjoins another one on its left edge. @see setConnectedEdges */ bool isConnectedOnLeft() const noexcept { return (connectedEdgeFlags & ConnectedOnLeft) != 0; } /** Indicates whether the button adjoins another one on its right edge. 
@see setConnectedEdges */ bool isConnectedOnRight() const noexcept { return (connectedEdgeFlags & ConnectedOnRight) != 0; } /** Indicates whether the button adjoins another one on its top edge. @see setConnectedEdges */ bool isConnectedOnTop() const noexcept { return (connectedEdgeFlags & ConnectedOnTop) != 0; } /** Indicates whether the button adjoins another one on its bottom edge. @see setConnectedEdges */ bool isConnectedOnBottom() const noexcept { return (connectedEdgeFlags & ConnectedOnBottom) != 0; } //============================================================================== /** Used by setState(). */ enum ButtonState { buttonNormal, buttonOver, buttonDown }; /** Can be used to force the button into a particular state. This only changes the button's appearance, it won't trigger a click, or stop any mouse-clicks from happening. The state that you set here will only last until it is automatically changed when the mouse enters or exits the button, or the mouse-button is pressed or released. */ void setState (ButtonState newState); /** Returns the button's current over/down/up state. */ ButtonState getState() const noexcept { return buttonState; } //============================================================================== /** This abstract base class is implemented by LookAndFeel classes to provide button-drawing functionality. */ struct JUCE_API LookAndFeelMethods { virtual ~LookAndFeelMethods() = default; virtual void drawButtonBackground (Graphics&, Button&, const Colour& backgroundColour, bool shouldDrawButtonAsHighlighted, bool shouldDrawButtonAsDown) = 0; virtual Font getTextButtonFont (TextButton&, int buttonHeight) = 0; virtual int getTextButtonWidthToFitText (TextButton&, int buttonHeight) = 0; /** Draws the text for a TextButton. */ virtual void drawButtonText (Graphics&, TextButton&, bool shouldDrawButtonAsHighlighted, bool shouldDrawButtonAsDown) = 0; /** Draws the contents of a standard ToggleButton. 
*/ virtual void drawToggleButton (Graphics&, ToggleButton&, bool shouldDrawButtonAsHighlighted, bool shouldDrawButtonAsDown) = 0; virtual void changeToggleButtonWidthToFitText (ToggleButton&) = 0; virtual void drawTickBox (Graphics&, Component&, float x, float y, float w, float h, bool ticked, bool isEnabled, bool shouldDrawButtonAsHighlighted, bool shouldDrawButtonAsDown) = 0; virtual void drawDrawableButton (Graphics&, DrawableButton&, bool shouldDrawButtonAsHighlighted, bool shouldDrawButtonAsDown) = 0; }; // This method's parameters have changed - see the new version. JUCE_DEPRECATED (void setToggleState (bool, bool)); protected: //============================================================================== /** This method is called when the button has been clicked. Subclasses can override this to perform whatever actions they need to do. In general, you wouldn't use this method to receive clicks, but should get your callbacks by attaching a std::function to the onClick callback, or adding a Button::Listener. @see triggerClick, onClick */ virtual void clicked(); /** This method is called when the button has been clicked. By default it just calls clicked(), but you might want to override it to handle things like clicking when a modifier key is pressed, etc. @see ModifierKeys */ virtual void clicked (const ModifierKeys& modifiers); /** Subclasses should override this to actually paint the button's contents. It's better to use this than the paint method, because it gives you information about the over/down state of the button. @param g the graphics context to use @param shouldDrawButtonAsHighlighted true if the button is either in the 'over' or 'down' state @param shouldDrawButtonAsDown true if the button should be drawn in the 'down' position */ virtual void paintButton (Graphics& g, bool shouldDrawButtonAsHighlighted, bool shouldDrawButtonAsDown) = 0; /** Called when the button's up/down/over state changes. 
Subclasses can override this if they need to do something special when the button goes up or down. @see isDown, isOver */ virtual void buttonStateChanged(); //============================================================================== /** @internal */ virtual void internalClickCallback (const ModifierKeys&); /** @internal */ void handleCommandMessage (int commandId) override; /** @internal */ void mouseEnter (const MouseEvent&) override; /** @internal */ void mouseExit (const MouseEvent&) override; /** @internal */ void mouseDown (const MouseEvent&) override; /** @internal */ void mouseDrag (const MouseEvent&) override; /** @internal */ void mouseUp (const MouseEvent&) override; /** @internal */ bool keyPressed (const KeyPress&) override; /** @internal */ using Component::keyStateChanged; /** @internal */ void paint (Graphics&) override; /** @internal */ void parentHierarchyChanged() override; /** @internal */ void visibilityChanged() override; /** @internal */ void focusGained (FocusChangeType) override; /** @internal */ void focusLost (FocusChangeType) override; /** @internal */ void enablementChanged() override; private: //============================================================================== Array<KeyPress> shortcuts; WeakReference<Component> keySource; String text; ListenerList<Listener> buttonListeners; struct CallbackHelper; std::unique_ptr<CallbackHelper> callbackHelper; uint32 buttonPressTime = 0, lastRepeatTime = 0; ApplicationCommandManager* commandManagerToUse = nullptr; int autoRepeatDelay = -1, autoRepeatSpeed = 0, autoRepeatMinimumDelay = -1; int radioGroupId = 0, connectedEdgeFlags = 0; CommandID commandID = {}; ButtonState buttonState = buttonNormal, lastStatePainted = buttonNormal; Value isOn; bool lastToggleState = false; bool clickTogglesState = false; bool needsToRelease = false; bool needsRepainting = false; bool isKeyDown = false; bool triggerOnMouseDown = false; bool generateTooltip = false; void repeatTimerCallback(); bool 
keyStateChangedCallback(); void applicationCommandListChangeCallback(); void updateAutomaticTooltip (const ApplicationCommandInfo&); ButtonState updateState(); ButtonState updateState (bool isOver, bool isDown); bool isShortcutPressed() const; void turnOffOtherButtonsInGroup (NotificationType click, NotificationType state); void flashButtonState(); void sendClickMessage (const ModifierKeys&); void sendStateMessage(); void setToggleState (bool shouldBeOn, NotificationType click, NotificationType state); bool isMouseSourceOver (const MouseEvent& e); JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (Button) }; } // namespace juce
liamlacey/Shuttertone
JuceLibraryCode/modules/juce_gui_basics/buttons/juce_Button.h
C
gpl-2.0
21,578
describe('fs-xhr',function() { function createJQPromise(success) { return { done: function() { var p = Q.when(success); return p.then.apply(p,arguments); } }; } var fakeJQuery = { get: jasmine.createSpy('jqget').andReturn(createJQPromise('get')), post: jasmine.createSpy('jqpost').andReturn(createJQPromise('post')), ajax: jasmine.createSpy('jqajax').andReturn(createJQPromise('ajax')) }; var fs = factory('services/fs-xhr',{ 'q':Q, 'jquery': fakeJQuery }); describe('fs signature',function() { it('should have a read method',function() { expect(fs.read).not.toBeUndefined(); }); it('should have a write method',function() { expect(fs.write).not.toBeUndefined(); }); it('should have a remove method',function() { expect(fs.remove).not.toBeUndefined(); }); }); describe('reading',function() { it('should read a test file',function() { return fs.read('foo.txt').then(function(data) { expect(data).toBe('get'); expect(fakeJQuery.get).toHaveBeenCalledWith('fs/foo.txt'); }); }); it('should fail on xhr exception',function() { fakeJQuery.get.andReturn(createJQPromise(Q.reject(new Error('foo')))); return fs.read('foo.txt').catch(function(err) { expect(fakeJQuery.get).toHaveBeenCalledWith('fs/foo.txt'); expect(err.message).toBe('foo'); }); }); }); describe('writing',function() { it('should write a test file',function() { return fs.write('foo.txt','bar').then(function(data) { expect(fakeJQuery.post).toHaveBeenCalledWith('fs/foo.txt','bar'); expect(data).toBe('post'); }); }); it('should fail on xhr exception',function() { fakeJQuery.post.andReturn(createJQPromise(Q.reject(new Error('foo')))); return fs.write('foo.txt','bar').catch(function(err) { expect(fakeJQuery.post).toHaveBeenCalledWith('fs/foo.txt','bar'); expect(err.message).toBe('foo'); }); }); }); describe('removing',function() { it('should remove a test file',function() { return fs.remove('foo.txt').then(function(data) { expect(fakeJQuery.ajax).toHaveBeenCalledWith('fs/foo.txt',{ type: 'DELETE' }); expect(data).toBe('ajax'); }); 
}); it('should fail on xhr exception',function() { fakeJQuery.ajax.andReturn(createJQPromise(Q.reject(new Error('foo')))); return fs.remove('foo.txt','bar').catch(function(err) { expect(fakeJQuery.ajax).toHaveBeenCalledWith('fs/foo.txt',{ type: 'DELETE' }); expect(err.message).toBe('foo'); }); }); }); });
rikkertkoppes/fllscoring
spec/services/fs-xhrSpec.js
JavaScript
gpl-2.0
3,106
/* * Tool for fine grained PE code permutation * Copyright (C) 2015 Bruno Humic * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "Node.h" Node::Node() { } Node::~Node() { free(instructions); children.clear(); } DWORD Node::GetOffset() { return dwOffset; } DWORD Node::GetSize() { return dwSize; } BYTE* Node::GetInstructions() { return instructions; } std::vector<Node* > Node::GetChildren() { return children; } int Node::AppendChild(Node* child) { // Check if file alredy exists. Loop removal for (std::vector<Node* >::iterator it = children.begin(); it != children.end(); ++it) { if ((**it).dwOffset == child->dwOffset) return 1; } children.push_back(child); return 0; } Node* Node::FindChild(DWORD offset) { int numOfChildren = children.size(); for (int i = 0; i < numOfChildren; ++i) { Node* tmp = children.at(i); if (tmp->dwOffset == offset) return tmp; } return nullptr; } void Node::SetEnd(BOOL value) { end = value; } void Node::SetOffset(DWORD offset) { dwOffset = offset; } void Node::SetInstructions(BYTE* instructions, DWORD size) { this->instructions = (BYTE*)malloc(size); this->dwSize = size; std::memcpy((BYTE*)this->instructions, (BYTE*)instructions, size); } bool Node::operator==(const Node& node) { if (this->dwOffset == node.dwOffset) return true; return false; }
bhumic/PErmutator
Source/Node.cpp
C++
gpl-2.0
2,029
// MissionMapsClass.cpp // 1.4 // This file is part of OpenRedAlert. // // OpenRedAlert is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, version 2 of the License. // // OpenRedAlert is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with OpenRedAlert. If not, see <http://www.gnu.org/licenses/>. #include "MissionMapsClass.h" #include <string> #include "SDL/SDL_types.h" #include "vfs/vfs.h" #include "vfs/VFile.h" #include "misc/config.h" using std::string; MissionMapsClass::MissionMapsClass() { readMissionData(); } string MissionMapsClass::getGdiMissionMap(Uint32 missionNumber) { // If the index required is < if (missionNumber < GdiMissionMaps.size()) { return GdiMissionMaps[missionNumber]; } return NULL; } /** * @param missionNumber Number of the Nod/Soviets mission */ string MissionMapsClass::getNodMissionMap(Uint32 missionNumber) { // if (missionNumber < NodMissionMaps.size()) { return NodMissionMaps[missionNumber]; } return 0; } void MissionMapsClass::readMissionData() { VFile *MapFile; char Line[255]; string tmpString; Uint32 pos; Uint32 pos2; // I am not sure how the maps from td work so... 
if (getConfig().gamenum != GAME_RA) { return; } // get the offset and size of the binfile along with a // pointer to it //binfile = mixes->getOffsetAndSize(binname, &offset, &size); MapFile = VFSUtils::VFS_Open("mission.ini"); // Check if the file exist if (MapFile == 0) { return; } // Parse all line of the file int linesize = sizeof (Line); while (MapFile->getLine(Line, linesize )) { // Get the string tmpString = Line; if (tmpString.empty()) continue; //memset (Line, '\0', sizeof (Line)); unsigned int index = string::npos; while (tmpString.find('[') != -1 || tmpString.find(']') != -1) { index = tmpString.find('['); if (index != string::npos) tmpString.replace(index, 1, ""); index = tmpString.find(']'); if (index != string::npos) { tmpString.replace(index, 1, ""); //HACK: can we cut everything after this ']'? in windows there will be these special chars that seem to disturb... but more important would be to find the source for those strange chars and get em out //debug these strange chars: // char one = tmpString[index]; // char two = tmpString[index+1]; tmpString = tmpString.substr(0, index); } } /* OUTCOMMENCTED: //VS runtime checks wont like if you check on something that position.... it will trigger an assert while (tmpString[i] != '\0') { if (tmpString[i] == '[' || tmpString[i] == ']') { tmpString.erase(i, i+1); } i++; } */ // Check if the mission is availlable VFile* tmp = VFSUtils::VFS_Open(tmpString.c_str()); // Does it exist ? 
if (tmp != 0) { // Close the file VFSUtils::VFS_Close(tmp); // For now we don't support the mission objective strings if (((pos = tmpString.find(".INI", 0)) != (Uint32)string::npos) && ((pos2 = tmpString.find("A", 0)) != (Uint32)string::npos)) { // remove ".ini" at the end of the string tmpString.erase(pos, pos+4); // If it's soviets mission if ((pos = tmpString.find("SCU", 0)) != (Uint32)string::npos) { // Add the mission in Allies Mission list NodMissionMaps.push_back(tmpString); //they are in format like ex "SCU06EA" } else { // Add the mission in Soviets Mission list GdiMissionMaps.push_back(tmpString); //they are in format like ex "SCG01EA" } } } } }
erikkallen/openredalert
src/game/MissionMapsClass.cpp
C++
gpl-2.0
3,907
<?php render($page['content']); ?> <div class="l-page"> <div class="l-main" id="main-content"> <div class="l-content" role="main"> <h1><?php print t('Access denied'); ?></h1> </div> </div> </div>
liquidfridge/phoebe
templates/system/page--403.tpl.php
PHP
gpl-2.0
214