code
stringlengths
3
1.01M
repo_name
stringlengths
5
116
path
stringlengths
3
311
language
stringclasses
30 values
license
stringclasses
15 values
size
int64
3
1.01M
// Copyright (c) 2011 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package org.chromium.sdk.tests.system.runner; import java.io.File; import java.util.List; /** * Gets a fresh Chromium build from build site in internet. */ class BuildLoader { interface LoadedBuild { java.io.File getChromeBinary(); int getBuildNumber(); } static LoadedBuild load() { GoogleStorage googleStorage = new GoogleStorage("http://commondatastorage.googleapis.com/chromium-browser-continuous"); List<GoogleStorage.Resource> buildList = googleStorage.readNodes("Linux_x64/"); int maxBuildNum = -1; GoogleStorage.Dir dir = null; for (GoogleStorage.Resource res : buildList) { GoogleStorage.Dir nextDir = res.accept( new GoogleStorage.Resource.Visitor<GoogleStorage.Dir>() { @Override public GoogleStorage.Dir visitFile(GoogleStorage.File file) { return null; } @Override public GoogleStorage.Dir visitDir(GoogleStorage.Dir dir) { return dir; } }); if (nextDir == null) { continue; } int buildNum = Integer.parseInt(nextDir.getShortName()); if (buildNum > maxBuildNum) { maxBuildNum = buildNum; dir = nextDir; } } if (dir == null) { throw new RuntimeException(); } final int buildVersion = maxBuildNum; GoogleStorage.File zipFile = null; for (GoogleStorage.Resource res : dir.getChildren()) { GoogleStorage.File nextFile = res.accept(new GoogleStorage.Resource.Visitor<GoogleStorage.File>() { @Override public GoogleStorage.File visitFile(GoogleStorage.File file) { return file; } @Override public GoogleStorage.File visitDir(GoogleStorage.Dir dir) { return null; } }); if (nextFile == null) { continue; } String shortName = nextFile.getShortName(); if (!shortName.endsWith(".zip")) { continue; } zipFile = nextFile; break; } if (zipFile == null) { throw new RuntimeException(); } String url = zipFile.getUrl(); final java.io.File chromeFile = ICustom.INSTANCE.downloadChrome(url); return new LoadedBuild() { @Override 
public File getChromeBinary() { return chromeFile; } @Override public int getBuildNumber() { return buildVersion; } }; } // Simply loads a new build. public static void main(String[] args) { LoadedBuild loadedBuild = load(); System.out.println("Build #" + loadedBuild.getBuildNumber() + " put into " + loadedBuild.getChromeBinary().getPath()); } }
ParinVachhani/chromedevtools
utils/org.chromium.sdk.tests.system.runner/src/org/chromium/sdk/tests/system/runner/BuildLoader.java
Java
bsd-3-clause
2,786
<!DOCTYPE html>
<script src="../../../resources/js-test.js"></script>
<div id="host1"></div>
<div id="host2"></div>
<div id="host3" class="c3"></div>
<div id="host4"></div>
<script>
description("Check that targeted class invalidation works with the :host pseudo class.");

// Create shadow trees
var host1 = document.getElementById("host1");
host1.createShadowRoot().innerHTML = "<style>:host(.c1) { background-color: green }</style><div></div><div></div><div></div><div></div><div></div>";

var host2 = document.getElementById("host2");
host2.createShadowRoot().innerHTML = '<style>:host(.c2) .inner { background-color: green }</style><div></div><div></div><div></div><div><span id="inner" class="inner"></span></div>';

var host3 = document.getElementById("host3");
host3.createShadowRoot().innerHTML = "<style>:host(#host3:not(.c3)) { background-color: green }</style><div></div><div></div><div></div><div></div>";

var host4 = document.getElementById("host4");
host4.createShadowRoot().innerHTML = "<style>:host(.nomatch, .c4) { background-color: green }</style><div></div><div></div><div></div><div></div>";

var transparent = "rgba(0, 0, 0, 0)";
var green = "rgb(0, 128, 0)";
var inner = host2.shadowRoot.getElementById("inner");

// No host carries its matching class yet, so every background is untouched.
shouldBe("getComputedStyle(host1, null).backgroundColor", "transparent");
shouldBe("getComputedStyle(inner, null).backgroundColor", "transparent");
shouldBe("getComputedStyle(host3, null).backgroundColor", "transparent");
shouldBe("getComputedStyle(host4, null).backgroundColor", "transparent");

document.body.offsetLeft; // force style recalc.
host1.className = "c1";
if (window.internals)
    shouldBe("internals.updateStyleAndReturnAffectedElementCount()", "1");
shouldBe("getComputedStyle(host1, null).backgroundColor", "green");

document.body.offsetLeft; // force style recalc.
host2.className = "c2";
if (window.internals)
    shouldBe("internals.updateStyleAndReturnAffectedElementCount()", "2");
shouldBe("getComputedStyle(inner, null).backgroundColor", "green");

document.body.offsetLeft; // force style recalc.
host3.className = "";
if (window.internals)
    shouldBe("internals.updateStyleAndReturnAffectedElementCount()", "1");
shouldBe("getComputedStyle(host3, null).backgroundColor", "green");

document.body.offsetLeft; // force style recalc.
host4.className = "c4";
if (window.internals)
    shouldBe("internals.updateStyleAndReturnAffectedElementCount()", "1");
shouldBe("getComputedStyle(host4, null).backgroundColor", "green");
</script>
vadimtk/chrome4sdp
third_party/WebKit/LayoutTests/fast/css/invalidation/targeted-class-host-pseudo.html
HTML
bsd-3-clause
2,504
// Generated source. // Generator: org.chromium.sdk.internal.wip.tools.protocolgenerator.Generator // Origin: http://svn.webkit.org/repository/webkit/trunk/Source/WebCore/inspector/Inspector.json@142888 package org.chromium.sdk.internal.wip.protocol.output.page; /** Tells if backend supports continuous painting */ public class CanContinuouslyPaintParams extends org.chromium.sdk.internal.wip.protocol.output.WipParamsWithResponse<org.chromium.sdk.internal.wip.protocol.input.page.CanContinuouslyPaintData> { public CanContinuouslyPaintParams() { } public static final String METHOD_NAME = org.chromium.sdk.internal.wip.protocol.BasicConstants.Domain.PAGE + ".canContinuouslyPaint"; @Override protected String getRequestName() { return METHOD_NAME; } @Override public org.chromium.sdk.internal.wip.protocol.input.page.CanContinuouslyPaintData parseResponse(org.chromium.sdk.internal.wip.protocol.input.WipCommandResponse.Data data, org.chromium.sdk.internal.wip.protocol.input.WipGeneratedParserRoot parser) throws org.chromium.sdk.internal.protocolparser.JsonProtocolParseException { return parser.parsePageCanContinuouslyPaintData(data.getUnderlyingObject()); } }
ParinVachhani/chromedevtools
plugins/org.chromium.sdk.wipbackend.dev/src-wip-generated/org/chromium/sdk/internal/wip/protocol/output/page/CanContinuouslyPaintParams.java
Java
bsd-3-clause
1,196
//
//  TKDecimalInputWithNextKeyView.h
//  Created by Devin Ross on 3/21/14.
//
/*

 tapku || http://github.com/devinross/tapkulibrary

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
 in the Software without restriction, including without limitation the rights
 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 copies of the Software, and to permit persons to whom the Software is
 furnished to do so, subject to the following conditions:

 The above copyright notice and this permission notice shall be included in
 all copies or substantial portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.

 */

#import "TKDecimalInputView.h"

/** `TKDecimalInputWithNextKeyView` is a subclass of `TKDecimalInputView` with a next key. */
@interface TKDecimalInputWithNextKeyView : TKDecimalInputView

///----------------------------
/// @name Properties
///----------------------------

/** The next key. */
@property (nonatomic,strong) TKInputKey *nextKey;

@end
flyingfan76/DiabeteHealth
tapkulibrary-master/src/TapkuLibrary/TKDecimalInputWithNextKeyView.h
C
mit
1,533
//------------------------------------------------------------------------------
// <copyright file="ILinqToSql.cs" company="Microsoft">
//     Copyright (c) Microsoft Corporation.  All rights reserved.
// </copyright>
//------------------------------------------------------------------------------

namespace System.Web.UI.WebControls {
    using System.Data.Linq;

    /// <summary>
    /// Thin abstraction over the LINQ-to-SQL operations used by the web
    /// controls, allowing the data-access layer to be substituted.
    /// </summary>
    internal interface ILinqToSql {
        /// <summary>Adds <paramref name="row"/> to <paramref name="table"/>.</summary>
        void Add(ITable table, object row);

        /// <summary>Attaches <paramref name="row"/> to <paramref name="table"/>.</summary>
        void Attach(ITable table, object row);

        /// <summary>Returns the original entity state of <paramref name="row"/> in <paramref name="table"/>.</summary>
        object GetOriginalEntityState(ITable table, object row);

        /// <summary>Refreshes <paramref name="entity"/> through <paramref name="dataContext"/> using <paramref name="mode"/>.</summary>
        void Refresh(DataContext dataContext, RefreshMode mode, object entity);

        /// <summary>Removes <paramref name="row"/> from <paramref name="table"/>.</summary>
        void Remove(ITable table, object row);

        /// <summary>Submits the pending changes tracked by <paramref name="dataContext"/>.</summary>
        void SubmitChanges(DataContext dataContext);
    }
}
sekcheong/referencesource
System.Web.Extensions/ui/WebControls/ILinqToSql.cs
C#
mit
756
// Doxygen-generated search index for function names starting with "d".
// Each entry maps a lowercased search token to the matching documented
// symbols: [displayName, [htmlAnchor, flag, qualifiedOwner], ...].
// Generated file — do not edit by hand; regenerate with Doxygen.
var searchData= [ ['data',['data',['../classv8_1_1_string_1_1_external_string_resource.html#a987309199b848511adb708e221e0fb0a',1,'v8::String::ExternalStringResource::data()'],['../classv8_1_1_string_1_1_external_one_byte_string_resource.html#aaeca31240d3dbf990d1b974e3c64593e',1,'v8::String::ExternalOneByteStringResource::data()'],['../classv8_1_1_external_one_byte_string_resource_impl.html#a37ada5dc21ecb982c50482c90fffe529',1,'v8::ExternalOneByteStringResourceImpl::data()']]], ['datetimeconfigurationchangenotification',['DateTimeConfigurationChangeNotification',['../classv8_1_1_date.html#adb084ec0683d3d195ad0f78af5f6f72b',1,'v8::Date']]], ['delete',['Delete',['../classv8_1_1_cpu_profile.html#a70c93f0c14d07a7e1bad42ee95665ca0',1,'v8::CpuProfile::Delete()'],['../classv8_1_1_heap_snapshot.html#aeaa6073009e4041839dff7a860d2548a',1,'v8::HeapSnapshot::Delete()']]], ['deleteallheapsnapshots',['DeleteAllHeapSnapshots',['../classv8_1_1_heap_profiler.html#a6a75bcc6d8350858597b6a6ce5e349a2',1,'v8::HeapProfiler']]], ['deoptimizeall',['DeoptimizeAll',['../classv8_1_1_testing.html#ae541bd8d75667db1d83c8aef7f8c1cf3',1,'v8::Testing']]], ['detachglobal',['DetachGlobal',['../classv8_1_1_context.html#a841c7dd92eb8c57df92a268a164dea97',1,'v8::Context']]], ['dispose',['Dispose',['../classv8_1_1_retained_object_info.html#a5011203f7c5949049ba36b8059f03eca',1,'v8::RetainedObjectInfo::Dispose()'],['../classv8_1_1_string_1_1_external_string_resource_base.html#af4720342ae31e1ab4656df3f15d069c0',1,'v8::String::ExternalStringResourceBase::Dispose()'],['../classv8_1_1_isolate.html#a1a5a5762e4221aff8c6b10f9e3cec0af',1,'v8::Isolate::Dispose()'],['../classv8_1_1_v8.html#a566450d632c0a63770682b9da3cae08d',1,'v8::V8::Dispose()']]] ];
v8-dox/v8-dox.github.io
fe4434b/html/search/functions_3.js
JavaScript
mit
1,743
/******************************************************************************* * Copyright (c) 2000, 2014 IBM Corporation and others. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html * * Contributors: * IBM Corporation - initial API and implementation * Benjamin Muskalla <b.muskalla@gmx.net> - [quick fix] Quick fix for missing synchronized modifier - https://bugs.eclipse.org/bugs/show_bug.cgi?id=245250 * Stephan Herrmann - Contributions for * [quick fix] Add quick fixes for null annotations - https://bugs.eclipse.org/337977 * [quick fix] The fix change parameter type to @NotNull generated a null change - https://bugs.eclipse.org/400668 * [quick fix] don't propose null annotations when those are disabled - https://bugs.eclipse.org/405086 * [quickfix] Update null annotation quick fixes for bug 388281 - https://bugs.eclipse.org/395555 * Lukas Hanke <hanke@yatta.de> - Bug 241696 [quick fix] quickfix to iterate over a collection - https://bugs.eclipse.org/bugs/show_bug.cgi?id=241696 *******************************************************************************/ package org.eclipse.jdt.internal.ui.text.correction; import org.eclipse.che.jdt.util.JavaModelUtil; import org.eclipse.core.runtime.CoreException; import org.eclipse.jdt.core.IBuffer; import org.eclipse.jdt.core.ICompilationUnit; import org.eclipse.jdt.core.IJavaProject; import org.eclipse.jdt.core.JavaCore; import org.eclipse.jdt.core.JavaModelException; import org.eclipse.jdt.core.compiler.IProblem; import org.eclipse.jdt.internal.corext.fix.NullAnnotationsRewriteOperations.ChangeKind; import org.eclipse.jdt.internal.ui.text.correction.proposals.ReplaceCorrectionProposal; import org.eclipse.jdt.internal.ui.text.correction.proposals.TaskMarkerProposal; import org.eclipse.jdt.ui.text.java.IInvocationContext; import 
org.eclipse.jdt.ui.text.java.IJavaCompletionProposal; import org.eclipse.jdt.ui.text.java.IProblemLocation; import org.eclipse.jdt.ui.text.java.IQuickFixProcessor; import org.eclipse.jdt.ui.text.java.correction.ICommandAccess; import java.util.ArrayList; import java.util.Collection; import java.util.HashSet; /** */ public class QuickFixProcessor implements IQuickFixProcessor { public boolean hasCorrections(ICompilationUnit cu, int problemId) { switch (problemId) { case IProblem.UnterminatedString: case IProblem.UnusedImport: case IProblem.DuplicateImport: case IProblem.CannotImportPackage: case IProblem.ConflictingImport: case IProblem.ImportNotFound: case IProblem.UndefinedMethod: case IProblem.UndefinedConstructor: case IProblem.ParameterMismatch: case IProblem.MethodButWithConstructorName: case IProblem.UndefinedField: case IProblem.UndefinedName: case IProblem.UnresolvedVariable: case IProblem.PublicClassMustMatchFileName: case IProblem.PackageIsNotExpectedPackage: case IProblem.UndefinedType: case IProblem.TypeMismatch: case IProblem.ReturnTypeMismatch: case IProblem.UnhandledException: case IProblem.UnhandledExceptionOnAutoClose: case IProblem.UnreachableCatch: case IProblem.InvalidCatchBlockSequence: case IProblem.InvalidUnionTypeReferenceSequence: case IProblem.VoidMethodReturnsValue: case IProblem.ShouldReturnValue: case IProblem.ShouldReturnValueHintMissingDefault: case IProblem.MissingReturnType: case IProblem.NonExternalizedStringLiteral: case IProblem.NonStaticAccessToStaticField: case IProblem.NonStaticAccessToStaticMethod: case IProblem.NonStaticOrAlienTypeReceiver: case IProblem.StaticMethodRequested: case IProblem.NonStaticFieldFromStaticInvocation: case IProblem.InstanceMethodDuringConstructorInvocation: case IProblem.InstanceFieldDuringConstructorInvocation: case IProblem.NotVisibleMethod: case IProblem.NotVisibleConstructor: case IProblem.NotVisibleType: case IProblem.NotVisibleField: case IProblem.BodyForAbstractMethod: case 
IProblem.AbstractMethodInAbstractClass: case IProblem.AbstractMethodMustBeImplemented: case IProblem.EnumAbstractMethodMustBeImplemented: case IProblem.AbstractMethodsInConcreteClass: case IProblem.AbstractMethodInEnum: case IProblem.EnumConstantMustImplementAbstractMethod: case IProblem.ShouldImplementHashcode: case IProblem.BodyForNativeMethod: case IProblem.OuterLocalMustBeFinal: case IProblem.UninitializedLocalVariable: case IProblem.UninitializedLocalVariableHintMissingDefault: case IProblem.UndefinedConstructorInDefaultConstructor: case IProblem.UnhandledExceptionInDefaultConstructor: case IProblem.NotVisibleConstructorInDefaultConstructor: case IProblem.AmbiguousType: case IProblem.UnusedPrivateMethod: case IProblem.UnusedPrivateConstructor: case IProblem.UnusedPrivateField: case IProblem.UnusedPrivateType: case IProblem.LocalVariableIsNeverUsed: case IProblem.ArgumentIsNeverUsed: case IProblem.MethodRequiresBody: case IProblem.NeedToEmulateFieldReadAccess: case IProblem.NeedToEmulateFieldWriteAccess: case IProblem.NeedToEmulateMethodAccess: case IProblem.NeedToEmulateConstructorAccess: case IProblem.SuperfluousSemicolon: case IProblem.UnnecessaryCast: case IProblem.UnnecessaryInstanceof: case IProblem.IndirectAccessToStaticField: case IProblem.IndirectAccessToStaticMethod: case IProblem.Task: case IProblem.UnusedMethodDeclaredThrownException: case IProblem.UnusedConstructorDeclaredThrownException: case IProblem.UnqualifiedFieldAccess: case IProblem.JavadocMissing: case IProblem.JavadocMissingParamTag: case IProblem.JavadocMissingReturnTag: case IProblem.JavadocMissingThrowsTag: case IProblem.JavadocUndefinedType: case IProblem.JavadocAmbiguousType: case IProblem.JavadocNotVisibleType: case IProblem.JavadocInvalidThrowsClassName: case IProblem.JavadocDuplicateThrowsClassName: case IProblem.JavadocDuplicateReturnTag: case IProblem.JavadocDuplicateParamName: case IProblem.JavadocInvalidParamName: case IProblem.JavadocUnexpectedTag: case 
IProblem.JavadocInvalidTag: case IProblem.NonBlankFinalLocalAssignment: case IProblem.DuplicateFinalLocalInitialization: case IProblem.FinalFieldAssignment: case IProblem.DuplicateBlankFinalFieldInitialization: case IProblem.AnonymousClassCannotExtendFinalClass: case IProblem.ClassExtendFinalClass: case IProblem.FinalMethodCannotBeOverridden: case IProblem.InheritedMethodReducesVisibility: case IProblem.MethodReducesVisibility: case IProblem.OverridingNonVisibleMethod: case IProblem.CannotOverrideAStaticMethodWithAnInstanceMethod: case IProblem.CannotHideAnInstanceMethodWithAStaticMethod: case IProblem.UnexpectedStaticModifierForMethod: case IProblem.LocalVariableHidingLocalVariable: case IProblem.LocalVariableHidingField: case IProblem.FieldHidingLocalVariable: case IProblem.FieldHidingField: case IProblem.ArgumentHidingLocalVariable: case IProblem.ArgumentHidingField: case IProblem.DuplicateField: case IProblem.DuplicateMethod: case IProblem.DuplicateTypeVariable: case IProblem.DuplicateNestedType: case IProblem.IllegalModifierForInterfaceMethod: case IProblem.IllegalModifierForInterfaceMethod18: case IProblem.IllegalModifierForInterface: case IProblem.IllegalModifierForClass: case IProblem.IllegalModifierForInterfaceField: case IProblem.IllegalModifierForMemberInterface: case IProblem.IllegalModifierForMemberClass: case IProblem.IllegalModifierForLocalClass: case IProblem.IllegalModifierForArgument: case IProblem.IllegalModifierForField: case IProblem.IllegalModifierForMethod: case IProblem.IllegalModifierForConstructor: case IProblem.IllegalModifierForVariable: case IProblem.IllegalModifierForEnum: case IProblem.IllegalModifierForEnumConstant: case IProblem.IllegalModifierForEnumConstructor: case IProblem.IllegalModifierForMemberEnum: case IProblem.UnexpectedStaticModifierForField: case IProblem.IllegalModifierCombinationFinalVolatileForField: case IProblem.IllegalVisibilityModifierForInterfaceMemberType: case IProblem.IncompatibleReturnType: case 
IProblem.IncompatibleExceptionInThrowsClause: case IProblem.NoMessageSendOnArrayType: case IProblem.InvalidOperator: case IProblem.MissingSerialVersion: case IProblem.UnnecessaryElse: case IProblem.SuperclassMustBeAClass: case IProblem.UseAssertAsAnIdentifier: case IProblem.UseEnumAsAnIdentifier: case IProblem.RedefinedLocal: case IProblem.RedefinedArgument: case IProblem.CodeCannotBeReached: case IProblem.DeadCode: case IProblem.InvalidUsageOfTypeParameters: case IProblem.InvalidUsageOfStaticImports: case IProblem.InvalidUsageOfForeachStatements: case IProblem.InvalidUsageOfTypeArguments: case IProblem.InvalidUsageOfEnumDeclarations: case IProblem.InvalidUsageOfVarargs: case IProblem.InvalidUsageOfAnnotations: case IProblem.InvalidUsageOfAnnotationDeclarations: case IProblem.FieldMissingDeprecatedAnnotation: case IProblem.OverridingDeprecatedMethod: case IProblem.MethodMissingDeprecatedAnnotation: case IProblem.TypeMissingDeprecatedAnnotation: case IProblem.MissingOverrideAnnotation: case IProblem.MissingOverrideAnnotationForInterfaceMethodImplementation: case IProblem.MethodMustOverride: case IProblem.MethodMustOverrideOrImplement: case IProblem.IsClassPathCorrect: case IProblem.MethodReturnsVoid: case IProblem.ForbiddenReference: case IProblem.DiscouragedReference: case IProblem.UnnecessaryNLSTag: case IProblem.AssignmentHasNoEffect: case IProblem.UnsafeTypeConversion: case IProblem.UnsafeElementTypeConversion: case IProblem.RawTypeReference: case IProblem.UnsafeRawMethodInvocation: case IProblem.RedundantSpecificationOfTypeArguments: case IProblem.UndefinedAnnotationMember: case IProblem.MissingValueForAnnotationMember: case IProblem.FallthroughCase: case IProblem.NonGenericType: case IProblem.UnhandledWarningToken: case IProblem.UnusedWarningToken: case IProblem.RedundantSuperinterface: case IProblem.JavadocInvalidMemberTypeQualification: case IProblem.IncompatibleTypesInForeach: case IProblem.MissingEnumConstantCase: case IProblem.MissingEnumDefaultCase: case 
IProblem.MissingDefaultCase: case IProblem.MissingEnumConstantCaseDespiteDefault: case IProblem.MissingSynchronizedModifierInInheritedMethod: case IProblem.UnusedObjectAllocation: case IProblem.MethodCanBeStatic: case IProblem.MethodCanBePotentiallyStatic: case IProblem.AutoManagedResourceNotBelow17: case IProblem.MultiCatchNotBelow17: case IProblem.PolymorphicMethodNotBelow17: case IProblem.BinaryLiteralNotBelow17: case IProblem.UnderscoresInLiteralsNotBelow17: case IProblem.SwitchOnStringsNotBelow17: case IProblem.DiamondNotBelow17: case IProblem.PotentialHeapPollutionFromVararg : case IProblem.UnsafeGenericArrayForVarargs: case IProblem.SafeVarargsOnFixedArityMethod : case IProblem.SafeVarargsOnNonFinalInstanceMethod: case IProblem.RequiredNonNullButProvidedNull: case IProblem.RequiredNonNullButProvidedPotentialNull: case IProblem.RequiredNonNullButProvidedSpecdNullable: case IProblem.RequiredNonNullButProvidedUnknown: case IProblem.IllegalReturnNullityRedefinition: case IProblem.IllegalRedefinitionToNonNullParameter: case IProblem.IllegalDefinitionToNonNullParameter: case IProblem.ParameterLackingNonNullAnnotation: case IProblem.ParameterLackingNullableAnnotation: case IProblem.SpecdNonNullLocalVariableComparisonYieldsFalse: case IProblem.RedundantNullCheckOnSpecdNonNullLocalVariable: case IProblem.RedundantNullAnnotation: case IProblem.UnusedTypeParameter: case IProblem.NullableFieldReference: case IProblem.ConflictingNullAnnotations: case IProblem.ConflictingInheritedNullAnnotations: case IProblem.ExplicitThisParameterNotBelow18: case IProblem.DefaultMethodNotBelow18: case IProblem.StaticInterfaceMethodNotBelow18: case IProblem.LambdaExpressionNotBelow18: case IProblem.MethodReferenceNotBelow18: case IProblem.ConstructorReferenceNotBelow18: case IProblem.IntersectionCastNotBelow18: case IProblem.InvalidUsageOfTypeAnnotations: return true; default: return SuppressWarningsSubProcessor.hasSuppressWarningsProposal(cu.getJavaProject(), problemId); } } private 
/*
 * Walks backwards from <code>offset</code> (exclusive) towards <code>start</code> and returns the
 * first position not preceded by one of <code>ignoreCharacters</code>. Falls back to
 * <code>start</code> when every character is ignorable or the buffer cannot be read.
 */
static int moveBack(int offset, int start, String ignoreCharacters, ICompilationUnit cu) {
	try {
		IBuffer buf= cu.getBuffer();
		while (offset >= start) {
			// Stop as soon as the character just before 'offset' is not ignorable.
			if (ignoreCharacters.indexOf(buf.getChar(offset - 1)) == -1) {
				return offset;
			}
			offset--;
		}
	} catch(JavaModelException e) {
		// use start
	}
	return start;
}

/* (non-Javadoc)
 * @see IAssistProcessor#getCorrections(org.eclipse.jdt.internal.ui.text.correction.IAssistContext, org.eclipse.jdt.internal.ui.text.correction.IProblemLocation[])
 */
public IJavaCompletionProposal[] getCorrections(IInvocationContext context, IProblemLocation[] locations) throws CoreException {
	if (locations == null || locations.length == 0) {
		return null;
	}

	// Each distinct problem id is processed only once; further locations with
	// the same id contribute no additional proposals.
	HashSet<Integer> handledProblems= new HashSet<Integer>(locations.length);
	ArrayList<ICommandAccess> resultingCollections= new ArrayList<ICommandAccess>();
	for (int i= 0; i < locations.length; i++) {
		IProblemLocation curr= locations[i];
		// NOTE(review): new Integer(int) is deprecated since Java 9; Integer.valueOf would be preferred.
		Integer id= new Integer(curr.getProblemId());
		if (handledProblems.add(id)) {
			process(context, curr, resultingCollections);
		}
	}
	return resultingCollections.toArray(new IJavaCompletionProposal[resultingCollections.size()]);
}

/*
 * Dispatches one problem to the sub-processor that builds quick-fix proposals
 * for it. Case order and the //$FALL-THROUGH$ chains are intentional. After the
 * switch a @SuppressWarnings proposal may additionally be offered for 5.0+ projects.
 */
private void process(IInvocationContext context, IProblemLocation problem, Collection<ICommandAccess> proposals) throws CoreException {
	int id= problem.getProblemId();
	if (id == 0) { // no proposals for none-problem locations
		return;
	}
	switch (id) {
		case IProblem.UnterminatedString:
			String quoteLabel= CorrectionMessages.JavaCorrectionProcessor_addquote_description;
			// Insert the closing quote before any trailing line delimiters.
			int pos= moveBack(problem.getOffset() + problem.getLength(), problem.getOffset(), "\n\r", context.getCompilationUnit()); //$NON-NLS-1$
			proposals.add(new ReplaceCorrectionProposal(quoteLabel, context.getCompilationUnit(), pos, 0, "\"", IProposalRelevance.ADD_QUOTE)); //$NON-NLS-1$
			break;
		case IProblem.UnusedImport:
		case IProblem.DuplicateImport:
		case IProblem.CannotImportPackage:
		case IProblem.ConflictingImport:
			ReorgCorrectionsSubProcessor.removeImportStatementProposals(context, problem, proposals);
			break;
		case IProblem.ImportNotFound:
			ReorgCorrectionsSubProcessor.importNotFoundProposals(context, problem, proposals);
			ReorgCorrectionsSubProcessor.removeImportStatementProposals(context, problem, proposals);
			break;
		case IProblem.UndefinedMethod:
			UnresolvedElementsSubProcessor.getMethodProposals(context, problem, false, proposals);
			break;
		case IProblem.UndefinedConstructor:
			UnresolvedElementsSubProcessor.getConstructorProposals(context, problem, proposals);
			break;
		case IProblem.UndefinedAnnotationMember:
			UnresolvedElementsSubProcessor.getAnnotationMemberProposals(context, problem, proposals);
			break;
		case IProblem.ParameterMismatch:
			// 'true' = suggest changes to the invocation's arguments as well.
			UnresolvedElementsSubProcessor.getMethodProposals(context, problem, true, proposals);
			break;
		case IProblem.MethodButWithConstructorName:
			ReturnTypeSubProcessor.addMethodWithConstrNameProposals(context, problem, proposals);
			break;
		case IProblem.UndefinedField:
		case IProblem.UndefinedName:
		case IProblem.UnresolvedVariable:
			UnresolvedElementsSubProcessor.getVariableProposals(context, problem, null, proposals);
			break;
		case IProblem.AmbiguousType:
		case IProblem.JavadocAmbiguousType:
			UnresolvedElementsSubProcessor.getAmbiguosTypeReferenceProposals(context, problem, proposals);
			break;
		case IProblem.PublicClassMustMatchFileName:
			ReorgCorrectionsSubProcessor.getWrongTypeNameProposals(context, problem, proposals);
			break;
		case IProblem.PackageIsNotExpectedPackage:
			ReorgCorrectionsSubProcessor.getWrongPackageDeclNameProposals(context, problem, proposals);
			break;
		case IProblem.UndefinedType:
		case IProblem.JavadocUndefinedType:
			UnresolvedElementsSubProcessor.getTypeProposals(context, problem, proposals);
			break;
		case IProblem.TypeMismatch:
		case IProblem.ReturnTypeMismatch:
			TypeMismatchSubProcessor.addTypeMismatchProposals(context, problem, proposals);
			break;
		case IProblem.IncompatibleTypesInForeach:
			TypeMismatchSubProcessor.addTypeMismatchInForEachProposals(context, problem, proposals);
			break;
		case IProblem.IncompatibleReturnType:
			TypeMismatchSubProcessor.addIncompatibleReturnTypeProposals(context, problem, proposals);
			break;
		case IProblem.IncompatibleExceptionInThrowsClause:
			TypeMismatchSubProcessor.addIncompatibleThrowsProposals(context, problem, proposals);
			break;
		case IProblem.UnhandledException:
		case IProblem.UnhandledExceptionOnAutoClose:
			LocalCorrectionsSubProcessor.addUncaughtExceptionProposals(context, problem, proposals);
			break;
		case IProblem.UnreachableCatch:
		case IProblem.InvalidCatchBlockSequence:
		case IProblem.InvalidUnionTypeReferenceSequence:
			LocalCorrectionsSubProcessor.addUnreachableCatchProposals(context, problem, proposals);
			break;
		case IProblem.RedundantSuperinterface:
			LocalCorrectionsSubProcessor.addRedundantSuperInterfaceProposal(context, problem, proposals);
			break;
		case IProblem.VoidMethodReturnsValue:
			ReturnTypeSubProcessor.addVoidMethodReturnsProposals(context, problem, proposals);
			break;
		case IProblem.MethodReturnsVoid:
			ReturnTypeSubProcessor.addMethodRetunsVoidProposals(context, problem, proposals);
			break;
		case IProblem.MissingReturnType:
			ReturnTypeSubProcessor.addMissingReturnTypeProposals(context, problem, proposals);
			break;
		case IProblem.ShouldReturnValue:
		case IProblem.ShouldReturnValueHintMissingDefault:
			ReturnTypeSubProcessor.addMissingReturnStatementProposals(context, problem, proposals);
			break;
		case IProblem.NonExternalizedStringLiteral:
			LocalCorrectionsSubProcessor.addNLSProposals(context, problem, proposals);
			break;
		case IProblem.UnnecessaryNLSTag:
			LocalCorrectionsSubProcessor.getUnnecessaryNLSTagProposals(context, problem, proposals);
			break;
		case IProblem.NonStaticAccessToStaticField:
		case IProblem.NonStaticAccessToStaticMethod:
		case IProblem.NonStaticOrAlienTypeReceiver:
		case IProblem.IndirectAccessToStaticField:
		case IProblem.IndirectAccessToStaticMethod:
			LocalCorrectionsSubProcessor.addCorrectAccessToStaticProposals(context, problem, proposals);
			break;
		case IProblem.StaticMethodRequested:
		case IProblem.NonStaticFieldFromStaticInvocation:
		case IProblem.InstanceMethodDuringConstructorInvocation:
		case IProblem.InstanceFieldDuringConstructorInvocation:
			ModifierCorrectionSubProcessor.addNonAccessibleReferenceProposal(context, problem, proposals, ModifierCorrectionSubProcessor.TO_STATIC, IProposalRelevance.CHANGE_MODIFIER_TO_STATIC);
			break;
		case IProblem.NonBlankFinalLocalAssignment:
		case IProblem.DuplicateFinalLocalInitialization:
		case IProblem.FinalFieldAssignment:
		case IProblem.DuplicateBlankFinalFieldInitialization:
		case IProblem.AnonymousClassCannotExtendFinalClass:
		case IProblem.ClassExtendFinalClass:
			ModifierCorrectionSubProcessor.addNonAccessibleReferenceProposal(context, problem, proposals, ModifierCorrectionSubProcessor.TO_NON_FINAL, IProposalRelevance.REMOVE_FINAL_MODIFIER);
			break;
		case IProblem.InheritedMethodReducesVisibility:
		case IProblem.MethodReducesVisibility:
		case IProblem.OverridingNonVisibleMethod:
			ModifierCorrectionSubProcessor.addChangeOverriddenModifierProposal(context, problem, proposals, ModifierCorrectionSubProcessor.TO_VISIBLE);
			break;
		case IProblem.FinalMethodCannotBeOverridden:
			ModifierCorrectionSubProcessor.addChangeOverriddenModifierProposal(context, problem, proposals, ModifierCorrectionSubProcessor.TO_NON_FINAL);
			break;
		case IProblem.CannotOverrideAStaticMethodWithAnInstanceMethod:
			ModifierCorrectionSubProcessor.addChangeOverriddenModifierProposal(context, problem, proposals, ModifierCorrectionSubProcessor.TO_NON_STATIC);
			break;
		case IProblem.CannotHideAnInstanceMethodWithAStaticMethod:
		case IProblem.IllegalModifierForInterfaceMethod:
		case IProblem.IllegalModifierForInterface:
		case IProblem.IllegalModifierForClass:
		case IProblem.IllegalModifierForInterfaceField:
		case IProblem.UnexpectedStaticModifierForField:
		case IProblem.IllegalModifierCombinationFinalVolatileForField:
		case IProblem.IllegalModifierForMemberInterface:
		case IProblem.IllegalModifierForMemberClass:
		case IProblem.IllegalModifierForLocalClass:
		case IProblem.IllegalModifierForArgument:
		case IProblem.IllegalModifierForField:
		case IProblem.IllegalModifierForMethod:
		case IProblem.IllegalModifierForConstructor:
		case IProblem.IllegalModifierForVariable:
		case IProblem.IllegalModifierForEnum:
		case IProblem.IllegalModifierForEnumConstant:
		case IProblem.IllegalModifierForEnumConstructor:
		case IProblem.IllegalModifierForMemberEnum:
		case IProblem.IllegalVisibilityModifierForInterfaceMemberType:
		case IProblem.UnexpectedStaticModifierForMethod:
		case IProblem.IllegalModifierForInterfaceMethod18:
			ModifierCorrectionSubProcessor.addRemoveInvalidModifiersProposal(context, problem, proposals, IProposalRelevance.REMOVE_INVALID_MODIFIERS);
			break;
		case IProblem.NotVisibleField:
			// Two alternatives: go through a getter/setter, or widen the field's visibility.
			GetterSetterCorrectionSubProcessor.addGetterSetterProposal(context, problem, proposals, IProposalRelevance.GETTER_SETTER_NOT_VISIBLE_FIELD);
			ModifierCorrectionSubProcessor.addNonAccessibleReferenceProposal(context, problem, proposals, ModifierCorrectionSubProcessor.TO_VISIBLE, IProposalRelevance.CHANGE_VISIBILITY);
			break;
		case IProblem.NotVisibleMethod:
		case IProblem.NotVisibleConstructor:
		case IProblem.NotVisibleType:
		case IProblem.JavadocNotVisibleType:
			ModifierCorrectionSubProcessor.addNonAccessibleReferenceProposal(context, problem, proposals, ModifierCorrectionSubProcessor.TO_VISIBLE, IProposalRelevance.CHANGE_VISIBILITY);
			break;
		case IProblem.BodyForAbstractMethod:
		case IProblem.AbstractMethodInAbstractClass:
		case IProblem.AbstractMethodInEnum:
		case IProblem.EnumAbstractMethodMustBeImplemented:
			ModifierCorrectionSubProcessor.addAbstractMethodProposals(context, problem, proposals);
			break;
		case IProblem.AbstractMethodsInConcreteClass:
			ModifierCorrectionSubProcessor.addAbstractTypeProposals(context, problem, proposals);
			break;
		case IProblem.AbstractMethodMustBeImplemented:
		case IProblem.EnumConstantMustImplementAbstractMethod:
			LocalCorrectionsSubProcessor.addUnimplementedMethodsProposals(context, problem, proposals);
			break;
		case IProblem.ShouldImplementHashcode:
			LocalCorrectionsSubProcessor.addMissingHashCodeProposals(context, problem, proposals);
			break;
		case IProblem.MissingValueForAnnotationMember:
			LocalCorrectionsSubProcessor.addValueForAnnotationProposals(context, problem, proposals);
			break;
		case IProblem.BodyForNativeMethod:
			ModifierCorrectionSubProcessor.addNativeMethodProposals(context, problem, proposals);
			break;
		case IProblem.MethodRequiresBody:
			ModifierCorrectionSubProcessor.addMethodRequiresBodyProposals(context, problem, proposals);
			break;
		case IProblem.OuterLocalMustBeFinal:
			ModifierCorrectionSubProcessor.addNonFinalLocalProposal(context, problem, proposals);
			break;
		case IProblem.UninitializedLocalVariable:
		case IProblem.UninitializedLocalVariableHintMissingDefault:
			LocalCorrectionsSubProcessor.addUninitializedLocalVariableProposal(context, problem, proposals);
			break;
		case IProblem.UnhandledExceptionInDefaultConstructor:
		case IProblem.UndefinedConstructorInDefaultConstructor:
		case IProblem.NotVisibleConstructorInDefaultConstructor:
			LocalCorrectionsSubProcessor.addConstructorFromSuperclassProposal(context, problem, proposals);
			break;
		case IProblem.UnusedPrivateMethod:
		case IProblem.UnusedPrivateConstructor:
		case IProblem.UnusedPrivateField:
		case IProblem.UnusedPrivateType:
		case IProblem.LocalVariableIsNeverUsed:
		case IProblem.ArgumentIsNeverUsed:
			LocalCorrectionsSubProcessor.addUnusedMemberProposal(context, problem, proposals);
			break;
		case IProblem.NeedToEmulateFieldReadAccess:
		case IProblem.NeedToEmulateFieldWriteAccess:
		case IProblem.NeedToEmulateMethodAccess:
		case IProblem.NeedToEmulateConstructorAccess:
			ModifierCorrectionSubProcessor.addNonAccessibleReferenceProposal(context, problem, proposals, ModifierCorrectionSubProcessor.TO_NON_PRIVATE, IProposalRelevance.CHANGE_VISIBILITY_TO_NON_PRIVATE);
			break;
		case IProblem.SuperfluousSemicolon:
			LocalCorrectionsSubProcessor.addSuperfluousSemicolonProposal(context, problem, proposals);
			break;
		case IProblem.UnnecessaryCast:
			LocalCorrectionsSubProcessor.addUnnecessaryCastProposal(context, problem, proposals);
			break;
		case IProblem.UnnecessaryInstanceof:
			LocalCorrectionsSubProcessor.addUnnecessaryInstanceofProposal(context, problem, proposals);
			break;
		case IProblem.UnusedMethodDeclaredThrownException:
		case IProblem.UnusedConstructorDeclaredThrownException:
			LocalCorrectionsSubProcessor.addUnnecessaryThrownExceptionProposal(context, problem, proposals);
			break;
		case IProblem.UnqualifiedFieldAccess:
			GetterSetterCorrectionSubProcessor.addGetterSetterProposal(context, problem, proposals, IProposalRelevance.GETTER_SETTER_UNQUALIFIED_FIELD_ACCESS);
			LocalCorrectionsSubProcessor.addUnqualifiedFieldAccessProposal(context, problem, proposals);
			break;
		case IProblem.Task:
			proposals.add(new TaskMarkerProposal(context.getCompilationUnit(), problem, 10));
			break;
		case IProblem.JavadocMissing:
			JavadocTagsSubProcessor.getMissingJavadocCommentProposals(context, problem, proposals);
			break;
		case IProblem.JavadocMissingParamTag:
		case IProblem.JavadocMissingReturnTag:
		case IProblem.JavadocMissingThrowsTag:
			JavadocTagsSubProcessor.getMissingJavadocTagProposals(context, problem, proposals);
			break;
		case IProblem.JavadocInvalidThrowsClassName:
		case IProblem.JavadocDuplicateThrowsClassName:
		case IProblem.JavadocDuplicateReturnTag:
		case IProblem.JavadocDuplicateParamName:
		case IProblem.JavadocInvalidParamName:
		case IProblem.JavadocUnexpectedTag:
		case IProblem.JavadocInvalidTag:
			JavadocTagsSubProcessor.getRemoveJavadocTagProposals(context, problem, proposals);
			break;
		case IProblem.JavadocInvalidMemberTypeQualification:
			JavadocTagsSubProcessor.getInvalidQualificationProposals(context, problem, proposals);
			break;
		case IProblem.LocalVariableHidingLocalVariable:
		case IProblem.LocalVariableHidingField:
		case IProblem.FieldHidingLocalVariable:
		case IProblem.FieldHidingField:
		case IProblem.ArgumentHidingLocalVariable:
		case IProblem.ArgumentHidingField:
		case IProblem.UseAssertAsAnIdentifier:
		case IProblem.UseEnumAsAnIdentifier:
		case IProblem.RedefinedLocal:
		case IProblem.RedefinedArgument:
		case IProblem.DuplicateField:
		case IProblem.DuplicateMethod:
		case IProblem.DuplicateTypeVariable:
		case IProblem.DuplicateNestedType:
			LocalCorrectionsSubProcessor.addInvalidVariableNameProposals(context, problem, proposals);
			break;
		case IProblem.NoMessageSendOnArrayType:
			UnresolvedElementsSubProcessor.getArrayAccessProposals(context, problem, proposals);
			break;
		case IProblem.InvalidOperator:
			LocalCorrectionsSubProcessor.getInvalidOperatorProposals(context, problem, proposals);
			break;
		case IProblem.MissingSerialVersion:
			SerialVersionSubProcessor.getSerialVersionProposals(context, problem, proposals);
			break;
		case IProblem.UnnecessaryElse:
			LocalCorrectionsSubProcessor.getUnnecessaryElseProposals(context, problem, proposals);
			break;
		case IProblem.SuperclassMustBeAClass:
			LocalCorrectionsSubProcessor.getInterfaceExtendsClassProposals(context, problem, proposals);
			break;
		case IProblem.CodeCannotBeReached:
		case IProblem.DeadCode:
			LocalCorrectionsSubProcessor.getUnreachableCodeProposals(context, problem, proposals);
			break;
		// Source-compliance problems: offer to raise the project compliance level.
		case IProblem.InvalidUsageOfTypeParameters:
		case IProblem.InvalidUsageOfStaticImports:
		case IProblem.InvalidUsageOfForeachStatements:
		case IProblem.InvalidUsageOfTypeArguments:
		case IProblem.InvalidUsageOfEnumDeclarations:
		case IProblem.InvalidUsageOfVarargs:
		case IProblem.InvalidUsageOfAnnotations:
		case IProblem.InvalidUsageOfAnnotationDeclarations:
			ReorgCorrectionsSubProcessor.getNeedHigherComplianceProposals(context, problem, proposals, JavaCore.VERSION_1_5);
			break;
		case IProblem.DiamondNotBelow17:
			TypeArgumentMismatchSubProcessor.getInferDiamondArgumentsProposal(context, problem, proposals);
			//$FALL-THROUGH$
		case IProblem.AutoManagedResourceNotBelow17:
		case IProblem.MultiCatchNotBelow17:
		case IProblem.PolymorphicMethodNotBelow17:
		case IProblem.BinaryLiteralNotBelow17:
		case IProblem.UnderscoresInLiteralsNotBelow17:
		case IProblem.SwitchOnStringsNotBelow17:
			ReorgCorrectionsSubProcessor.getNeedHigherComplianceProposals(context, problem, proposals, JavaCore.VERSION_1_7);
			break;
		case IProblem.ExplicitThisParameterNotBelow18:
		case IProblem.DefaultMethodNotBelow18:
		case IProblem.StaticInterfaceMethodNotBelow18:
		case IProblem.LambdaExpressionNotBelow18:
		case IProblem.MethodReferenceNotBelow18:
		case IProblem.ConstructorReferenceNotBelow18:
		case IProblem.IntersectionCastNotBelow18:
		case IProblem.InvalidUsageOfTypeAnnotations:
			ReorgCorrectionsSubProcessor.getNeedHigherComplianceProposals(context, problem, proposals, JavaCore.VERSION_1_8);
			break;
		case IProblem.NonGenericType:
			TypeArgumentMismatchSubProcessor.removeMismatchedArguments(context, problem, proposals);
			break;
		case IProblem.MissingOverrideAnnotation:
		case IProblem.MissingOverrideAnnotationForInterfaceMethodImplementation:
			ModifierCorrectionSubProcessor.addOverrideAnnotationProposal(context, problem, proposals);
			break;
		case IProblem.MethodMustOverride:
		case IProblem.MethodMustOverrideOrImplement:
			ModifierCorrectionSubProcessor.removeOverrideAnnotationProposal(context, problem, proposals);
			break;
		case IProblem.FieldMissingDeprecatedAnnotation:
		case IProblem.MethodMissingDeprecatedAnnotation:
		case IProblem.TypeMissingDeprecatedAnnotation:
			ModifierCorrectionSubProcessor.addDeprecatedAnnotationProposal(context, problem, proposals);
			break;
		case IProblem.OverridingDeprecatedMethod:
			ModifierCorrectionSubProcessor.addOverridingDeprecatedMethodProposal(context, problem, proposals);
			break;
		case IProblem.IsClassPathCorrect:
			ReorgCorrectionsSubProcessor.getIncorrectBuildPathProposals(context, problem, proposals);
			break;
		case IProblem.ForbiddenReference:
		case IProblem.DiscouragedReference:
			ReorgCorrectionsSubProcessor.getAccessRulesProposals(context, problem, proposals);
			break;
		case IProblem.AssignmentHasNoEffect:
			LocalCorrectionsSubProcessor.getAssignmentHasNoEffectProposals(context, problem, proposals);
			break;
		case IProblem.UnsafeTypeConversion:
		case IProblem.RawTypeReference:
		case IProblem.UnsafeRawMethodInvocation:
			LocalCorrectionsSubProcessor.addDeprecatedFieldsToMethodsProposals(context, problem, proposals);
			//$FALL-THROUGH$
		case IProblem.UnsafeElementTypeConversion:
			LocalCorrectionsSubProcessor.addTypePrametersToRawTypeReference(context, problem, proposals);
			break;
		case IProblem.RedundantSpecificationOfTypeArguments:
			LocalCorrectionsSubProcessor.addRemoveRedundantTypeArgumentsProposals(context, problem, proposals);
			break;
		case IProblem.FallthroughCase:
			LocalCorrectionsSubProcessor.addFallThroughProposals(context, problem, proposals);
			break;
		case IProblem.UnhandledWarningToken:
			SuppressWarningsSubProcessor.addUnknownSuppressWarningProposals(context, problem, proposals);
			break;
		case IProblem.UnusedWarningToken:
			SuppressWarningsSubProcessor.addRemoveUnusedSuppressWarningProposals(context, problem, proposals);
			break;
		case IProblem.MissingEnumConstantCase:
		case IProblem.MissingEnumDefaultCase:
			LocalCorrectionsSubProcessor.getMissingEnumConstantCaseProposals(context, problem, proposals);
			break;
		case IProblem.MissingDefaultCase:
			LocalCorrectionsSubProcessor.addMissingDefaultCaseProposal(context, problem, proposals);
			break;
		case IProblem.MissingEnumConstantCaseDespiteDefault:
			LocalCorrectionsSubProcessor.getMissingEnumConstantCaseProposals(context, problem, proposals);
			LocalCorrectionsSubProcessor.addCasesOmittedProposals(context, problem, proposals);
			break;
		case IProblem.MissingSynchronizedModifierInInheritedMethod:
			ModifierCorrectionSubProcessor.addSynchronizedMethodProposal(context, problem, proposals);
			break;
		case IProblem.UnusedObjectAllocation:
			LocalCorrectionsSubProcessor.getUnusedObjectAllocationProposals(context, problem, proposals);
			break;
		case IProblem.MethodCanBeStatic:
		case IProblem.MethodCanBePotentiallyStatic:
			ModifierCorrectionSubProcessor.addStaticMethodProposal(context, problem, proposals);
			break;
		case IProblem.PotentialHeapPollutionFromVararg :
			VarargsWarningsSubProcessor.addAddSafeVarargsProposals(context, problem, proposals);
			break;
		case IProblem.UnsafeGenericArrayForVarargs:
			VarargsWarningsSubProcessor.addAddSafeVarargsToDeclarationProposals(context, problem, proposals);
			break;
		case IProblem.SafeVarargsOnFixedArityMethod :
		case IProblem.SafeVarargsOnNonFinalInstanceMethod:
			VarargsWarningsSubProcessor.addRemoveSafeVarargsProposals(context, problem, proposals);
			break;
		case IProblem.IllegalReturnNullityRedefinition:
		case IProblem.IllegalDefinitionToNonNullParameter:
		case IProblem.IllegalRedefinitionToNonNullParameter:
			// Everything but a return-nullity redefinition concerns a parameter.
			boolean isArgProblem = id != IProblem.IllegalReturnNullityRedefinition;
			NullAnnotationsCorrectionProcessor.addNullAnnotationInSignatureProposal(context, problem, proposals, ChangeKind.LOCAL, isArgProblem);
			NullAnnotationsCorrectionProcessor.addNullAnnotationInSignatureProposal(context, problem, proposals, ChangeKind.OVERRIDDEN, isArgProblem);
			break;
		case IProblem.RequiredNonNullButProvidedSpecdNullable:
		case IProblem.RequiredNonNullButProvidedUnknown:
			NullAnnotationsCorrectionProcessor.addExtractCheckedLocalProposal(context, problem, proposals);
			//$FALL-THROUGH$
		case IProblem.RequiredNonNullButProvidedNull:
		case IProblem.RequiredNonNullButProvidedPotentialNull:
		case IProblem.ParameterLackingNonNullAnnotation:
		case IProblem.ParameterLackingNullableAnnotation:
			NullAnnotationsCorrectionProcessor.addReturnAndArgumentTypeProposal(context, problem, ChangeKind.LOCAL, proposals);
			NullAnnotationsCorrectionProcessor.addReturnAndArgumentTypeProposal(context, problem, ChangeKind.TARGET, proposals);
			break;
		case IProblem.SpecdNonNullLocalVariableComparisonYieldsFalse:
		case IProblem.RedundantNullCheckOnSpecdNonNullLocalVariable:
			// Only meaningful while annotation-based null analysis is enabled.
			IJavaProject prj = context.getCompilationUnit().getJavaProject();
			if (prj != null && JavaCore.ENABLED.equals(prj.getOption(JavaCore.COMPILER_ANNOTATION_NULL_ANALYSIS, true))) {
				NullAnnotationsCorrectionProcessor.addReturnAndArgumentTypeProposal(context, problem, ChangeKind.LOCAL, proposals);
			}
			break;
		case IProblem.RedundantNullAnnotation:
		case IProblem.RedundantNullDefaultAnnotationPackage:
		case IProblem.RedundantNullDefaultAnnotationType:
		case IProblem.RedundantNullDefaultAnnotationMethod:
			NullAnnotationsCorrectionProcessor.addRemoveRedundantAnnotationProposal(context, problem, proposals);
			break;
		case IProblem.UnusedTypeParameter:
			LocalCorrectionsSubProcessor.addUnusedTypeParameterProposal(context, problem, proposals);
			break;
		case IProblem.NullableFieldReference:
			NullAnnotationsCorrectionProcessor.addExtractCheckedLocalProposal(context, problem, proposals);
			break;
		case IProblem.ConflictingNullAnnotations:
		case IProblem.ConflictingInheritedNullAnnotations:
			NullAnnotationsCorrectionProcessor.addReturnAndArgumentTypeProposal(context, problem, ChangeKind.LOCAL, proposals);
			NullAnnotationsCorrectionProcessor.addReturnAndArgumentTypeProposal(context, problem, ChangeKind.INVERSE, proposals);
			break;
		default:
	}
	if (JavaModelUtil.is50OrHigher(context.getCompilationUnit().getJavaProject())) {
		SuppressWarningsSubProcessor.addSuppressWarningsProposals(context, problem, proposals);
	}
}
}
gazarenkov/che-sketch
plugins/plugin-java/che-plugin-java-ext-jdt/org-eclipse-jdt-ui/src/main/java/org/eclipse/jdt/internal/ui/text/correction/QuickFixProcessor.java
Java
epl-1.0
37,207
<?php
/**
 * @package     Joomla.Administrator
 * @subpackage  mod_feed
 *
 * @copyright   (C) 2006 Open Source Matters, Inc. <https://www.joomla.org>
 * @license     GNU General Public License version 2 or later; see LICENSE.txt
 */

defined('_JEXEC') or die;

use Joomla\CMS\Filter\OutputFilter;
use Joomla\CMS\HTML\HTMLHelper;
use Joomla\CMS\Language\Text;

// NOTE(review): this template relies on variables injected by the including
// module script ($rssurl, $feed, $app, $params, and $rssrtl used further
// below) — confirm against the module entry file.

// Check if feed URL has been set
if (empty ($rssurl))
{
	echo '<div>' . Text::_('MOD_FEED_ERR_NO_URL') . '</div>';

	return;
}

// A string $feed is an error message produced upstream: print it and stop.
if (!empty($feed) && is_string($feed))
{
	echo $feed;
}
else
{
	$lang = $app->getLanguage();
	$myrtl = $params->get('rssrtl', 0);

	// Derive the CSS direction class from the site language direction combined
	// with the module's "rssrtl" setting (0/1/2).
	$direction = ' ';

	if ($lang->isRtl() && $myrtl == 0)
	{
		$direction = ' redirect-rtl';
	}
	elseif ($lang->isRtl() && $myrtl == 1)
	{
		$direction = ' redirect-ltr';
	}
	elseif ($lang->isRtl() && $myrtl == 2)
	{
		$direction = ' redirect-rtl';
	}
	elseif ($myrtl == 0)
	{
		$direction = ' redirect-ltr';
	}
	elseif ($myrtl == 1)
	{
		$direction = ' redirect-ltr';
	}
	elseif ($myrtl == 2)
	{
		$direction = ' redirect-rtl';
	}

	if ($feed != false) : ?>
	<div style="direction: <?php echo $rssrtl ? 'rtl' : 'ltr'; ?>; text-align: <?php echo $rssrtl ? 'right' : 'left'; ?> !important" class="feed">
		<?php
		// Feed title
		if (!is_null($feed->title) && $params->get('rsstitle', 1)) : ?>
			<h2 class="<?php echo $direction; ?>">
				<a href="<?php echo str_replace('&', '&amp;', $rssurl); ?>" target="_blank">
				<?php echo $feed->title; ?></a>
			</h2>
		<?php endif;
		// Feed date
		if ($params->get('rssdate', 1)) : ?>
			<h3>
				<?php echo HTMLHelper::_('date', $feed->publishedDate, Text::_('DATE_FORMAT_LC3')); ?>
			</h3>
		<?php endif; ?>
		<?php // Feed description ?>
		<?php if ($params->get('rssdesc', 1)) : ?>
			<?php echo $feed->description; ?>
		<?php endif; ?>
		<?php // Feed image ?>
		<?php if ($params->get('rssimage', 1) && $feed->image) : ?>
			<img class="w-100" src="<?php echo $feed->image->uri; ?>" alt="<?php echo $feed->image->title; ?>"/>
		<?php endif; ?>
		<?php // Show items ?>
		<?php if (!empty($feed)) : ?>
			<ul class="newsfeed list-group">
			<?php for ($i = 0; $i < $params->get('rssitems', 3); $i++) :
				// Stop once the feed has fewer entries than requested.
				if (!$feed->offsetExists($i)) :
					break;
				endif;

				// Prefer the entry URI; use the GUID only for permalink entries
				// without a URI. Non-absolute links fall back to the feed URL.
				$uri = $feed[$i]->uri || !$feed[$i]->isPermaLink ? trim($feed[$i]->uri) : trim($feed[$i]->guid);
				$uri = !$uri || stripos($uri, 'http') !== 0 ? $rssurl : $uri;
				$text = $feed[$i]->content !== '' ? trim($feed[$i]->content) : '';
			?>
				<li class="list-group-item mb-2">
					<?php if (!empty($uri)) : ?>
						<h5 class="feed-link">
							<a href="<?php echo $uri; ?>" target="_blank">
							<?php echo trim($feed[$i]->title); ?></a></h5>
					<?php else : ?>
						<h5 class="feed-link"><?php echo trim($feed[$i]->title); ?></h5>
					<?php endif; ?>
					<?php if ($params->get('rssitemdate', 0)) : ?>
						<div class="feed-item-date">
							<?php echo HTMLHelper::_('date', $feed[$i]->publishedDate, Text::_('DATE_FORMAT_LC3')); ?>
						</div>
					<?php endif; ?>
					<?php if ($params->get('rssitemdesc', 1) && $text !== '') : ?>
						<div class="feed-item-description">
						<?php
							// Strip the images.
							$text = OutputFilter::stripImages($text);
							$text = HTMLHelper::_('string.truncate', $text, $params->get('word_count', 0), true, false);
							echo str_replace('&apos;', "'", $text);
						?>
						</div>
					<?php endif; ?>
				</li>
			<?php endfor; ?>
			</ul>
		<?php endif; ?>
	</div>
	<?php endif;
}
brianteeman/joomla-cms
administrator/modules/mod_feed/tmpl/default.php
PHP
gpl-2.0
3,512
/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2007 Advanced Micro Devices, Inc.
 * Copyright (C) 2011 Mark Norman <mpnorman@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

/* Based on irq_tables.c from AMD's DB800 mainboard. */

#include <arch/pirq_routing.h>
#include <console/console.h>
#include <arch/io.h>
#include <arch/pirq_routing.h> /* NOTE(review): duplicate of the first include — redundant but harmless. */
#include "southbridge/amd/cs5536/cs5536.h"

/* Platform IRQs */
#define PIRQA 5
#define PIRQB 11
#define PIRQC 10
#define PIRQD 9

/* Map */
#define M_PIRQA (1 << PIRQA)	/* Bitmap of supported IRQs */
#define M_PIRQB (1 << PIRQB)	/* Bitmap of supported IRQs */
#define M_PIRQC (1 << PIRQC)	/* Bitmap of supported IRQs */
#define M_PIRQD (1 << PIRQD)	/* Bitmap of supported IRQs */

/* Link */
#define L_PIRQA 1		/* Means Slot INTx# Connects To Chipset INTA# */
#define L_PIRQB 2		/* Means Slot INTx# Connects To Chipset INTB# */
#define L_PIRQC 3		/* Means Slot INTx# Connects To Chipset INTC# */
#define L_PIRQD 4		/* Means Slot INTx# Connects To Chipset INTD# */

/*
 * The $PIR interrupt routing table for this board.
 * NOTE(review): the identifier says "intel" although this is an AMD CS5536
 * platform — the name appears to be inherited from the reference board code.
 */
static const struct irq_routing_table intel_irq_routing_table = {
	PIRQ_SIGNATURE,		/* u32 signature */
	PIRQ_VERSION,		/* u16 version */
	32 + 16 * CONFIG_IRQ_SLOT_COUNT, /* there can be total CONFIG_IRQ_SLOT_COUNT devices on the bus */
	0x00,			/* Where the interrupt router lies (bus) */
	(0x0F << 3) | 0x0,	/* Where the interrupt router lies (dev) */
	0x00,			/* IRQs devoted exclusively to PCI usage */
	0x100B,			/* Vendor */
	0x002B,			/* Device */
	0,			/* Miniport data */
	{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* u8 rfu[11] */
	0x00,			/* u8 checksum , this has to set to some value that would give 0 after the sum of all bytes for this structure (including checksum) */
	{
		/* If you change the number of entries, change the CONFIG_IRQ_SLOT_COUNT above! */
		/* bus, dev|fn, {link, bitmap}, {link, bitmap}, {link, bitmap}, {link, bitmap}, slot, rfu */
		/* CPU */
		{0x00, (0x01 << 3) | 0x0, {{L_PIRQA, M_PIRQA}, {0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00}}, 0x0, 0x0},
		/* Ethernet */
		{0x00, (0x0E << 3) | 0x0, {{L_PIRQA, M_PIRQA}, {0x00, 0x00}, {0x00, 0x00}, {0x00, 0x00}}, 0x0, 0x0},
		/* Chipset */
		{0x00, (0x0F << 3) | 0x0, {{L_PIRQA, M_PIRQA}, {L_PIRQB, M_PIRQB}, {L_PIRQC, M_PIRQC}, {L_PIRQD, M_PIRQD}}, 0x0, 0x0},
	}
};

/* Copy the routing table above to 'addr'; returns the address just past it. */
unsigned long write_pirq_routing_table(unsigned long addr)
{
	return copy_pirq_routing_table(addr, &intel_irq_routing_table);
}
jpl888/coreboot
src/mainboard/aaeon/pfm-540i_revb/irq_tables.c
C
gpl-2.0
3,043
<?php
/**
 * File containing the ezcDocumentWikiBlockquoteNode struct
 *
 * @package Document
 * @version //autogen//
 * @copyright Copyright (C) 2005-2010 eZ Systems AS. All rights reserved.
 * @license http://ez.no/licenses/new_bsd New BSD License
 */

/**
 * Abstract syntax tree node representing a blockquote in a Wiki document.
 *
 * @package Document
 * @version //autogen//
 */
class ezcDocumentWikiBlockquoteNode extends ezcDocumentWikiLineLevelNode
{
    /**
     * Indentation depth of the blockquote, starting at 1.
     *
     * @var int
     */
    public $level = 1;

    /**
     * Create a blockquote node from the given token.
     *
     * When the token is a paragraph indentation token, its recorded
     * indentation depth is carried over onto this node.
     *
     * @param ezcDocumentWikiToken $token
     * @return void
     */
    public function __construct( ezcDocumentWikiToken $token )
    {
        parent::__construct( $token );

        // Only indentation tokens carry a nesting depth.
        if ( $token instanceof ezcDocumentWikiParagraphIndentationToken )
        {
            $this->level = $token->level;
        }
    }

    /**
     * Restore a node exported with var_export().
     *
     * @param array $properties
     * @return void
     * @ignore
     */
    public static function __set_state( $properties )
    {
        // 'self' resolves to this declaring class, matching the original
        // "new $nodeClass" with $nodeClass = __CLASS__.
        $restored = new self( $properties['token'] );
        $restored->nodes = $properties['nodes'];

        return $restored;
    }
}
?>
crevillo/ezc
lib/ezc/Document/src/document/wiki/nodes/blockquote.php
PHP
gpl-2.0
1,290
#ifndef _ANTARES_DOCK_H
#define _ANTARES_DOCK_H

/*
 * Board-specific platform data passed to the Antares dock driver.
 */
struct dock_platform_data {
	unsigned int irq; /* interrupt number */
	unsigned int gpio_num; /* NOTE(review): presumably the dock-detect GPIO line — confirm against the driver that consumes this struct. */
};

#endif
pio-masaki/android-kernel-at1s0
include/linux/antares_dock.h
C
gpl-2.0
159
/* * pNFS client data structures. * * Copyright (c) 2002 * The Regents of the University of Michigan * All Rights Reserved * * Dean Hildebrand <dhildebz@umich.edu> * * Permission is granted to use, copy, create derivative works, and * redistribute this software and such derivative works for any purpose, * so long as the name of the University of Michigan is not used in * any advertising or publicity pertaining to the use or distribution * of this software without specific, written prior authorization. If * the above copyright notice or any other identification of the * University of Michigan is included in any copy of any portion of * this software, then the disclaimer below must also be included. * * This software is provided as is, without representation or warranty * of any kind either express or implied, including without limitation * the implied warranties of merchantability, fitness for a particular * purpose, or noninfringement. The Regents of the University of * Michigan shall not be liable for any damages, including special, * indirect, incidental, or consequential damages, with respect to any * claim arising out of or in connection with the use of the software, * even if it has been or is hereafter advised of the possibility of * such damages. 
*/ #ifndef FS_NFS_PNFS_H #define FS_NFS_PNFS_H #include <linux/refcount.h> #include <linux/nfs_fs.h> #include <linux/nfs_page.h> #include <linux/workqueue.h> struct nfs4_opendata; enum { NFS_LSEG_VALID = 0, /* cleared when lseg is recalled/returned */ NFS_LSEG_ROC, /* roc bit received from server */ NFS_LSEG_LAYOUTCOMMIT, /* layoutcommit bit set for layoutcommit */ NFS_LSEG_LAYOUTRETURN, /* layoutreturn bit set for layoutreturn */ NFS_LSEG_UNAVAILABLE, /* unavailable bit set for temporary problem */ }; /* Individual ip address */ struct nfs4_pnfs_ds_addr { struct sockaddr_storage da_addr; size_t da_addrlen; struct list_head da_node; /* nfs4_pnfs_dev_hlist dev_dslist */ char *da_remotestr; /* human readable addr+port */ }; struct nfs4_pnfs_ds { struct list_head ds_node; /* nfs4_pnfs_dev_hlist dev_dslist */ char *ds_remotestr; /* comma sep list of addrs */ struct list_head ds_addrs; struct nfs_client *ds_clp; refcount_t ds_count; unsigned long ds_state; #define NFS4DS_CONNECTING 0 /* ds is establishing connection */ }; struct pnfs_layout_segment { struct list_head pls_list; struct list_head pls_lc_list; struct pnfs_layout_range pls_range; refcount_t pls_refcount; u32 pls_seq; unsigned long pls_flags; struct pnfs_layout_hdr *pls_layout; }; enum pnfs_try_status { PNFS_ATTEMPTED = 0, PNFS_NOT_ATTEMPTED = 1, PNFS_TRY_AGAIN = 2, }; /* error codes for internal use */ #define NFS4ERR_RESET_TO_MDS 12001 #define NFS4ERR_RESET_TO_PNFS 12002 #ifdef CONFIG_NFS_V4_1 #define LAYOUT_NFSV4_1_MODULE_PREFIX "nfs-layouttype4" /* * Default data server connection timeout and retrans vaules. * Set by module parameters dataserver_timeo and dataserver_retrans. 
*/ #define NFS4_DEF_DS_TIMEO 600 /* in tenths of a second */ #define NFS4_DEF_DS_RETRANS 5 #define PNFS_DEVICE_RETRY_TIMEOUT (120*HZ) enum { NFS_LAYOUT_RO_FAILED = 0, /* get ro layout failed stop trying */ NFS_LAYOUT_RW_FAILED, /* get rw layout failed stop trying */ NFS_LAYOUT_BULK_RECALL, /* bulk recall affecting layout */ NFS_LAYOUT_RETURN, /* layoutreturn in progress */ NFS_LAYOUT_RETURN_LOCK, /* Serialise layoutreturn */ NFS_LAYOUT_RETURN_REQUESTED, /* Return this layout ASAP */ NFS_LAYOUT_INVALID_STID, /* layout stateid id is invalid */ NFS_LAYOUT_FIRST_LAYOUTGET, /* Serialize first layoutget */ NFS_LAYOUT_INODE_FREEING, /* The inode is being freed */ }; enum layoutdriver_policy_flags { /* Should the pNFS client commit and return the layout upon truncate to * a smaller size */ PNFS_LAYOUTRET_ON_SETATTR = 1 << 0, PNFS_LAYOUTRET_ON_ERROR = 1 << 1, PNFS_READ_WHOLE_PAGE = 1 << 2, PNFS_LAYOUTGET_ON_OPEN = 1 << 3, }; struct nfs4_deviceid_node; /* Per-layout driver specific registration structure */ struct pnfs_layoutdriver_type { struct list_head pnfs_tblid; const u32 id; const char *name; struct module *owner; unsigned flags; unsigned max_deviceinfo_size; unsigned max_layoutget_response; int (*set_layoutdriver) (struct nfs_server *, const struct nfs_fh *); int (*clear_layoutdriver) (struct nfs_server *); struct pnfs_layout_hdr * (*alloc_layout_hdr) (struct inode *inode, gfp_t gfp_flags); void (*free_layout_hdr) (struct pnfs_layout_hdr *); struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr, gfp_t gfp_flags); void (*free_lseg) (struct pnfs_layout_segment *lseg); void (*add_lseg) (struct pnfs_layout_hdr *layoutid, struct pnfs_layout_segment *lseg, struct list_head *free_me); void (*return_range) (struct pnfs_layout_hdr *lo, struct pnfs_layout_range *range); /* test for nfs page cache coalescing */ const struct nfs_pageio_ops *pg_read_ops; const struct nfs_pageio_ops *pg_write_ops; struct pnfs_ds_commit_info 
*(*get_ds_info) (struct inode *inode); void (*mark_request_commit) (struct nfs_page *req, struct pnfs_layout_segment *lseg, struct nfs_commit_info *cinfo, u32 ds_commit_idx); void (*clear_request_commit) (struct nfs_page *req, struct nfs_commit_info *cinfo); int (*scan_commit_lists) (struct nfs_commit_info *cinfo, int max); void (*recover_commit_reqs) (struct list_head *list, struct nfs_commit_info *cinfo); struct nfs_page * (*search_commit_reqs)(struct nfs_commit_info *cinfo, struct page *page); int (*commit_pagelist)(struct inode *inode, struct list_head *mds_pages, int how, struct nfs_commit_info *cinfo); int (*sync)(struct inode *inode, bool datasync); /* * Return PNFS_ATTEMPTED to indicate the layout code has attempted * I/O, else return PNFS_NOT_ATTEMPTED to fall back to normal NFS */ enum pnfs_try_status (*read_pagelist)(struct nfs_pgio_header *); enum pnfs_try_status (*write_pagelist)(struct nfs_pgio_header *, int); void (*free_deviceid_node) (struct nfs4_deviceid_node *); struct nfs4_deviceid_node * (*alloc_deviceid_node) (struct nfs_server *server, struct pnfs_device *pdev, gfp_t gfp_flags); int (*prepare_layoutreturn) (struct nfs4_layoutreturn_args *); void (*cleanup_layoutcommit) (struct nfs4_layoutcommit_data *data); int (*prepare_layoutcommit) (struct nfs4_layoutcommit_args *args); int (*prepare_layoutstats) (struct nfs42_layoutstat_args *args); }; struct pnfs_layout_hdr { refcount_t plh_refcount; atomic_t plh_outstanding; /* number of RPCs out */ struct list_head plh_layouts; /* other client layouts */ struct list_head plh_bulk_destroy; struct list_head plh_segs; /* layout segments list */ struct list_head plh_return_segs; /* invalid layout segments */ unsigned long plh_block_lgets; /* block LAYOUTGET if >0 */ unsigned long plh_retry_timestamp; unsigned long plh_flags; nfs4_stateid plh_stateid; u32 plh_barrier; /* ignore lower seqids */ u32 plh_return_seq; enum pnfs_iomode plh_return_iomode; loff_t plh_lwb; /* last write byte for layoutcommit */ 
const struct cred *plh_lc_cred; /* layoutcommit cred */ struct inode *plh_inode; }; struct pnfs_device { struct nfs4_deviceid dev_id; unsigned int layout_type; unsigned int mincount; unsigned int maxcount; /* gdia_maxcount */ struct page **pages; unsigned int pgbase; unsigned int pglen; /* reply buffer length */ unsigned char nocache : 1;/* May not be cached */ }; #define NFS4_PNFS_GETDEVLIST_MAXNUM 16 struct pnfs_devicelist { unsigned int eof; unsigned int num_devs; struct nfs4_deviceid dev_id[NFS4_PNFS_GETDEVLIST_MAXNUM]; }; extern int pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *); extern void pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *); /* nfs4proc.c */ extern size_t max_response_pages(struct nfs_server *server); extern int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *dev, const struct cred *cred); extern struct pnfs_layout_segment* nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout); extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync); /* pnfs.c */ void pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo); void pnfs_put_lseg(struct pnfs_layout_segment *lseg); void set_pnfs_layoutdriver(struct nfs_server *, const struct nfs_fh *, struct nfs_fsinfo *); void unset_pnfs_layoutdriver(struct nfs_server *); void pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio); void pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *, struct nfs_page *); int pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc); void pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req, u64 wb_size); void pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *); int pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc); size_t pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, struct nfs_page *req); void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg); struct pnfs_layout_segment *pnfs_layout_process(struct 
nfs4_layoutget *lgp); void pnfs_layoutget_free(struct nfs4_layoutget *lgp); void pnfs_free_lseg_list(struct list_head *tmp_list); void pnfs_destroy_layout(struct nfs_inode *); void pnfs_destroy_all_layouts(struct nfs_client *); int pnfs_destroy_layouts_byfsid(struct nfs_client *clp, struct nfs_fsid *fsid, bool is_recall); int pnfs_destroy_layouts_byclid(struct nfs_client *clp, bool is_recall); bool nfs4_layout_refresh_old_stateid(nfs4_stateid *dst, struct pnfs_layout_range *dst_range, struct inode *inode); void pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo); void pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new, bool update_barrier); int pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo, struct list_head *tmp_list, const struct pnfs_layout_range *recall_range, u32 seq); int pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo, struct list_head *tmp_list, const struct pnfs_layout_range *recall_range, u32 seq); int pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo, struct list_head *lseg_list); bool pnfs_roc(struct inode *ino, struct nfs4_layoutreturn_args *args, struct nfs4_layoutreturn_res *res, const struct cred *cred); int pnfs_roc_done(struct rpc_task *task, struct inode *inode, struct nfs4_layoutreturn_args **argpp, struct nfs4_layoutreturn_res **respp, int *ret); void pnfs_roc_release(struct nfs4_layoutreturn_args *args, struct nfs4_layoutreturn_res *res, int ret); bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task); void pnfs_set_layoutcommit(struct inode *, struct pnfs_layout_segment *, loff_t); void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data); int pnfs_layoutcommit_inode(struct inode *inode, bool sync); int pnfs_generic_sync(struct inode *inode, bool datasync); int pnfs_nfs_generic_sync(struct inode *inode, bool datasync); int _pnfs_return_layout(struct inode *); int pnfs_commit_and_return_layout(struct inode *); void pnfs_ld_write_done(struct nfs_pgio_header *); 
void pnfs_ld_read_done(struct nfs_pgio_header *); void pnfs_read_resend_pnfs(struct nfs_pgio_header *); struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, loff_t pos, u64 count, enum pnfs_iomode iomode, bool strict_iomode, gfp_t gfp_flags); void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo, const nfs4_stateid *arg_stateid, const struct pnfs_layout_range *range, const nfs4_stateid *stateid); void pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg, bool (*is_after)(const struct pnfs_layout_range *lseg_range, const struct pnfs_layout_range *old), bool (*do_merge)(struct pnfs_layout_segment *lseg, struct pnfs_layout_segment *old), struct list_head *free_me); void nfs4_deviceid_mark_client_invalid(struct nfs_client *clp); int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *); int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *); struct nfs4_threshold *pnfs_mdsthreshold_alloc(void); void pnfs_error_mark_layout_for_return(struct inode *inode, struct pnfs_layout_segment *lseg); /* nfs4_deviceid_flags */ enum { NFS_DEVICEID_INVALID = 0, /* set when MDS clientid recalled */ NFS_DEVICEID_UNAVAILABLE, /* device temporarily unavailable */ NFS_DEVICEID_NOCACHE, /* device may not be cached */ }; /* pnfs_dev.c */ struct nfs4_deviceid_node { struct hlist_node node; struct hlist_node tmpnode; const struct pnfs_layoutdriver_type *ld; const struct nfs_client *nfs_client; unsigned long flags; unsigned long timestamp_unavailable; struct nfs4_deviceid deviceid; struct rcu_head rcu; atomic_t ref; }; struct nfs4_deviceid_node * nfs4_find_get_deviceid(struct nfs_server *server, const struct nfs4_deviceid *id, const struct cred *cred, gfp_t gfp_mask); void nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *, const struct nfs_client *, const struct nfs4_deviceid *); void nfs4_init_deviceid_node(struct nfs4_deviceid_node *, struct nfs_server *, const struct nfs4_deviceid *); 
bool nfs4_put_deviceid_node(struct nfs4_deviceid_node *); void nfs4_mark_deviceid_available(struct nfs4_deviceid_node *node); void nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node); bool nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node); void nfs4_deviceid_purge_client(const struct nfs_client *); /* pnfs_nfs.c */ void pnfs_generic_clear_request_commit(struct nfs_page *req, struct nfs_commit_info *cinfo); void pnfs_generic_commit_release(void *calldata); void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data); void pnfs_generic_rw_release(void *data); void pnfs_generic_recover_commit_reqs(struct list_head *dst, struct nfs_commit_info *cinfo); int pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages, int how, struct nfs_commit_info *cinfo, int (*initiate_commit)(struct nfs_commit_data *data, int how)); int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo, int max); void pnfs_generic_write_commit_done(struct rpc_task *task, void *data); void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds); struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags); void nfs4_pnfs_v3_ds_connect_unload(void); int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, struct nfs4_deviceid_node *devid, unsigned int timeo, unsigned int retrans, u32 version, u32 minor_version); struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net, struct xdr_stream *xdr, gfp_t gfp_flags); void pnfs_layout_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg, struct nfs_commit_info *cinfo, u32 ds_commit_idx); void pnfs_lgopen_prepare(struct nfs4_opendata *data, struct nfs_open_context *ctx); void pnfs_parse_lgopen(struct inode *ino, struct nfs4_layoutget *lgp, struct nfs_open_context *ctx); void nfs4_lgopen_release(struct nfs4_layoutget *lgp); static inline bool nfs_have_layout(struct inode *inode) { return NFS_I(inode)->layout != NULL; } static inline bool 
pnfs_layout_is_valid(const struct pnfs_layout_hdr *lo) { return test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) == 0; } static inline struct nfs4_deviceid_node * nfs4_get_deviceid(struct nfs4_deviceid_node *d) { atomic_inc(&d->ref); return d; } static inline struct pnfs_layout_segment * pnfs_get_lseg(struct pnfs_layout_segment *lseg) { if (lseg) { refcount_inc(&lseg->pls_refcount); smp_mb__after_atomic(); } return lseg; } static inline bool pnfs_is_valid_lseg(struct pnfs_layout_segment *lseg) { return test_bit(NFS_LSEG_VALID, &lseg->pls_flags) != 0; } /* Return true if a layout driver is being used for this mountpoint */ static inline int pnfs_enabled_sb(struct nfs_server *nfss) { return nfss->pnfs_curr_ld != NULL; } static inline int pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how, struct nfs_commit_info *cinfo) { if (cinfo->ds == NULL || cinfo->ds->ncommitting == 0) return PNFS_NOT_ATTEMPTED; return NFS_SERVER(inode)->pnfs_curr_ld->commit_pagelist(inode, mds_pages, how, cinfo); } static inline struct pnfs_ds_commit_info * pnfs_get_ds_info(struct inode *inode) { struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; if (ld == NULL || ld->get_ds_info == NULL) return NULL; return ld->get_ds_info(inode); } static inline void pnfs_generic_mark_devid_invalid(struct nfs4_deviceid_node *node) { set_bit(NFS_DEVICEID_INVALID, &node->flags); } static inline bool pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg, struct nfs_commit_info *cinfo, u32 ds_commit_idx) { struct inode *inode = d_inode(nfs_req_openctx(req)->dentry); struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; if (lseg == NULL || ld->mark_request_commit == NULL) return false; ld->mark_request_commit(req, lseg, cinfo, ds_commit_idx); return true; } static inline bool pnfs_clear_request_commit(struct nfs_page *req, struct nfs_commit_info *cinfo) { struct inode *inode = d_inode(nfs_req_openctx(req)->dentry); struct 
pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; if (ld == NULL || ld->clear_request_commit == NULL) return false; ld->clear_request_commit(req, cinfo); return true; } static inline int pnfs_scan_commit_lists(struct inode *inode, struct nfs_commit_info *cinfo, int max) { if (cinfo->ds == NULL || cinfo->ds->nwritten == 0) return 0; else return NFS_SERVER(inode)->pnfs_curr_ld->scan_commit_lists(cinfo, max); } static inline struct nfs_page * pnfs_search_commit_reqs(struct inode *inode, struct nfs_commit_info *cinfo, struct page *page) { struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; if (ld == NULL || ld->search_commit_reqs == NULL) return NULL; return ld->search_commit_reqs(cinfo, page); } /* Should the pNFS client commit and return the layout upon a setattr */ static inline bool pnfs_ld_layoutret_on_setattr(struct inode *inode) { if (!pnfs_enabled_sb(NFS_SERVER(inode))) return false; return NFS_SERVER(inode)->pnfs_curr_ld->flags & PNFS_LAYOUTRET_ON_SETATTR; } static inline bool pnfs_ld_read_whole_page(struct inode *inode) { if (!pnfs_enabled_sb(NFS_SERVER(inode))) return false; return NFS_SERVER(inode)->pnfs_curr_ld->flags & PNFS_READ_WHOLE_PAGE; } static inline int pnfs_sync_inode(struct inode *inode, bool datasync) { if (!pnfs_enabled_sb(NFS_SERVER(inode))) return 0; return NFS_SERVER(inode)->pnfs_curr_ld->sync(inode, datasync); } static inline bool pnfs_layoutcommit_outstanding(struct inode *inode) { struct nfs_inode *nfsi = NFS_I(inode); return test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags) != 0 || test_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags) != 0; } static inline int pnfs_return_layout(struct inode *ino) { struct nfs_inode *nfsi = NFS_I(ino); struct nfs_server *nfss = NFS_SERVER(ino); if (pnfs_enabled_sb(nfss) && nfsi->layout) { set_bit(NFS_LAYOUT_RETURN_REQUESTED, &nfsi->layout->plh_flags); return _pnfs_return_layout(ino); } return 0; } static inline bool pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold 
*src, struct nfs_server *nfss) { return (dst && src && src->bm != 0 && nfss->pnfs_curr_ld && nfss->pnfs_curr_ld->id == src->l_type); } static inline u64 pnfs_calc_offset_end(u64 offset, u64 len) { if (len == NFS4_MAX_UINT64 || len >= NFS4_MAX_UINT64 - offset) return NFS4_MAX_UINT64; return offset + len - 1; } static inline u64 pnfs_calc_offset_length(u64 offset, u64 end) { if (end == NFS4_MAX_UINT64 || end <= offset) return NFS4_MAX_UINT64; return 1 + end - offset; } static inline void pnfs_copy_range(struct pnfs_layout_range *dst, const struct pnfs_layout_range *src) { memcpy(dst, src, sizeof(*dst)); } static inline u64 pnfs_end_offset(u64 start, u64 len) { if (NFS4_MAX_UINT64 - start <= len) return NFS4_MAX_UINT64; return start + len; } /* * Are 2 ranges intersecting? * start1 end1 * [----------------------------------) * start2 end2 * [----------------) */ static inline bool pnfs_is_range_intersecting(u64 start1, u64 end1, u64 start2, u64 end2) { return (end1 == NFS4_MAX_UINT64 || start2 < end1) && (end2 == NFS4_MAX_UINT64 || start1 < end2); } static inline bool pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1, const struct pnfs_layout_range *l2) { u64 end1 = pnfs_end_offset(l1->offset, l1->length); u64 end2 = pnfs_end_offset(l2->offset, l2->length); return pnfs_is_range_intersecting(l1->offset, end1, l2->offset, end2); } static inline bool pnfs_lseg_request_intersecting(struct pnfs_layout_segment *lseg, struct nfs_page *req) { u64 seg_last = pnfs_end_offset(lseg->pls_range.offset, lseg->pls_range.length); u64 req_last = req_offset(req) + req->wb_bytes; return pnfs_is_range_intersecting(lseg->pls_range.offset, seg_last, req_offset(req), req_last); } extern unsigned int layoutstats_timer; #ifdef NFS_DEBUG void nfs4_print_deviceid(const struct nfs4_deviceid *dev_id); #else static inline void nfs4_print_deviceid(const struct nfs4_deviceid *dev_id) { } #endif /* NFS_DEBUG */ #else /* CONFIG_NFS_V4_1 */ static inline bool nfs_have_layout(struct inode 
*inode) { return false; } static inline void pnfs_destroy_all_layouts(struct nfs_client *clp) { } static inline void pnfs_destroy_layout(struct nfs_inode *nfsi) { } static inline struct pnfs_layout_segment * pnfs_get_lseg(struct pnfs_layout_segment *lseg) { return NULL; } static inline void pnfs_put_lseg(struct pnfs_layout_segment *lseg) { } static inline int pnfs_return_layout(struct inode *ino) { return 0; } static inline int pnfs_commit_and_return_layout(struct inode *inode) { return 0; } static inline bool pnfs_ld_layoutret_on_setattr(struct inode *inode) { return false; } static inline bool pnfs_ld_read_whole_page(struct inode *inode) { return false; } static inline int pnfs_sync_inode(struct inode *inode, bool datasync) { return 0; } static inline bool pnfs_layoutcommit_outstanding(struct inode *inode) { return false; } static inline bool pnfs_roc(struct inode *ino, struct nfs4_layoutreturn_args *args, struct nfs4_layoutreturn_res *res, const struct cred *cred) { return false; } static inline int pnfs_roc_done(struct rpc_task *task, struct inode *inode, struct nfs4_layoutreturn_args **argpp, struct nfs4_layoutreturn_res **respp, int *ret) { return 0; } static inline void pnfs_roc_release(struct nfs4_layoutreturn_args *args, struct nfs4_layoutreturn_res *res, int ret) { } static inline bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task) { return false; } static inline void set_pnfs_layoutdriver(struct nfs_server *s, const struct nfs_fh *mntfh, struct nfs_fsinfo *fsinfo) { } static inline void unset_pnfs_layoutdriver(struct nfs_server *s) { } static inline int pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how, struct nfs_commit_info *cinfo) { return PNFS_NOT_ATTEMPTED; } static inline struct pnfs_ds_commit_info * pnfs_get_ds_info(struct inode *inode) { return NULL; } static inline bool pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg, struct nfs_commit_info *cinfo, u32 ds_commit_idx) { 
return false; } static inline bool pnfs_clear_request_commit(struct nfs_page *req, struct nfs_commit_info *cinfo) { return false; } static inline int pnfs_scan_commit_lists(struct inode *inode, struct nfs_commit_info *cinfo, int max) { return 0; } static inline struct nfs_page * pnfs_search_commit_reqs(struct inode *inode, struct nfs_commit_info *cinfo, struct page *page) { return NULL; } static inline int pnfs_layoutcommit_inode(struct inode *inode, bool sync) { return 0; } static inline bool pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src, struct nfs_server *nfss) { return false; } static inline struct nfs4_threshold *pnfs_mdsthreshold_alloc(void) { return NULL; } static inline void nfs4_pnfs_v3_ds_connect_unload(void) { } static inline bool nfs4_layout_refresh_old_stateid(nfs4_stateid *dst, struct pnfs_layout_range *dst_range, struct inode *inode) { return false; } static inline void pnfs_lgopen_prepare(struct nfs4_opendata *data, struct nfs_open_context *ctx) { } static inline void pnfs_parse_lgopen(struct inode *ino, struct nfs4_layoutget *lgp, struct nfs_open_context *ctx) { } static inline void nfs4_lgopen_release(struct nfs4_layoutget *lgp) { } static inline bool pnfs_layout_is_valid(const struct pnfs_layout_hdr *lo) { return false; } #endif /* CONFIG_NFS_V4_1 */ #if IS_ENABLED(CONFIG_NFS_V4_2) int pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags); #else static inline int pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags) { return 0; } #endif #endif /* FS_NFS_PNFS_H */
c0d3z3r0/linux-rockchip
fs/nfs/pnfs.h
C
gpl-2.0
25,607
<?php /** * Checkout terms and conditions checkbox * * @author WooThemes * @package WooCommerce/Templates * @version 3.1.1 */ if ( ! defined( 'ABSPATH' ) ) { exit; } $terms_page_id = wc_get_page_id( 'terms' ); if ( $terms_page_id > 0 && apply_filters( 'woocommerce_checkout_show_terms', true ) ) : $terms = get_post( $terms_page_id ); $terms_content = has_shortcode( $terms->post_content, 'woocommerce_checkout' ) ? '' : wc_format_content( $terms->post_content ); if ( $terms_content ) { do_action( 'woocommerce_checkout_before_terms_and_conditions' ); echo '<div class="woocommerce-terms-and-conditions" style="display: none; max-height: 200px; overflow: auto;">' . $terms_content . '</div>'; } ?> <p class="form-row terms wc-terms-and-conditions"> <label class="woocommerce-form__label woocommerce-form__label-for-checkbox checkbox"> <input type="checkbox" class="woocommerce-form__input woocommerce-form__input-checkbox input-checkbox" name="terms" <?php checked( apply_filters( 'woocommerce_terms_is_checked_default', isset( $_POST['terms'] ) ), true ); ?> id="terms" /> <span><?php printf( __( 'I&rsquo;ve read and accept the <a href="%s" target="_blank" class="woocommerce-terms-and-conditions-link">terms &amp; conditions</a>', 'woocommerce' ), esc_url( wc_get_page_permalink( 'terms' ) ) ); ?></span> <span class="required">*</span> </label> <input type="hidden" name="terms-field" value="1" /> </p> <?php do_action( 'woocommerce_checkout_after_terms_and_conditions' ); ?> <?php endif; ?>
JordanPak/RenaRomano
wp-content/plugins/woocommerce/templates/checkout/terms.php
PHP
gpl-2.0
1,543
<?php /** * Zend Framework * * LICENSE * * This source file is subject to the new BSD license that is bundled * with this package in the file LICENSE.txt. * It is also available through the world-wide-web at this URL: * http://framework.zend.com/license/new-bsd * If you did not receive a copy of the license and are unable to * obtain it through the world-wide-web, please send an email * to license@zend.com so we can send you a copy immediately. * * @category Zend * @package Zend_Gdata * @subpackage Gdata * @copyright Copyright (c) 2005-2012 Zend Technologies USA Inc. (http://www.zend.com) * @license http://framework.zend.com/license/new-bsd New BSD License * @version $Id: FeedLink.php 24593 2012-01-05 20:35:02Z matthew $ */ /** * @see Zend_Gdata_Extension */ #require_once 'Zend/Gdata/Extension.php'; /** * @see Zend_Gdata_Feed */ #require_once 'Zend/Gdata/Feed.php'; /** * Represents the gd:feedLink element * * @category Zend * @package Zend_Gdata * @subpackage Gdata * @copyright Copyright (c) 2005-2012 Zend Technologies USA Inc. 
(http://www.zend.com) * @license http://framework.zend.com/license/new-bsd New BSD License */ class Zend_Gdata_Extension_FeedLink extends Zend_Gdata_Extension { protected $_rootElement = 'feedLink'; protected $_countHint = null; protected $_href = null; protected $_readOnly = null; protected $_rel = null; protected $_feed = null; public function __construct($href = null, $rel = null, $countHint = null, $readOnly = null, $feed = null) { parent::__construct(); $this->_countHint = $countHint; $this->_href = $href; $this->_readOnly = $readOnly; $this->_rel = $rel; $this->_feed = $feed; } public function getDOM($doc = null, $majorVersion = 1, $minorVersion = null) { $element = parent::getDOM($doc, $majorVersion, $minorVersion); if ($this->_countHint !== null) { $element->setAttribute('countHint', $this->_countHint); } if ($this->_href !== null) { $element->setAttribute('href', $this->_href); } if ($this->_readOnly !== null) { $element->setAttribute('readOnly', ($this->_readOnly ? "true" : "false")); } if ($this->_rel !== null) { $element->setAttribute('rel', $this->_rel); } if ($this->_feed !== null) { $element->appendChild($this->_feed->getDOM($element->ownerDocument)); } return $element; } protected function takeChildFromDOM($child) { $absoluteNodeName = $child->namespaceURI . ':' . $child->localName; switch ($absoluteNodeName) { case $this->lookupNamespace('atom') . ':' . 
'feed'; $feed = new Zend_Gdata_Feed(); $feed->transferFromDOM($child); $this->_feed = $feed; break; default: parent::takeChildFromDOM($child); break; } } protected function takeAttributeFromDOM($attribute) { switch ($attribute->localName) { case 'countHint': $this->_countHint = $attribute->nodeValue; break; case 'href': $this->_href = $attribute->nodeValue; break; case 'readOnly': if ($attribute->nodeValue == "true") { $this->_readOnly = true; } else if ($attribute->nodeValue == "false") { $this->_readOnly = false; } else { throw new Zend_Gdata_App_InvalidArgumentException("Expected 'true' or 'false' for gCal:selected#value."); } break; case 'rel': $this->_rel = $attribute->nodeValue; break; default: parent::takeAttributeFromDOM($attribute); } } /** * @return string */ public function getHref() { return $this->_href; } public function setHref($value) { $this->_href = $value; return $this; } public function getReadOnly() { return $this->_readOnly; } public function setReadOnly($value) { $this->_readOnly = $value; return $this; } public function getRel() { return $this->_rel; } public function setRel($value) { $this->_rel = $value; return $this; } public function getFeed() { return $this->_feed; } public function setFeed($value) { $this->_feed = $value; return $this; } }
Eristoff47/P2
src/public/lib/Zend/Gdata/Extension/FeedLink.php
PHP
gpl-2.0
4,703
/* * Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ /* * @test * @bug 6520665 6357133 * @modules java.base/sun.net.www * @run main/othervm NTLMTest * @summary 6520665 & 6357133: NTLM authentication issues. */ import java.net.*; import java.io.*; import sun.net.www.MessageHeader; public class NTLMTest { public static void main(String[] args) { Authenticator.setDefault(new NullAuthenticator()); try { // Test with direct connection. ServerSocket serverSS = new ServerSocket(0); startServer(serverSS, false); runClient(Proxy.NO_PROXY, serverSS.getLocalPort()); // Test with proxy. 
serverSS = new ServerSocket(0); startServer(serverSS, true /*proxy*/); SocketAddress proxyAddr = new InetSocketAddress("localhost", serverSS.getLocalPort()); runClient(new Proxy(java.net.Proxy.Type.HTTP, proxyAddr), 8888); } catch (IOException e) { e.printStackTrace(); } } static void runClient(Proxy proxy, int serverPort) { try { String urlStr = "http://localhost:" + serverPort + "/"; URL url = new URL(urlStr); HttpURLConnection uc = (HttpURLConnection) url.openConnection(proxy); uc.getInputStream(); } catch (ProtocolException e) { /* java.net.ProtocolException: Server redirected too many times (20) */ throw new RuntimeException("Failed: ProtocolException", e); } catch (IOException ioe) { /* IOException is OK. We are expecting "java.io.IOException: Server * returned HTTP response code: 401 for URL: ..." */ //ioe.printStackTrace(); } catch (NullPointerException npe) { throw new RuntimeException("Failed: NPE thrown ", npe); } } static String[] serverResp = new String[] { "HTTP/1.1 401 Unauthorized\r\n" + "Content-Length: 0\r\n" + "WWW-Authenticate: NTLM\r\n\r\n", "HTTP/1.1 401 Unauthorized\r\n" + "Content-Length: 0\r\n" + "WWW-Authenticate: NTLM TlRMTVNTUAACAAAAAAAAACgAAAABggAAU3J2Tm9uY2UAAAAAAAAAAA==\r\n\r\n"}; static String[] proxyResp = new String[] { "HTTP/1.1 407 Proxy Authentication Required\r\n" + "Content-Length: 0\r\n" + "Proxy-Authenticate: NTLM\r\n\r\n", "HTTP/1.1 407 Proxy Authentication Required\r\n" + "Content-Length: 0\r\n" + "Proxy-Authenticate: NTLM TlRMTVNTUAACAAAAAAAAACgAAAABggAAU3J2Tm9uY2UAAAAAAAAAAA==\r\n\r\n"}; static void startServer(ServerSocket serverSS, boolean proxy) { final ServerSocket ss = serverSS; final boolean isProxy = proxy; Thread thread = new Thread(new Runnable() { public void run() { boolean doing2ndStageNTLM = false; while (true) { try { Socket s = ss.accept(); if (!doing2ndStageNTLM) { handleConnection(s, isProxy ? proxyResp : serverResp, 0, 1); doing2ndStageNTLM = true; } else { handleConnection(s, isProxy ? 
proxyResp : serverResp, 1, 2); doing2ndStageNTLM = false; } connectionCount++; //System.out.println("connectionCount = " + connectionCount); } catch (IOException ioe) { ioe.printStackTrace(); } } } }); thread.setDaemon(true); thread.start(); } static int connectionCount = 0; static void handleConnection(Socket s, String[] resp, int start, int end) { try { OutputStream os = s.getOutputStream(); for (int i=start; i<end; i++) { MessageHeader header = new MessageHeader (s.getInputStream()); //System.out.println("Input :" + header); //System.out.println("Output:" + resp[i]); os.write(resp[i].getBytes("ASCII")); } s.close(); } catch (IOException ioe) { ioe.printStackTrace(); } } static class NullAuthenticator extends java.net.Authenticator { public int count = 0; protected PasswordAuthentication getPasswordAuthentication() { count++; System.out.println("NullAuthenticator.getPasswordAuthentication called " + count + " times"); return null; } public int getCallCount() { return count; } } }
FauxFaux/jdk9-jdk
test/sun/net/www/protocol/http/NTLMTest.java
Java
gpl-2.0
5,862
package com.engc.smartedu.support.utils; import com.engc.smartedu.R; public enum StatusMode { offline(R.string.status_offline, -1), // 离线状态,没有图标 dnd(R.string.status_dnd, R.drawable.status_shield), // 请勿打扰 xa(R.string.status_xa, R.drawable.status_invisible), // 隐身 away(R.string.status_away, R.drawable.status_leave), // 离开 available(R.string.status_online, R.drawable.status_online), // 在线 chat(R.string.status_chat, R.drawable.status_qme);// Q我吧 private final int textId; private final int drawableId; StatusMode(int textId, int drawableId) { this.textId = textId; this.drawableId = drawableId; } public int getTextId() { return textId; } public int getDrawableId() { return drawableId; } public String toString() { return name(); } public static StatusMode fromString(String status) { return StatusMode.valueOf(status); } }
giserh/smartedu
src/com/engc/smartedu/support/utils/StatusMode.java
Java
gpl-3.0
909
-----------------------------------
-- Area: Dynamis
-- NPC:  Somnial Threshold
-----------------------------------

-----------------------------------
-- onTrigger: play event 101 when the player clicks the threshold.
-----------------------------------
function onTrigger(player, npc)
    player:startEvent(101, 0, 0, 5);
end;

-----------------------------------
-- onEventUpdate: nothing to do mid-event for this NPC.
-----------------------------------
function onEventUpdate(player, csid, option)
end;

-----------------------------------
-- onEventFinish: option 1 in event 101 repositions the player
-- (last setPos argument presumably selects the destination zone - verify).
-----------------------------------
function onEventFinish(player, csid, option)
    if (csid == 101 and option == 1) then
        player:setPos(154, -1, -170, 190, 118);
    end
end;
thedraked/darkstar
scripts/zones/Dynamis-Buburimu/npcs/Somnial_Threshold.lua
Lua
gpl-3.0
848
/* * Copyright (C) 1997-2005, International Business Machines Corporation and others. All Rights Reserved. ******************************************************************************* * * File PARSEPOS.H * * Modification History: * * Date Name Description * 07/09/97 helena Converted from java. * 07/17/98 stephen Added errorIndex support. * 05/11/99 stephen Cleaned up. ******************************************************************************* */ #ifndef PARSEPOS_H #define PARSEPOS_H #include "unicode/utypes.h" #include "unicode/uobject.h" U_NAMESPACE_BEGIN /** * \file * \brief C++ API: Canonical Iterator */ /** * <code>ParsePosition</code> is a simple class used by <code>Format</code> * and its subclasses to keep track of the current position during parsing. * The <code>parseObject</code> method in the various <code>Format</code> * classes requires a <code>ParsePosition</code> object as an argument. * * <p> * By design, as you parse through a string with different formats, * you can use the same <code>ParsePosition</code>, since the index parameter * records the current position. * * The ParsePosition class is not suitable for subclassing. * * @version 1.3 10/30/97 * @author Mark Davis, Helena Shih * @see java.text.Format */ class U_COMMON_API ParsePosition : public UObject { public: /** * Default constructor, the index starts with 0 as default. * @stable ICU 2.0 */ ParsePosition() : UObject(), index(0), errorIndex(-1) {} /** * Create a new ParsePosition with the given initial index. * @param newIndex the new text offset. * @stable ICU 2.0 */ ParsePosition(int32_t newIndex) : UObject(), index(newIndex), errorIndex(-1) {} /** * Copy constructor * @param copy the object to be copied from. 
* @stable ICU 2.0 */ ParsePosition(const ParsePosition& copy) : UObject(copy), index(copy.index), errorIndex(copy.errorIndex) {} /** * Destructor * @stable ICU 2.0 */ virtual ~ParsePosition(); /** * Assignment operator * @stable ICU 2.0 */ ParsePosition& operator=(const ParsePosition& copy); /** * Equality operator. * @return TRUE if the two parse positions are equal, FALSE otherwise. * @stable ICU 2.0 */ UBool operator==(const ParsePosition& that) const; /** * Equality operator. * @return TRUE if the two parse positions are not equal, FALSE otherwise. * @stable ICU 2.0 */ UBool operator!=(const ParsePosition& that) const; /** * Clone this object. * Clones can be used concurrently in multiple threads. * If an error occurs, then NULL is returned. * The caller must delete the clone. * * @return a clone of this object * * @see getDynamicClassID * @stable ICU 2.8 */ ParsePosition *clone() const; /** * Retrieve the current parse position. On input to a parse method, this * is the index of the character at which parsing will begin; on output, it * is the index of the character following the last character parsed. * @return the current index. * @stable ICU 2.0 */ int32_t getIndex(void) const; /** * Set the current parse position. * @param index the new index. * @stable ICU 2.0 */ void setIndex(int32_t index); /** * Set the index at which a parse error occurred. Formatters * should set this before returning an error code from their * parseObject method. The default value is -1 if this is not * set. * @stable ICU 2.0 */ void setErrorIndex(int32_t ei); /** * Retrieve the index at which an error occurred, or -1 if the * error index has not been set. * @stable ICU 2.0 */ int32_t getErrorIndex(void) const; /** * ICU "poor man's RTTI", returns a UClassID for this class. * * @stable ICU 2.2 */ static UClassID U_EXPORT2 getStaticClassID(); /** * ICU "poor man's RTTI", returns a UClassID for the actual class. 
* * @stable ICU 2.2 */ virtual UClassID getDynamicClassID() const; private: /** * Input: the place you start parsing. * <br>Output: position where the parse stopped. * This is designed to be used serially, * with each call setting index up for the next one. */ int32_t index; /** * The index at which a parse error occurred. */ int32_t errorIndex; }; inline ParsePosition& ParsePosition::operator=(const ParsePosition& copy) { index = copy.index; errorIndex = copy.errorIndex; return *this; } inline UBool ParsePosition::operator==(const ParsePosition& copy) const { if(index != copy.index || errorIndex != copy.errorIndex) return FALSE; else return TRUE; } inline UBool ParsePosition::operator!=(const ParsePosition& copy) const { return !operator==(copy); } inline int32_t ParsePosition::getIndex() const { return index; } inline void ParsePosition::setIndex(int32_t offset) { this->index = offset; } inline int32_t ParsePosition::getErrorIndex() const { return errorIndex; } inline void ParsePosition::setErrorIndex(int32_t ei) { this->errorIndex = ei; } U_NAMESPACE_END #endif
NeCarbon/ivmultiplayer
Client/Core/EA/EAWebKit/internal/icu/include/common/unicode/parsepos.h
C
gpl-3.0
5,698
// Designer-generated partial class for BackstageView: declares the two child
// panels (menu docked left, page host filling the remainder) plus standard
// WinForms disposal plumbing.  Modify layout via the Forms designer.
namespace MissionPlanner.Controls.BackstageView
{
    partial class BackstageView
    {
        /// <summary>
        /// Required designer variable.
        /// </summary>
        private System.ComponentModel.IContainer components = null;

        /// <summary>
        /// Clean up any resources being used.
        /// </summary>
        /// <param name="disposing">true if managed resources should be disposed; otherwise, false.</param>
        protected override void Dispose(bool disposing)
        {
            if (disposing && (components != null))
            {
                components.Dispose();
            }
            base.Dispose(disposing);
        }

        #region Component Designer generated code

        /// <summary>
        /// Required method for Designer support - do not modify
        /// the contents of this method with the code editor.
        /// </summary>
        private void InitializeComponent()
        {
            this.pnlPages = new System.Windows.Forms.Panel();
            this.pnlMenu = new MissionPlanner.Controls.BackstageView.BackStageViewMenuPanel();
            this.SuspendLayout();
            //
            // pnlPages
            //
            this.pnlPages.AutoScroll = true;
            this.pnlPages.Dock = System.Windows.Forms.DockStyle.Fill;
            this.pnlPages.Location = new System.Drawing.Point(203, 0);
            this.pnlPages.Margin = new System.Windows.Forms.Padding(4);
            this.pnlPages.MinimumSize = new System.Drawing.Size(133, 0);
            this.pnlPages.Name = "pnlPages";
            this.pnlPages.Size = new System.Drawing.Size(454, 422);
            this.pnlPages.TabIndex = 0;
            //
            // pnlMenu
            //
            this.pnlMenu.Dock = System.Windows.Forms.DockStyle.Left;
            this.pnlMenu.Location = new System.Drawing.Point(0, 0);
            this.pnlMenu.Margin = new System.Windows.Forms.Padding(4);
            this.pnlMenu.Name = "pnlMenu";
            this.pnlMenu.Size = new System.Drawing.Size(203, 422);
            this.pnlMenu.TabIndex = 1;
            //
            // BackstageView
            //
            this.AutoScaleMode = System.Windows.Forms.AutoScaleMode.None;
            this.Controls.Add(this.pnlPages);
            this.Controls.Add(this.pnlMenu);
            this.Margin = new System.Windows.Forms.Padding(4);
            this.Name = "BackstageView";
            this.Size = new System.Drawing.Size(657, 422);
            this.Load += new System.EventHandler(this.BackstageView_Load);
            this.ResumeLayout(false);
        }

        #endregion

        // Hosts the currently selected page; fills all width right of the menu.
        private System.Windows.Forms.Panel pnlPages;
        // Left-docked menu listing the available pages.
        private BackStageViewMenuPanel pnlMenu;
    }
}
jason406/MissionPlanner
ExtLibs/Controls/BackstageView/BackstageView.Designer.cs
C#
gpl-3.0
2,815
<?php
// This file is part of Moodle - http://moodle.org/
//
// Moodle is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Moodle is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Moodle. If not, see <http://www.gnu.org/licenses/>.

/**
 * Version metadata for the file-based lock plugin of the Cache API.
 *
 * @package cachelock_file
 * @category cache
 * @copyright 2012 Sam Hemelryk
 * @license http://www.gnu.org/copyleft/gpl.html GNU GPL v3 or later
 */

defined('MOODLE_INTERNAL') || die();

$plugin->version   = 2021052500;       // The current plugin version (Date: YYYYMMDDXX).
$plugin->requires  = 2021052500;       // The minimum Moodle core version this plugin requires.
$plugin->component = 'cachelock_file'; // Full frankenstyle name of the plugin (used for diagnostics).
mudrd8mz/moodle
cache/locks/file/version.php
PHP
gpl-3.0
1,181
/**
 * jQuery EasyUI 1.5
 *
 * Copyright (c) 2009-2016 www.jeasyui.com. All rights reserved.
 *
 * Licensed under the freeware license: http://www.jeasyui.com/license_freeware.php
 * To use it on other terms please contact us: info@jeasyui.com
 *
 */
// datetimebox plugin (minified build): a datebox whose drop-down calendar
// panel gains a timespinner row, so one input edits both date and time.
// Original identifier names were lost in minification; the comments below
// recover the intent from the visible calls.
(function($){
// Create/refresh the widget on input element _2: wrap it in a datebox that
// re-syncs from the current value when its panel opens, swap the CSS marker
// class, forward the calendar's onSelect to the user handler, lazily insert
// the timespinner input below the calendar, and load the initial value.
function _1(_2){
var _3=$.data(_2,"datetimebox");
var _4=_3.options;
$(_2).datebox($.extend({},_4,{onShowPanel:function(){
var _5=$(this).datetimebox("getValue");
_d(this,_5,true);
_4.onShowPanel.call(this);
},formatter:$.fn.datebox.defaults.formatter,parser:$.fn.datebox.defaults.parser}));
$(_2).removeClass("datebox-f").addClass("datetimebox-f");
$(_2).datebox("calendar").calendar({onSelect:function(_6){
_4.onSelect.call(this.target,_6);
}});
if(!_3.spinner){
var _7=$(_2).datebox("panel");
var p=$("<div style=\"padding:2px\"><input></div>").insertAfter(_7.children("div.datebox-calendar-inner"));
_3.spinner=p.children("input");
}
_3.spinner.timespinner({width:_4.spinnerWidth,showSeconds:_4.showSeconds,separator:_4.timeSeparator});
$(_2).datetimebox("initValue",_4.value);
};
// Combine the calendar's currently selected date with the spinner's
// hours/minutes/seconds into one Date object.
function _8(_9){
var c=$(_9).datetimebox("calendar");
var t=$(_9).datetimebox("spinner");
var _a=c.calendar("options").current;
return new Date(_a.getFullYear(),_a.getMonth(),_a.getDate(),t.timespinner("getHours"),t.timespinner("getMinutes"),t.timespinner("getSeconds"));
};
// "query" key handler: push typed text into the widget without
// re-formatting it (third argument true keeps the raw text).
function _b(_c,q){
_d(_c,q,true);
};
// Commit: format the Date assembled from calendar+spinner, store it as the
// widget value and close the drop-down panel.
function _e(_f){
var _10=$.data(_f,"datetimebox").options;
var _11=_8(_f);
_d(_f,_10.formatter.call(_f,_11));
$(_f).combo("hidePanel");
};
// Set value _13 on widget _12.  Unless _14 ("keep raw text") is truthy, the
// value is round-tripped through parser+formatter to normalize the display
// text.  The calendar and spinner are then moved to the parsed Date.
function _d(_12,_13,_14){
var _15=$.data(_12,"datetimebox").options;
$(_12).combo("setValue",_13);
if(!_14){
if(_13){
var _16=_15.parser.call(_12,_13);
$(_12).combo("setText",_15.formatter.call(_12,_16));
$(_12).combo("setValue",_15.formatter.call(_12,_16));
}else{
$(_12).combo("setText",_13);
}
}
var _16=_15.parser.call(_12,_13);
$(_12).datetimebox("calendar").calendar("moveTo",_16);
$(_12).datetimebox("spinner").timespinner("setValue",_17(_16));
// Render the Date's time-of-day as zero-padded "HH<sep>MM[<sep>SS]".
function _17(_18){
function _19(_1a){
return (_1a<10?"0":"")+_1a;
};
var tt=[_19(_18.getHours()),_19(_18.getMinutes())];
if(_15.showSeconds){
tt.push(_19(_18.getSeconds()));
}
return tt.join($(_12).datetimebox("spinner").timespinner("options").separator);
};
};
// Plugin entry point: a string first argument dispatches to a method
// (falling back to datebox methods); an options object (or nothing)
// creates or re-configures each matched element.
$.fn.datetimebox=function(_1b,_1c){
if(typeof _1b=="string"){
var _1d=$.fn.datetimebox.methods[_1b];
if(_1d){
return _1d(this,_1c);
}else{
return this.datebox(_1b,_1c);
}
}
_1b=_1b||{};
return this.each(function(){
var _1e=$.data(this,"datetimebox");
if(_1e){
$.extend(_1e.options,_1b);
}else{
$.data(this,"datetimebox",{options:$.extend({},$.fn.datetimebox.defaults,$.fn.datetimebox.parseOptions(this),_1b)});
}
_1(this);
});
};
// Public methods reachable via $(el).datetimebox("name", param).
$.fn.datetimebox.methods={options:function(jq){
var _1f=jq.datebox("options");
return $.extend($.data(jq[0],"datetimebox").options,{originalValue:_1f.originalValue,disabled:_1f.disabled,readonly:_1f.readonly});
},cloneFrom:function(jq,_20){
return jq.each(function(){
$(this).datebox("cloneFrom",_20);
$.data(this,"datetimebox",{options:$.extend(true,{},$(_20).datetimebox("options")),spinner:$(_20).datetimebox("spinner")});
$(this).removeClass("datebox-f").addClass("datetimebox-f");
});
},spinner:function(jq){
return $.data(jq[0],"datetimebox").spinner;
},initValue:function(jq,_21){
return jq.each(function(){
var _22=$(this).datetimebox("options");
var _23=_22.value;
if(_23){
_23=_22.formatter.call(this,_22.parser.call(this,_23));
}
$(this).combo("initValue",_23).combo("setText",_23);
});
},setValue:function(jq,_24){
return jq.each(function(){
_d(this,_24);
});
},reset:function(jq){
return jq.each(function(){
var _25=$(this).datetimebox("options");
$(this).datetimebox("setValue",_25.originalValue);
});
}};
// Merge datebox's declarative (attribute-based) options with the
// datetimebox-specific attributes.
$.fn.datetimebox.parseOptions=function(_26){
var t=$(_26);
return $.extend({},$.fn.datebox.parseOptions(_26),$.parser.parseOptions(_26,["timeSeparator","spinnerWidth",{showSeconds:"boolean"}]));
};
// Defaults: inherit from datebox, add spinner sizing / seconds toggle /
// separator, key handling (Enter commits, typing queries), the three panel
// buttons (now / OK / close), and the combined date+time formatter/parser.
$.fn.datetimebox.defaults=$.extend({},$.fn.datebox.defaults,{spinnerWidth:"100%",showSeconds:true,timeSeparator:":",keyHandler:{up:function(e){
},down:function(e){
},left:function(e){
},right:function(e){
},enter:function(e){
_e(this);
},query:function(q,e){
_b(this,q);
}},buttons:[{text:function(_27){
return $(_27).datetimebox("options").currentText;
},handler:function(_28){
var _29=$(_28).datetimebox("options");
_d(_28,_29.formatter.call(_28,new Date()));
$(_28).datetimebox("hidePanel");
}},{text:function(_2a){
return $(_2a).datetimebox("options").okText;
},handler:function(_2b){
_e(_2b);
}},{text:function(_2c){
return $(_2c).datetimebox("options").closeText;
},handler:function(_2d){
$(_2d).datetimebox("hidePanel");
}}],formatter:function(_2e){
var h=_2e.getHours();
var M=_2e.getMinutes();
var s=_2e.getSeconds();
function _2f(_30){
return (_30<10?"0":"")+_30;
};
var _31=$(this).datetimebox("spinner").timespinner("options").separator;
var r=$.fn.datebox.defaults.formatter(_2e)+" "+_2f(h)+_31+_2f(M);
if($(this).datetimebox("options").showSeconds){
r+=_31+_2f(s);
}
return r;
},parser:function(s){
if($.trim(s)==""){
return new Date();
}
var dt=s.split(" ");
var d=$.fn.datebox.defaults.parser(dt[0]);
if(dt.length<2){
return d;
}
var _32=$(this).datetimebox("spinner").timespinner("options").separator;
var tt=dt[1].split(_32);
var _33=parseInt(tt[0],10)||0;
var _34=parseInt(tt[1],10)||0;
var _35=parseInt(tt[2],10)||0;
return new Date(d.getFullYear(),d.getMonth(),d.getDate(),_33,_34,_35);
}});
})(jQuery);
zyzydream/zhihu
zhihu/src/main/webapp/easyui/plugins/jquery.datetimebox.js
JavaScript
gpl-3.0
5,402
/* ------------------------------------------------------------------------------ * * # Dashboard configuration * * Demo dashboard configuration. Contains charts and plugin inits * * Version: 1.0 * Latest update: Aug 1, 2015 * * ---------------------------------------------------------------------------- */ $(function() { // Switchery toggles // ------------------------------ var switches = Array.prototype.slice.call(document.querySelectorAll('.switch')); switches.forEach(function(html) { var switchery = new Switchery(html, {color: '#4CAF50'}); }); // Daterange picker // ------------------------------ $('.daterange-ranges').daterangepicker( { startDate: moment().subtract('days', 29), endDate: moment(), minDate: '01/01/2012', maxDate: '12/31/2016', dateLimit: { days: 60 }, ranges: { 'Today': [moment(), moment()], 'Yesterday': [moment().subtract('days', 1), moment().subtract('days', 1)], 'Last 7 Days': [moment().subtract('days', 6), moment()], 'Last 30 Days': [moment().subtract('days', 29), moment()], 'This Month': [moment().startOf('month'), moment().endOf('month')], 'Last Month': [moment().subtract('month', 1).startOf('month'), moment().subtract('month', 1).endOf('month')] }, opens: 'left', applyClass: 'btn-small bg-slate-600 btn-block', cancelClass: 'btn-small btn-default btn-block', format: 'MM/DD/YYYY' }, function(start, end) { $('.daterange-ranges span').html(start.format('MMMM D') + ' - ' + end.format('MMMM D')); } ); $('.daterange-ranges span').html(moment().subtract('days', 29).format('MMMM D') + ' - ' + moment().format('MMMM D')); // Traffic sources stream chart // ------------------------------ trafficSources('#traffic-sources', 330); // initialize chart // Chart setup function trafficSources(element, height) { // Basic setup // ------------------------------ // Define main variables var d3Container = d3.select(element), margin = {top: 5, right: 50, bottom: 40, left: 50}, width = d3Container.node().getBoundingClientRect().width - margin.left - 
margin.right, height = height - margin.top - margin.bottom, tooltipOffset = 30; // Tooltip var tooltip = d3Container .append("div") .attr("class", "d3-tip e") .style("display", "none") // Format date var format = d3.time.format("%m/%d/%y %H:%M"); var formatDate = d3.time.format("%H:%M"); // Colors var colorrange = ['#03A9F4', '#29B6F6', '#4FC3F7', '#81D4FA', '#B3E5FC', '#E1F5FE']; // Construct scales // ------------------------------ // Horizontal var x = d3.time.scale().range([0, width]); // Vertical var y = d3.scale.linear().range([height, 0]); // Colors var z = d3.scale.ordinal().range(colorrange); // Create axes // ------------------------------ // Horizontal var xAxis = d3.svg.axis() .scale(x) .orient("bottom") .ticks(d3.time.hours, 4) .innerTickSize(4) .tickPadding(8) .tickFormat(d3.time.format("%H:%M")); // Display hours and minutes in 24h format // Left vertical var yAxis = d3.svg.axis() .scale(y) .ticks(6) .innerTickSize(4) .outerTickSize(0) .tickPadding(8) .tickFormat(function (d) { return (d/1000) + "k"; }); // Right vertical var yAxis2 = yAxis; // Dash lines var gridAxis = d3.svg.axis() .scale(y) .orient("left") .ticks(6) .tickPadding(8) .tickFormat("") .tickSize(-width, 0, 0); // Create chart // ------------------------------ // Container var container = d3Container.append("svg") // SVG element var svg = container .attr('width', width + margin.left + margin.right) .attr("height", height + margin.top + margin.bottom) .append("g") .attr("transform", "translate(" + margin.left + "," + margin.top + ")"); // Construct chart layout // ------------------------------ // Stack var stack = d3.layout.stack() .offset("silhouette") .values(function(d) { return d.values; }) .x(function(d) { return d.date; }) .y(function(d) { return d.value; }); // Nest var nest = d3.nest() .key(function(d) { return d.key; }); // Area var area = d3.svg.area() .interpolate("cardinal") .x(function(d) { return x(d.date); }) .y0(function(d) { return y(d.y0); }) .y1(function(d) { return 
y(d.y0 + d.y); }); // Load data // ------------------------------ d3.csv("assets/demo_data/dashboard/traffic_sources.csv", function (error, data) { // Pull out values data.forEach(function (d) { d.date = format.parse(d.date); d.value = +d.value; }); // Stack and nest layers var layers = stack(nest.entries(data)); // Set input domains // ------------------------------ // Horizontal x.domain(d3.extent(data, function(d, i) { return d.date; })); // Vertical y.domain([0, d3.max(data, function(d) { return d.y0 + d.y; })]); // Add grid // ------------------------------ // Horizontal grid. Must be before the group svg.append("g") .attr("class", "d3-grid-dashed") .call(gridAxis); // // Append chart elements // // Stream layers // ------------------------------ // Create group var group = svg.append('g') .attr('class', 'streamgraph-layers-group'); // And append paths to this group var layer = group.selectAll(".streamgraph-layer") .data(layers) .enter() .append("path") .attr("class", "streamgraph-layer") .attr("d", function(d) { return area(d.values); }) .style('stroke', '#fff') .style('stroke-width', 0.5) .style("fill", function(d, i) { return z(i); }); // Add transition var layerTransition = layer .style('opacity', 0) .transition() .duration(750) .delay(function(d, i) { return i * 50; }) .style('opacity', 1) // Append axes // ------------------------------ // // Left vertical // svg.append("g") .attr("class", "d3-axis d3-axis-left d3-axis-solid") .call(yAxis.orient("left")); // Hide first tick d3.select(svg.selectAll('.d3-axis-left .tick text')[0][0]) .style("visibility", "hidden"); // // Right vertical // svg.append("g") .attr("class", "d3-axis d3-axis-right d3-axis-solid") .attr("transform", "translate(" + width + ", 0)") .call(yAxis2.orient("right")); // Hide first tick d3.select(svg.selectAll('.d3-axis-right .tick text')[0][0]) .style("visibility", "hidden"); // // Horizontal // var xaxisg = svg.append("g") .attr("class", "d3-axis d3-axis-horizontal d3-axis-solid") 
.attr("transform", "translate(0," + height + ")") .call(xAxis); // Add extra subticks for hidden hours xaxisg.selectAll(".d3-axis-subticks") .data(x.ticks(d3.time.hours), function(d) { return d; }) .enter() .append("line") .attr("class", "d3-axis-subticks") .attr("y1", 0) .attr("y2", 4) .attr("x1", x) .attr("x2", x); // Add hover line and pointer // ------------------------------ // Append group to the group of paths to prevent appearance outside chart area var hoverLineGroup = group.append("g") .attr("class", "hover-line"); // Add line var hoverLine = hoverLineGroup .append("line") .attr("y1", 0) .attr("y2", height) .style('fill', 'none') .style('stroke', '#fff') .style('stroke-width', 1) .style('pointer-events', 'none') .style('shape-rendering', 'crispEdges') .style("opacity", 0); // Add pointer var hoverPointer = hoverLineGroup .append("rect") .attr("x", 2) .attr("y", 2) .attr("width", 6) .attr("height", 6) .style('fill', '#03A9F4') .style('stroke', '#fff') .style('stroke-width', 1) .style('shape-rendering', 'crispEdges') .style('pointer-events', 'none') .style("opacity", 0); // Append events to the layers group // ------------------------------ layerTransition.each("end", function() { layer .on("mouseover", function (d, i) { svg.selectAll(".streamgraph-layer") .transition() .duration(250) .style("opacity", function (d, j) { return j != i ? 
0.75 : 1; // Mute all except hovered }); }) .on("mousemove", function (d, i) { mouse = d3.mouse(this); mousex = mouse[0]; mousey = mouse[1]; datearray = []; var invertedx = x.invert(mousex); invertedx = invertedx.getHours(); var selected = (d.values); for (var k = 0; k < selected.length; k++) { datearray[k] = selected[k].date datearray[k] = datearray[k].getHours(); } mousedate = datearray.indexOf(invertedx); pro = d.values[mousedate].value; // Display mouse pointer hoverPointer .attr("x", mousex - 3) .attr("y", mousey - 6) .style("opacity", 1); hoverLine .attr("x1", mousex) .attr("x2", mousex) .style("opacity", 1); // // Tooltip // // Tooltip data tooltip.html( "<ul class='list-unstyled mb-5'>" + "<li>" + "<div class='text-size-base mt-5 mb-5'><i class='icon-circle-left2 position-left'></i>" + d.key + "</div>" + "</li>" + "<li>" + "Visits: &nbsp;" + "<span class='text-semibold pull-right'>" + pro + "</span>" + "</li>" + "<li>" + "Time: &nbsp; " + "<span class='text-semibold pull-right'>" + formatDate(d.values[mousedate].date) + "</span>" + "</li>" + "</ul>" ) .style("display", "block"); // Tooltip arrow tooltip.append('div').attr('class', 'd3-tip-arrow'); }) .on("mouseout", function (d, i) { // Revert full opacity to all paths svg.selectAll(".streamgraph-layer") .transition() .duration(250) .style("opacity", 1); // Hide cursor pointer hoverPointer.style("opacity", 0); // Hide tooltip tooltip.style("display", "none"); hoverLine.style("opacity", 0); }); }); // Append events to the chart container // ------------------------------ d3Container .on("mousemove", function (d, i) { mouse = d3.mouse(this); mousex = mouse[0]; mousey = mouse[1]; // Display hover line //.style("opacity", 1); // Move tooltip vertically tooltip.style("top", (mousey - ($('.d3-tip').outerHeight() / 2)) - 2 + "px") // Half tooltip height - half arrow width // Move tooltip horizontally if(mousex >= ($(element).outerWidth() - $('.d3-tip').outerWidth() - margin.right - (tooltipOffset * 2))) { tooltip 
.style("left", (mousex - $('.d3-tip').outerWidth() - tooltipOffset) + "px") // Change tooltip direction from right to left to keep it inside graph area .attr("class", "d3-tip w"); } else { tooltip .style("left", (mousex + tooltipOffset) + "px" ) .attr("class", "d3-tip e"); } }); }); // Resize chart // ------------------------------ // Call function on window resize $(window).on('resize', resizeStream); // Call function on sidebar width change $(document).on('click', '.sidebar-control', resizeStream); // Resize function // // Since D3 doesn't support SVG resize by default, // we need to manually specify parts of the graph that need to // be updated on window resize function resizeStream() { // Layout // ------------------------- // Define width width = d3Container.node().getBoundingClientRect().width - margin.left - margin.right; // Main svg width container.attr("width", width + margin.left + margin.right); // Width of appended group svg.attr("width", width + margin.left + margin.right); // Horizontal range x.range([0, width]); // Chart elements // ------------------------- // Horizontal axis svg.selectAll('.d3-axis-horizontal').call(xAxis); // Horizontal axis subticks svg.selectAll('.d3-axis-subticks').attr("x1", x).attr("x2", x); // Grid lines width svg.selectAll(".d3-grid-dashed").call(gridAxis.tickSize(-width, 0, 0)) // Right vertical axis svg.selectAll(".d3-axis-right").attr("transform", "translate(" + width + ", 0)"); // Area paths svg.selectAll('.streamgraph-layer').attr("d", function(d) { return area(d.values); }); } } // App sales lines chart // ------------------------------ appSalesLines('#app_sales', 255); // initialize chart // Chart setup function appSalesLines(element, height) { // Basic setup // ------------------------------ // Define main variables var d3Container = d3.select(element), margin = {top: 5, right: 30, bottom: 30, left: 50}, width = d3Container.node().getBoundingClientRect().width - margin.left - margin.right, height = height - 
margin.top - margin.bottom; // Tooltip var tooltip = d3.tip() .attr('class', 'd3-tip') .html(function (d) { return "<ul class='list-unstyled mb-5'>" + "<li>" + "<div class='text-size-base mt-5 mb-5'><i class='icon-circle-left2 position-left'></i>" + d.name + " app" + "</div>" + "</li>" + "<li>" + "Sales: &nbsp;" + "<span class='text-semibold pull-right'>" + d.value + "</span>" + "</li>" + "<li>" + "Revenue: &nbsp; " + "<span class='text-semibold pull-right'>" + "$" + (d.value * 25).toFixed(2) + "</span>" + "</li>" + "</ul>"; }); // Format date var parseDate = d3.time.format("%Y/%m/%d").parse, formatDate = d3.time.format("%b %d, '%y"); // Line colors var scale = ["#4CAF50", "#FF5722", "#5C6BC0"], color = d3.scale.ordinal().range(scale); // Create chart // ------------------------------ // Container var container = d3Container.append('svg'); // SVG element var svg = container .attr('width', width + margin.left + margin.right) .attr('height', height + margin.top + margin.bottom) .append("g") .attr("transform", "translate(" + margin.left + "," + margin.top + ")") .call(tooltip); // Add date range switcher // ------------------------------ // Menu var menu = $("#select_date").multiselect({ buttonClass: 'btn btn-link text-semibold', enableHTML: true, dropRight: true, onChange: function() { change(), $.uniform.update(); }, buttonText: function (options, element) { var selected = ''; options.each(function() { selected += $(this).html() + ', '; }); return '<span class="status-mark border-warning position-left"></span>' + selected.substr(0, selected.length -2); } }); // Radios $(".multiselect-container input").uniform({ radioClass: 'choice' }); // Load data // ------------------------------ d3.csv("assets/demo_data/dashboard/app_sales.csv", function(error, data) { formatted = data; redraw(); }); // Construct layout // ------------------------------ // Add events var altKey; d3.select(window) .on("keydown", function() { altKey = d3.event.altKey; }) .on("keyup", function() { 
altKey = false; }); // Set terms of transition on date change function change() { d3.transition() .duration(altKey ? 7500 : 500) .each(redraw); } // Main chart drawing function // ------------------------------ function redraw() { // Construct chart layout // ------------------------------ // Create data nests var nested = d3.nest() .key(function(d) { return d.type; }) .map(formatted) // Get value from menu selection // the option values correspond //to the [type] value we used to nest the data var series = menu.val(); // Only retrieve data from the selected series using nest var data = nested[series]; // For object constancy we will need to set "keys", one for each type of data (column name) exclude all others. color.domain(d3.keys(data[0]).filter(function(key) { return (key !== "date" && key !== "type"); })); // Setting up color map var linedata = color.domain().map(function(name) { return { name: name, values: data.map(function(d) { return {name: name, date: parseDate(d.date), value: parseFloat(d[name], 10)}; }) }; }); // Draw the line var line = d3.svg.line() .x(function(d) { return x(d.date); }) .y(function(d) { return y(d.value); }) .interpolate('cardinal'); // Construct scales // ------------------------------ // Horizontal var x = d3.time.scale() .domain([ d3.min(linedata, function(c) { return d3.min(c.values, function(v) { return v.date; }); }), d3.max(linedata, function(c) { return d3.max(c.values, function(v) { return v.date; }); }) ]) .range([0, width]); // Vertical var y = d3.scale.linear() .domain([ d3.min(linedata, function(c) { return d3.min(c.values, function(v) { return v.value; }); }), d3.max(linedata, function(c) { return d3.max(c.values, function(v) { return v.value; }); }) ]) .range([height, 0]); // Create axes // ------------------------------ // Horizontal var xAxis = d3.svg.axis() .scale(x) .orient("bottom") .tickPadding(8) .ticks(d3.time.days) .innerTickSize(4) .tickFormat(d3.time.format("%a")); // Display hours and minutes in 24h format 
    // Vertical
    var yAxis = d3.svg.axis()
        .scale(y)
        .orient("left")
        .ticks(6)
        .tickSize(0 -width) // negative tick size draws full-width grid lines
        .tickPadding(8);

    //
    // Append chart elements
    //

    // Append axes
    // ------------------------------

    // Horizontal
    svg.append("g")
        .attr("class", "d3-axis d3-axis-horizontal d3-axis-solid")
        .attr("transform", "translate(0," + height + ")");

    // Vertical
    svg.append("g")
        .attr("class", "d3-axis d3-axis-vertical d3-axis-transparent");

    // Append lines
    // ------------------------------

    // Bind the data
    var lines = svg.selectAll(".lines")
        .data(linedata)

    // Append a group tag for each line
    var lineGroup = lines
        .enter()
        .append("g")
        .attr("class", "lines")
        .attr('id', function(d){ return d.name + "-line"; });

    // Append the line to the graph.
    // NOTE(review): line(d.values[0]) passes a single point, producing a
    // degenerate initial path; the full path is redrawn below by the
    // lineUpdate transition using line(d.values).
    lineGroup.append("path")
        .attr("class", "d3-line d3-line-medium")
        .style("stroke", function(d) { return color(d.name); })
        .style('opacity', 0)
        .attr("d", function(d) { return line(d.values[0]); })
        .transition()
        .duration(500)
        .delay(function(d, i) { return i * 200; })
        .style('opacity', 1);

    // Append circles
    // ------------------------------
    var circles = lines.selectAll("circle")
        .data(function(d) { return d.values; })
        .enter()
        .append("circle")
        .attr("class", "d3-line-circle d3-line-circle-medium")
        .attr("cx", function(d,i){return x(d.date)})
        .attr("cy",function(d,i){return y(d.value)})
        .attr("r", 3)
        .style('fill', '#fff')
        .style("stroke", function(d) { return color(d.name); });

    // Add transition
    circles
        .style('opacity', 0)
        .transition()
        .duration(500)
        .delay(500)
        .style('opacity', 1);

    // Append tooltip
    // ------------------------------

    // Add tooltip on circle hover
    circles
        .on("mouseover", function (d) {
            tooltip.offset([-15, 0]).show(d);

            // Animate circle radius
            d3.select(this).transition().duration(250).attr('r', 4);
        })
        .on("mouseout", function (d) {
            tooltip.hide(d);

            // Animate circle radius
            d3.select(this).transition().duration(250).attr('r', 3);
        });

    // Change tooltip direction of first point
    // to always keep it inside chart, useful on
    // mobiles
    lines.each(function (d) {
        d3.select(d3.select(this).selectAll('circle')[0][0])
            .on("mouseover", function (d) {
                tooltip.offset([0, 15]).direction('e').show(d);
                // Animate circle radius
                d3.select(this).transition().duration(250).attr('r', 4);
            })
            .on("mouseout", function (d) {
                tooltip.direction('n').hide(d);
                // Animate circle radius
                d3.select(this).transition().duration(250).attr('r', 3);
            });
    })

    // Change tooltip direction of last point
    // to always keep it inside chart, useful on mobiles
    lines.each(function (d) {
        d3.select(d3.select(this).selectAll('circle')[0][d3.select(this).selectAll('circle').size() - 1])
            .on("mouseover", function (d) {
                tooltip.offset([0, -15]).direction('w').show(d);
                // Animate circle radius
                d3.select(this).transition().duration(250).attr('r', 4);
            })
            .on("mouseout", function (d) {
                tooltip.direction('n').hide(d);
                // Animate circle radius
                d3.select(this).transition().duration(250).attr('r', 3);
            })
    })

    // Update chart on date change
    // ------------------------------

    // Set variable for updating visualization
    var lineUpdate = d3.transition(lines);

    // Update lines with the full value arrays (see NOTE above)
    lineUpdate.select("path")
        .attr("d", function(d, i) { return line(d.values); });

    // Update circles
    lineUpdate.selectAll("circle")
        .attr("cy",function(d,i){return y(d.value)})
        .attr("cx", function(d,i){return x(d.date)});

    // Update vertical axes
    d3.transition(svg)
        .select(".d3-axis-vertical")
        .call(yAxis);

    // Update horizontal axes
    d3.transition(svg)
        .select(".d3-axis-horizontal")
        .attr("transform", "translate(0," + height + ")")
        .call(xAxis);

    // Resize chart
    // ------------------------------
    // NOTE(review): redraw() runs on every menu change, so each call binds an
    // ADDITIONAL resize/click handler; consider moving these bindings outside
    // redraw() or using .off() first.

    // Call function on window resize
    $(window).on('resize', appSalesResize);

    // Call function on sidebar width change
    $(document).on('click', '.sidebar-control', appSalesResize);

    // Resize function
    //
    // Since D3 doesn't support SVG resize by default,
    // we need to manually specify parts of the graph that need to
    // be updated on window resize
    function appSalesResize() {

        // Layout
        // -------------------------

        // Define width
        width = d3Container.node().getBoundingClientRect().width - margin.left - margin.right;

        // Main svg width
        container.attr("width", width + margin.left + margin.right);

        // Width of appended group
        svg.attr("width", width + margin.left + margin.right);

        // Horizontal range
        x.range([0, width]);

        // Vertical range
        y.range([height, 0]);

        // Chart elements
        // -------------------------

        // Horizontal axis
        svg.select('.d3-axis-horizontal').call(xAxis);

        // Vertical axis
        svg.select('.d3-axis-vertical').call(yAxis.tickSize(0-width));

        // Lines
        svg.selectAll('.d3-line').attr("d", function(d, i) { return line(d.values); });

        // Circles
        svg.selectAll('.d3-line-circle').attr("cx", function(d,i){return x(d.date)})
    }
}
}

// App sales heatmap chart
// ------------------------------

salesHeatmap(); // initialize chart

// Chart setup
function salesHeatmap() {

    // Load data
    // ------------------------------
    d3.csv("assets/demo_data/dashboard/app_sales_heatmap.csv", function(error, data) {

        // Bind data
        // ------------------------------

        // Nest data by the "app" column
        var nested_data = d3.nest().key(function(d) { return d.app; }),
            nest = nested_data.entries(data);

        // Format date
        var format = d3.time.format("%Y/%m/%d %H:%M"),
            formatTime = d3.time.format("%H:%M");

        // Pull out values
        data.forEach(function(d, i) {
            d.date = format.parse(d.date),
            d.value = +d.value
        });

        // Layout setup
        // ------------------------------

        // Define main variables.
        // NOTE(review): the semicolon after d3.select('#sales-heatmap') ends
        // the `var` statement, so margin/width/gridSize/rowGap/height/buckets/
        // colors below are comma-chained IMPLICIT GLOBAL assignments — verify
        // this doesn't clash with the other charts' variables of the same name.
        var d3Container = d3.select('#sales-heatmap');
            margin = { top: 20, right: 0, bottom: 30, left: 0 },
            width = d3Container.node().getBoundingClientRect().width - margin.left - margin.right,
            gridSize = width / new Date(data[data.length - 1].date).getHours(), // dynamically set grid size; assumes the last sample's hour equals the column count — TODO confirm
            rowGap = 40, // vertical gap between rows
            height = (rowGap + gridSize) * (d3.max(nest, function(d,i) {return i+1})) - margin.top,
            buckets = 5, // number of colors in range
            colors = ["#DCEDC8","#C5E1A5","#9CCC65","#7CB342","#558B2F"];

        // Construct scales
        //
        // ------------------------------

        // Horizontal
        var x = d3.time.scale().range([0, width]);

        // Vertical
        var y = d3.scale.linear().range([height, 0]);

        // Colors: quantile scale mapping values into the 5 color buckets
        var colorScale = d3.scale.quantile()
            .domain([0, buckets - 1, d3.max(data, function (d) { return d.value; })])
            .range(colors);

        // Set input domains
        // ------------------------------

        // Horizontal
        x.domain([new Date(data[0].date), d3.time.hour.offset(new Date(data[data.length - 1].date), 1)]);

        // Vertical.
        // NOTE(review): d.app is a string (the app name) — this domain looks
        // meaningless, and y appears unused below; rows are placed via the
        // group transform instead. Candidate for removal after verification.
        y.domain([0, d3.max(data, function(d) { return d.app; })]);

        // Create chart
        // ------------------------------

        // Container
        var container = d3Container.append('svg');

        // SVG element
        var svg = container
            .attr('width', width + margin.left + margin.right)
            .attr("height", height + margin.bottom)
            .append("g")
            .attr("transform", "translate(" + margin.left + "," + margin.top + ")");

        //
        // Append chart elements
        //

        // App groups
        // ------------------------------

        // Add groups for each app, one row per app
        var hourGroup = svg.selectAll('.hour-group')
            .data(nest)
            .enter()
            .append('g')
            .attr('class', 'hour-group')
            .attr("transform", function(d, i) {
                return "translate(0, " + ((gridSize + rowGap) * i) +")";
            });

        // Add app name
        hourGroup
            .append("text")
            .attr('class', 'app-label')
            .attr('x', 0)
            .attr('y', -(margin.top/2))
            .text(function (d, i) {
                return d.key;
            });

        // Sales count text
        hourGroup
            .append("text")
            .attr('class', 'sales-count')
            .attr('x', width)
            .attr('y', -(margin.top/2))
            .style('text-anchor', 'end')
            .text(function (d, i) {
                return d3.sum(d.values, function(d) { return d.value; }) + " sales today"
            });

        // Add map elements
        // ------------------------------

        // Add map squares
        var heatMap = hourGroup.selectAll(".heatmap-hour")
            .data(function(d) {return d.values})
            .enter()
            .append("rect")
            .attr("x", function(d,i) { return x(d.date); })
            .attr("y", 0)
            .attr("class", "heatmap-hour")
            .attr("width", gridSize)
            .attr("height", gridSize)
            .style("fill", '#fff')
            .style('stroke', '#fff')
            .style('cursor', 'pointer')
            .style('shape-rendering',
                'crispEdges');

        // Add loading transition
        heatMap.transition()
            .duration(250)
            .delay(function(d, i) { return i * 20; })
            .style("fill", function(d) { return colorScale(d.value); })

        // Add user interaction: hovering a square shows that hour's count in
        // the row header; mouseout restores the daily total
        hourGroup.each(function(d) {
            heatMap
                .on("mouseover", function (d, i) {
                    d3.select(this).style('opacity', 0.75);
                    d3.select(this.parentNode).select('.sales-count').text(function(d) {
                        return d.values[i].value + " sales at " + formatTime(d.values[i].date);
                    })
                })
                .on("mouseout", function (d, i) {
                    d3.select(this).style('opacity', 1);
                    d3.select(this.parentNode).select('.sales-count').text(function (d, i) {
                        return d3.sum(d.values, function(d) { return d.value; }) + " sales today"
                    })
                })
        })

        // Add legend
        // ------------------------------

        // Get min and max values.
        // NOTE(review): recomputing d3.max/d3.min inside forEach is O(n^2);
        // computing them once outside the loop gives the same result.
        var minValue, maxValue;
        data.forEach(function(d, i) {
            maxValue = d3.max(data, function (d) { return d.value; });
            minValue = d3.min(data, function (d) { return d.value; });
        });

        // Place legend inside separate group, centered below the rows
        var legendGroup = svg.append('g')
            .attr('class', 'legend-group')
            .attr('width', width)
            .attr("transform", "translate(" + ((width/2) - ((buckets * gridSize))/2) + "," + (height + (margin.bottom - margin.top)) + ")");

        // Then group legend elements
        var legend = legendGroup.selectAll(".heatmap-legend")
            .data([0].concat(colorScale.quantiles()), function(d) { return d; })
            .enter()
            .append("g")
            .attr("class", "heatmap-legend");

        // Add legend items
        legend.append("rect")
            .attr('class', 'heatmap-legend-item')
            .attr("x", function(d, i) { return gridSize * i; })
            .attr("y", -8)
            .attr("width", gridSize)
            .attr("height", 5)
            .style('stroke', '#fff')
            .style('shape-rendering', 'crispEdges')
            .style("fill", function(d, i) { return colors[i]; });

        // Add min value text label
        legendGroup.append("text")
            .attr("class", "min-legend-value")
            .attr("x", -10)
            .attr("y", -2)
            .style('text-anchor', 'end')
            .style('font-size', 11)
            .style('fill', '#999')
            .text(minValue);

        // Add max value text label
        legendGroup.append("text")
            .attr("class",
                "max-legend-value")
            .attr("x", (buckets * gridSize) + 10)
            .attr("y", -2)
            .style('font-size', 11)
            .style('fill', '#999')
            .text(maxValue);

        // Resize chart
        // ------------------------------

        // Call function on window resize
        $(window).on('resize', resizeHeatmap);

        // Call function on sidebar width change
        $(document).on('click', '.sidebar-control', resizeHeatmap);

        // Resize function
        //
        // Since D3 doesn't support SVG resize by default,
        // we need to manually specify parts of the graph that need to
        // be updated on window resize
        function resizeHeatmap() {

            // Layout
            // -------------------------

            // Width (comma-chained with gridSize/height below into one statement)
            width = d3Container.node().getBoundingClientRect().width - margin.left - margin.right,

            // Grid size
            gridSize = width / new Date(data[data.length - 1].date).getHours(),

            // Height
            height = (rowGap + gridSize) * (d3.max(nest, function(d,i) {return i+1})) - margin.top,

            // Main svg width
            container.attr("width", width + margin.left + margin.right).attr("height", height + margin.bottom);

            // Width of appended group
            svg.attr("width", width + margin.left + margin.right).attr("height", height + margin.bottom);

            // Horizontal range
            x.range([0, width]);

            // Chart elements
            // -------------------------

            // Groups for each app
            svg.selectAll('.hour-group')
                .attr("transform", function(d, i) {
                    return "translate(0, " + ((gridSize + rowGap) * i) +")";
                });

            // Map squares
            svg.selectAll(".heatmap-hour")
                .attr("width", gridSize)
                .attr("height", gridSize)
                .attr("x", function(d,i) { return x(d.date); });

            // Legend group
            svg.selectAll('.legend-group')
                .attr("transform", "translate(" + ((width/2) - ((buckets * gridSize))/2) + "," + (height + margin.bottom - margin.top) + ")");

            // Sales count text
            svg.selectAll('.sales-count')
                .attr("x", width);

            // Legend item
            svg.selectAll('.heatmap-legend-item')
                .attr("width", gridSize)
                .attr("x", function(d, i) { return gridSize * i; });

            // Max value text label
            svg.selectAll('.max-legend-value')
                .attr("x", (buckets * gridSize) + 10);
        }
    });
}

// Monthly app sales area
// chart
// ------------------------------

monthlySalesArea("#monthly-sales-stats", 100, '#4DB6AC'); // initialize chart

// Chart setup: area chart with crosshair tooltip.
// element - container selector; height - total chart height in px;
// color - fill color for the area path.
function monthlySalesArea(element, height, color) {

    // Basic setup
    // ------------------------------

    // Define main variables
    var d3Container = d3.select(element),
        margin = {top: 20, right: 35, bottom: 40, left: 35},
        width = d3Container.node().getBoundingClientRect().width - margin.left - margin.right,
        height = height - margin.top - margin.bottom;

    // Date and time format
    var parseDate = d3.time.format( '%Y-%m-%d' ).parse,
        bisectDate = d3.bisector(function(d) { return d.date; }).left,
        formatDate = d3.time.format("%b %d");

    // Create SVG
    // ------------------------------

    // Container
    var container = d3Container.append('svg');

    // SVG element
    var svg = container
        .attr('width', width + margin.left + margin.right)
        .attr('height', height + margin.top + margin.bottom)
        .append("g")
        .attr("transform", "translate(" + margin.left + "," + margin.top + ")")

    // Construct chart layout
    // ------------------------------

    // Area
    var area = d3.svg.area()
        .x(function(d) { return x(d.date); })
        .y0(height)
        .y1(function(d) { return y(d.value); })
        .interpolate('monotone')

    // Construct scales
    // ------------------------------

    // Horizontal
    var x = d3.time.scale().range([0, width ]);

    // Vertical
    var y = d3.scale.linear().range([height, 0]);

    // Create axes
    // ------------------------------

    // Horizontal
    var xAxis = d3.svg.axis()
        .scale(x)
        .orient("bottom")
        .ticks(d3.time.days, 6)
        .innerTickSize(4)
        .tickPadding(8)
        .tickFormat(d3.time.format("%b %d"));

    // Load data
    // ------------------------------
    d3.json("assets/demo_data/dashboard/monthly_sales.json", function (error, data) {

        // Show what's wrong if error
        if (error) return console.error(error);

        // Pull out values
        data.forEach(function (d) {
            d.date = parseDate(d.date);
            d.value = +d.value;
        });

        // Get the maximum value in the given array
        var maxY = d3.max(data, function(d) {
            return d.value;
        });

        // Reset start data
        // for animation (same dates, zero values — the load animation tweens from this)
        var startData = data.map(function(datum) {
            return {
                date: datum.date,
                value: 0
            };
        });

        // Set input domains
        // ------------------------------

        // Horizontal
        x.domain(d3.extent(data, function(d, i) { return d.date; }));

        // Vertical
        y.domain([0, d3.max( data, function(d) { return d.value; })]);

        //
        // Append chart elements
        //

        // Append axes
        // -------------------------

        // Horizontal
        var horizontalAxis = svg.append("g")
            .attr("class", "d3-axis d3-axis-horizontal d3-axis-solid")
            .attr("transform", "translate(0," + height + ")")
            .call(xAxis);

        // Add extra subticks for hidden hours
        horizontalAxis.selectAll(".d3-axis-subticks")
            .data(x.ticks(d3.time.days), function(d) { return d; })
            .enter()
            .append("line")
            .attr("class", "d3-axis-subticks")
            .attr("y1", 0)
            .attr("y2", 4)
            .attr("x1", x)
            .attr("x2", x);

        // Append area
        // -------------------------

        // Add area path
        svg.append("path")
            .datum(data)
            .attr("class", "d3-area")
            .attr("d", area)
            .style('fill', color)
            .transition() // begin animation
            .duration(1000)
            .attrTween('d', function() {
                var interpolator = d3.interpolateArray(startData, data);
                return function (t) {
                    return area(interpolator (t));
                }
            });

        // Append crosshair and tooltip
        // -------------------------

        //
        // Line
        //

        // Line group
        var focusLine = svg.append("g")
            .attr("class", "d3-crosshair-line")
            .style("display", "none");

        // Line element
        focusLine.append("line")
            .attr("class", "vertical-crosshair")
            .attr("y1", 0)
            .attr("y2", -maxY)
            .style("stroke", "#e5e5e5")
            .style('shape-rendering', 'crispEdges')

        //
        // Pointer
        //

        // Pointer group
        var focusPointer = svg.append("g")
            .attr("class", "d3-crosshair-pointer")
            .style("display", "none");

        // Pointer element
        focusPointer.append("circle")
            .attr("r", 3)
            .style("fill", "#fff")
            .style('stroke', color)
            .style('stroke-width', 1)

        //
        // Text
        //

        // Text group
        var focusText = svg.append("g")
            .attr("class", "d3-crosshair-text")
            .style("display", "none");

        // Text element
        focusText.append("text")
            .attr("dy", -10)
            .style('font-size', 12);

        //
        // Overlay with events
        //
        svg.append("rect")
            .attr("class", "d3-crosshair-overlay")
            .style('fill', 'none')
            .style('pointer-events', 'all')
            .attr("width", width)
            .attr("height", height)
            .on("mouseover", function() {
                focusPointer.style("display", null);
                focusLine.style("display", null)
                focusText.style("display", null);
            })
            .on("mouseout", function() {
                focusPointer.style("display", "none");
                focusLine.style("display", "none");
                focusText.style("display", "none");
            })
            .on("mousemove", mousemove);

        // Display tooltip on mousemove: snap the crosshair to the data point
        // nearest to the cursor's x position.
        function mousemove() {

            // Define main variables.
            // NOTE(review): mousey is unused; and when the cursor sits left of
            // the first point, i can be 0 so d0 is undefined — verify bisect
            // cannot return 0 here, or guard d0/d1.
            var mouse = d3.mouse(this),
                mousex = mouse[0],
                mousey = mouse[1],
                x0 = x.invert(mousex),
                i = bisectDate(data, x0),
                d0 = data[i - 1],
                d1 = data[i],
                d = x0 - d0.date > d1.date - x0 ? d1 : d0;

            // Move line
            focusLine.attr("transform", "translate(" + x(d.date) + "," + height + ")");

            // Move pointer
            focusPointer.attr("transform", "translate(" + x(d.date) + "," + y(d.value) + ")");

            // Reverse tooltip at the end point
            if(mousex >= (d3Container.node().getBoundingClientRect().width - focusText.select('text').node().getBoundingClientRect().width - margin.right - margin.left)) {
                focusText.select("text").attr('text-anchor', 'end').attr("x", function () {
                    return (x(d.date) - 15) + "px"
                }).text(formatDate(d.date) + " - " + d.value + " sales");
            }
            else {
                focusText.select("text").attr('text-anchor', 'start').attr("x", function () {
                    return (x(d.date) + 15) + "px"
                }).text(formatDate(d.date) + " - " + d.value + " sales");
            }
        }

        // Resize chart
        // ------------------------------

        // Call function on window resize
        $(window).on('resize', monthlySalesAreaResize);

        // Call function on sidebar width change
        $(document).on('click', '.sidebar-control', monthlySalesAreaResize);

        // Resize function
        //
        // Since D3 doesn't support SVG resize by default,
        // we need to manually specify parts of the graph that need to
        // be updated on window resize
        function monthlySalesAreaResize() {

            // Layout variables
            width =
            d3Container.node().getBoundingClientRect().width - margin.left - margin.right;

            // Layout
            // -------------------------

            // Main svg width
            container.attr("width", width + margin.left + margin.right);

            // Width of appended group
            svg.attr("width", width + margin.left + margin.right);

            // Axes
            // -------------------------

            // Horizontal range
            x.range([0, width]);

            // Horizontal axis
            svg.selectAll('.d3-axis-horizontal').call(xAxis);

            // Horizontal axis subticks
            svg.selectAll('.d3-axis-subticks').attr("x1", x).attr("x2", x);

            // Chart elements
            // -------------------------

            // Area path
            svg.selectAll('.d3-area').datum( data ).attr("d", area);

            // Crosshair
            svg.selectAll('.d3-crosshair-overlay').attr("width", width);
        }
    });
}

// Messages area chart
// ------------------------------

messagesArea("#messages-stats", 40, '#5C6BC0'); // initialize chart

// Chart setup: compact sparkline-style area chart (no axes/tooltip).
// NOTE(review): reuses monthly_sales.json — the same data file as the
// monthly sales chart above; confirm that's intentional (demo data).
function messagesArea(element, height, color) {

    // Basic setup
    // ------------------------------

    // Define main variables
    var d3Container = d3.select(element),
        margin = {top: 0, right: 0, bottom: 0, left: 0},
        width = d3Container.node().getBoundingClientRect().width - margin.left - margin.right,
        height = height - margin.top - margin.bottom;

    // Date and time format
    var parseDate = d3.time.format( '%Y-%m-%d' ).parse;

    // Create SVG
    // ------------------------------

    // Container
    var container = d3Container.append('svg');

    // SVG element
    var svg = container
        .attr('width', width + margin.left + margin.right)
        .attr('height', height + margin.top + margin.bottom)
        .append("g")
        .attr("transform", "translate(" + margin.left + "," + margin.top + ")")

    // Construct chart layout
    // ------------------------------

    // Area
    var area = d3.svg.area()
        .x(function(d) { return x(d.date); })
        .y0(height)
        .y1(function(d) { return y(d.value); })
        .interpolate('monotone')

    // Construct scales
    // ------------------------------

    // Horizontal
    var x = d3.time.scale().range([0, width ]);

    // Vertical
    var y = d3.scale.linear().range([height, 0]);

    // Load data
    //
    // ------------------------------
    d3.json("assets/demo_data/dashboard/monthly_sales.json", function (error, data) {

        // Show what's wrong if error
        if (error) return console.error(error);

        // Pull out values
        data.forEach(function (d) {
            d.date = parseDate(d.date);
            d.value = +d.value;
        });

        // Get the maximum value in the given array
        var maxY = d3.max(data, function(d) {
            return d.value;
        });

        // Reset start data for animation
        var startData = data.map(function(datum) {
            return {
                date: datum.date,
                value: 0
            };
        });

        // Set input domains
        // ------------------------------

        // Horizontal
        x.domain(d3.extent(data, function(d, i) { return d.date; }));

        // Vertical
        y.domain([0, d3.max( data, function(d) { return d.value; })]);

        //
        // Append chart elements
        //

        // Add area path
        svg.append("path")
            .datum(data)
            .attr("class", "d3-area")
            .style('fill', color)
            .attr("d", area)
            .transition() // begin animation
            .duration(1000)
            .attrTween('d', function() {
                var interpolator = d3.interpolateArray(startData, data);
                return function (t) {
                    return area(interpolator (t));
                }
            });

        // Resize chart
        // ------------------------------

        // Call function on window resize
        $(window).on('resize', messagesAreaResize);

        // Call function on sidebar width change
        $(document).on('click', '.sidebar-control', messagesAreaResize);

        // Resize function
        //
        // Since D3 doesn't support SVG resize by default,
        // we need to manually specify parts of the graph that need to
        // be updated on window resize
        function messagesAreaResize() {

            // Layout variables
            width = d3Container.node().getBoundingClientRect().width - margin.left - margin.right;

            // Layout
            // -------------------------

            // Main svg width
            container.attr("width", width + margin.left + margin.right);

            // Width of appended group
            svg.attr("width", width + margin.left + margin.right);

            // Horizontal range
            x.range([0, width]);

            // Chart elements
            // -------------------------

            // Area path
            svg.selectAll('.d3-area').datum( data ).attr("d", area);
        }
    });
}

// Sparklines
//
// ------------------------------

// Initialize chart
sparkline("#new-visitors", "line", 30, 35, "basis", 750, 2000, "#26A69A");
sparkline("#new-sessions", "line", 30, 35, "basis", 750, 2000, "#FF7043");
sparkline("#total-online", "line", 30, 35, "basis", 750, 2000, "#5C6BC0");
sparkline("#server-load", "area", 30, 50, "basis", 750, 2000, "rgba(255,255,255,0.5)");

// Chart setup: live-updating random sparkline.
// element - container selector; chartType - "line" or "area";
// qty - number of points; height - total height in px;
// interpolation - d3 interpolation mode; duration - slide animation ms;
// interval - data refresh period ms; color - stroke/fill color.
function sparkline(element, chartType, qty, height, interpolation, duration, interval, color) {

    // Basic setup
    // ------------------------------

    // Define main variables
    var d3Container = d3.select(element),
        margin = {top: 0, right: 0, bottom: 0, left: 0},
        width = d3Container.node().getBoundingClientRect().width - margin.left - margin.right,
        height = height - margin.top - margin.bottom;

    // Generate random data (for demo only)
    var data = [];
    for (var i=0; i < qty; i++) {
        data.push(Math.floor(Math.random() * qty) + 5)
    }

    // Construct scales
    // ------------------------------

    // Horizontal
    var x = d3.scale.linear().range([0, width]);

    // Vertical
    var y = d3.scale.linear().range([height - 5, 5]);

    // Set input domains
    // ------------------------------

    // Horizontal (clipped so the slide animation stays smooth at the edges)
    x.domain([1, qty - 3])

    // Vertical
    y.domain([0, qty])

    // Construct chart layout
    // ------------------------------

    // Line
    var line = d3.svg.line()
        .interpolate(interpolation)
        .x(function(d, i) { return x(i); })
        .y(function(d, i) { return y(d); });

    // Area
    var area = d3.svg.area()
        .interpolate(interpolation)
        .x(function(d,i) { return x(i); })
        .y0(height)
        .y1(function(d) { return y(d); });

    // Create SVG
    // ------------------------------

    // Container
    var container = d3Container.append('svg');

    // SVG element
    var svg = container
        .attr('width', width + margin.left + margin.right)
        .attr('height', height + margin.top + margin.bottom)
        .append("g")
        .attr("transform", "translate(" + margin.left + "," + margin.top + ")");

    // Add mask for animation
    // ------------------------------

    // Add clip path (id derived from the element selector to stay unique per instance)
    var clip = svg.append("defs")
        .append("clipPath")
        .attr('id', function(d, i) { return "load-clip-" + element.substring(1) })

    // Add clip shape
    var clips = clip.append("rect")
        .attr('class', 'load-clip')
        .attr("width", 0)
        .attr("height", height);

    // Animate mask
    clips
        .transition()
        .duration(1000)
        .ease('linear')
        .attr("width", width);

    //
    // Append chart elements
    //

    // Main path
    var path = svg.append("g")
        .attr("clip-path", function(d, i) { return "url(#load-clip-" + element.substring(1) + ")"})
        .append("path")
        .datum(data)
        .attr("transform", "translate(" + x(0) + ",0)");

    // Add path based on chart type
    if(chartType == "area") {
        path.attr("d", area).attr('class', 'd3-area').style("fill", color); // area
    }
    else {
        path.attr("d", line).attr("class", "d3-line d3-line-medium").style('stroke', color); // line
    }

    // Animate path
    path
        .style('opacity', 0)
        .transition()
        .duration(750)
        .style('opacity', 1);

    // Set update interval. For demo only.
    // NOTE(review): the interval handle is not kept, so this timer can never
    // be cleared and keeps firing even if the element is removed.
    // ------------------------------
    setInterval(function() {

        // push a new data point onto the back
        data.push(Math.floor(Math.random() * qty) + 5);

        // pop the old data point off the front
        data.shift();

        update();
    }, interval);

    // Update random data.
    // For demo only
    // ------------------------------
    function update() {

        // Redraw the path and slide it to the left
        path
            .attr("transform", null)
            .transition()
            .duration(duration)
            .ease("linear")
            .attr("transform", "translate(" + x(0) + ",0)");

        // Update path type
        if(chartType == "area") {
            path.attr("d", area).attr('class', 'd3-area').style("fill", color)
        }
        else {
            path.attr("d", line).attr("class", "d3-line d3-line-medium").style('stroke', color);
        }
    }

    // Resize chart
    // ------------------------------

    // Call function on window resize
    $(window).on('resize', resizeSparklines);

    // Call function on sidebar width change
    $(document).on('click', '.sidebar-control', resizeSparklines);

    // Resize function
    //
    // Since D3 doesn't support SVG resize by default,
    // we need to manually specify parts of the graph that need to
    // be updated on window resize
    function resizeSparklines() {

        // Layout variables
        width = d3Container.node().getBoundingClientRect().width - margin.left - margin.right;

        // Layout
        // -------------------------

        // Main svg width
        container.attr("width", width + margin.left + margin.right);

        // Width of appended group
        svg.attr("width", width + margin.left + margin.right);

        // Horizontal range
        x.range([0, width]);

        // Chart elements
        // -------------------------

        // Clip mask
        clips.attr("width", width);

        // Line
        svg.select(".d3-line").attr("d", line);

        // Area
        svg.select(".d3-area").attr("d", area);
    }
}

// Daily revenue line chart
// ------------------------------

dailyRevenue('#today-revenue', 50); // initialize chart

// Chart setup: small line chart with hard-coded demo data and d3-tip tooltips.
// element - container selector; height - total chart height in px.
function dailyRevenue(element, height) {

    // Basic setup
    // ------------------------------

    // Add data set ("alpha" values are strings; coerced to numbers below)
    var dataset = [
        { "date": "04/13/14", "alpha": "60" },
        { "date": "04/14/14", "alpha": "35" },
        { "date": "04/15/14", "alpha": "65" },
        { "date": "04/16/14", "alpha": "50" },
        { "date": "04/17/14", "alpha": "65" },
        { "date": "04/18/14", "alpha": "20" },
        { "date": "04/19/14", "alpha": "60" }
    ];

    // Main variables
    var d3Container =
        d3.select(element),
        margin = {top: 0, right: 0, bottom: 0, left: 0},
        width = d3Container.node().getBoundingClientRect().width - margin.left - margin.right,
        height = height - margin.top - margin.bottom,
        padding = 20;

    // Format date
    var parseDate = d3.time.format("%m/%d/%y").parse,
        formatDate = d3.time.format("%a, %B %e");

    // Add tooltip
    // ------------------------------
    var tooltip = d3.tip()
        .attr('class', 'd3-tip')
        .html(function (d) {
            return "<ul class='list-unstyled mb-5'>" +
                "<li>" +
                    "<div class='text-size-base mt-5 mb-5'><i class='icon-check2 position-left'></i>" + formatDate(d.date) + "</div>" +
                "</li>" +
                "<li>" +
                    "Sales: &nbsp;" + "<span class='text-semibold pull-right'>" + d.alpha + "</span>" +
                "</li>" +
                "<li>" +
                    "Revenue: &nbsp; " + "<span class='text-semibold pull-right'>" + "$" + (d.alpha * 25).toFixed(2) + "</span>" +
                "</li>" +
            "</ul>";
        });

    // Create chart
    // ------------------------------

    // Add svg element
    var container = d3Container.append('svg');

    // Add SVG group
    var svg = container
        .attr('width', width + margin.left + margin.right)
        .attr('height', height + margin.top + margin.bottom)
        .append("g")
        .attr("transform", "translate(" + margin.left + "," + margin.top + ")")
        .call(tooltip);

    // Load data
    // ------------------------------
    dataset.forEach(function (d) {
        d.date = parseDate(d.date);
        d.alpha = +d.alpha;
    });

    // Construct scales
    // ------------------------------

    // Horizontal
    var x = d3.time.scale()
        .range([padding, width - padding]);

    // Vertical
    var y = d3.scale.linear()
        .range([height, 5]);

    // Set input domains
    // ------------------------------

    // Horizontal
    x.domain(d3.extent(dataset, function (d) { return d.date; }));

    // Vertical.
    // NOTE(review): Math.max(d.alpha) with a single argument is a no-op
    // number coercion — returning d.alpha directly is equivalent.
    y.domain([0, d3.max(dataset, function (d) { return Math.max(d.alpha); })]);

    // Construct chart layout
    // ------------------------------

    // Line
    var line = d3.svg.line()
        .x(function(d) { return x(d.date); })
        .y(function(d) { return y(d.alpha) });

    //
    // Append chart elements
    //

    // Add mask for animation
    //
    // ------------------------------

    // Add clip path
    var clip = svg.append("defs")
        .append("clipPath")
        .attr("id", "clip-line-small");

    // Add clip shape
    var clipRect = clip.append("rect")
        .attr('class', 'clip')
        .attr("width", 0)
        .attr("height", height);

    // Animate mask
    clipRect
        .transition()
        .duration(1000)
        .ease('linear')
        .attr("width", width);

    // Line
    // ------------------------------

    // Path
    var path = svg.append('path')
        .attr({
            'd': line(dataset),
            "clip-path": "url(#clip-line-small)",
            'class': 'd3-line d3-line-medium'
        })
        .style('stroke', '#fff');

    // Animate path.
    // NOTE(review): nothing here has class "line-tickets", so this selection
    // is empty and the transition is a no-op — likely copied from another
    // chart; the reveal is actually done by the clip mask above.
    svg.select('.line-tickets')
        .transition()
        .duration(1000)
        .ease('linear');

    // Add vertical guide lines
    // ------------------------------

    // Bind data
    var guide = svg.append('g')
        .selectAll('.d3-line-guides-group')
        .data(dataset);

    // Append lines
    guide
        .enter()
        .append('line')
        .attr('class', 'd3-line-guides')
        .attr('x1', function (d, i) { return x(d.date); })
        .attr('y1', function (d, i) { return height; })
        .attr('x2', function (d, i) { return x(d.date); })
        .attr('y2', function (d, i) { return height; })
        .style('stroke', 'rgba(255,255,255,0.3)')
        .style('stroke-dasharray', '4,2')
        .style('shape-rendering', 'crispEdges');

    // Animate guide lines (grow upward from the baseline, staggered)
    guide
        .transition()
        .duration(1000)
        .delay(function(d, i) { return i * 150; })
        .attr('y2', function (d, i) { return y(d.alpha); });

    // Alpha app points
    // ------------------------------

    // Add points
    var points = svg.insert('g')
        .selectAll('.d3-line-circle')
        .data(dataset)
        .enter()
        .append('circle')
        .attr('class', 'd3-line-circle d3-line-circle-medium')
        .attr("cx", line.x())
        .attr("cy", line.y())
        .attr("r", 3)
        .style('stroke', '#fff')
        .style('fill', '#29B6F6');

    // Animate points on page load
    points
        .style('opacity', 0)
        .transition()
        .duration(250)
        .ease('linear')
        .delay(1000)
        .style('opacity', 1);

    // Add user interaction
    points
        .on("mouseover", function (d) {
            tooltip.offset([-10, 0]).show(d);

            // Animate circle radius
            d3.select(this).transition().duration(250).attr('r', 4);
        })

        // Hide tooltip
        .on("mouseout", function (d) {
            tooltip.hide(d);

            // Animate circle radius
            d3.select(this).transition().duration(250).attr('r', 3);
        });

    // Change tooltip direction of first point
    d3.select(points[0][0])
        .on("mouseover", function (d) {
            tooltip.offset([0, 10]).direction('e').show(d);

            // Animate circle radius
            d3.select(this).transition().duration(250).attr('r', 4);
        })
        .on("mouseout", function (d) {
            tooltip.direction('n').hide(d);

            // Animate circle radius
            d3.select(this).transition().duration(250).attr('r', 3);
        });

    // Change tooltip direction of last point
    d3.select(points[0][points.size() - 1])
        .on("mouseover", function (d) {
            tooltip.offset([0, -10]).direction('w').show(d);

            // Animate circle radius
            d3.select(this).transition().duration(250).attr('r', 4);
        })
        .on("mouseout", function (d) {
            tooltip.direction('n').hide(d);

            // Animate circle radius
            d3.select(this).transition().duration(250).attr('r', 3);
        })

    // Resize chart
    // ------------------------------

    // Call function on window resize
    $(window).on('resize', revenueResize);

    // Call function on sidebar width change
    $(document).on('click', '.sidebar-control', revenueResize);

    // Resize function
    //
    // Since D3 doesn't support SVG resize by default,
    // we need to manually specify parts of the graph that need to
    // be updated on window resize
    function revenueResize() {

        // Layout variables
        width = d3Container.node().getBoundingClientRect().width - margin.left - margin.right;

        // Layout
        // -------------------------

        // Main svg width
        container.attr("width", width + margin.left + margin.right);

        // Width of appended group
        svg.attr("width", width + margin.left + margin.right);

        // Horizontal range
        x.range([padding, width - padding]);

        // Chart elements
        // -------------------------

        // Mask
        clipRect.attr("width", width);

        // Line path
        svg.selectAll('.d3-line').attr("d", line(dataset));

        // Circles
        svg.selectAll('.d3-line-circle').attr("cx", line.x());

        // Guide lines
        svg.selectAll('.d3-line-guides')
            .attr('x1', function
(d, i) { return x(d.date); }) .attr('x2', function (d, i) { return x(d.date); }); } } // Marketing campaigns progress pie chart // ------------------------------ // Initialize chart progressMeter("#today-progress", 20, 20, '#7986CB'); progressMeter("#yesterday-progress", 20, 20, '#7986CB'); // Chart setup function progressMeter(element, width, height, color) { // Basic setup // ------------------------------ // Main variables var d3Container = d3.select(element), border = 2, radius = Math.min(width / 2, height / 2) - border, twoPi = 2 * Math.PI, progress = $(element).data('progress'), total = 100; // Construct chart layout // ------------------------------ // Arc var arc = d3.svg.arc() .startAngle(0) .innerRadius(0) .outerRadius(radius) .endAngle(function(d) { return (d.value / d.size) * 2 * Math.PI; }) // Create chart // ------------------------------ // Add svg element var container = d3Container.append("svg"); // Add SVG group var svg = container .attr("width", width) .attr("height", height) .append("g") .attr("transform", "translate(" + width / 2 + "," + height / 2 + ")"); // // Append chart elements // // Progress group var meter = svg.append("g") .attr("class", "progress-meter"); // Background meter.append("path") .attr("d", arc.endAngle(twoPi)) .style('fill', '#fff') .style('stroke', color) .style('stroke-width', 1.5); // Foreground var foreground = meter.append("path") .style('fill', color); // Animate foreground path foreground .transition() .ease("cubic-out") .duration(2500) .attrTween("d", arcTween); // Tween arcs function arcTween() { var i = d3.interpolate(0, progress); return function(t) { var currentProgress = progress / (100/t); var endAngle = arc.endAngle(twoPi * (currentProgress)); return arc(i(endAngle)); }; } } // Marketing campaigns donut chart // ------------------------------ // Initialize chart campaignDonut("#campaigns-donut", 42); // Chart setup function campaignDonut(element, size) { // Basic setup // ------------------------------ // Add 
data set var data = [ { "browser": "Google Adwords", "icon": "<i class='icon-google position-left'></i>", "value": 1047, "color" : "#66BB6A" }, { "browser": "Social media", "icon": "<i class='icon-share4 position-left'></i>", "value": 2948, "color": "#9575CD" }, { "browser":"Youtube video", "icon": "<i class='icon-youtube position-left'></i>", "value": 3909, "color": "#FF7043" } ]; // Main variables var d3Container = d3.select(element), distance = 2, // reserve 2px space for mouseover arc moving radius = (size/2) - distance, sum = d3.sum(data, function(d) { return d.value; }) // Tooltip // ------------------------------ var tip = d3.tip() .attr('class', 'd3-tip') .offset([-10, 0]) .direction('e') .html(function (d) { return "<ul class='list-unstyled mb-5'>" + "<li>" + "<div class='text-size-base mb-5 mt-5'>" + d.data.icon + d.data.browser + "</div>" + "</li>" + "<li>" + "Visits: &nbsp;" + "<span class='text-semibold pull-right'>" + d.value + "</span>" + "</li>" + "<li>" + "Share: &nbsp;" + "<span class='text-semibold pull-right'>" + (100 / (sum / d.value)).toFixed(2) + "%" + "</span>" + "</li>" + "</ul>"; }) // Create chart // ------------------------------ // Add svg element var container = d3Container.append("svg").call(tip); // Add SVG group var svg = container .attr("width", size) .attr("height", size) .append("g") .attr("transform", "translate(" + (size / 2) + "," + (size / 2) + ")"); // Construct chart layout // ------------------------------ // Pie var pie = d3.layout.pie() .sort(null) .startAngle(Math.PI) .endAngle(3 * Math.PI) .value(function (d) { return d.value; }); // Arc var arc = d3.svg.arc() .outerRadius(radius) .innerRadius(radius / 2); // // Append chart elements // // Group chart elements var arcGroup = svg.selectAll(".d3-arc") .data(pie(data)) .enter() .append("g") .attr("class", "d3-arc") .style('stroke', '#fff') .style('cursor', 'pointer'); // Append path var arcPath = arcGroup .append("path") .style("fill", function (d) { return d.data.color; 
}); // Add tooltip arcPath .on('mouseover', function (d, i) { // Transition on mouseover d3.select(this) .transition() .duration(500) .ease('elastic') .attr('transform', function (d) { d.midAngle = ((d.endAngle - d.startAngle) / 2) + d.startAngle; var x = Math.sin(d.midAngle) * distance; var y = -Math.cos(d.midAngle) * distance; return 'translate(' + x + ',' + y + ')'; }); }) .on("mousemove", function (d) { // Show tooltip on mousemove tip.show(d) .style("top", (d3.event.pageY - 40) + "px") .style("left", (d3.event.pageX + 30) + "px"); }) .on('mouseout', function (d, i) { // Mouseout transition d3.select(this) .transition() .duration(500) .ease('bounce') .attr('transform', 'translate(0,0)'); // Hide tooltip tip.hide(d); }); // Animate chart on load arcPath .transition() .delay(function(d, i) { return i * 500; }) .duration(500) .attrTween("d", function(d) { var interpolate = d3.interpolate(d.startAngle,d.endAngle); return function(t) { d.endAngle = interpolate(t); return arc(d); }; }); } // Campaign status donut chart // ------------------------------ // Initialize chart campaignStatusPie("#campaign-status-pie", 42); // Chart setup function campaignStatusPie(element, size) { // Basic setup // ------------------------------ // Add data set var data = [ { "status": "Active campaigns", "icon": "<span class='status-mark border-blue-300 position-left'></span>", "value": 439, "color": "#29B6F6" }, { "status": "Closed campaigns", "icon": "<span class='status-mark border-danger-300 position-left'></span>", "value": 290, "color": "#EF5350" }, { "status": "Pending campaigns", "icon": "<span class='status-mark border-success-300 position-left'></span>", "value": 190, "color": "#81C784" }, { "status": "Campaigns on hold", "icon": "<span class='status-mark border-grey-300 position-left'></span>", "value": 148, "color": "#999" } ]; // Main variables var d3Container = d3.select(element), distance = 2, // reserve 2px space for mouseover arc moving radius = (size/2) - distance, sum 
= d3.sum(data, function(d) { return d.value; }) // Tooltip // ------------------------------ var tip = d3.tip() .attr('class', 'd3-tip') .offset([-10, 0]) .direction('e') .html(function (d) { return "<ul class='list-unstyled mb-5'>" + "<li>" + "<div class='text-size-base mb-5 mt-5'>" + d.data.icon + d.data.status + "</div>" + "</li>" + "<li>" + "Total: &nbsp;" + "<span class='text-semibold pull-right'>" + d.value + "</span>" + "</li>" + "<li>" + "Share: &nbsp;" + "<span class='text-semibold pull-right'>" + (100 / (sum / d.value)).toFixed(2) + "%" + "</span>" + "</li>" + "</ul>"; }) // Create chart // ------------------------------ // Add svg element var container = d3Container.append("svg").call(tip); // Add SVG group var svg = container .attr("width", size) .attr("height", size) .append("g") .attr("transform", "translate(" + (size / 2) + "," + (size / 2) + ")"); // Construct chart layout // ------------------------------ // Pie var pie = d3.layout.pie() .sort(null) .startAngle(Math.PI) .endAngle(3 * Math.PI) .value(function (d) { return d.value; }); // Arc var arc = d3.svg.arc() .outerRadius(radius) .innerRadius(radius / 2); // // Append chart elements // // Group chart elements var arcGroup = svg.selectAll(".d3-arc") .data(pie(data)) .enter() .append("g") .attr("class", "d3-arc") .style('stroke', '#fff') .style('cursor', 'pointer'); // Append path var arcPath = arcGroup .append("path") .style("fill", function (d) { return d.data.color; }); // Add tooltip arcPath .on('mouseover', function (d, i) { // Transition on mouseover d3.select(this) .transition() .duration(500) .ease('elastic') .attr('transform', function (d) { d.midAngle = ((d.endAngle - d.startAngle) / 2) + d.startAngle; var x = Math.sin(d.midAngle) * distance; var y = -Math.cos(d.midAngle) * distance; return 'translate(' + x + ',' + y + ')'; }); }) .on("mousemove", function (d) { // Show tooltip on mousemove tip.show(d) .style("top", (d3.event.pageY - 40) + "px") .style("left", (d3.event.pageX + 30) + 
"px"); }) .on('mouseout', function (d, i) { // Mouseout transition d3.select(this) .transition() .duration(500) .ease('bounce') .attr('transform', 'translate(0,0)'); // Hide tooltip tip.hide(d); }); // Animate chart on load arcPath .transition() .delay(function(d, i) { return i * 500; }) .duration(500) .attrTween("d", function(d) { var interpolate = d3.interpolate(d.startAngle,d.endAngle); return function(t) { d.endAngle = interpolate(t); return arc(d); }; }); } // Tickets status donut chart // ------------------------------ // Initialize chart ticketStatusDonut("#tickets-status", 42); // Chart setup function ticketStatusDonut(element, size) { // Basic setup // ------------------------------ // Add data set var data = [ { "status": "Pending tickets", "icon": "<i class='status-mark border-blue-300 position-left'></i>", "value": 295, "color": "#29B6F6" }, { "status": "Resolved tickets", "icon": "<i class='status-mark border-success-300 position-left'></i>", "value": 189, "color": "#66BB6A" }, { "status": "Closed tickets", "icon": "<i class='status-mark border-danger-300 position-left'></i>", "value": 277, "color": "#EF5350" } ]; // Main variables var d3Container = d3.select(element), distance = 2, // reserve 2px space for mouseover arc moving radius = (size/2) - distance, sum = d3.sum(data, function(d) { return d.value; }) // Tooltip // ------------------------------ var tip = d3.tip() .attr('class', 'd3-tip') .offset([-10, 0]) .direction('e') .html(function (d) { return "<ul class='list-unstyled mb-5'>" + "<li>" + "<div class='text-size-base mb-5 mt-5'>" + d.data.icon + d.data.status + "</div>" + "</li>" + "<li>" + "Total: &nbsp;" + "<span class='text-semibold pull-right'>" + d.value + "</span>" + "</li>" + "<li>" + "Share: &nbsp;" + "<span class='text-semibold pull-right'>" + (100 / (sum / d.value)).toFixed(2) + "%" + "</span>" + "</li>" + "</ul>"; }) // Create chart // ------------------------------ // Add svg element var container = 
d3Container.append("svg").call(tip); // Add SVG group var svg = container .attr("width", size) .attr("height", size) .append("g") .attr("transform", "translate(" + (size / 2) + "," + (size / 2) + ")"); // Construct chart layout // ------------------------------ // Pie var pie = d3.layout.pie() .sort(null) .startAngle(Math.PI) .endAngle(3 * Math.PI) .value(function (d) { return d.value; }); // Arc var arc = d3.svg.arc() .outerRadius(radius) .innerRadius(radius / 2); // // Append chart elements // // Group chart elements var arcGroup = svg.selectAll(".d3-arc") .data(pie(data)) .enter() .append("g") .attr("class", "d3-arc") .style('stroke', '#fff') .style('cursor', 'pointer'); // Append path var arcPath = arcGroup .append("path") .style("fill", function (d) { return d.data.color; }); // Add tooltip arcPath .on('mouseover', function (d, i) { // Transition on mouseover d3.select(this) .transition() .duration(500) .ease('elastic') .attr('transform', function (d) { d.midAngle = ((d.endAngle - d.startAngle) / 2) + d.startAngle; var x = Math.sin(d.midAngle) * distance; var y = -Math.cos(d.midAngle) * distance; return 'translate(' + x + ',' + y + ')'; }); }) .on("mousemove", function (d) { // Show tooltip on mousemove tip.show(d) .style("top", (d3.event.pageY - 40) + "px") .style("left", (d3.event.pageX + 30) + "px"); }) .on('mouseout', function (d, i) { // Mouseout transition d3.select(this) .transition() .duration(500) .ease('bounce') .attr('transform', 'translate(0,0)'); // Hide tooltip tip.hide(d); }); // Animate chart on load arcPath .transition() .delay(function(d, i) { return i * 500; }) .duration(500) .attrTween("d", function(d) { var interpolate = d3.interpolate(d.startAngle,d.endAngle); return function(t) { d.endAngle = interpolate(t); return arc(d); }; }); } // Bar charts with random data // ------------------------------ // Initialize charts generateBarChart("#hours-available-bars", 24, 40, true, "elastic", 1200, 50, "#EC407A", "hours"); 
generateBarChart("#goal-bars", 24, 40, true, "elastic", 1200, 50, "#5C6BC0", "goal"); generateBarChart("#members-online", 24, 50, true, "elastic", 1200, 50, "rgba(255,255,255,0.5)", "members"); // Chart setup function generateBarChart(element, barQty, height, animate, easing, duration, delay, color, tooltip) { // Basic setup // ------------------------------ // Add data set var bardata = []; for (var i=0; i < barQty; i++) { bardata.push(Math.round(Math.random()*10) + 10) } // Main variables var d3Container = d3.select(element), width = d3Container.node().getBoundingClientRect().width; // Construct scales // ------------------------------ // Horizontal var x = d3.scale.ordinal() .rangeBands([0, width], 0.3) // Vertical var y = d3.scale.linear() .range([0, height]); // Set input domains // ------------------------------ // Horizontal x.domain(d3.range(0, bardata.length)) // Vertical y.domain([0, d3.max(bardata)]) // Create chart // ------------------------------ // Add svg element var container = d3Container.append('svg'); // Add SVG group var svg = container .attr('width', width) .attr('height', height) .append('g'); // // Append chart elements // // Bars var bars = svg.selectAll('rect') .data(bardata) .enter() .append('rect') .attr('class', 'd3-random-bars') .attr('width', x.rangeBand()) .attr('x', function(d,i) { return x(i); }) .style('fill', color); // Tooltip // ------------------------------ var tip = d3.tip() .attr('class', 'd3-tip') .offset([-10, 0]); // Show and hide if(tooltip == "hours" || tooltip == "goal" || tooltip == "members") { bars.call(tip) .on('mouseover', tip.show) .on('mouseout', tip.hide); } // Daily meetings tooltip content if(tooltip == "hours") { tip.html(function (d, i) { return "<div class='text-center'>" + "<h6 class='no-margin'>" + d + "</h6>" + "<span class='text-size-small'>meetings</span>" + "<div class='text-size-small'>" + i + ":00" + "</div>" + "</div>" }); } // Statements tooltip content if(tooltip == "goal") { tip.html(function 
(d, i) { return "<div class='text-center'>" + "<h6 class='no-margin'>" + d + "</h6>" + "<span class='text-size-small'>statements</span>" + "<div class='text-size-small'>" + i + ":00" + "</div>" + "</div>" }); } // Online members tooltip content if(tooltip == "members") { tip.html(function (d, i) { return "<div class='text-center'>" + "<h6 class='no-margin'>" + d + "0" + "</h6>" + "<span class='text-size-small'>members</span>" + "<div class='text-size-small'>" + i + ":00" + "</div>" + "</div>" }); } // Bar loading animation // ------------------------------ // Choose between animated or static if(animate) { withAnimation(); } else { withoutAnimation(); } // Animate on load function withAnimation() { bars .attr('height', 0) .attr('y', height) .transition() .attr('height', function(d) { return y(d); }) .attr('y', function(d) { return height - y(d); }) .delay(function(d, i) { return i * delay; }) .duration(duration) .ease(easing); } // Load without animateion function withoutAnimation() { bars .attr('height', function(d) { return y(d); }) .attr('y', function(d) { return height - y(d); }) } // Resize chart // ------------------------------ // Call function on window resize $(window).on('resize', barsResize); // Call function on sidebar width change $(document).on('click', '.sidebar-control', barsResize); // Resize function // // Since D3 doesn't support SVG resize by default, // we need to manually specify parts of the graph that need to // be updated on window resize function barsResize() { // Layout variables width = d3Container.node().getBoundingClientRect().width; // Layout // ------------------------- // Main svg width container.attr("width", width); // Width of appended group svg.attr("width", width); // Horizontal range x.rangeBands([0, width], 0.3); // Chart elements // ------------------------- // Bars svg.selectAll('.d3-random-bars') .attr('width', x.rangeBand()) .attr('x', function(d,i) { return x(i); }); } } // Animated progress chart // 
------------------------------ // Initialize charts progressCounter('#hours-available-progress', 38, 2, "#F06292", 0.68, "icon-watch text-pink-400", 'Hours available', '64% average') progressCounter('#goal-progress', 38, 2, "#5C6BC0", 0.82, "icon-trophy3 text-indigo-400", 'Productivity goal', '87% average') // Chart setup function progressCounter(element, radius, border, color, end, iconClass, textTitle, textAverage) { // Basic setup // ------------------------------ // Main variables var d3Container = d3.select(element), startPercent = 0, iconSize = 32, endPercent = end, twoPi = Math.PI * 2, formatPercent = d3.format('.0%'), boxSize = radius * 2; // Values count var count = Math.abs((endPercent - startPercent) / 0.01); // Values step var step = endPercent < startPercent ? -0.01 : 0.01; // Create chart // ------------------------------ // Add SVG element var container = d3Container.append('svg'); // Add SVG group var svg = container .attr('width', boxSize) .attr('height', boxSize) .append('g') .attr('transform', 'translate(' + (boxSize / 2) + ',' + (boxSize / 2) + ')'); // Construct chart layout // ------------------------------ // Arc var arc = d3.svg.arc() .startAngle(0) .innerRadius(radius) .outerRadius(radius - border); // // Append chart elements // // Paths // ------------------------------ // Background path svg.append('path') .attr('class', 'd3-progress-background') .attr('d', arc.endAngle(twoPi)) .style('fill', '#eee'); // Foreground path var foreground = svg.append('path') .attr('class', 'd3-progress-foreground') .attr('filter', 'url(#blur)') .style('fill', color) .style('stroke', color); // Front path var front = svg.append('path') .attr('class', 'd3-progress-front') .style('fill', color) .style('fill-opacity', 1); // Text // ------------------------------ // Percentage text value var numberText = d3.select(element) .append('h2') .attr('class', 'mt-15 mb-5') // Icon d3.select(element) .append("i") .attr("class", iconClass + " counter-icon") 
.attr('style', 'top: ' + ((boxSize - iconSize) / 2) + 'px'); // Title d3.select(element) .append('div') .text(textTitle); // Subtitle d3.select(element) .append('div') .attr('class', 'text-size-small text-muted') .text(textAverage); // Animation // ------------------------------ // Animate path function updateProgress(progress) { foreground.attr('d', arc.endAngle(twoPi * progress)); front.attr('d', arc.endAngle(twoPi * progress)); numberText.text(formatPercent(progress)); } // Animate text var progress = startPercent; (function loops() { updateProgress(progress); if (count > 0) { count--; progress += step; setTimeout(loops, 10); } })(); } // Bullet charts // ------------------------------ // Initialize chart bulletChart("#bullets", 80); // Chart setup function bulletChart(element, height) { // Bullet chart core // ------------------------------ bulletCore(); function bulletCore() { // Construct d3.bullet = function() { // Default layout variables var orient = "left", reverse = false, duration = 750, ranges = bulletRanges, markers = bulletMarkers, measures = bulletMeasures, height = 30, tickFormat = null; // For each small multiple… function bullet(g) { g.each(function(d, i) { // Define variables var rangez = ranges.call(this, d, i).slice().sort(d3.descending), markerz = markers.call(this, d, i).slice().sort(d3.descending), measurez = measures.call(this, d, i).slice().sort(d3.descending), g = d3.select(this); // Compute the new x-scale. var x1 = d3.scale.linear() .domain([0, Math.max(rangez[0], markerz[0], measurez[0])]) .range(reverse ? [width, 0] : [0, width]); // Retrieve the old x-scale, if this is an update. var x0 = this.__chart__ || d3.scale.linear() .domain([0, Infinity]) .range(x1.range()); // Stash the new scale. this.__chart__ = x1; // Derive width-scales from the x-scales. 
var w0 = bulletWidth(x0), w1 = bulletWidth(x1); // Setup range // ------------------------------ // Update the range rects var range = g.selectAll(".bullet-range") .data(rangez); // Append range rect range.enter() .append("rect") .attr("class", function(d, i) { return "bullet-range bullet-range-" + (i + 1); }) .attr("width", w0) .attr("height", height) .attr('rx', 2) .attr("x", reverse ? x0 : 0) // Add loading animation .transition() .duration(duration) .attr("width", w1) .attr("x", reverse ? x1 : 0); // Add update animation range.transition() .duration(duration) .attr("x", reverse ? x1 : 0) .attr("width", w1) .attr("height", height); // Setup measures // ------------------------------ // Update the measure rects var measure = g.selectAll(".bullet-measure") .data(measurez); // Append measure rect measure.enter() .append("rect") .attr("class", function(d, i) { return "bullet-measure bullet-measure-" + (i + 1); }) .attr("width", w0) .attr("height", height / 5) .attr("x", reverse ? x0 : 0) .attr("y", height / 2.5) .style("shape-rendering", "crispEdges"); // Add loading animation measure.transition() .duration(duration) .attr("width", w1) .attr("x", reverse ? x1 : 0); // Add update animation measure.transition() .duration(duration) .attr("width", w1) .attr("height", height / 5) .attr("x", reverse ? 
x1 : 0) .attr("y", height / 2.5); // Setup markers // ------------------------------ // Update the marker lines var marker = g.selectAll(".bullet-marker") .data(markerz); // Append marker line marker.enter() .append("line") .attr("class", function(d, i) { return "bullet-marker bullet-marker-" + (i + 1); }) .attr("x1", x0) .attr("x2", x0) .attr("y1", height / 6) .attr("y2", height * 5 / 6); // Add loading animation marker.transition() .duration(duration) .attr("x1", x1) .attr("x2", x1); // Add update animation marker.transition() .duration(duration) .attr("x1", x1) .attr("x2", x1) .attr("y1", height / 6) .attr("y2", height * 5 / 6); // Setup axes // ------------------------------ // Compute the tick format. var format = tickFormat || x1.tickFormat(8); // Update the tick groups. var tick = g.selectAll(".bullet-tick") .data(x1.ticks(8), function(d) { return this.textContent || format(d); }); // Initialize the ticks with the old scale, x0. var tickEnter = tick.enter() .append("g") .attr("class", "bullet-tick") .attr("transform", bulletTranslate(x0)) .style("opacity", 1e-6); // Append line tickEnter.append("line") .attr("y1", height) .attr("y2", (height * 7 / 6) + 3); // Append text tickEnter.append("text") .attr("text-anchor", "middle") .attr("dy", "1em") .attr("y", (height * 7 / 6) + 4) .text(format); // Transition the entering ticks to the new scale, x1. tickEnter.transition() .duration(duration) .attr("transform", bulletTranslate(x1)) .style("opacity", 1); // Transition the updating ticks to the new scale, x1. var tickUpdate = tick.transition() .duration(duration) .attr("transform", bulletTranslate(x1)) .style("opacity", 1); // Update tick line tickUpdate.select("line") .attr("y1", height + 3) .attr("y2", (height * 7 / 6) + 3); // Update tick text tickUpdate.select("text") .attr("y", (height * 7 / 6) + 4); // Transition the exiting ticks to the new scale, x1. 
tick.exit() .transition() .duration(duration) .attr("transform", bulletTranslate(x1)) .style("opacity", 1e-6) .remove(); // Resize chart // ------------------------------ // Call function on window resize $(window).on('resize', resizeBulletsCore); // Call function on sidebar width change $(document).on('click', '.sidebar-control', resizeBulletsCore); // Resize function // // Since D3 doesn't support SVG resize by default, // we need to manually specify parts of the graph that need to // be updated on window resize function resizeBulletsCore() { // Layout variables width = d3.select("#bullets").node().getBoundingClientRect().width - margin.left - margin.right; w1 = bulletWidth(x1); // Layout // ------------------------- // Horizontal range x1.range(reverse ? [width, 0] : [0, width]); // Chart elements // ------------------------- // Measures g.selectAll(".bullet-measure").attr("width", w1).attr("x", reverse ? x1 : 0); // Ranges g.selectAll(".bullet-range").attr("width", w1).attr("x", reverse ? 
x1 : 0); // Markers g.selectAll(".bullet-marker").attr("x1", x1).attr("x2", x1) // Ticks g.selectAll(".bullet-tick").attr("transform", bulletTranslate(x1)) } }); d3.timer.flush(); } // Constructor functions // ------------------------------ // Left, right, top, bottom bullet.orient = function(x) { if (!arguments.length) return orient; orient = x; reverse = orient == "right" || orient == "bottom"; return bullet; }; // Ranges (bad, satisfactory, good) bullet.ranges = function(x) { if (!arguments.length) return ranges; ranges = x; return bullet; }; // Markers (previous, goal) bullet.markers = function(x) { if (!arguments.length) return markers; markers = x; return bullet; }; // Measures (actual, forecast) bullet.measures = function(x) { if (!arguments.length) return measures; measures = x; return bullet; }; // Width bullet.width = function(x) { if (!arguments.length) return width; width = x; return bullet; }; // Height bullet.height = function(x) { if (!arguments.length) return height; height = x; return bullet; }; // Axex tick format bullet.tickFormat = function(x) { if (!arguments.length) return tickFormat; tickFormat = x; return bullet; }; // Transition duration bullet.duration = function(x) { if (!arguments.length) return duration; duration = x; return bullet; }; return bullet; }; // Ranges function bulletRanges(d) { return d.ranges; } // Markers function bulletMarkers(d) { return d.markers; } // Measures function bulletMeasures(d) { return d.measures; } // Positioning function bulletTranslate(x) { return function(d) { return "translate(" + x(d) + ",0)"; }; } // Width function bulletWidth(x) { var x0 = x(0); return function(d) { return Math.abs(x(d) - x0); }; } } // Basic setup // ------------------------------ // Main variables var d3Container = d3.select(element), margin = {top: 20, right: 10, bottom: 35, left: 10}, width = width = d3Container.node().getBoundingClientRect().width - margin.left - margin.right, height = height - margin.top - margin.bottom; // 
Construct chart layout // ------------------------------ var chart = d3.bullet() .width(width) .height(height); // Load data // ------------------------------ d3.json("assets/demo_data/dashboard/bullets.json", function(error, data) { // Show what's wrong if error if (error) return console.error(error); // Create SVG // ------------------------------ // SVG container var container = d3Container.selectAll("svg") .data(data) .enter() .append('svg'); // SVG group var svg = container .attr("class", function(d, i) { return "bullet-" + (i + 1); }) .attr('width', width + margin.left + margin.right) .attr('height', height + margin.top + margin.bottom) .append("g") .attr("transform", "translate(" + margin.left + "," + margin.top + ")") .call(chart); // Add title // ------------------------------ // Title group var title = svg.append("g") .style("text-anchor", "start"); // Bullet title text title.append("text") .attr("class", "bullet-title") .attr('y', -10) .text(function(d) { return d.title; }); // Bullet subtitle text title.append("text") .attr("class", "bullet-subtitle") .attr('x', width) .attr('y', -10) .style("text-anchor", "end") .text(function(d) { return d.subtitle; }) .style('opacity', 0) .transition() .duration(500) .delay(500) .style('opacity', 1); // Add random transition for demo // ------------------------------ // Bind data var interval = function() { svg.datum(randomize).call(chart.duration(750)); } // Set interval var intervalIds = []; intervalIds.push( setInterval(function() { interval() }, 5000) ); // Add Switchery toggle control var realtime = document.querySelector('.switcher'); var realtimeInit = new Switchery(realtime); realtime.onchange = function() { if(realtime.checked) { intervalIds.push(setInterval(function() { interval() }, 5000)); } else { for (var i=0; i < intervalIds.length; i++) { clearInterval(intervalIds[i]); } } }; // Resize chart // ------------------------------ // Call function on window resize $(window).on('resize', bulletResize); // 
Call function on sidebar width change $(document).on('click', '.sidebar-control', bulletResize); // Resize function // // Since D3 doesn't support SVG resize by default, // we need to manually specify parts of the graph that need to // be updated on window resize function bulletResize() { // Layout variables width = d3Container.node().getBoundingClientRect().width - margin.left - margin.right; // Layout // ------------------------- // Main svg width container.attr("width", width + margin.left + margin.right); // Width of appended group svg.attr("width", width + margin.left + margin.right); // Chart elements // ------------------------- // Subtitle svg.selectAll('.bullet-subtitle').attr("x", width); } }); // Randomizers // ------------------------------ function randomize(d) { if (!d.randomizer) d.randomizer = randomizer(d); d.ranges = d.ranges.map(d.randomizer); d.markers = d.markers.map(d.randomizer); d.measures = d.measures.map(d.randomizer); return d; } function randomizer(d) { var k = d3.max(d.ranges) * .2; return function(d) { return Math.max(0, d + k * (Math.random() - .5)); }; } } // Other codes // ------------------------------ // Grab first letter and insert to the icon $(".table tr").each(function (i) { // Title var $title = $(this).find('.letter-icon-title'), letter = $title.eq(0).text().charAt(0).toUpperCase(); // Icon var $icon = $(this).find('.letter-icon'); $icon.eq(0).text(letter); }); });
mo-norant/FinHeartBel
website/src/assets/js/pages/dashboard.js
JavaScript
gpl-3.0
117,495
/*
 * Copyright (C) ROCKCHIP, Inc.
 * Author:yzq<yzq@rock-chips.com>
 *
 * based on exynos_drm_hdmi.c
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <drm/drmP.h>

#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <drm/rockchip_drm.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_hdmi.h"

#define to_context(dev)		platform_get_drvdata(to_platform_device(dev))
#define to_subdrv(dev)		to_context(dev)
/*
 * NOTE: the trailing ';' the original macro carried has been dropped so the
 * macro expands to an expression (as container_of() does) instead of
 * silently producing a double semicolon at every use site.
 */
#define get_ctx_from_subdrv(subdrv)	container_of(subdrv,\
		struct drm_hdmi_context, subdrv)

/* platform device pointer for common drm hdmi device. */
static struct platform_device *rockchip_drm_hdmi_pdev;

/*
 * Common hdmi subdrv needs to access the hdmi and mixer though context.
 * These should be initialized by the respective drivers before the subdrv
 * probe runs (see hdmi_subdrv_probe below, which fails otherwise).
 */
static struct rockchip_drm_hdmi_context *hdmi_ctx;
static struct rockchip_drm_hdmi_context *mixer_ctx;

/* these callback points should be set by specific drivers. */
static struct rockchip_hdmi_ops *hdmi_ops;
static struct rockchip_mixer_ops *mixer_ops;

struct drm_hdmi_context {
	struct rockchip_drm_subdrv		subdrv;
	struct rockchip_drm_hdmi_context	*hdmi_ctx;
	struct rockchip_drm_hdmi_context	*mixer_ctx;

	/* per-window enable flags; valid indices are 0 .. MIXER_WIN_NR-1 */
	bool	enabled[MIXER_WIN_NR];
};

/*
 * Register the common "rockchip-drm-hdmi" platform device.
 * Returns -EEXIST if it was already registered.
 */
int rockchip_platform_device_hdmi_register(void)
{
	struct platform_device *pdev;

	if (rockchip_drm_hdmi_pdev)
		return -EEXIST;

	pdev = platform_device_register_simple(
			"rockchip-drm-hdmi", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	rockchip_drm_hdmi_pdev = pdev;

	return 0;
}

/* Unregister the common platform device, if it exists. */
void rockchip_platform_device_hdmi_unregister(void)
{
	if (rockchip_drm_hdmi_pdev) {
		platform_device_unregister(rockchip_drm_hdmi_pdev);
		rockchip_drm_hdmi_pdev = NULL;
	}
}

/* Called by the hdmi driver to hand its context to this glue layer. */
void rockchip_hdmi_drv_attach(struct rockchip_drm_hdmi_context *ctx)
{
	if (ctx)
		hdmi_ctx = ctx;
}

/* Called by the mixer driver to hand its context to this glue layer. */
void rockchip_mixer_drv_attach(struct rockchip_drm_hdmi_context *ctx)
{
	if (ctx)
		mixer_ctx = ctx;
}

/* Install the hdmi driver's callback table. */
void rockchip_hdmi_ops_register(struct rockchip_hdmi_ops *ops)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (ops)
		hdmi_ops = ops;
}

/* Install the mixer driver's callback table. */
void rockchip_mixer_ops_register(struct rockchip_mixer_ops *ops)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (ops)
		mixer_ops = ops;
}

/* Returns hdmi cable connection state; false when no driver is attached. */
static bool drm_hdmi_is_connected(struct device *dev)
{
	struct drm_hdmi_context *ctx = to_context(dev);

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (hdmi_ops && hdmi_ops->is_connected)
		return hdmi_ops->is_connected(ctx->hdmi_ctx->ctx);

	return false;
}

/* Fetch EDID from the hdmi driver; NULL when unavailable. */
static struct edid *drm_hdmi_get_edid(struct device *dev,
			struct drm_connector *connector)
{
	struct drm_hdmi_context *ctx = to_context(dev);

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (hdmi_ops && hdmi_ops->get_edid)
		return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector);

	return NULL;
}

/*
 * Validate a timing against both mixer and hdmi.
 * Both, mixer and hdmi should be able to handle the requested mode.
 * If any of the two fails, return mode as BAD (non-zero).
 */
static int drm_hdmi_check_timing(struct device *dev, void *timing)
{
	struct drm_hdmi_context *ctx = to_context(dev);
	int ret = 0;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (mixer_ops && mixer_ops->check_timing)
		ret = mixer_ops->check_timing(ctx->mixer_ctx->ctx, timing);

	if (ret)
		return ret;

	if (hdmi_ops && hdmi_ops->check_timing)
		return hdmi_ops->check_timing(ctx->hdmi_ctx->ctx, timing);

	return 0;
}

/* Forward a power-mode change to the hdmi driver. */
static int drm_hdmi_power_on(struct device *dev, int mode)
{
	struct drm_hdmi_context *ctx = to_context(dev);

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (hdmi_ops && hdmi_ops->power_on)
		return hdmi_ops->power_on(ctx->hdmi_ctx->ctx, mode);

	return 0;
}

static struct rockchip_drm_display_ops drm_hdmi_display_ops = {
	.type = ROCKCHIP_DISPLAY_TYPE_HDMI,
	.is_connected = drm_hdmi_is_connected,
	.get_edid = drm_hdmi_get_edid,
	.check_timing = drm_hdmi_check_timing,
	.power_on = drm_hdmi_power_on,
};

/* Enable vblank on the mixer for this manager's pipe. */
static int drm_hdmi_enable_vblank(struct device *subdrv_dev)
{
	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
	struct rockchip_drm_subdrv *subdrv = &ctx->subdrv;
	struct rockchip_drm_manager *manager = subdrv->manager;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (mixer_ops && mixer_ops->enable_vblank)
		return mixer_ops->enable_vblank(ctx->mixer_ctx->ctx,
						manager->pipe);

	return 0;
}

static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
{
	struct drm_hdmi_context *ctx = to_context(subdrv_dev);

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* plain call: 'return <void expr>;' in the original is not valid C */
	if (mixer_ops && mixer_ops->disable_vblank)
		mixer_ops->disable_vblank(ctx->mixer_ctx->ctx);
}

static void drm_hdmi_wait_for_vblank(struct device *subdrv_dev)
{
	struct drm_hdmi_context *ctx = to_context(subdrv_dev);

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (mixer_ops && mixer_ops->wait_for_vblank)
		mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
}

/*
 * If the user-requested mode is rejected by hdmi/mixer, substitute the first
 * acceptable mode from the connector's mode list into adjusted_mode, keeping
 * the original list linkage and base object intact.
 */
static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
				struct drm_connector *connector,
				const struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	struct drm_display_mode *m;
	int mode_ok;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	drm_mode_set_crtcinfo(adjusted_mode, 0);

	mode_ok = drm_hdmi_check_timing(subdrv_dev, adjusted_mode);

	/* just return if user desired mode exists. */
	if (mode_ok == 0)
		return;

	/*
	 * otherwise, find the most suitable mode among modes and change it
	 * to adjusted_mode.
	 */
	list_for_each_entry(m, &connector->modes, head) {
		mode_ok = drm_hdmi_check_timing(subdrv_dev, m);

		if (mode_ok == 0) {
			struct drm_mode_object base;
			struct list_head head;

			DRM_INFO("desired mode doesn't exist so\n");
			DRM_INFO("use the most suitable mode among modes.\n");

			DRM_DEBUG_KMS("Adjusted Mode: [%d]x[%d] [%d]Hz\n",
				m->hdisplay, m->vdisplay, m->vrefresh);

			/* preserve display mode header while copying. */
			head = adjusted_mode->head;
			base = adjusted_mode->base;
			memcpy(adjusted_mode, m, sizeof(*m));
			adjusted_mode->head = head;
			adjusted_mode->base = base;
			break;
		}
	}
}

static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode)
{
	struct drm_hdmi_context *ctx = to_context(subdrv_dev);

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (hdmi_ops && hdmi_ops->mode_set)
		hdmi_ops->mode_set(ctx->hdmi_ctx->ctx, mode);
}

static void drm_hdmi_get_max_resol(struct device *subdrv_dev,
				unsigned int *width, unsigned int *height)
{
	struct drm_hdmi_context *ctx = to_context(subdrv_dev);

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (hdmi_ops && hdmi_ops->get_max_resol)
		hdmi_ops->get_max_resol(ctx->hdmi_ctx->ctx, width, height);
}

static void drm_hdmi_commit(struct device *subdrv_dev)
{
	struct drm_hdmi_context *ctx = to_context(subdrv_dev);

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (hdmi_ops && hdmi_ops->commit)
		hdmi_ops->commit(ctx->hdmi_ctx->ctx);
}

/* Propagate dpms to the mixer first, then hdmi. */
static void drm_hdmi_dpms(struct device *subdrv_dev, int mode)
{
	struct drm_hdmi_context *ctx = to_context(subdrv_dev);

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (mixer_ops && mixer_ops->dpms)
		mixer_ops->dpms(ctx->mixer_ctx->ctx, mode);

	if (hdmi_ops && hdmi_ops->dpms)
		hdmi_ops->dpms(ctx->hdmi_ctx->ctx, mode);
}

/* Re-commit every enabled mixer window, then the hdmi output itself. */
static void drm_hdmi_apply(struct device *subdrv_dev)
{
	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
	int i;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	for (i = 0; i < MIXER_WIN_NR; i++) {
		if (!ctx->enabled[i])
			continue;
		if (mixer_ops && mixer_ops->win_commit)
			mixer_ops->win_commit(ctx->mixer_ctx->ctx, i);
	}

	if (hdmi_ops && hdmi_ops->commit)
		hdmi_ops->commit(ctx->hdmi_ctx->ctx);
}

static struct rockchip_drm_manager_ops drm_hdmi_manager_ops = {
	.dpms = drm_hdmi_dpms,
	.apply = drm_hdmi_apply,
	.enable_vblank = drm_hdmi_enable_vblank,
	.disable_vblank = drm_hdmi_disable_vblank,
	.wait_for_vblank = drm_hdmi_wait_for_vblank,
	.mode_fixup = drm_hdmi_mode_fixup,
	.mode_set = drm_hdmi_mode_set,
	.get_max_resol = drm_hdmi_get_max_resol,
	.commit = drm_hdmi_commit,
};

static void drm_mixer_mode_set(struct device *subdrv_dev,
		struct rockchip_drm_overlay *overlay)
{
	struct drm_hdmi_context *ctx = to_context(subdrv_dev);

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (mixer_ops && mixer_ops->win_mode_set)
		mixer_ops->win_mode_set(ctx->mixer_ctx->ctx, overlay);
}

static void drm_mixer_commit(struct device *subdrv_dev, int zpos)
{
	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
	int win = (zpos == DEFAULT_ZPOS) ? MIXER_DEFAULT_WIN : zpos;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * enabled[] has MIXER_WIN_NR entries, so MIXER_WIN_NR itself is
	 * out of range (the original '>' test let win == MIXER_WIN_NR
	 * write one past the end of the array).
	 */
	if (win < 0 || win >= MIXER_WIN_NR) {
		DRM_ERROR("mixer window[%d] is wrong\n", win);
		return;
	}

	if (mixer_ops && mixer_ops->win_commit)
		mixer_ops->win_commit(ctx->mixer_ctx->ctx, win);

	ctx->enabled[win] = true;
}

static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
{
	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
	int win = (zpos == DEFAULT_ZPOS) ? MIXER_DEFAULT_WIN : zpos;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* same off-by-one fix as drm_mixer_commit(): '>' -> '>=' */
	if (win < 0 || win >= MIXER_WIN_NR) {
		DRM_ERROR("mixer window[%d] is wrong\n", win);
		return;
	}

	if (mixer_ops && mixer_ops->win_disable)
		mixer_ops->win_disable(ctx->mixer_ctx->ctx, win);

	ctx->enabled[win] = false;
}

static struct rockchip_drm_overlay_ops drm_hdmi_overlay_ops = {
	.mode_set = drm_mixer_mode_set,
	.commit = drm_mixer_commit,
	.disable = drm_mixer_disable,
};

static struct rockchip_drm_manager hdmi_manager = {
	.pipe = -1,
	.ops = &drm_hdmi_manager_ops,
	.overlay_ops = &drm_hdmi_overlay_ops,
	.display_ops = &drm_hdmi_display_ops,
};

/*
 * DRM subdrv probe: wire the previously-attached hdmi/mixer contexts into
 * our context and turn on the mixer iommu. Fails with -EFAULT when either
 * driver has not attached yet.
 */
static int hdmi_subdrv_probe(struct drm_device *drm_dev,
		struct device *dev)
{
	struct rockchip_drm_subdrv *subdrv = to_subdrv(dev);
	struct drm_hdmi_context *ctx;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!hdmi_ctx) {
		DRM_ERROR("hdmi context not initialized.\n");
		return -EFAULT;
	}

	if (!mixer_ctx) {
		DRM_ERROR("mixer context not initialized.\n");
		return -EFAULT;
	}

	ctx = get_ctx_from_subdrv(subdrv);
	if (!ctx) {
		DRM_ERROR("no drm hdmi context.\n");
		return -EFAULT;
	}

	ctx->hdmi_ctx = hdmi_ctx;
	ctx->mixer_ctx = mixer_ctx;

	ctx->hdmi_ctx->drm_dev = drm_dev;
	ctx->mixer_ctx->drm_dev = drm_dev;

	/* guard mixer_ops like every other call site (it may be NULL) */
	if (mixer_ops && mixer_ops->iommu_on)
		mixer_ops->iommu_on(ctx->mixer_ctx->ctx, true);

	return 0;
}

static void hdmi_subdrv_remove(struct drm_device *drm_dev,
		struct device *dev)
{
	struct drm_hdmi_context *ctx;
	struct rockchip_drm_subdrv *subdrv = to_subdrv(dev);

	ctx = get_ctx_from_subdrv(subdrv);

	/* guard mixer_ops like every other call site (it may be NULL) */
	if (mixer_ops && mixer_ops->iommu_on)
		mixer_ops->iommu_on(ctx->mixer_ctx->ctx, false);
}

static int rockchip_drm_hdmi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rockchip_drm_subdrv *subdrv;
	struct drm_hdmi_context *ctx;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		DRM_LOG_KMS("failed to alloc common hdmi context.\n");
		return -ENOMEM;
	}

	subdrv = &ctx->subdrv;

	subdrv->dev = dev;
	subdrv->manager = &hdmi_manager;
	subdrv->probe = hdmi_subdrv_probe;
	subdrv->remove = hdmi_subdrv_remove;

	/* drvdata is &ctx->subdrv; remove() casts it back to the context,
	 * which works only because subdrv is the first struct member. */
	platform_set_drvdata(pdev, subdrv);

	rockchip_drm_subdrv_register(subdrv);

	return 0;
}

static int rockchip_drm_hdmi_remove(struct platform_device *pdev)
{
	struct drm_hdmi_context *ctx = platform_get_drvdata(pdev);

	DRM_DEBUG_KMS("%s\n", __FILE__);

	rockchip_drm_subdrv_unregister(&ctx->subdrv);

	return 0;
}

struct platform_driver rockchip_drm_common_hdmi_driver = {
	.probe = rockchip_drm_hdmi_probe,
	.remove = rockchip_drm_hdmi_remove,
	.driver = {
		.name = "rockchip-drm-hdmi",
		.owner = THIS_MODULE,
	},
};
s20121035/rk3288_android5.1_repo
kernel/drivers/gpu/drm/rockchip/rockchip_drm_hdmi.c
C
gpl-3.0
11,811
/** * Macedonia translation * By PetarD petar.dimitrijevic@vorteksed.com.mk (utf8 encoding) * 23 April 2007 */ Ext.onReady(function() { if (Ext.Date) { Ext.Date.monthNames = ["Јануари", "Февруари", "Март", "Април", "Мај", "Јуни", "Јули", "Август", "Септември", "Октомври", "Ноември", "Декември"]; Ext.Date.dayNames = ["Недела", "Понеделник", "Вторник", "Среда", "Четврток", "Петок", "Сабота"]; } if (Ext.util && Ext.util.Format) { Ext.apply(Ext.util.Format, { thousandSeparator: '.', decimalSeparator: ',', currencySign: '\u0434\u0435\u043d', // Macedonian Denar dateFormat: 'd.m.Y' }); } }); Ext.define("Ext.locale.mk.view.View", { override: "Ext.view.View", emptyText: "" }); Ext.define("Ext.locale.mk.grid.plugin.DragDrop", { override: "Ext.grid.plugin.DragDrop", dragText: "{0} избрани редици" }); Ext.define("Ext.locale.mk.tab.Tab", { override: "Ext.tab.Tab", closeText: "Затвори tab" }); Ext.define("Ext.locale.mk.form.field.Base", { override: "Ext.form.field.Base", invalidText: "Вредноста во ова поле е невалидна" }); // changing the msg text below will affect the LoadMask Ext.define("Ext.locale.mk.view.AbstractView", { override: "Ext.view.AbstractView", loadingText: "Вчитувам..." 
}); Ext.define("Ext.locale.mk.picker.Date", { override: "Ext.picker.Date", todayText: "Денеска", minText: "Овој датум е пред најмалиот датум", maxText: "Овој датум е пред најголемиот датум", disabledDaysText: "", disabledDatesText: "", nextText: 'Следен месец (Control+Стрелка десно)', prevText: 'Претходен месец (Control+Стрелка лево)', monthYearText: 'Изберете месец (Control+Стрелка горе/Стрелка десно за менување година)', todayTip: "{0} (Spacebar)", format: "d.m.y" }); Ext.define("Ext.locale.mk.toolbar.Paging", { override: "Ext.PagingToolbar", beforePageText: "Страница", afterPageText: "од {0}", firstText: "Прва Страница", prevText: "Претходна Страница", nextText: "Следна Страница", lastText: "Последна Страница", refreshText: "Освежи", displayMsg: "Прикажувам {0} - {1} од {2}", emptyMsg: 'Нема податоци за приказ' }); Ext.define("Ext.locale.mk.form.field.Text", { override: "Ext.form.field.Text", minLengthText: "Минималната должина за ова поле е {0}", maxLengthText: "Максималната должина за ова поле е {0}", blankText: "Податоците во ова поле се потребни", regexText: "", emptyText: null }); Ext.define("Ext.locale.mk.form.field.Number", { override: "Ext.form.field.Number", minText: "Минималната вредност за ова поле е {0}", maxText: "Максималната вредност за ова поле е {0}", nanText: "{0} не е валиден број" }); Ext.define("Ext.locale.mk.form.field.Date", { override: "Ext.form.field.Date", disabledDaysText: "Неактивно", disabledDatesText: "Неактивно", minText: "Датумот во ова поле мора да биде пред {0}", maxText: "Датумот во ова поле мора да биде по {0}", invalidText: "{0} не е валиден датум - мора да биде во формат {1}", format: "d.m.y" }); Ext.define("Ext.locale.mk.form.field.ComboBox", { override: "Ext.form.field.ComboBox", valueNotFoundText: undefined }, function() { Ext.apply(Ext.form.field.ComboBox.prototype.defaultListConfig, { loadingText: "Вчитувам..." 
}); }); Ext.define("Ext.locale.mk.form.field.VTypes", { override: "Ext.form.field.VTypes", emailText: 'Ова поле треба да биде e-mail адреса во формат "user@example.com"', urlText: 'Ова поле треба да биде URL во формат "http:/' + '/www.example.com"', alphaText: 'Ова поле треба да содржи само букви и _', alphanumText: 'Ова поле треба да содржи само букви, бројки и _' }); Ext.define("Ext.locale.mk.grid.header.Container", { override: "Ext.grid.header.Container", sortAscText: "Сортирај Растечки", sortDescText: "Сортирај Опаѓачки", lockText: "Заклучи Колона", unlockText: "Отклучи колона", columnsText: "Колони" }); Ext.define("Ext.locale.mk.grid.PropertyColumnModel", { override: "Ext.grid.PropertyColumnModel", nameText: "Име", valueText: "Вредност", dateFormat: "m.d.Y" }); Ext.define("Ext.locale.mk.window.MessageBox", { override: "Ext.window.MessageBox", buttonText: { ok: "Потврди", cancel: "Поништи", yes: "Да", no: "Не" } }); // This is needed until we can refactor all of the locales into individual files Ext.define("Ext.locale.mk.Component", { override: "Ext.Component" });
applifireAlgo/ZenClubApp
zenws/src/main/webapp/ext/packages/ext-locale/build/ext-locale-mk-debug.js
JavaScript
gpl-3.0
5,568
<!DOCTYPE html>
<meta charset=utf-8>
<title>KeyframeEffectReadOnly constructor tests</title>
<link rel="help" href="https://w3c.github.io/web-animations/#processing-a-keyframes-argument">
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="../../testcommon.js"></script>
<body>
<div id="log"></div>
<div id="target"></div>
<script>
'use strict';

// Test the "process a keyframe-like object" procedure.
//
// This file only tests the KeyframeEffectReadOnly constructor since it is
// assumed that the implementation of the KeyframeEffect constructor,
// Animatable.animate() method, and KeyframeEffect.setKeyframes() method will
// all share common machinery and it is not necessary to test each method.

// Test that only animatable properties are accessed

var gNonAnimatableProps = [
  'animation', // Shorthands where all the longhand sub-properties are not
               // animatable, are also not animatable.
  'animationDelay',
  'animationDirection',
  'animationDuration',
  'animationFillMode',
  'animationIterationCount',
  'animationName',
  'animationPlayState',
  'animationTimingFunction',
  'transition',
  'transitionDelay',
  'transitionDuration',
  'transitionProperty',
  'transitionTimingFunction',
  'display',
  'unsupportedProperty',
];

// A keyframe-like object that exposes a single enumerable property named
// |testProp| whose getter counts its own invocations; the count is readable
// via the non-enumerable 'propAccessCount' accessor. This lets each test
// below assert that the constructor never read the property.
function TestKeyframe(testProp) {
  var _propAccessCount = 0;

  Object.defineProperty(this, testProp, {
    get: function() { _propAccessCount++; },
    enumerable: true
  });

  Object.defineProperty(this, 'propAccessCount', {
    get: function() { return _propAccessCount; }
  });
}

// Wrap a single TestKeyframe in an array so the constructor takes the
// keyframe-sequence code path rather than the property-indexed one.
function GetTestKeyframeSequence(testProp) {
  return [ new TestKeyframe(testProp) ]
}

gNonAnimatableProps.forEach(function(prop) {
  test(function(t) {
    var testKeyframe = new TestKeyframe(prop);

    new KeyframeEffectReadOnly(null, testKeyframe);

    assert_equals(testKeyframe.propAccessCount, 0, 'Accessor not called');
  }, 'non-animatable property \'' + prop + '\' is not accessed when using' +
     ' a property-indexed keyframe object');
});

gNonAnimatableProps.forEach(function(prop) {
  test(function(t) {
    var testKeyframes = GetTestKeyframeSequence(prop);

    new KeyframeEffectReadOnly(null, testKeyframes);

    assert_equals(testKeyframes[0].propAccessCount, 0, 'Accessor not called');
  }, 'non-animatable property \'' + prop + '\' is not accessed when using' +
     ' a keyframe sequence');
});

// FIXME: Test that non-enumerable properties are not accessed

// FIXME: Test that properties are accessed in ascending order by Unicode
//        codepoint
//        (There is an existing test for this in
//        keyframe-effect/constructor.html that should be moved here.)
</script>
Glorf/servo
tests/wpt/web-platform-tests/web-animations/interfaces/KeyframeEffect/processing-a-keyframes-argument.html
HTML
mpl-2.0
2,732
define(['sinon', 'underscore', 'URI'], function(sinon, _, URI) {
    'use strict';

    var fakeServer, fakeRequests, getRequest, expectRequest,
        expectJsonRequest, expectPostRequest, expectRequestURL,
        respondWithJson, respondWithError, respondWithTextError,
        respondWithNoContent;

    /* These utility methods are used by Jasmine tests to create a mock server or
     * get reference to mock requests. In either case, the cleanup (restore) is done with
     * an after function.
     *
     * This pattern is being used instead of the more common beforeEach/afterEach pattern
     * because we were seeing sporadic failures in the afterEach restore call. The cause of the
     * errors were that one test suite was incorrectly being linked as the parent of an unrelated
     * test suite (causing both suites' afterEach methods to be called). No solution for the root
     * cause has been found, but initializing sinon and cleaning it up on a method-by-method
     * basis seems to work. For more details, see STUD-1264.
     */

    /**
     * Get a reference to the mocked server, and respond
     * to all requests with the specified statusCode.
     */
    fakeServer = function(that, response) {
        var server = sinon.fakeServer.create();
        that.after(function() {
            server.restore();
        });
        server.respondWith(response);
        return server;
    };

    /**
     * Keep track of all requests to a fake server, and
     * return a reference to the Array. This allows tests
     * to respond for individual requests.
     */
    fakeRequests = function(that) {
        var requests = [],
            xhr = sinon.useFakeXMLHttpRequest();
        xhr.onCreate = function(request) {
            requests.push(request);
        };
        that.after(function() {
            xhr.restore();
        });
        return requests;
    };

    /**
     * Private helper: look up a collected request by index.
     * When requestIndex is undefined, the most recent request is returned.
     * (Extracted because this defaulting logic was duplicated in every
     * expect*/
    /* and respondWith* helper below.)
     */
    getRequest = function(requests, requestIndex) {
        if (_.isUndefined(requestIndex)) {
            requestIndex = requests.length - 1;
        }
        return requests[requestIndex];
    };

    /**
     * Expect that a request was made with the given method, URL and raw body.
     */
    expectRequest = function(requests, method, url, body, requestIndex) {
        var request = getRequest(requests, requestIndex);
        expect(request.url).toEqual(url);
        expect(request.method).toEqual(method);
        expect(request.requestBody).toEqual(body);
    };

    /**
     * Expect that a request was made whose body, parsed as JSON, equals
     * jsonRequest.
     */
    expectJsonRequest = function(requests, method, url, jsonRequest, requestIndex) {
        var request = getRequest(requests, requestIndex);
        expect(request.url).toEqual(url);
        expect(request.method).toEqual(method);
        expect(JSON.parse(request.requestBody)).toEqual(jsonRequest);
    };

    /**
     * Expect that a JSON request be made with the given URL and parameters.
     * @param requests The collected requests
     * @param expectedUrl The expected URL excluding the parameters
     * @param expectedParameters An object representing the URL parameters
     * @param requestIndex An optional index for the request (by default, the last request is used)
     */
    expectRequestURL = function(requests, expectedUrl, expectedParameters, requestIndex) {
        var request = getRequest(requests, requestIndex),
            parameters;
        expect(new URI(request.url).path()).toEqual(expectedUrl);
        parameters = new URI(request.url).query(true);
        delete parameters._; // Ignore the cache-busting argument
        expect(parameters).toEqual(expectedParameters);
    };

    /**
     * Intended for use with POST requests using application/x-www-form-urlencoded.
     * Field order in the body is ignored.
     */
    expectPostRequest = function(requests, url, body, requestIndex) {
        var request = getRequest(requests, requestIndex);
        expect(request.url).toEqual(url);
        expect(request.method).toEqual("POST");
        expect(_.difference(request.requestBody.split('&'), body.split('&'))).toEqual([]);
    };

    /**
     * Respond to a request with HTTP 200 and the given JSON payload.
     */
    respondWithJson = function(requests, jsonResponse, requestIndex) {
        getRequest(requests, requestIndex).respond(200,
            { 'Content-Type': 'application/json' },
            JSON.stringify(jsonResponse));
    };

    /**
     * Respond to a request with an error status (default 500) and an
     * optional JSON payload (default {}).
     */
    respondWithError = function(requests, statusCode, jsonResponse, requestIndex) {
        if (_.isUndefined(statusCode)) {
            statusCode = 500;
        }
        if (_.isUndefined(jsonResponse)) {
            jsonResponse = {};
        }
        getRequest(requests, requestIndex).respond(statusCode,
            { 'Content-Type': 'application/json' },
            JSON.stringify(jsonResponse));
    };

    /**
     * Respond to a request with an error status (default 500) and an
     * optional plain-text payload (default "").
     */
    respondWithTextError = function(requests, statusCode, textResponse, requestIndex) {
        if (_.isUndefined(statusCode)) {
            statusCode = 500;
        }
        if (_.isUndefined(textResponse)) {
            textResponse = "";
        }
        getRequest(requests, requestIndex).respond(statusCode,
            { 'Content-Type': 'text/plain' },
            textResponse);
    };

    /**
     * Respond to a request with HTTP 204 (no content).
     */
    respondWithNoContent = function(requests, requestIndex) {
        getRequest(requests, requestIndex).respond(204,
            { 'Content-Type': 'application/json' });
    };

    return {
        server: fakeServer,
        requests: fakeRequests,
        expectRequest: expectRequest,
        expectJsonRequest: expectJsonRequest,
        expectPostRequest: expectPostRequest,
        expectRequestURL: expectRequestURL,
        respondWithJson: respondWithJson,
        respondWithError: respondWithError,
        respondWithTextError: respondWithTextError,
        respondWithNoContent: respondWithNoContent
    };
});
rismalrv/edx-platform
common/static/common/js/spec_helpers/ajax_helpers.js
JavaScript
agpl-3.0
6,142
/** * @license AngularJS v1.3.0-rc.4 * (c) 2010-2014 Google, Inc. http://angularjs.org * License: MIT */ (function(window, angular, undefined) {'use strict'; /** * @ngdoc module * @name ngCookies * @description * * # ngCookies * * The `ngCookies` module provides a convenient wrapper for reading and writing browser cookies. * * * <div doc-module-components="ngCookies"></div> * * See {@link ngCookies.$cookies `$cookies`} and * {@link ngCookies.$cookieStore `$cookieStore`} for usage. */ angular.module('ngCookies', ['ng']). /** * @ngdoc service * @name $cookies * * @description * Provides read/write access to browser's cookies. * * Only a simple Object is exposed and by adding or removing properties to/from this object, new * cookies are created/deleted at the end of current $eval. * The object's properties can only be strings. * * Requires the {@link ngCookies `ngCookies`} module to be installed. * * @example * * ```js * angular.module('cookiesExample', ['ngCookies']) * .controller('ExampleController', ['$cookies', function($cookies) { * // Retrieving a cookie * var favoriteCookie = $cookies.myFavorite; * // Setting a cookie * $cookies.myFavorite = 'oatmeal'; * }]); * ``` */ factory('$cookies', ['$rootScope', '$browser', function ($rootScope, $browser) { var cookies = {}, lastCookies = {}, lastBrowserCookies, runEval = false, copy = angular.copy, isUndefined = angular.isUndefined; //creates a poller fn that copies all cookies from the $browser to service & inits the service $browser.addPollFn(function() { var currentCookies = $browser.cookies(); if (lastBrowserCookies != currentCookies) { //relies on browser.cookies() impl lastBrowserCookies = currentCookies; copy(currentCookies, lastCookies); copy(currentCookies, cookies); if (runEval) $rootScope.$apply(); } })(); runEval = true; //at the end of each eval, push cookies //TODO: this should happen before the "delayed" watches fire, because if some cookies are not // strings or browser refuses to store some cookies, we 
update the model in the push fn. $rootScope.$watch(push); return cookies; /** * Pushes all the cookies from the service to the browser and verifies if all cookies were * stored. */ function push() { var name, value, browserCookies, updated; //delete any cookies deleted in $cookies for (name in lastCookies) { if (isUndefined(cookies[name])) { $browser.cookies(name, undefined); } } //update all cookies updated in $cookies for(name in cookies) { value = cookies[name]; if (!angular.isString(value)) { value = '' + value; cookies[name] = value; } if (value !== lastCookies[name]) { $browser.cookies(name, value); updated = true; } } //verify what was actually stored if (updated){ updated = false; browserCookies = $browser.cookies(); for (name in cookies) { if (cookies[name] !== browserCookies[name]) { //delete or reset all cookies that the browser dropped from $cookies if (isUndefined(browserCookies[name])) { delete cookies[name]; } else { cookies[name] = browserCookies[name]; } updated = true; } } } } }]). /** * @ngdoc service * @name $cookieStore * @requires $cookies * * @description * Provides a key-value (string-object) storage, that is backed by session cookies. * Objects put or retrieved from this storage are automatically serialized or * deserialized by angular's toJson/fromJson. * * Requires the {@link ngCookies `ngCookies`} module to be installed. * * @example * * ```js * angular.module('cookieStoreExample', ['ngCookies']) * .controller('ExampleController', ['$cookieStore', function($cookieStore) { * // Put cookie * $cookieStore.put('myFavorite','oatmeal'); * // Get cookie * var favoriteCookie = $cookieStore.get('myFavorite'); * // Removing a cookie * $cookieStore.remove('myFavorite'); * }]); * ``` */ factory('$cookieStore', ['$cookies', function($cookies) { return { /** * @ngdoc method * @name $cookieStore#get * * @description * Returns the value of given cookie key * * @param {string} key Id to use for lookup. * @returns {Object} Deserialized cookie value. 
*/ get: function(key) { var value = $cookies[key]; return value ? angular.fromJson(value) : value; }, /** * @ngdoc method * @name $cookieStore#put * * @description * Sets a value for given cookie key * * @param {string} key Id for the `value`. * @param {Object} value Value to be stored. */ put: function(key, value) { $cookies[key] = angular.toJson(value); }, /** * @ngdoc method * @name $cookieStore#remove * * @description * Remove given cookie * * @param {string} key Id of the key-value pair to delete. */ remove: function(key) { delete $cookies[key]; } }; }]); })(window, window.angular);
imnes/eatrightapp
web/lib/angular-1.3.0-rc.4/angular-cookies.js
JavaScript
agpl-3.0
5,828
/***************************************************************************** * * XVID MPEG-4 VIDEO CODEC * - GMC interpolation module - * * Copyright(C) 2002-2003 Pascal Massimino <skal@planet-d.net> * * This program is free software ; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation ; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY ; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program ; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * $Id: gmc.cpp,v 1.1.1.1 2005-07-13 14:36:14 jeanlf Exp $ * ****************************************************************************/ #include "portab.h" #include "global.h" #include "gmc.h" /* ************************************************************ * Pts = 2 or 3 * * Warning! *src is the global frame pointer (that is: address * of pixel 0,0), not the macroblock one. * Conversely, *dst is the macroblock top-left address. 
*/

/* 16x16 luma GMC prediction, C reference version.
 * For every destination pixel the warped source coordinate (U,V) is tracked
 * in 16.16 fixed point; the integer part (after the rho shift) selects the
 * source sample and the low 4 bits select a weight from MTab for bilinear
 * interpolation.  Coordinates are clamped to the padded source area [0..W]
 * x [0..H] (sW/sH are stored as width<<4 / height<<4). */
static void Predict_16x16_C(const NEW_GMC_DATA * const This,
                            byte *dst, const byte *src,
                            int dststride, int srcstride,
                            int x, int y, int rounding)
{
	const int W = This->sW;
	const int H = This->sH;
	/* rho converts from 'accuracy' precision to the fixed 4-bit
	 * fractional precision used by MTab (accuracy in 0..3). */
	const int rho = 3 - This->accuracy;
	/* Rounding constant, pre-shifted to line up with the final >>24. */
	const int Rounder = ( (1<<7) - (rounding<<(2*rho)) ) << 16;

	/* Per-pixel (dUx,dVx) and per-row (dUy,dVy) coordinate increments. */
	const int dUx = This->dU[0];
	const int dVx = This->dV[0];
	const int dUy = This->dU[1];
	const int dVy = This->dV[1];

	/* Warped coordinate of the macroblock's top-left pixel (x,y are in
	 * macroblock units, hence the 16* factor). */
	int Uo = This->Uo + 16*(dUy*y + dUx*x);
	int Vo = This->Vo + 16*(dVy*y + dVx*x);

	int i, j;

	dst += 16; /* write via dst[i] with i in [-16,0) */
	for (j=16; j>0; --j) {
		int U = Uo, V = Vo;
		Uo += dUy; Vo += dVy;
		for (i=-16; i<0; ++i) {
			unsigned int f0, f1, ri = 16, rj = 16;
			int Offset;
			int u = ( U >> 16 ) << rho;
			int v = ( V >> 16 ) << rho;

			U += dUx; V += dVx;

			/* Clamp u to the source area; inside it, pick the
			 * fractional weight from MTab. */
			if (u > 0 && u <= W) { ri = MTab[u&15]; Offset = u>>4; }
			else if (u > W) Offset = W>>4;
			else Offset = -1;

			if (v > 0 && v <= H) { rj = MTab[v&15]; Offset += (v>>4)*srcstride; }
			else if (v > H) Offset += (H>>4)*srcstride;
			else Offset -= srcstride;

			/* Pack the two horizontal neighbours of each row into
			 * one word (low/high 16 bits) and blend both rows. */
			f0 = src[Offset + 0];
			f0 |= src[Offset + 1] << 16;
			f1 = src[Offset + srcstride + 0];
			f1 |= src[Offset + srcstride + 1] << 16;

			f0 = (ri*f0)>>16;
			f1 = (ri*f1) & 0x0fff0000;
			f0 |= f1;
			f0 = (rj*f0 + Rounder) >> 24;
			dst[i] = (byte)f0;
		}
		dst += dststride;
	}
}

//----------------------------

/* 8x8 chroma GMC prediction: same scheme as Predict_16x16_C but at half
 * resolution (W,H halved; Uco/Vco chroma offsets) and interpolating both
 * chroma planes (u/v) from one shared Offset per pixel. */
static void Predict_8x8_C(const NEW_GMC_DATA * const This,
                          byte *uDst, const byte *uSrc,
                          byte *vDst, const byte *vSrc,
                          int dststride, int srcstride,
                          int x, int y, int rounding)
{
	const int W = This->sW >> 1;
	const int H = This->sH >> 1;
	const int rho = 3-This->accuracy;
	const int Rounder = ( 128 - (rounding<<(2*rho)) ) << 16;

	const int dUx = This->dU[0];
	const int dVx = This->dV[0];
	const int dUy = This->dU[1];
	const int dVy = This->dV[1];

	int Uo = This->Uco + 8*(dUy*y + dUx*x);
	int Vo = This->Vco + 8*(dVy*y + dVx*x);

	int i, j;

	uDst += 8;
	vDst += 8;
	for (j=8; j>0; --j) {
		int U = Uo, V = Vo;
		Uo += dUy; Vo += dVy;

		for (i=-8; i<0; ++i) {
			int Offset;
			dword f0, f1, ri, rj;
			int u, v;

			u = ( U >> 16 ) << rho;
			v = ( V >> 16 ) << rho;
			U += dUx; V += dVx;

			if (u > 0 && u <= W) { ri = MTab[u&15]; Offset = u>>4; }
			else {
				ri = 16; /* weight for a fully clamped coordinate */
				if (u>W) Offset = W>>4;
				else Offset = -1;
			}

			if (v > 0 && v <= H) { rj = MTab[v&15]; Offset += (v>>4)*srcstride; }
			else {
				rj = 16;
				if (v>H) Offset += (H>>4)*srcstride;
				else Offset -= srcstride;
			}

			/* U plane */
			f0 = uSrc[Offset + 0];
			f0 |= uSrc[Offset + 1] << 16;
			f1 = uSrc[Offset + srcstride + 0];
			f1 |= uSrc[Offset + srcstride + 1] << 16;
			f0 = (ri*f0)>>16;
			f1 = (ri*f1) & 0x0fff0000;
			f0 |= f1;
			f0 = (rj*f0 + Rounder) >> 24;
			uDst[i] = (byte)f0;

			/* V plane, same Offset and weights */
			f0 = vSrc[Offset + 0];
			f0 |= vSrc[Offset + 1] << 16;
			f1 = vSrc[Offset + srcstride + 0];
			f1 |= vSrc[Offset + srcstride + 1] << 16;
			f0 = (ri*f0)>>16;
			f1 = (ri*f1) & 0x0fff0000;
			f0 |= f1;
			f0 = (rj*f0 + Rounder) >> 24;
			vDst[i] = (byte)f0;
		}
		uDst += dststride;
		vDst += dststride;
	}
}

//----------------------------

/* Average motion vector of the 16x16 warped block: sums the integer part of
 * the per-pixel warped coordinates, subtracts the un-warped pixel positions
 * (120 = 15*16/2 is the sum of offsets 0..15 per axis), then rescales to
 * half- or quarter-pel units depending on 'qpel'. */
static void get_average_mv_C(const NEW_GMC_DATA * const Dsp, VECTOR * const mv,
                             int x, int y, int qpel)
{
	int i, j;
	int vx = 0, vy = 0;
	int uo = Dsp->Uo + 16*(Dsp->dU[1]*y + Dsp->dU[0]*x);
	int vo = Dsp->Vo + 16*(Dsp->dV[1]*y + Dsp->dV[0]*x);
	for (j=16; j>0; --j)
	{
		int U, V;
		U = uo; uo += Dsp->dU[1];
		V = vo; vo += Dsp->dV[1];
		for (i=16; i>0; --i)
		{
			int u,v;
			u = U >> 16; U += Dsp->dU[0]; vx += u;
			v = V >> 16; V += Dsp->dV[0]; vy += v;
		}
	}
	vx -= (256*x+120) << (5+Dsp->accuracy); /* 120 = 15*16/2 */
	vy -= (256*y+120) << (5+Dsp->accuracy);

	mv->x = RSHIFT( vx, 8+Dsp->accuracy - qpel );
	mv->y = RSHIFT( vy, 8+Dsp->accuracy - qpel );
}

//----------------------------

/* ************************************************************
 * simplified version for 1 warp point */

/* 1-warp-point 16x16 prediction: with a single warp point the displacement
 * is constant over the block, so the clamping and the MTab weights are
 * computed once up front and the inner loop is a plain bilinear filter. */
static void Predict_1pt_16x16_C(const NEW_GMC_DATA * const This,
                                byte *Dst, const byte *Src,
                                int dststride, int srcstride,
                                int x, int y, int rounding)
{
	const int W = This->sW;
	const int H = This->sH;
	const int rho = 3-This->accuracy;
	const int Rounder = ( 128 - (rounding<<(2*rho)) ) << 16;

	int uo = This->Uo + (x<<8); /* ((16*x)<<4) */
	int vo = This->Vo + (y<<8);
	const dword ri = MTab[uo & 15];
	const dword rj = MTab[vo & 15];
	int i, j;

	int Offset;
	/* unsigned compare covers both the <0 and >H cases at once */
	if ((dword)vo<=(dword)H) Offset = (vo>>4)*srcstride;
	else if (vo>H) Offset = ( H>>4)*srcstride;
	else Offset =-16*srcstride;
	if ((dword)uo<=(dword)W) Offset += (uo>>4);
	else if (uo>W) Offset += ( W>>4);
	else Offset -= 16;

	Dst += 16;
	for(j=16; j>0; --j, Offset+=srcstride-16)
	{
		for(i=-16; i<0; ++i, ++Offset)
		{
			dword f0, f1;
			f0 = Src[ Offset +0 ];
			f0 |= Src[ Offset +1 ] << 16;
			f1 = Src[ Offset+srcstride +0 ];
			f1 |= Src[ Offset+srcstride +1 ] << 16;
			f0 = (ri*f0)>>16;
			f1 = (ri*f1) & 0x0fff0000;
			f0 |= f1;
			f0 = ( rj*f0 + Rounder ) >> 24;
			Dst[i] = (byte)f0;
		}
		Dst += dststride;
	}
}

//----------------------------

/* 1-warp-point 8x8 chroma prediction: chroma counterpart of
 * Predict_1pt_16x16_C, filtering both planes with the same offsets. */
static void Predict_1pt_8x8_C(const NEW_GMC_DATA * const This,
                              byte *uDst, const byte *uSrc,
                              byte *vDst, const byte *vSrc,
                              int dststride, int srcstride,
                              int x, int y, int rounding)
{
	const int W = This->sW >> 1;
	const int H = This->sH >> 1;
	const int rho = 3-This->accuracy;
	const int Rounder = ( 128 - (rounding<<(2*rho)) ) << 16;

	int uo = This->Uco + (x<<7);
	int vo = This->Vco + (y<<7);
	const dword rri = MTab[uo & 15];
	const dword rrj = MTab[vo & 15];
	int i, j;

	int Offset;
	if ((dword)vo<=(dword)H) Offset = (vo>>4)*srcstride;
	else if (vo>H) Offset = ( H>>4)*srcstride;
	else Offset =-8*srcstride;
	if ((dword)uo<=(dword)W) Offset += (uo>>4);
	else if (uo>W) Offset += (W>>4);
	else Offset -= 8;

	uDst += 8;
	vDst += 8;
	for(j=8; j>0; --j, Offset+=srcstride-8)
	{
		for(i=-8; i<0; ++i, Offset++)
		{
			dword f0, f1;
			f0 = uSrc[ Offset + 0 ];
			f0 |= uSrc[ Offset + 1 ] << 16;
			f1 = uSrc[ Offset + srcstride + 0 ];
			f1 |= uSrc[ Offset + srcstride + 1 ] << 16;
			f0 = (rri*f0)>>16;
			f1 = (rri*f1) & 0x0fff0000;
			f0 |= f1;
			f0 = ( rrj*f0 + Rounder ) >> 24;
			uDst[i] = (byte)f0;

			f0 = vSrc[ Offset + 0 ];
			f0 |= vSrc[ Offset + 1 ] << 16;
			f1 = vSrc[ Offset + srcstride + 0 ];
			f1 |= vSrc[ Offset + srcstride + 1 ] << 16;
			f0 = (rri*f0)>>16;
			f1 = (rri*f1) & 0x0fff0000;
			f0 |= f1;
			f0 = ( rrj*f0 + Rounder ) >> 24;
			vDst[i] = (byte)f0;
		}
		uDst += dststride;
		vDst += dststride;
	}
}

//----------------------------

/* Average MV for the 1-warp-point case: the displacement is uniform, so it
 * is just Uo/Vo rescaled to half- or quarter-pel units. */
static void get_average_mv_1pt_C(const NEW_GMC_DATA *const Dsp, VECTOR * const mv,
                                 int x, int y, int qpel)
{
	mv->x = RSHIFT(Dsp->Uo<<qpel, 3);
	mv->y = RSHIFT(Dsp->Vo<<qpel, 3);
}

//----------------------------

/* Build the fixed-point warp description in 'gmc' from up to 3 warp points
 * and install the matching prediction/average-MV function pointers.
 * nb_pts: number of warp points supplied (0..3; reduced internally when the
 *         extra points carry no information),
 * accuracy: warp precision (0..3),
 * pts: warp-point displacements, width/height: frame dimensions in pixels. */
void generate_GMCparameters(int nb_pts, int accuracy, const WARPPOINTS *pts,
                            int width, int height, NEW_GMC_DATA *gmc)
{
	gmc->sW = width << 4;
	gmc->sH = height << 4;
	gmc->accuracy = accuracy;
	gmc->num_wp = nb_pts;

	//reduce the number of points, if possible
	/* point 3 is redundant when it describes the same rotation as point 2;
	 * point 2 is redundant when it is zero; point 1 when it is zero too. */
	if(nb_pts<3 || (pts->duv[2].x==-pts->duv[1].y && pts->duv[2].y==pts->duv[1].x)) {
		if(nb_pts<2 || (pts->duv[1].x==0 && pts->duv[1].y==0)) {
			if(nb_pts<1 || (pts->duv[0].x==0 && pts->duv[0].y==0)) {
				nb_pts = 0;
			}
			else nb_pts = 1;
		}
		else nb_pts = 2;
	}
	else nb_pts = 3;

	//now, nb_pts stores the actual number of points required for interpolation
	if(nb_pts<=1)
	{
		if(nb_pts==1) {
			/* store as 4b fixed point */
			gmc->Uo = pts->duv[0].x << accuracy;
			gmc->Vo = pts->duv[0].y << accuracy;
			gmc->Uco = ((pts->duv[0].x>>1) | (pts->duv[0].x&1)) << accuracy; /* DIV2RND() */
			gmc->Vco = ((pts->duv[0].y>>1) | (pts->duv[0].y&1)) << accuracy; /* DIV2RND() */
		}
		else { /* zero points?! */
			gmc->Uo = gmc->Vo = 0;
			gmc->Uco = gmc->Vco = 0;
		}

		gmc->predict_16x16 = Predict_1pt_16x16_C;
		gmc->predict_8x8 = Predict_1pt_8x8_C;
		gmc->get_average_mv = get_average_mv_1pt_C;
	}
	else { /* 2 or 3 points */
		const int rho = 3 - accuracy; /* = {3,2,1,0} for Acc={0,1,2,3} */
		int Alpha = log2bin(width-1);
		int Ws = 1 << Alpha;

		gmc->dU[0] = 16*Ws + RDIV( 8*Ws*pts->duv[1].x, width ); /* dU/dx */
		gmc->dV[0] = RDIV( 8*Ws*pts->duv[1].y, width ); /* dV/dx */

		/* disabled, because possibly buggy? */
#if 0
		if (nb_pts==2) {
			gmc->dU[1] = -gmc->dV[0]; /* -Sin */
			gmc->dV[1] = gmc->dU[0] ; /* Cos */
		}
		else
#endif
		{
			const int Beta = log2bin(height-1);
			const int Hs = 1<<Beta;
			gmc->dU[1] = RDIV( 8*Hs*pts->duv[2].x, height ); /* dU/dy */
			gmc->dV[1] = 16*Hs + RDIV( 8*Hs*pts->duv[2].y, height ); /* dV/dy */
			/* bring both axes to the same (larger) power-of-two scale */
			if (Beta>Alpha) {
				gmc->dU[0] <<= (Beta-Alpha);
				gmc->dV[0] <<= (Beta-Alpha);
				Alpha = Beta;
				Ws = Hs;
			}
			else {
				gmc->dU[1] <<= Alpha - Beta;
				gmc->dV[1] <<= Alpha - Beta;
			}
		}

		/* upscale to 16b fixed-point */
		gmc->dU[0] <<= (16-Alpha - rho);
		gmc->dU[1] <<= (16-Alpha - rho);
		gmc->dV[0] <<= (16-Alpha - rho);
		gmc->dV[1] <<= (16-Alpha - rho);

		gmc->Uo = ( pts->duv[0].x <<(16+ accuracy)) + (1<<15);
		gmc->Vo = ( pts->duv[0].y <<(16+ accuracy)) + (1<<15);
		gmc->Uco = ((pts->duv[0].x-1)<<(17+ accuracy)) + (1<<17);
		gmc->Vco = ((pts->duv[0].y-1)<<(17+ accuracy)) + (1<<17);
		gmc->Uco = (gmc->Uco + gmc->dU[0] + gmc->dU[1])>>2;
		gmc->Vco = (gmc->Vco + gmc->dV[0] + gmc->dV[1])>>2;

		gmc->predict_16x16 = Predict_16x16_C;
		gmc->predict_8x8 = Predict_8x8_C;
		gmc->get_average_mv = get_average_mv_C;
	}
}

//----------------------------
Bevara/Access-open
modules/xvid_dec/xvid_wce/gmc.cpp
C++
lgpl-2.1
11,051
/* Globally enable events.
   Copyright (C) 1999, 2001, 2005 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@cygnus.com>, 1999.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include "thread_dbP.h"

/* Enable, process-wide, every event type whose bit is set in EVENT.
   The requested bits are OR-ed into the event mask already stored in
   the inferior, so events that were enabled before stay enabled.  */
td_err_e
td_ta_set_event (const td_thragent_t *ta, td_thr_events_t *event)
{
  td_thr_events_t mask;
  int n;

  LOG ("td_ta_set_event");

  /* Test whether the TA parameter is ok.  */
  if (! ta_ok (ta))
    return TD_BADTA;

  /* Fetch the event mask currently stored in the inferior.  */
  if (ps_pdread (ta->ph, ta->pthread_threads_eventsp,
		 &mask, sizeof (td_thr_events_t)) != PS_OK)
    return TD_ERR;	/* XXX Other error value?  */

  /* Merge the requested bits into the existing mask.  */
  for (n = 0; n < TD_EVENTSIZE; ++n)
    mask.event_bits[n] |= event->event_bits[n];

  /* Store the merged mask back into the inferior.  */
  if (ps_pdwrite (ta->ph, ta->pthread_threads_eventsp,
		  &mask, sizeof (td_thr_events_t)) != PS_OK)
    return TD_ERR;	/* XXX Other error value?  */

  return TD_OK;
}
wbx-github/uclibc-ng
libpthread/linuxthreads_db/td_ta_set_event.c
C
lgpl-2.1
1,712
//===-- llvm/CodeGen/MachineInstr.h - MachineInstr class --------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains the declaration of the MachineInstr class, which is the // basic representation for all target dependent machine instructions used by // the back end. // //===----------------------------------------------------------------------===// #ifndef LLVM_CODEGEN_MACHINEINSTR_H #define LLVM_CODEGEN_MACHINEINSTR_H #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/DenseMapInfo.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/ilist.h" #include "llvm/ADT/ilist_node.h" #include "llvm/ADT/iterator_range.h" #include "llvm/CodeGen/MachineOperand.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/DebugLoc.h" #include "llvm/IR/InlineAsm.h" #include "llvm/MC/MCInstrDesc.h" #include "llvm/Support/ArrayRecycler.h" #include "llvm/Target/TargetOpcodes.h" namespace llvm { template <typename T> class SmallVectorImpl; class AliasAnalysis; class TargetInstrInfo; class TargetRegisterClass; class TargetRegisterInfo; class MachineFunction; class MachineMemOperand; //===----------------------------------------------------------------------===// /// MachineInstr - Representation of each machine instruction. /// /// This class isn't a POD type, but it must have a trivial destructor. When a /// MachineFunction is deleted, all the contained MachineInstrs are deallocated /// without having their destructor called. /// class MachineInstr : public ilist_node<MachineInstr> { public: typedef MachineMemOperand **mmo_iterator; /// Flags to specify different kinds of comments to output in /// assembly code. These flags carry semantic information not /// otherwise easily derivable from the IR text. 
/// enum CommentFlag { ReloadReuse = 0x1 }; enum MIFlag { NoFlags = 0, FrameSetup = 1 << 0, // Instruction is used as a part of // function frame setup code. BundledPred = 1 << 1, // Instruction has bundled predecessors. BundledSucc = 1 << 2 // Instruction has bundled successors. }; private: const MCInstrDesc *MCID; // Instruction descriptor. MachineBasicBlock *Parent; // Pointer to the owning basic block. // Operands are allocated by an ArrayRecycler. MachineOperand *Operands; // Pointer to the first operand. unsigned NumOperands; // Number of operands on instruction. typedef ArrayRecycler<MachineOperand>::Capacity OperandCapacity; OperandCapacity CapOperands; // Capacity of the Operands array. uint8_t Flags; // Various bits of additional // information about machine // instruction. uint8_t AsmPrinterFlags; // Various bits of information used by // the AsmPrinter to emit helpful // comments. This is *not* semantic // information. Do not use this for // anything other than to convey comment // information to AsmPrinter. uint8_t NumMemRefs; // Information on memory references. mmo_iterator MemRefs; DebugLoc debugLoc; // Source line information. MachineInstr(const MachineInstr&) LLVM_DELETED_FUNCTION; void operator=(const MachineInstr&) LLVM_DELETED_FUNCTION; // Use MachineFunction::DeleteMachineInstr() instead. ~MachineInstr() LLVM_DELETED_FUNCTION; // Intrusive list support friend struct ilist_traits<MachineInstr>; friend struct ilist_traits<MachineBasicBlock>; void setParent(MachineBasicBlock *P) { Parent = P; } /// MachineInstr ctor - This constructor creates a copy of the given /// MachineInstr in the given MachineFunction. MachineInstr(MachineFunction &, const MachineInstr &); /// MachineInstr ctor - This constructor create a MachineInstr and add the /// implicit operands. It reserves space for number of operands specified by /// MCInstrDesc. An explicit DebugLoc is supplied. 
MachineInstr(MachineFunction&, const MCInstrDesc &MCID, const DebugLoc dl, bool NoImp = false); // MachineInstrs are pool-allocated and owned by MachineFunction. friend class MachineFunction; public: const MachineBasicBlock* getParent() const { return Parent; } MachineBasicBlock* getParent() { return Parent; } /// getAsmPrinterFlags - Return the asm printer flags bitvector. /// uint8_t getAsmPrinterFlags() const { return AsmPrinterFlags; } /// clearAsmPrinterFlags - clear the AsmPrinter bitvector /// void clearAsmPrinterFlags() { AsmPrinterFlags = 0; } /// getAsmPrinterFlag - Return whether an AsmPrinter flag is set. /// bool getAsmPrinterFlag(CommentFlag Flag) const { return AsmPrinterFlags & Flag; } /// setAsmPrinterFlag - Set a flag for the AsmPrinter. /// void setAsmPrinterFlag(CommentFlag Flag) { AsmPrinterFlags |= (uint8_t)Flag; } /// clearAsmPrinterFlag - clear specific AsmPrinter flags /// void clearAsmPrinterFlag(CommentFlag Flag) { AsmPrinterFlags &= ~Flag; } /// getFlags - Return the MI flags bitvector. uint8_t getFlags() const { return Flags; } /// getFlag - Return whether an MI flag is set. bool getFlag(MIFlag Flag) const { return Flags & Flag; } /// setFlag - Set a MI flag. void setFlag(MIFlag Flag) { Flags |= (uint8_t)Flag; } void setFlags(unsigned flags) { // Filter out the automatically maintained flags. unsigned Mask = BundledPred | BundledSucc; Flags = (Flags & Mask) | (flags & ~Mask); } /// clearFlag - Clear a MI flag. void clearFlag(MIFlag Flag) { Flags &= ~((uint8_t)Flag); } /// isInsideBundle - Return true if MI is in a bundle (but not the first MI /// in a bundle). /// /// A bundle looks like this before it's finalized: /// ---------------- /// | MI | /// ---------------- /// | /// ---------------- /// | MI * | /// ---------------- /// | /// ---------------- /// | MI * | /// ---------------- /// In this case, the first MI starts a bundle but is not inside a bundle, the /// next 2 MIs are considered "inside" the bundle. 
/// /// After a bundle is finalized, it looks like this: /// ---------------- /// | Bundle | /// ---------------- /// | /// ---------------- /// | MI * | /// ---------------- /// | /// ---------------- /// | MI * | /// ---------------- /// | /// ---------------- /// | MI * | /// ---------------- /// The first instruction has the special opcode "BUNDLE". It's not "inside" /// a bundle, but the next three MIs are. bool isInsideBundle() const { return getFlag(BundledPred); } /// isBundled - Return true if this instruction part of a bundle. This is true /// if either itself or its following instruction is marked "InsideBundle". bool isBundled() const { return isBundledWithPred() || isBundledWithSucc(); } /// Return true if this instruction is part of a bundle, and it is not the /// first instruction in the bundle. bool isBundledWithPred() const { return getFlag(BundledPred); } /// Return true if this instruction is part of a bundle, and it is not the /// last instruction in the bundle. bool isBundledWithSucc() const { return getFlag(BundledSucc); } /// Bundle this instruction with its predecessor. This can be an unbundled /// instruction, or it can be the first instruction in a bundle. void bundleWithPred(); /// Bundle this instruction with its successor. This can be an unbundled /// instruction, or it can be the last instruction in a bundle. void bundleWithSucc(); /// Break bundle above this instruction. void unbundleFromPred(); /// Break bundle below this instruction. void unbundleFromSucc(); /// getDebugLoc - Returns the debug location id of this MachineInstr. /// DebugLoc getDebugLoc() const { return debugLoc; } /// \brief Return the debug variable referenced by /// this DBG_VALUE instruction. 
DIVariable getDebugVariable() const { assert(isDebugValue() && "not a DBG_VALUE"); DIVariable Var(getOperand(2).getMetadata()); assert(Var.Verify() && "not a DIVariable"); return Var; } /// \brief Return the complex address expression referenced by /// this DBG_VALUE instruction. DIExpression getDebugExpression() const { assert(isDebugValue() && "not a DBG_VALUE"); DIExpression Expr(getOperand(3).getMetadata()); assert(Expr.Verify() && "not a DIExpression"); return Expr; } /// emitError - Emit an error referring to the source location of this /// instruction. This should only be used for inline assembly that is somehow /// impossible to compile. Other errors should have been handled much /// earlier. /// /// If this method returns, the caller should try to recover from the error. /// void emitError(StringRef Msg) const; /// getDesc - Returns the target instruction descriptor of this /// MachineInstr. const MCInstrDesc &getDesc() const { return *MCID; } /// getOpcode - Returns the opcode of this MachineInstr. /// int getOpcode() const { return MCID->Opcode; } /// Access to explicit operands of the instruction. /// unsigned getNumOperands() const { return NumOperands; } const MachineOperand& getOperand(unsigned i) const { assert(i < getNumOperands() && "getOperand() out of range!"); return Operands[i]; } MachineOperand& getOperand(unsigned i) { assert(i < getNumOperands() && "getOperand() out of range!"); return Operands[i]; } /// getNumExplicitOperands - Returns the number of non-implicit operands. /// unsigned getNumExplicitOperands() const; /// iterator/begin/end - Iterate over all operands of a machine instruction. 
typedef MachineOperand *mop_iterator; typedef const MachineOperand *const_mop_iterator; mop_iterator operands_begin() { return Operands; } mop_iterator operands_end() { return Operands + NumOperands; } const_mop_iterator operands_begin() const { return Operands; } const_mop_iterator operands_end() const { return Operands + NumOperands; } iterator_range<mop_iterator> operands() { return iterator_range<mop_iterator>(operands_begin(), operands_end()); } iterator_range<const_mop_iterator> operands() const { return iterator_range<const_mop_iterator>(operands_begin(), operands_end()); } iterator_range<mop_iterator> explicit_operands() { return iterator_range<mop_iterator>( operands_begin(), operands_begin() + getNumExplicitOperands()); } iterator_range<const_mop_iterator> explicit_operands() const { return iterator_range<const_mop_iterator>( operands_begin(), operands_begin() + getNumExplicitOperands()); } iterator_range<mop_iterator> implicit_operands() { return iterator_range<mop_iterator>(explicit_operands().end(), operands_end()); } iterator_range<const_mop_iterator> implicit_operands() const { return iterator_range<const_mop_iterator>(explicit_operands().end(), operands_end()); } iterator_range<mop_iterator> defs() { return iterator_range<mop_iterator>( operands_begin(), operands_begin() + getDesc().getNumDefs()); } iterator_range<const_mop_iterator> defs() const { return iterator_range<const_mop_iterator>( operands_begin(), operands_begin() + getDesc().getNumDefs()); } iterator_range<mop_iterator> uses() { return iterator_range<mop_iterator>( operands_begin() + getDesc().getNumDefs(), operands_end()); } iterator_range<const_mop_iterator> uses() const { return iterator_range<const_mop_iterator>( operands_begin() + getDesc().getNumDefs(), operands_end()); } /// Access to memory operands of the instruction mmo_iterator memoperands_begin() const { return MemRefs; } mmo_iterator memoperands_end() const { return MemRefs + NumMemRefs; } bool memoperands_empty() const { 
return NumMemRefs == 0; } iterator_range<mmo_iterator> memoperands() { return iterator_range<mmo_iterator>(memoperands_begin(), memoperands_end()); } iterator_range<mmo_iterator> memoperands() const { return iterator_range<mmo_iterator>(memoperands_begin(), memoperands_end()); } /// hasOneMemOperand - Return true if this instruction has exactly one /// MachineMemOperand. bool hasOneMemOperand() const { return NumMemRefs == 1; } /// API for querying MachineInstr properties. They are the same as MCInstrDesc /// queries but they are bundle aware. enum QueryType { IgnoreBundle, // Ignore bundles AnyInBundle, // Return true if any instruction in bundle has property AllInBundle // Return true if all instructions in bundle have property }; /// hasProperty - Return true if the instruction (or in the case of a bundle, /// the instructions inside the bundle) has the specified property. /// The first argument is the property being queried. /// The second argument indicates whether the query should look inside /// instruction bundles. bool hasProperty(unsigned MCFlag, QueryType Type = AnyInBundle) const { // Inline the fast path for unbundled or bundle-internal instructions. if (Type == IgnoreBundle || !isBundled() || isBundledWithPred()) return getDesc().getFlags() & (1 << MCFlag); // If this is the first instruction in a bundle, take the slow path. return hasPropertyInBundle(1 << MCFlag, Type); } /// isVariadic - Return true if this instruction can have a variable number of /// operands. In this case, the variable operands will be after the normal /// operands but before the implicit definitions and uses (if any are /// present). bool isVariadic(QueryType Type = IgnoreBundle) const { return hasProperty(MCID::Variadic, Type); } /// hasOptionalDef - Set if this instruction has an optional definition, e.g. /// ARM instructions which can set condition code if 's' bit is set. 
bool hasOptionalDef(QueryType Type = IgnoreBundle) const { return hasProperty(MCID::HasOptionalDef, Type); } /// isPseudo - Return true if this is a pseudo instruction that doesn't /// correspond to a real machine instruction. /// bool isPseudo(QueryType Type = IgnoreBundle) const { return hasProperty(MCID::Pseudo, Type); } bool isReturn(QueryType Type = AnyInBundle) const { return hasProperty(MCID::Return, Type); } bool isCall(QueryType Type = AnyInBundle) const { return hasProperty(MCID::Call, Type); } /// isBarrier - Returns true if the specified instruction stops control flow /// from executing the instruction immediately following it. Examples include /// unconditional branches and return instructions. bool isBarrier(QueryType Type = AnyInBundle) const { return hasProperty(MCID::Barrier, Type); } /// isTerminator - Returns true if this instruction part of the terminator for /// a basic block. Typically this is things like return and branch /// instructions. /// /// Various passes use this to insert code into the bottom of a basic block, /// but before control flow occurs. bool isTerminator(QueryType Type = AnyInBundle) const { return hasProperty(MCID::Terminator, Type); } /// isBranch - Returns true if this is a conditional, unconditional, or /// indirect branch. Predicates below can be used to discriminate between /// these cases, and the TargetInstrInfo::AnalyzeBranch method can be used to /// get more information. bool isBranch(QueryType Type = AnyInBundle) const { return hasProperty(MCID::Branch, Type); } /// isIndirectBranch - Return true if this is an indirect branch, such as a /// branch through a register. bool isIndirectBranch(QueryType Type = AnyInBundle) const { return hasProperty(MCID::IndirectBranch, Type); } /// isConditionalBranch - Return true if this is a branch which may fall /// through to the next instruction or may transfer control flow to some other /// block. 
The TargetInstrInfo::AnalyzeBranch method can be used to get more /// information about this branch. bool isConditionalBranch(QueryType Type = AnyInBundle) const { return isBranch(Type) & !isBarrier(Type) & !isIndirectBranch(Type); } /// isUnconditionalBranch - Return true if this is a branch which always /// transfers control flow to some other block. The /// TargetInstrInfo::AnalyzeBranch method can be used to get more information /// about this branch. bool isUnconditionalBranch(QueryType Type = AnyInBundle) const { return isBranch(Type) & isBarrier(Type) & !isIndirectBranch(Type); } /// Return true if this instruction has a predicate operand that /// controls execution. It may be set to 'always', or may be set to other /// values. There are various methods in TargetInstrInfo that can be used to /// control and modify the predicate in this instruction. bool isPredicable(QueryType Type = AllInBundle) const { // If it's a bundle than all bundled instructions must be predicable for this // to return true. return hasProperty(MCID::Predicable, Type); } /// isCompare - Return true if this instruction is a comparison. bool isCompare(QueryType Type = IgnoreBundle) const { return hasProperty(MCID::Compare, Type); } /// isMoveImmediate - Return true if this instruction is a move immediate /// (including conditional moves) instruction. bool isMoveImmediate(QueryType Type = IgnoreBundle) const { return hasProperty(MCID::MoveImm, Type); } /// isBitcast - Return true if this instruction is a bitcast instruction. /// bool isBitcast(QueryType Type = IgnoreBundle) const { return hasProperty(MCID::Bitcast, Type); } /// isSelect - Return true if this instruction is a select instruction. /// bool isSelect(QueryType Type = IgnoreBundle) const { return hasProperty(MCID::Select, Type); } /// isNotDuplicable - Return true if this instruction cannot be safely /// duplicated. 
For example, if the instruction has a unique labels attached /// to it, duplicating it would cause multiple definition errors. bool isNotDuplicable(QueryType Type = AnyInBundle) const { return hasProperty(MCID::NotDuplicable, Type); } /// hasDelaySlot - Returns true if the specified instruction has a delay slot /// which must be filled by the code generator. bool hasDelaySlot(QueryType Type = AnyInBundle) const { return hasProperty(MCID::DelaySlot, Type); } /// canFoldAsLoad - Return true for instructions that can be folded as /// memory operands in other instructions. The most common use for this /// is instructions that are simple loads from memory that don't modify /// the loaded value in any way, but it can also be used for instructions /// that can be expressed as constant-pool loads, such as V_SETALLONES /// on x86, to allow them to be folded when it is beneficial. /// This should only be set on instructions that return a value in their /// only virtual register definition. bool canFoldAsLoad(QueryType Type = IgnoreBundle) const { return hasProperty(MCID::FoldableAsLoad, Type); } /// \brief Return true if this instruction behaves /// the same way as the generic REG_SEQUENCE instructions. /// E.g., on ARM, /// dX VMOVDRR rY, rZ /// is equivalent to /// dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1. /// /// Note that for the optimizers to be able to take advantage of /// this property, TargetInstrInfo::getRegSequenceLikeInputs has to be /// override accordingly. bool isRegSequenceLike(QueryType Type = IgnoreBundle) const { return hasProperty(MCID::RegSequence, Type); } /// \brief Return true if this instruction behaves /// the same way as the generic EXTRACT_SUBREG instructions. 
/// E.g., on ARM, /// rX, rY VMOVRRD dZ /// is equivalent to two EXTRACT_SUBREG: /// rX = EXTRACT_SUBREG dZ, ssub_0 /// rY = EXTRACT_SUBREG dZ, ssub_1 /// /// Note that for the optimizers to be able to take advantage of /// this property, TargetInstrInfo::getExtractSubregLikeInputs has to be /// override accordingly. bool isExtractSubregLike(QueryType Type = IgnoreBundle) const { return hasProperty(MCID::ExtractSubreg, Type); } /// \brief Return true if this instruction behaves /// the same way as the generic INSERT_SUBREG instructions. /// E.g., on ARM, /// dX = VSETLNi32 dY, rZ, Imm /// is equivalent to a INSERT_SUBREG: /// dX = INSERT_SUBREG dY, rZ, translateImmToSubIdx(Imm) /// /// Note that for the optimizers to be able to take advantage of /// this property, TargetInstrInfo::getInsertSubregLikeInputs has to be /// override accordingly. bool isInsertSubregLike(QueryType Type = IgnoreBundle) const { return hasProperty(MCID::InsertSubreg, Type); } //===--------------------------------------------------------------------===// // Side Effect Analysis //===--------------------------------------------------------------------===// /// mayLoad - Return true if this instruction could possibly read memory. /// Instructions with this flag set are not necessarily simple load /// instructions, they may load a value and modify it, for example. bool mayLoad(QueryType Type = AnyInBundle) const { if (isInlineAsm()) { unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm(); if (ExtraInfo & InlineAsm::Extra_MayLoad) return true; } return hasProperty(MCID::MayLoad, Type); } /// mayStore - Return true if this instruction could possibly modify memory. /// Instructions with this flag set are not necessarily simple store /// instructions, they may store a modified value based on their operands, or /// may not actually modify anything, for example. 
bool mayStore(QueryType Type = AnyInBundle) const { if (isInlineAsm()) { unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm(); if (ExtraInfo & InlineAsm::Extra_MayStore) return true; } return hasProperty(MCID::MayStore, Type); } //===--------------------------------------------------------------------===// // Flags that indicate whether an instruction can be modified by a method. //===--------------------------------------------------------------------===// /// isCommutable - Return true if this may be a 2- or 3-address /// instruction (of the form "X = op Y, Z, ..."), which produces the same /// result if Y and Z are exchanged. If this flag is set, then the /// TargetInstrInfo::commuteInstruction method may be used to hack on the /// instruction. /// /// Note that this flag may be set on instructions that are only commutable /// sometimes. In these cases, the call to commuteInstruction will fail. /// Also note that some instructions require non-trivial modification to /// commute them. bool isCommutable(QueryType Type = IgnoreBundle) const { return hasProperty(MCID::Commutable, Type); } /// isConvertibleTo3Addr - Return true if this is a 2-address instruction /// which can be changed into a 3-address instruction if needed. Doing this /// transformation can be profitable in the register allocator, because it /// means that the instruction can use a 2-address form if possible, but /// degrade into a less efficient form if the source and dest register cannot /// be assigned to the same register. For example, this allows the x86 /// backend to turn a "shl reg, 3" instruction into an LEA instruction, which /// is the same speed as the shift but has bigger code size. /// /// If this returns true, then the target must implement the /// TargetInstrInfo::convertToThreeAddress method for this instruction, which /// is allowed to fail if the transformation isn't valid for this specific /// instruction (e.g. shl reg, 4 on x86). 
/// bool isConvertibleTo3Addr(QueryType Type = IgnoreBundle) const { return hasProperty(MCID::ConvertibleTo3Addr, Type); } /// usesCustomInsertionHook - Return true if this instruction requires /// custom insertion support when the DAG scheduler is inserting it into a /// machine basic block. If this is true for the instruction, it basically /// means that it is a pseudo instruction used at SelectionDAG time that is /// expanded out into magic code by the target when MachineInstrs are formed. /// /// If this is true, the TargetLoweringInfo::InsertAtEndOfBasicBlock method /// is used to insert this into the MachineBasicBlock. bool usesCustomInsertionHook(QueryType Type = IgnoreBundle) const { return hasProperty(MCID::UsesCustomInserter, Type); } /// hasPostISelHook - Return true if this instruction requires *adjustment* /// after instruction selection by calling a target hook. For example, this /// can be used to fill in ARM 's' optional operand depending on whether /// the conditional flag register is used. bool hasPostISelHook(QueryType Type = IgnoreBundle) const { return hasProperty(MCID::HasPostISelHook, Type); } /// isRematerializable - Returns true if this instruction is a candidate for /// remat. This flag is deprecated, please don't use it anymore. If this /// flag is set, the isReallyTriviallyReMaterializable() method is called to /// verify the instruction is really rematable. bool isRematerializable(QueryType Type = AllInBundle) const { // It's only possible to re-mat a bundle if all bundled instructions are // re-materializable. return hasProperty(MCID::Rematerializable, Type); } /// isAsCheapAsAMove - Returns true if this instruction has the same cost (or /// less) than a move instruction. This is useful during certain types of /// optimizations (e.g., remat during two-address conversion or machine licm) /// where we would like to remat or hoist the instruction, but not if it costs /// more than moving the instruction into the appropriate register. 
Note, we /// are not marking copies from and to the same register class with this flag. bool isAsCheapAsAMove(QueryType Type = AllInBundle) const { // Only returns true for a bundle if all bundled instructions are cheap. return hasProperty(MCID::CheapAsAMove, Type); } /// hasExtraSrcRegAllocReq - Returns true if this instruction source operands /// have special register allocation requirements that are not captured by the /// operand register classes. e.g. ARM::STRD's two source registers must be an /// even / odd pair, ARM::STM registers have to be in ascending order. /// Post-register allocation passes should not attempt to change allocations /// for sources of instructions with this flag. bool hasExtraSrcRegAllocReq(QueryType Type = AnyInBundle) const { return hasProperty(MCID::ExtraSrcRegAllocReq, Type); } /// hasExtraDefRegAllocReq - Returns true if this instruction def operands /// have special register allocation requirements that are not captured by the /// operand register classes. e.g. ARM::LDRD's two def registers must be an /// even / odd pair, ARM::LDM registers have to be in ascending order. /// Post-register allocation passes should not attempt to change allocations /// for definitions of instructions with this flag. bool hasExtraDefRegAllocReq(QueryType Type = AnyInBundle) const { return hasProperty(MCID::ExtraDefRegAllocReq, Type); } enum MICheckType { CheckDefs, // Check all operands for equality CheckKillDead, // Check all operands including kill / dead markers IgnoreDefs, // Ignore all definitions IgnoreVRegDefs // Ignore virtual register definitions }; /// isIdenticalTo - Return true if this instruction is identical to (same /// opcode and same operands as) the specified instruction. bool isIdenticalTo(const MachineInstr *Other, MICheckType Check = CheckDefs) const; /// Unlink 'this' from the containing basic block, and return it without /// deleting it. 
/// /// This function can not be used on bundled instructions, use /// removeFromBundle() to remove individual instructions from a bundle. MachineInstr *removeFromParent(); /// Unlink this instruction from its basic block and return it without /// deleting it. /// /// If the instruction is part of a bundle, the other instructions in the /// bundle remain bundled. MachineInstr *removeFromBundle(); /// Unlink 'this' from the containing basic block and delete it. /// /// If this instruction is the header of a bundle, the whole bundle is erased. /// This function can not be used for instructions inside a bundle, use /// eraseFromBundle() to erase individual bundled instructions. void eraseFromParent(); /// Unlink 'this' from the containing basic block and delete it. /// /// For all definitions mark their uses in DBG_VALUE nodes /// as undefined. Otherwise like eraseFromParent(). void eraseFromParentAndMarkDBGValuesForRemoval(); /// Unlink 'this' form its basic block and delete it. /// /// If the instruction is part of a bundle, the other instructions in the /// bundle remain bundled. void eraseFromBundle(); bool isEHLabel() const { return getOpcode() == TargetOpcode::EH_LABEL; } bool isGCLabel() const { return getOpcode() == TargetOpcode::GC_LABEL; } /// isLabel - Returns true if the MachineInstr represents a label. /// bool isLabel() const { return isEHLabel() || isGCLabel(); } bool isCFIInstruction() const { return getOpcode() == TargetOpcode::CFI_INSTRUCTION; } // True if the instruction represents a position in the function. bool isPosition() const { return isLabel() || isCFIInstruction(); } bool isDebugValue() const { return getOpcode() == TargetOpcode::DBG_VALUE; } /// A DBG_VALUE is indirect iff the first operand is a register and /// the second operand is an immediate. 
bool isIndirectDebugValue() const { return isDebugValue() && getOperand(0).isReg() && getOperand(1).isImm(); } bool isPHI() const { return getOpcode() == TargetOpcode::PHI; } bool isKill() const { return getOpcode() == TargetOpcode::KILL; } bool isImplicitDef() const { return getOpcode()==TargetOpcode::IMPLICIT_DEF; } bool isInlineAsm() const { return getOpcode() == TargetOpcode::INLINEASM; } bool isMSInlineAsm() const { return getOpcode() == TargetOpcode::INLINEASM && getInlineAsmDialect(); } bool isStackAligningInlineAsm() const; InlineAsm::AsmDialect getInlineAsmDialect() const; bool isInsertSubreg() const { return getOpcode() == TargetOpcode::INSERT_SUBREG; } bool isSubregToReg() const { return getOpcode() == TargetOpcode::SUBREG_TO_REG; } bool isRegSequence() const { return getOpcode() == TargetOpcode::REG_SEQUENCE; } bool isBundle() const { return getOpcode() == TargetOpcode::BUNDLE; } bool isCopy() const { return getOpcode() == TargetOpcode::COPY; } bool isFullCopy() const { return isCopy() && !getOperand(0).getSubReg() && !getOperand(1).getSubReg(); } bool isExtractSubreg() const { return getOpcode() == TargetOpcode::EXTRACT_SUBREG; } /// isCopyLike - Return true if the instruction behaves like a copy. /// This does not include native copy instructions. bool isCopyLike() const { return isCopy() || isSubregToReg(); } /// isIdentityCopy - Return true is the instruction is an identity copy. bool isIdentityCopy() const { return isCopy() && getOperand(0).getReg() == getOperand(1).getReg() && getOperand(0).getSubReg() == getOperand(1).getSubReg(); } /// isTransient - Return true if this is a transient instruction that is /// either very likely to be eliminated during register allocation (such as /// copy-like instructions), or if this instruction doesn't have an /// execution-time cost. bool isTransient() const { switch(getOpcode()) { default: return false; // Copy-like instructions are usually eliminated during register allocation. 
case TargetOpcode::PHI: case TargetOpcode::COPY: case TargetOpcode::INSERT_SUBREG: case TargetOpcode::SUBREG_TO_REG: case TargetOpcode::REG_SEQUENCE: // Pseudo-instructions that don't produce any real output. case TargetOpcode::IMPLICIT_DEF: case TargetOpcode::KILL: case TargetOpcode::CFI_INSTRUCTION: case TargetOpcode::EH_LABEL: case TargetOpcode::GC_LABEL: case TargetOpcode::DBG_VALUE: return true; } } /// Return the number of instructions inside the MI bundle, excluding the /// bundle header. /// /// This is the number of instructions that MachineBasicBlock::iterator /// skips, 0 for unbundled instructions. unsigned getBundleSize() const; /// readsRegister - Return true if the MachineInstr reads the specified /// register. If TargetRegisterInfo is passed, then it also checks if there /// is a read of a super-register. /// This does not count partial redefines of virtual registers as reads: /// %reg1024:6 = OP. bool readsRegister(unsigned Reg, const TargetRegisterInfo *TRI = nullptr) const { return findRegisterUseOperandIdx(Reg, false, TRI) != -1; } /// readsVirtualRegister - Return true if the MachineInstr reads the specified /// virtual register. Take into account that a partial define is a /// read-modify-write operation. bool readsVirtualRegister(unsigned Reg) const { return readsWritesVirtualRegister(Reg).first; } /// readsWritesVirtualRegister - Return a pair of bools (reads, writes) /// indicating if this instruction reads or writes Reg. This also considers /// partial defines. /// If Ops is not null, all operand indices for Reg are added. std::pair<bool,bool> readsWritesVirtualRegister(unsigned Reg, SmallVectorImpl<unsigned> *Ops = nullptr) const; /// killsRegister - Return true if the MachineInstr kills the specified /// register. If TargetRegisterInfo is passed, then it also checks if there is /// a kill of a super-register. 
bool killsRegister(unsigned Reg, const TargetRegisterInfo *TRI = nullptr) const { return findRegisterUseOperandIdx(Reg, true, TRI) != -1; } /// definesRegister - Return true if the MachineInstr fully defines the /// specified register. If TargetRegisterInfo is passed, then it also checks /// if there is a def of a super-register. /// NOTE: It's ignoring subreg indices on virtual registers. bool definesRegister(unsigned Reg, const TargetRegisterInfo *TRI = nullptr) const { return findRegisterDefOperandIdx(Reg, false, false, TRI) != -1; } /// modifiesRegister - Return true if the MachineInstr modifies (fully define /// or partially define) the specified register. /// NOTE: It's ignoring subreg indices on virtual registers. bool modifiesRegister(unsigned Reg, const TargetRegisterInfo *TRI) const { return findRegisterDefOperandIdx(Reg, false, true, TRI) != -1; } /// registerDefIsDead - Returns true if the register is dead in this machine /// instruction. If TargetRegisterInfo is passed, then it also checks /// if there is a dead def of a super-register. bool registerDefIsDead(unsigned Reg, const TargetRegisterInfo *TRI = nullptr) const { return findRegisterDefOperandIdx(Reg, true, false, TRI) != -1; } /// findRegisterUseOperandIdx() - Returns the operand index that is a use of /// the specific register or -1 if it is not found. It further tightens /// the search criteria to a use that kills the register if isKill is true. int findRegisterUseOperandIdx(unsigned Reg, bool isKill = false, const TargetRegisterInfo *TRI = nullptr) const; /// findRegisterUseOperand - Wrapper for findRegisterUseOperandIdx, it returns /// a pointer to the MachineOperand rather than an index. MachineOperand *findRegisterUseOperand(unsigned Reg, bool isKill = false, const TargetRegisterInfo *TRI = nullptr) { int Idx = findRegisterUseOperandIdx(Reg, isKill, TRI); return (Idx == -1) ? 
nullptr : &getOperand(Idx); } /// findRegisterDefOperandIdx() - Returns the operand index that is a def of /// the specified register or -1 if it is not found. If isDead is true, defs /// that are not dead are skipped. If Overlap is true, then it also looks for /// defs that merely overlap the specified register. If TargetRegisterInfo is /// non-null, then it also checks if there is a def of a super-register. /// This may also return a register mask operand when Overlap is true. int findRegisterDefOperandIdx(unsigned Reg, bool isDead = false, bool Overlap = false, const TargetRegisterInfo *TRI = nullptr) const; /// findRegisterDefOperand - Wrapper for findRegisterDefOperandIdx, it returns /// a pointer to the MachineOperand rather than an index. MachineOperand *findRegisterDefOperand(unsigned Reg, bool isDead = false, const TargetRegisterInfo *TRI = nullptr) { int Idx = findRegisterDefOperandIdx(Reg, isDead, false, TRI); return (Idx == -1) ? nullptr : &getOperand(Idx); } /// findFirstPredOperandIdx() - Find the index of the first operand in the /// operand list that is used to represent the predicate. It returns -1 if /// none is found. int findFirstPredOperandIdx() const; /// findInlineAsmFlagIdx() - Find the index of the flag word operand that /// corresponds to operand OpIdx on an inline asm instruction. Returns -1 if /// getOperand(OpIdx) does not belong to an inline asm operand group. /// /// If GroupNo is not NULL, it will receive the number of the operand group /// containing OpIdx. /// /// The flag operand is an immediate that can be decoded with methods like /// InlineAsm::hasRegClassConstraint(). /// int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo = nullptr) const; /// getRegClassConstraint - Compute the static register class constraint for /// operand OpIdx. For normal instructions, this is derived from the /// MCInstrDesc. For inline assembly it is derived from the flag words. 
/// /// Returns NULL if the static register classs constraint cannot be /// determined. /// const TargetRegisterClass* getRegClassConstraint(unsigned OpIdx, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const; /// \brief Applies the constraints (def/use) implied by this MI on \p Reg to /// the given \p CurRC. /// If \p ExploreBundle is set and MI is part of a bundle, all the /// instructions inside the bundle will be taken into account. In other words, /// this method accumulates all the constrains of the operand of this MI and /// the related bundle if MI is a bundle or inside a bundle. /// /// Returns the register class that statisfies both \p CurRC and the /// constraints set by MI. Returns NULL if such a register class does not /// exist. /// /// \pre CurRC must not be NULL. const TargetRegisterClass *getRegClassConstraintEffectForVReg( unsigned Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ExploreBundle = false) const; /// \brief Applies the constraints (def/use) implied by the \p OpIdx operand /// to the given \p CurRC. /// /// Returns the register class that statisfies both \p CurRC and the /// constraints set by \p OpIdx MI. Returns NULL if such a register class /// does not exist. /// /// \pre CurRC must not be NULL. /// \pre The operand at \p OpIdx must be a register. const TargetRegisterClass * getRegClassConstraintEffect(unsigned OpIdx, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const; /// tieOperands - Add a tie between the register operands at DefIdx and /// UseIdx. The tie will cause the register allocator to ensure that the two /// operands are assigned the same physical register. /// /// Tied operands are managed automatically for explicit operands in the /// MCInstrDesc. This method is for exceptional cases like inline asm. 
void tieOperands(unsigned DefIdx, unsigned UseIdx); /// findTiedOperandIdx - Given the index of a tied register operand, find the /// operand it is tied to. Defs are tied to uses and vice versa. Returns the /// index of the tied operand which must exist. unsigned findTiedOperandIdx(unsigned OpIdx) const; /// isRegTiedToUseOperand - Given the index of a register def operand, /// check if the register def is tied to a source operand, due to either /// two-address elimination or inline assembly constraints. Returns the /// first tied use operand index by reference if UseOpIdx is not null. bool isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx = nullptr) const { const MachineOperand &MO = getOperand(DefOpIdx); if (!MO.isReg() || !MO.isDef() || !MO.isTied()) return false; if (UseOpIdx) *UseOpIdx = findTiedOperandIdx(DefOpIdx); return true; } /// isRegTiedToDefOperand - Return true if the use operand of the specified /// index is tied to a def operand. It also returns the def operand index by /// reference if DefOpIdx is not null. bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx = nullptr) const { const MachineOperand &MO = getOperand(UseOpIdx); if (!MO.isReg() || !MO.isUse() || !MO.isTied()) return false; if (DefOpIdx) *DefOpIdx = findTiedOperandIdx(UseOpIdx); return true; } /// clearKillInfo - Clears kill flags on all operands. /// void clearKillInfo(); /// substituteRegister - Replace all occurrences of FromReg with ToReg:SubIdx, /// properly composing subreg indices where necessary. void substituteRegister(unsigned FromReg, unsigned ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo); /// addRegisterKilled - We have determined MI kills a register. Look for the /// operand that uses it and mark it as IsKill. If AddIfNotFound is true, /// add a implicit operand if it's not found. Returns true if the operand /// exists / is added. 
bool addRegisterKilled(unsigned IncomingReg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound = false); /// clearRegisterKills - Clear all kill flags affecting Reg. If RegInfo is /// provided, this includes super-register kills. void clearRegisterKills(unsigned Reg, const TargetRegisterInfo *RegInfo); /// addRegisterDead - We have determined MI defined a register without a use. /// Look for the operand that defines it and mark it as IsDead. If /// AddIfNotFound is true, add a implicit operand if it's not found. Returns /// true if the operand exists / is added. bool addRegisterDead(unsigned Reg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound = false); /// Mark all subregister defs of register @p Reg with the undef flag. /// This function is used when we determined to have a subregister def in an /// otherwise undefined super register. void addRegisterDefReadUndef(unsigned Reg); /// addRegisterDefined - We have determined MI defines a register. Make sure /// there is an operand defining Reg. void addRegisterDefined(unsigned Reg, const TargetRegisterInfo *RegInfo = nullptr); /// setPhysRegsDeadExcept - Mark every physreg used by this instruction as /// dead except those in the UsedRegs list. /// /// On instructions with register mask operands, also add implicit-def /// operands for all registers in UsedRegs. void setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs, const TargetRegisterInfo &TRI); /// isSafeToMove - Return true if it is safe to move this instruction. If /// SawStore is set to true, it means that there is a store (or call) between /// the instruction's location and its intended destination. bool isSafeToMove(const TargetInstrInfo *TII, AliasAnalysis *AA, bool &SawStore) const; /// hasOrderedMemoryRef - Return true if this instruction may have an ordered /// or volatile memory reference, or if the information describing the memory /// reference is not available. Return false if it is known to have no /// ordered or volatile memory references. 
bool hasOrderedMemoryRef() const; /// isInvariantLoad - Return true if this instruction is loading from a /// location whose value is invariant across the function. For example, /// loading a value from the constant pool or from the argument area of /// a function if it does not change. This should only return true of *all* /// loads the instruction does are invariant (if it does multiple loads). bool isInvariantLoad(AliasAnalysis *AA) const; /// isConstantValuePHI - If the specified instruction is a PHI that always /// merges together the same virtual register, return the register, otherwise /// return 0. unsigned isConstantValuePHI() const; /// hasUnmodeledSideEffects - Return true if this instruction has side /// effects that are not modeled by mayLoad / mayStore, etc. /// For all instructions, the property is encoded in MCInstrDesc::Flags /// (see MCInstrDesc::hasUnmodeledSideEffects(). The only exception is /// INLINEASM instruction, in which case the side effect property is encoded /// in one of its operands (see InlineAsm::Extra_HasSideEffect). /// bool hasUnmodeledSideEffects() const; /// allDefsAreDead - Return true if all the defs of this instruction are dead. /// bool allDefsAreDead() const; /// copyImplicitOps - Copy implicit register operands from specified /// instruction to this instruction. void copyImplicitOps(MachineFunction &MF, const MachineInstr *MI); // // Debugging support // void print(raw_ostream &OS, const TargetMachine *TM = nullptr, bool SkipOpers = false) const; void dump() const; //===--------------------------------------------------------------------===// // Accessors used to build up machine instructions. /// Add the specified operand to the instruction. If it is an implicit /// operand, it is added to the end of the operand list. If it is an /// explicit operand it is added at the end of the explicit operand list /// (before the first implicit operand). 
/// /// MF must be the machine function that was used to allocate this /// instruction. /// /// MachineInstrBuilder provides a more convenient interface for creating /// instructions and adding operands. void addOperand(MachineFunction &MF, const MachineOperand &Op); /// Add an operand without providing an MF reference. This only works for /// instructions that are inserted in a basic block. /// /// MachineInstrBuilder and the two-argument addOperand(MF, MO) should be /// preferred. void addOperand(const MachineOperand &Op); /// setDesc - Replace the instruction descriptor (thus opcode) of /// the current instruction with a new one. /// void setDesc(const MCInstrDesc &tid) { MCID = &tid; } /// setDebugLoc - Replace current source information with new such. /// Avoid using this, the constructor argument is preferable. /// void setDebugLoc(const DebugLoc dl) { debugLoc = dl; assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor"); } /// RemoveOperand - Erase an operand from an instruction, leaving it with one /// fewer operand than it started with. /// void RemoveOperand(unsigned i); /// addMemOperand - Add a MachineMemOperand to the machine instruction. /// This function should be used only occasionally. The setMemRefs function /// is the primary method for setting up a MachineInstr's MemRefs list. void addMemOperand(MachineFunction &MF, MachineMemOperand *MO); /// setMemRefs - Assign this MachineInstr's memory reference descriptor /// list. This does not transfer ownership. void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd) { MemRefs = NewMemRefs; NumMemRefs = uint8_t(NewMemRefsEnd - NewMemRefs); assert(NumMemRefs == NewMemRefsEnd - NewMemRefs && "Too many memrefs"); } private: /// getRegInfo - If this instruction is embedded into a MachineFunction, /// return the MachineRegisterInfo object for the current function, otherwise /// return null. MachineRegisterInfo *getRegInfo(); /// untieRegOperand - Break any tie involving OpIdx. 
void untieRegOperand(unsigned OpIdx) { MachineOperand &MO = getOperand(OpIdx); if (MO.isReg() && MO.isTied()) { getOperand(findTiedOperandIdx(OpIdx)).TiedTo = 0; MO.TiedTo = 0; } } /// addImplicitDefUseOperands - Add all implicit def and use operands to /// this instruction. void addImplicitDefUseOperands(MachineFunction &MF); /// RemoveRegOperandsFromUseLists - Unlink all of the register operands in /// this instruction from their respective use lists. This requires that the /// operands already be on their use lists. void RemoveRegOperandsFromUseLists(MachineRegisterInfo&); /// AddRegOperandsToUseLists - Add all of the register operands in /// this instruction from their respective use lists. This requires that the /// operands not be on their use lists yet. void AddRegOperandsToUseLists(MachineRegisterInfo&); /// hasPropertyInBundle - Slow path for hasProperty when we're dealing with a /// bundle. bool hasPropertyInBundle(unsigned Mask, QueryType Type) const; /// \brief Implements the logic of getRegClassConstraintEffectForVReg for the /// this MI and the given operand index \p OpIdx. /// If the related operand does not constrained Reg, this returns CurRC. const TargetRegisterClass *getRegClassConstraintEffectForVRegImpl( unsigned OpIdx, unsigned Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const; }; /// MachineInstrExpressionTrait - Special DenseMapInfo traits to compare /// MachineInstr* by *value* of the instruction rather than by pointer value. /// The hashing and equality testing functions ignore definitions so this is /// useful for CSE, etc. 
struct MachineInstrExpressionTrait : DenseMapInfo<MachineInstr*> { static inline MachineInstr *getEmptyKey() { return nullptr; } static inline MachineInstr *getTombstoneKey() { return reinterpret_cast<MachineInstr*>(-1); } static unsigned getHashValue(const MachineInstr* const &MI); static bool isEqual(const MachineInstr* const &LHS, const MachineInstr* const &RHS) { if (RHS == getEmptyKey() || RHS == getTombstoneKey() || LHS == getEmptyKey() || LHS == getTombstoneKey()) return LHS == RHS; return LHS->isIdenticalTo(RHS, MachineInstr::IgnoreVRegDefs); } }; //===----------------------------------------------------------------------===// // Debugging Support inline raw_ostream& operator<<(raw_ostream &OS, const MachineInstr &MI) { MI.print(OS); return OS; } } // End llvm namespace #endif
perovic/root
interpreter/llvm/src/include/llvm/CodeGen/MachineInstr.h
C
lgpl-2.1
52,039
/* * Copyright (C) 2011 Cyril Mottier (http://www.cyrilmottier.com) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package greendroid.widget; import greendroid.util.Config; import java.util.LinkedList; import java.util.Queue; import android.content.Context; import android.database.DataSetObserver; import android.os.Handler; import android.os.Parcel; import android.os.Parcelable; import android.util.AttributeSet; import android.util.DisplayMetrics; import android.util.Log; import android.util.SparseArray; import android.view.MotionEvent; import android.view.VelocityTracker; import android.view.View; import android.view.ViewConfiguration; import android.view.ViewGroup; import android.view.animation.DecelerateInterpolator; import android.widget.Scroller; /** * <p> * A View that shows items in a "paged" manner. Pages can be scrolled * horizontally by swiping the View. The PagedView uses a reuse mechanism * similar to the one used by the ListView widget. Pages come from a * {@link PagedAdapter}. * </p> * <p> * Clients may listen to PagedView changes (scrolling, page change, etc.) using * an {@link OnPagedViewChangeListener} . * </p> * <p> * It is usually a good idea to show the user which page is currently on screen. * This can be easily done with a {@link PageIndicator}. 
* </p> * * @author Cyril Mottier */ public class PagedView extends ViewGroup { private static final String LOG_TAG = PagedView.class.getSimpleName(); /** * Clients may listen to changes occurring on a PagedView via this * interface. * * @author Cyril Mottier */ public interface OnPagedViewChangeListener { /** * Notify the client the current page has changed. * * @param pagedView The PagedView that changed its current page * @param previousPage The previously selected page * @param newPage The newly selected page */ void onPageChanged(PagedView pagedView, int previousPage, int newPage); /** * Notify the client the user started tracking. * * @param pagedView The PagedView the user started to track. */ void onStartTracking(PagedView pagedView); /** * Notify the client the user ended tracking. * * @param pagedView The PagedView the user ended to track. */ void onStopTracking(PagedView pagedView); } private static final int INVALID_PAGE = -1; private static final int MINIMUM_PAGE_CHANGE_VELOCITY = 500; private static final int VELOCITY_UNITS = 1000; private static final int FRAME_RATE = 1000 / 60; private final Handler mHandler = new Handler(); private int mPageCount; private int mCurrentPage; private int mTargetPage = INVALID_PAGE; private int mPagingTouchSlop; private int mMinimumVelocity; private int mMaximumVelocity; private int mPageSlop; private boolean mIsBeingDragged; private int mOffsetX; private int mStartMotionX; private int mStartOffsetX; private Scroller mScroller; private VelocityTracker mVelocityTracker; private OnPagedViewChangeListener mOnPageChangeListener; private PagedAdapter mAdapter; private SparseArray<View> mActiveViews = new SparseArray<View>(); private Queue<View> mRecycler = new LinkedList<View>(); public PagedView(Context context) { this(context, null); } public PagedView(Context context, AttributeSet attrs) { this(context, attrs, 0); } public PagedView(Context context, AttributeSet attrs, int defStyle) { super(context, attrs, defStyle); 
initPagedView(); } private void initPagedView() { final Context context = getContext(); mScroller = new Scroller(context, new DecelerateInterpolator()); final ViewConfiguration conf = ViewConfiguration.get(context); // getScaledPagingTouchSlop() only available in API Level 8 mPagingTouchSlop = conf.getScaledTouchSlop() * 2; mMaximumVelocity = conf.getScaledMaximumFlingVelocity(); final DisplayMetrics metrics = context.getResources().getDisplayMetrics(); mMinimumVelocity = (int) (metrics.density * MINIMUM_PAGE_CHANGE_VELOCITY + 0.5f); } @Override protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) { int widthMode = MeasureSpec.getMode(widthMeasureSpec); int heightMode = MeasureSpec.getMode(heightMeasureSpec); int widthSize = MeasureSpec.getSize(widthMeasureSpec); int heightSize = MeasureSpec.getSize(heightMeasureSpec); int childWidth = 0; int childHeight = 0; int itemCount = mAdapter == null ? 0 : mAdapter.getCount(); if (itemCount > 0) { if (widthMode == MeasureSpec.UNSPECIFIED || heightMode == MeasureSpec.UNSPECIFIED) { final View child = obtainView(mCurrentPage); measureChild(child, widthMeasureSpec, heightMeasureSpec); childWidth = child.getMeasuredWidth(); childHeight = child.getMeasuredHeight(); } if (widthMode == MeasureSpec.UNSPECIFIED) { widthSize = childWidth; } if (heightMode == MeasureSpec.UNSPECIFIED) { heightSize = childHeight; } } setMeasuredDimension(widthSize, heightSize); } @Override protected void onSizeChanged(int w, int h, int oldw, int oldh) { super.onSizeChanged(w, h, oldw, oldh); mPageSlop = (int) (w * 0.5); // Make sure the offset adapts itself to mCurrentPage mOffsetX = getOffsetForPage(mCurrentPage); } @Override protected void onLayout(boolean changed, int l, int t, int r, int b) { if (mPageCount <= 0) { return; } final int startPage = getPageForOffset(mOffsetX); final int endPage = getPageForOffset(mOffsetX - getWidth() + 1); recycleViews(startPage, endPage); for (int i = startPage; i <= endPage; i++) { View child = 
mActiveViews.get(i); if (child == null) { child = obtainView(i); } setupView(child, i); } } @Override public boolean onInterceptTouchEvent(MotionEvent ev) { /* * Shortcut the most recurring case: the user is in the dragging state * and he is moving his finger. We want to intercept this motion. */ final int action = ev.getAction(); if (action == MotionEvent.ACTION_MOVE && mIsBeingDragged) { return true; } final int x = (int) ev.getX(); switch (action) { case MotionEvent.ACTION_DOWN: mStartMotionX = x; /* * If currently scrolling and user touches the screen, initiate * drag; otherwise don't. mScroller.isFinished should be false * when being flinged. */ mIsBeingDragged = !mScroller.isFinished(); if (mIsBeingDragged) { mScroller.forceFinished(true); mHandler.removeCallbacks(mScrollerRunnable); } break; case MotionEvent.ACTION_MOVE: /* * mIsBeingDragged == false, otherwise the shortcut would have * caught it. Check whether the user has moved far enough from * his original down touch. */ final int xDiff = (int) Math.abs(x - mStartMotionX); if (xDiff > mPagingTouchSlop) { mIsBeingDragged = true; performStartTracking(x); } break; case MotionEvent.ACTION_CANCEL: case MotionEvent.ACTION_UP: /* * Release the drag */ mIsBeingDragged = false; break; } /* * Motion events are only intercepted during dragging mode. 
*/ return mIsBeingDragged; } @Override public boolean onTouchEvent(MotionEvent ev) { final int action = ev.getAction(); final int x = (int) ev.getX(); if (mVelocityTracker == null) { mVelocityTracker = VelocityTracker.obtain(); } mVelocityTracker.addMovement(ev); switch (action) { case MotionEvent.ACTION_DOWN: if (!mScroller.isFinished()) { mScroller.forceFinished(true); mHandler.removeCallbacks(mScrollerRunnable); } performStartTracking(x); break; case MotionEvent.ACTION_MOVE: // Scroll to follow the motion event final int newOffset = mStartOffsetX - (mStartMotionX - x); if (newOffset > 0 || newOffset < getOffsetForPage(mPageCount - 1)) { mStartOffsetX = mOffsetX; mStartMotionX = x; } else { setOffsetX(newOffset); } break; case MotionEvent.ACTION_UP: case MotionEvent.ACTION_CANCEL: setOffsetX(mStartOffsetX - (mStartMotionX - x)); int direction = 0; final int slop = mStartMotionX - x; if (Math.abs(slop) > mPageSlop) { direction = (slop > 0) ? 1 : -1; } else { mVelocityTracker.computeCurrentVelocity(VELOCITY_UNITS, mMaximumVelocity); final int initialVelocity = (int) mVelocityTracker.getXVelocity(); if (Math.abs(initialVelocity) > mMinimumVelocity) { direction = (initialVelocity > 0) ? -1 : 1; } } if (mOnPageChangeListener != null) { mOnPageChangeListener.onStopTracking(this); } smoothScrollToPage(getActualCurrentPage() + direction); if (mVelocityTracker != null) { mVelocityTracker.recycle(); mVelocityTracker = null; } break; } return true; } /** * Set a listener to be notified of changes that may occur in this * {@link PagedView}. * * @param listener The listener to callback. */ public void setOnPageChangeListener(OnPagedViewChangeListener listener) { mOnPageChangeListener = listener; } /** * Sets the {@link PagedAdapter} used to fill this {@link PagedView} with * some basic information : the number of displayed pages, the pages, etc. 
* * @param adapter The {@link PagedAdapter} to set to this {@link PagedView} */ public void setAdapter(PagedAdapter adapter) { if (null != mAdapter) { mAdapter.unregisterDataSetObserver(mDataSetObserver); } // Reset mRecycler.clear(); mActiveViews.clear(); removeAllViews(); mAdapter = adapter; mTargetPage = INVALID_PAGE; mCurrentPage = 0; mOffsetX = 0; if (null != mAdapter) { mAdapter.registerDataSetObserver(mDataSetObserver); mPageCount = mAdapter.getCount(); } requestLayout(); invalidate(); } /** * Returns the current page. * * @return The current page */ public int getCurrentPage() { return mCurrentPage; } private int getActualCurrentPage() { return mTargetPage != INVALID_PAGE ? mTargetPage : mCurrentPage; } /** * Initiate an animated scrolling from the current position to the given * page * * @param page The page to scroll to. */ public void smoothScrollToPage(int page) { scrollToPage(page, true); } /** * Initiate an animated scrolling to the next page */ public void smoothScrollToNext() { smoothScrollToPage(getActualCurrentPage() + 1); } /** * Initiate an animated scrolling to the previous page */ public void smoothScrollToPrevious() { smoothScrollToPage(getActualCurrentPage() - 1); } /** * Instantly moves the PagedView from the current position to the given * page. * * @param page The page to scroll to. 
*/ public void scrollToPage(int page) { scrollToPage(page, false); } /** * Instantly moves to the next page */ public void scrollToNext() { scrollToPage(getActualCurrentPage() + 1); } /** * Instantly moves to the previous page */ public void scrollToPrevious() { scrollToPage(getActualCurrentPage() - 1); } private void scrollToPage(int page, boolean animated) { // Make sure page is bound to correct values page = Math.max(0, Math.min(page, mPageCount - 1)); final int targetOffset = getOffsetForPage(page); final int dx = targetOffset - mOffsetX; if (dx == 0) { performPageChange(page); return; } if (animated) { mTargetPage = page; mScroller.startScroll(mOffsetX, 0, dx, 0); mHandler.post(mScrollerRunnable); } else { setOffsetX(targetOffset); performPageChange(page); } } private void setOffsetX(int offsetX) { if (offsetX == mOffsetX) { return; } final int startPage = getPageForOffset(offsetX); final int endPage = getPageForOffset(offsetX - getWidth() + 1); recycleViews(startPage, endPage); final int leftAndRightOffset = offsetX - mOffsetX; for (int i = startPage; i <= endPage; i++) { View child = mActiveViews.get(i); if (child == null) { child = obtainView(i); setupView(child, i); } child.offsetLeftAndRight(leftAndRightOffset); } mOffsetX = offsetX; invalidate(); } private int getOffsetForPage(int page) { return -(page * getWidth()); } private int getPageForOffset(int offset) { return -offset / getWidth(); } private void recycleViews(int start, int end) { // [start, end] <=> range of pages that needs to be displayed final SparseArray<View> activeViews = mActiveViews; final int count = activeViews.size(); for (int i = 0; i < count; i++) { final int key = activeViews.keyAt(i); if (key < start || key > end) { final View recycled = activeViews.valueAt(i); removeView(recycled); mRecycler.add(recycled); activeViews.delete(key); } } } private View obtainView(int position) { // Get a view from the recycler final View recycled = mRecycler.poll(); View child = 
mAdapter.getView(position, recycled, this); if (child == null) { throw new NullPointerException("PagedAdapter.getView must return a non-null View"); } if (recycled != null && child != recycled) { if (Config.GD_WARNING_LOGS_ENABLED) { Log.w(LOG_TAG, "Not reusing the convertView may impact PagedView performance."); } } addView(child); mActiveViews.put(position, child); return child; } private void setupView(View child, int position) { if (child == null) { return; } LayoutParams lp = child.getLayoutParams(); if (lp == null) { lp = new LayoutParams(LayoutParams.FILL_PARENT, LayoutParams.WRAP_CONTENT); } // Measure the view final int childWidthSpec = getChildMeasureSpec(MeasureSpec.makeMeasureSpec(getWidth(), MeasureSpec.EXACTLY), 0, lp.width); final int childHeightSpec = getChildMeasureSpec(MeasureSpec.makeMeasureSpec(getHeight(), MeasureSpec.EXACTLY), 0, lp.height); child.measure(childWidthSpec, childHeightSpec); // Layout the view final int childLeft = mOffsetX - getOffsetForPage(position); child.layout(childLeft, 0, childLeft + child.getMeasuredWidth(), child.getMeasuredHeight()); } private void performStartTracking(int startMotionX) { if (mOnPageChangeListener != null) { mOnPageChangeListener.onStartTracking(this); } mStartMotionX = startMotionX; mStartOffsetX = mOffsetX; } private void performPageChange(int newPage) { if (mCurrentPage != newPage) { if (mOnPageChangeListener != null) { mOnPageChangeListener.onPageChanged(this, mCurrentPage, newPage); } mCurrentPage = newPage; } } static class SavedState extends BaseSavedState { int currentPage; SavedState(Parcelable superState) { super(superState); } private SavedState(Parcel in) { super(in); currentPage = in.readInt(); } @Override public void writeToParcel(Parcel out, int flags) { super.writeToParcel(out, flags); out.writeInt(currentPage); } public static final Parcelable.Creator<SavedState> CREATOR = new Parcelable.Creator<SavedState>() { public SavedState createFromParcel(Parcel in) { return new SavedState(in); 
} public SavedState[] newArray(int size) { return new SavedState[size]; } }; } @Override public Parcelable onSaveInstanceState() { Parcelable superState = super.onSaveInstanceState(); SavedState ss = new SavedState(superState); ss.currentPage = mCurrentPage; return ss; } @Override public void onRestoreInstanceState(Parcelable state) { SavedState ss = (SavedState) state; super.onRestoreInstanceState(ss.getSuperState()); mCurrentPage = ss.currentPage; } private DataSetObserver mDataSetObserver = new DataSetObserver() { public void onInvalidated() { // Not handled }; public void onChanged() { // TODO Cyril : When data has changed we should normally // look for the position that as the same id is case // Adapter.hasStableIds() returns true. final int currentPage = mCurrentPage; setAdapter(mAdapter); mCurrentPage = currentPage; setOffsetX(getOffsetForPage(currentPage)); }; }; private Runnable mScrollerRunnable = new Runnable() { @Override public void run() { final Scroller scroller = mScroller; if (!scroller.isFinished()) { scroller.computeScrollOffset(); setOffsetX(scroller.getCurrX()); mHandler.postDelayed(this, FRAME_RATE); } else { performPageChange(mTargetPage); } } }; }
paristote/mobile-android-studio
greenDroid/src/main/java/greendroid/widget/PagedView.java
Java
lgpl-3.0
19,957
/* * Title: CloudSim Toolkit * Description: CloudSim (Cloud Simulation) Toolkit for Modeling and Simulation of Clouds * Licence: GPL - http://www.gnu.org/copyleft/gpl.html * * Copyright (c) 2009-2012, The University of Melbourne, Australia */ package org.cloudbus.cloudsim.core; import java.util.ArrayList; import java.util.Calendar; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import org.cloudbus.cloudsim.Log; import org.cloudbus.cloudsim.core.predicates.Predicate; import org.cloudbus.cloudsim.core.predicates.PredicateAny; import org.cloudbus.cloudsim.core.predicates.PredicateNone; /** * This class extends the CloudSimCore to enable network simulation in CloudSim. Also, it disables * all the network models from CloudSim, to provide a simpler simulation of networking. In the * network model used by CloudSim, a topology file written in BRITE format is used to describe the * network. Later, nodes in such file are mapped to CloudSim entities. Delay calculated from the * BRITE model are added to the messages send through CloudSim. Messages using the old model are * converted to the apropriate methods with the correct parameters. * * @author Rodrigo N. Calheiros * @author Anton Beloglazov * @since CloudSim Toolkit 1.0 */ public class CloudSim { /** The Constant CLOUDSIM_VERSION_STRING. */ private static final String CLOUDSIM_VERSION_STRING = "3.0"; /** The id of CIS entity. */ private static int cisId = -1; /** The id of CloudSimShutdown entity. */ @SuppressWarnings("unused") private static int shutdownId = -1; /** The CIS object. */ private static CloudInformationService cis = null; /** The Constant NOT_FOUND. */ private static final int NOT_FOUND = -1; /** The trace flag. */ @SuppressWarnings("unused") private static boolean traceFlag = false; /** The calendar. */ private static Calendar calendar = null; /** The termination time. 
*/ private static double terminateAt = -1; /** The minimal time between events. Events within shorter periods after the last event are discarded. */ private static double minTimeBetweenEvents = 0.1; /** * Initialises all the common attributes. * * @param _calendar the _calendar * @param _traceFlag the _trace flag * @param numUser number of users * @throws Exception This happens when creating this entity before initialising CloudSim package * or this entity name is <tt>null</tt> or empty * @pre $none * @post $none */ private static void initCommonVariable(Calendar _calendar, boolean _traceFlag, int numUser) throws Exception { initialize(); // NOTE: the order for the below 3 lines are important traceFlag = _traceFlag; // Set the current Wall clock time as the starting time of // simulation if (_calendar == null) { calendar = Calendar.getInstance(); } else { calendar = _calendar; } // creates a CloudSimShutdown object CloudSimShutdown shutdown = new CloudSimShutdown("CloudSimShutdown", numUser); shutdownId = shutdown.getId(); } /** * Initialises CloudSim parameters. This method should be called before creating any entities. * <p> * Inside this method, it will create the following CloudSim entities: * <ul> * <li>CloudInformationService. * <li>CloudSimShutdown * </ul> * <p> * * @param numUser the number of User Entities created. This parameters indicates that * {@link gridsim.CloudSimShutdown} first waits for all user entities's * END_OF_SIMULATION signal before issuing terminate signal to other entities * @param cal starting time for this simulation. 
If it is <tt>null</tt>, then the time will be * taken from <tt>Calendar.getInstance()</tt> * @param traceFlag <tt>true</tt> if CloudSim trace need to be written * @see gridsim.CloudSimShutdown * @see CloudInformationService.CloudInformationService * @pre numUser >= 0 * @post $none */ public static void init(int numUser, Calendar cal, boolean traceFlag) { try { initCommonVariable(cal, traceFlag, numUser); // create a GIS object cis = new CloudInformationService("CloudInformationService"); // set all the above entity IDs cisId = cis.getId(); } catch (IllegalArgumentException s) { Log.printLine("CloudSim.init(): The simulation has been terminated due to an unexpected error"); Log.printLine(s.getMessage()); } catch (Exception e) { Log.printLine("CloudSim.init(): The simulation has been terminated due to an unexpected error"); Log.printLine(e.getMessage()); } } /** * Initialises CloudSim parameters. This method should be called before creating any entities. * <p> * Inside this method, it will create the following CloudSim entities: * <ul> * <li>CloudInformationService. * <li>CloudSimShutdown * </ul> * <p> * * @param numUser the number of User Entities created. This parameters indicates that * {@link gridsim.CloudSimShutdown} first waits for all user entities's * END_OF_SIMULATION signal before issuing terminate signal to other entities * @param cal starting time for this simulation. If it is <tt>null</tt>, then the time will be * taken from <tt>Calendar.getInstance()</tt> * @param traceFlag <tt>true</tt> if CloudSim trace need to be written * @param periodBetweenEvents - the minimal period between events. Events within shorter periods * after the last event are discarded. 
* @see gridsim.CloudSimShutdown * @see CloudInformationService.CloudInformationService * @pre numUser >= 0 * @post $none */ public static void init(int numUser, Calendar cal, boolean traceFlag, double periodBetweenEvents) { if (periodBetweenEvents <= 0) { throw new IllegalArgumentException("The minimal time between events should be positive, but is:" + periodBetweenEvents); } init(numUser, cal, traceFlag); minTimeBetweenEvents = periodBetweenEvents; } /** * Starts the execution of CloudSim simulation. It waits for complete execution of all entities, * i.e. until all entities threads reach non-RUNNABLE state or there are no more events in the * future event queue. * <p> * <b>Note</b>: This method should be called after all the entities have been setup and added. * * @return the double * @throws NullPointerException This happens when creating this entity before initialising * CloudSim package or this entity name is <tt>null</tt> or empty. * @see gridsim.CloudSim#init(int, Calendar, boolean) * @pre $none * @post $none */ public static double startSimulation() throws NullPointerException { Log.printLine("Starting CloudSim version " + CLOUDSIM_VERSION_STRING); try { double clock = run(); // reset all static variables cisId = -1; shutdownId = -1; cis = null; calendar = null; traceFlag = false; return clock; } catch (IllegalArgumentException e) { e.printStackTrace(); throw new NullPointerException("CloudSim.startCloudSimulation() :" + " Error - you haven't initialized CloudSim."); } } /** * Stops Cloud Simulation (based on {@link Simulation#runStop()}). This should be only called if * any of the user defined entities <b>explicitly</b> want to terminate simulation during * execution. 
* * @throws NullPointerException This happens when creating this entity before initialising * CloudSim package or this entity name is <tt>null</tt> or empty * @see gridsim.CloudSim#init(int, Calendar, boolean) * @see Simulation#runStop() * @pre $none * @post $none */ public static void stopSimulation() throws NullPointerException { try { runStop(); } catch (IllegalArgumentException e) { throw new NullPointerException("CloudSim.stopCloudSimulation() : " + "Error - can't stop Cloud Simulation."); } } /** * This method is called if one wants to terminate the simulation. * * @return true, if successful; false otherwise. */ public static boolean terminateSimulation() { running = false; printMessage("Simulation: Reached termination time."); return true; } /** * This method is called if one wants to terminate the simulation at a given time. * * @param time the time at which the simulation has to be terminated * @return true, if successful otherwise. */ public static boolean terminateSimulation(double time) { if (time <= clock) { return false; } else { terminateAt = time; } return true; } /** * Returns the minimum time between events. Events within shorter periods after the last event are discarded. * @return the minimum time between events. */ public static double getMinTimeBetweenEvents() { return minTimeBetweenEvents; } /** * Gets a new copy of initial simulation Calendar. * * @return a new copy of Calendar object or if CloudSim hasn't been initialized * @see gridsim.CloudSim#init(int, Calendar, boolean, String[], String[], String) * @see gridsim.CloudSim#init(int, Calendar, boolean) * @pre $none * @post $none */ public static Calendar getSimulationCalendar() { // make a new copy Calendar clone = calendar; if (calendar != null) { clone = (Calendar) calendar.clone(); } return clone; } /** * Gets the entity ID of <tt>CloudInformationService</tt>. 
* * @return the Entity ID or if it is not found * @pre $none * @post $result >= -1 */ public static int getCloudInfoServiceEntityId() { return cisId; } /** * Sends a request to Cloud Information Service (GIS) entity to get the list of all Cloud * hostList. * * @return A List containing CloudResource ID (as an Integer object) or if a CIS entity hasn't * been created before * @pre $none * @post $none */ public static List<Integer> getCloudResourceList() { if (cis == null) { return null; } return cis.getList(); } // ======== SIMULATION METHODS ===============// /** The entities. */ private static List<SimEntity> entities; /** The future event queue. */ protected static FutureQueue future; /** The deferred event queue. */ protected static DeferredQueue deferred; /** The simulation clock. */ private static double clock; /** Flag for checking if the simulation is running. */ private static boolean running; /** The entities by name. */ private static Map<String, SimEntity> entitiesByName; // The predicates used in entity wait methods /** The wait predicates. */ private static Map<Integer, Predicate> waitPredicates; /** The paused. */ private static boolean paused = false; /** The pause at. */ private static long pauseAt = -1; /** The abrupt terminate. */ private static boolean abruptTerminate = false; /** * Initialise the simulation for stand alone simulations. This function should be called at the * start of the simulation. */ protected static void initialize() { Log.printLine("Initialising..."); entities = new ArrayList<SimEntity>(); entitiesByName = new LinkedHashMap<String, SimEntity>(); future = new FutureQueue(); deferred = new DeferredQueue(); waitPredicates = new HashMap<Integer, Predicate>(); clock = 0; running = false; } // The two standard predicates /** A standard predicate that matches any event. */ public final static PredicateAny SIM_ANY = new PredicateAny(); /** A standard predicate that does not match any events. 
*/ public final static PredicateNone SIM_NONE = new PredicateNone(); // Public access methods /** * Get the current simulation time. * * @return the simulation time */ public static double clock() { return clock; } /** * Get the current number of entities in the simulation. * * @return The number of entities */ public static int getNumEntities() { return entities.size(); } /** * Get the entity with a given id. * * @param id the entity's unique id number * @return The entity, or if it could not be found */ public static SimEntity getEntity(int id) { return entities.get(id); } /** * Get the entity with a given name. * * @param name The entity's name * @return The entity */ public static SimEntity getEntity(String name) { return entitiesByName.get(name); } /** * Get the id of an entity with a given name. * * @param name The entity's name * @return The entity's unique id number */ public static int getEntityId(String name) { SimEntity obj = entitiesByName.get(name); if (obj == null) { return NOT_FOUND; } else { return obj.getId(); } } /** * Gets name of the entity given its entity ID. * * @param entityID the entity ID * @return the Entity name or if this object does not have one * @pre entityID > 0 * @post $none */ public static String getEntityName(int entityID) { try { return getEntity(entityID).getName(); } catch (IllegalArgumentException e) { return null; } catch (Exception e) { return null; } } /** * Gets name of the entity given its entity ID. * * @param entityID the entity ID * @return the Entity name or if this object does not have one * @pre entityID > 0 * @post $none */ public static String getEntityName(Integer entityID) { if (entityID != null) { return getEntityName(entityID.intValue()); } return null; } /** * Returns a list of entities created for the simulation. 
* * @return the entity iterator */ public static List<SimEntity> getEntityList() { // create a new list to prevent the user from changing // the list of entities used by Simulation List<SimEntity> list = new LinkedList<SimEntity>(); list.addAll(entities); return list; } // Public update methods /** * Add a new entity to the simulation. This is present for compatibility with existing * simulations since entities are automatically added to the simulation upon instantiation. * * @param e The new entity */ public static void addEntity(SimEntity e) { SimEvent evt; if (running) { // Post an event to make this entity evt = new SimEvent(SimEvent.CREATE, clock, 1, 0, 0, e); future.addEvent(evt); } if (e.getId() == -1) { // Only add once! int id = entities.size(); e.setId(id); entities.add(e); entitiesByName.put(e.getName(), e); } } /** * Internal method used to add a new entity to the simulation when the simulation is running. It * should <b>not</b> be called from user simulations. * * @param e The new entity */ protected static void addEntityDynamically(SimEntity e) { if (e == null) { throw new IllegalArgumentException("Adding null entity."); } else { printMessage("Adding: " + e.getName()); } e.startEntity(); } /** * Internal method used to run one tick of the simulation. This method should <b>not</b> be * called in simulations. * * @return true, if successful otherwise */ public static boolean runClockTick() { SimEntity ent; boolean queue_empty; int entities_size = entities.size(); for (int i = 0; i < entities_size; i++) { ent = entities.get(i); if (ent.getState() == SimEntity.RUNNABLE) { ent.run(); } } // If there are more future events then deal with them if (future.size() > 0) { List<SimEvent> toRemove = new ArrayList<SimEvent>(); Iterator<SimEvent> fit = future.iterator(); queue_empty = false; SimEvent first = fit.next(); processEvent(first); future.remove(first); fit = future.iterator(); // Check if next events are at same time... 
boolean trymore = fit.hasNext(); while (trymore) { SimEvent next = fit.next(); if (next.eventTime() == first.eventTime()) { processEvent(next); toRemove.add(next); trymore = fit.hasNext(); } else { trymore = false; } } future.removeAll(toRemove); } else { queue_empty = true; running = false; printMessage("Simulation: No more future events"); } return queue_empty; } /** * Internal method used to stop the simulation. This method should <b>not</b> be used directly. */ public static void runStop() { printMessage("Simulation completed."); } /** * Used to hold an entity for some time. * * @param src the src * @param delay the delay */ public static void hold(int src, long delay) { SimEvent e = new SimEvent(SimEvent.HOLD_DONE, clock + delay, src); future.addEvent(e); entities.get(src).setState(SimEntity.HOLDING); } /** * Used to pause an entity for some time. * * @param src the src * @param delay the delay */ public static void pause(int src, double delay) { SimEvent e = new SimEvent(SimEvent.HOLD_DONE, clock + delay, src); future.addEvent(e); entities.get(src).setState(SimEntity.HOLDING); } /** * Used to send an event from one entity to another. * * @param src the src * @param dest the dest * @param delay the delay * @param tag the tag * @param data the data */ public static void send(int src, int dest, double delay, int tag, Object data) { if (delay < 0) { throw new IllegalArgumentException("Send delay can't be negative."); } SimEvent e = new SimEvent(SimEvent.SEND, clock + delay, src, dest, tag, data); future.addEvent(e); } /** * Used to send an event from one entity to another, with priority in the queue. 
* * @param src the src * @param dest the dest * @param delay the delay * @param tag the tag * @param data the data */ public static void sendFirst(int src, int dest, double delay, int tag, Object data) { if (delay < 0) { throw new IllegalArgumentException("Send delay can't be negative."); } SimEvent e = new SimEvent(SimEvent.SEND, clock + delay, src, dest, tag, data); future.addEventFirst(e); } /** * Sets an entity's state to be waiting. The predicate used to wait for an event is now passed * to Sim_system. Only events that satisfy the predicate will be passed to the entity. This is * done to avoid unnecessary context switches. * * @param src the src * @param p the p */ public static void wait(int src, Predicate p) { entities.get(src).setState(SimEntity.WAITING); if (p != SIM_ANY) { // If a predicate has been used store it in order to check it waitPredicates.put(src, p); } } /** * Checks if events for a specific entity are present in the deferred event queue. * * @param d the d * @param p the p * @return the int */ public static int waiting(int d, Predicate p) { int count = 0; SimEvent event; Iterator<SimEvent> iterator = deferred.iterator(); while (iterator.hasNext()) { event = iterator.next(); if ((event.getDestination() == d) && (p.match(event))) { count++; } } return count; } /** * Selects an event matching a predicate. * * @param src the src * @param p the p * @return the sim event */ public static SimEvent select(int src, Predicate p) { SimEvent ev = null; Iterator<SimEvent> iterator = deferred.iterator(); while (iterator.hasNext()) { ev = iterator.next(); if (ev.getDestination() == src && p.match(ev)) { iterator.remove(); break; } } return ev; } /** * Find first deferred event matching a predicate. 
* * @param src the src * @param p the p * @return the sim event */ public static SimEvent findFirstDeferred(int src, Predicate p) { SimEvent ev = null; Iterator<SimEvent> iterator = deferred.iterator(); while (iterator.hasNext()) { ev = iterator.next(); if (ev.getDestination() == src && p.match(ev)) { break; } } return ev; } /** * Removes an event from the event queue. * * @param src the src * @param p the p * @return the sim event */ public static SimEvent cancel(int src, Predicate p) { SimEvent ev = null; Iterator<SimEvent> iter = future.iterator(); while (iter.hasNext()) { ev = iter.next(); if (ev.getSource() == src && p.match(ev)) { iter.remove(); break; } } return ev; } /** * Removes all events that match a given predicate from the future event queue returns true if * at least one event has been cancelled; false otherwise. * * @param src the src * @param p the p * @return true, if successful */ public static boolean cancelAll(int src, Predicate p) { SimEvent ev = null; int previousSize = future.size(); Iterator<SimEvent> iter = future.iterator(); while (iter.hasNext()) { ev = iter.next(); if (ev.getSource() == src && p.match(ev)) { iter.remove(); } } return previousSize < future.size(); } // // Private internal methods // /** * Processes an event. 
* * @param e the e */ private static void processEvent(SimEvent e) { int dest, src; SimEntity dest_ent; // Update the system's clock if (e.eventTime() < clock) { throw new IllegalArgumentException("Past event detected."); } clock = e.eventTime(); // Ok now process it switch (e.getType()) { case SimEvent.ENULL: throw new IllegalArgumentException("Event has a null type."); case SimEvent.CREATE: SimEntity newe = (SimEntity) e.getData(); addEntityDynamically(newe); break; case SimEvent.SEND: // Check for matching wait dest = e.getDestination(); if (dest < 0) { throw new IllegalArgumentException("Attempt to send to a null entity detected."); } else { int tag = e.getTag(); dest_ent = entities.get(dest); if (dest_ent.getState() == SimEntity.WAITING) { Integer destObj = Integer.valueOf(dest); Predicate p = waitPredicates.get(destObj); if ((p == null) || (tag == 9999) || (p.match(e))) { dest_ent.setEventBuffer((SimEvent) e.clone()); dest_ent.setState(SimEntity.RUNNABLE); waitPredicates.remove(destObj); } else { deferred.addEvent(e); } } else { deferred.addEvent(e); } } break; case SimEvent.HOLD_DONE: src = e.getSource(); if (src < 0) { throw new IllegalArgumentException("Null entity holding."); } else { entities.get(src).setState(SimEntity.RUNNABLE); } break; default: break; } } /** * Internal method used to start the simulation. This method should <b>not</b> be used by user * simulations. */ public static void runStart() { running = true; // Start all the entities for (SimEntity ent : entities) { ent.startEntity(); } printMessage("Entities started."); } /** * Check if the simulation is still running. This method should be used by entities to check if * they should continue executing. * * @return if the simulation is still running, otherwise */ public static boolean running() { return running; } /** * This method is called if one wants to pause the simulation. * * @return true, if successful otherwise. 
*/ public static boolean pauseSimulation() { paused = true; return paused; } /** * This method is called if one wants to pause the simulation at a given time. * * @param time the time at which the simulation has to be paused * @return true, if successful otherwise. */ public static boolean pauseSimulation(long time) { if (time <= clock) { return false; } else { pauseAt = time; } return true; } /** * This method is called if one wants to resume the simulation that has previously been paused. * * @return if the simulation has been restarted or or otherwise. */ public static boolean resumeSimulation() { paused = false; if (pauseAt <= clock) { pauseAt = -1; } return !paused; } /** * Start the simulation running. This should be called after all the entities have been setup * and added, and their ports linked. * * @return the double last clock value */ public static double run() { if (!running) { runStart(); } while (true) { if (runClockTick() || abruptTerminate) { break; } // this block allows termination of simulation at a specific time if (terminateAt > 0.0 && clock >= terminateAt) { terminateSimulation(); clock = terminateAt; break; } if (pauseAt != -1 && ((future.size() > 0 && clock <= pauseAt && pauseAt <= future.iterator().next() .eventTime()) || future.size() == 0 && pauseAt <= clock)) { pauseSimulation(); clock = pauseAt; } while (paused) { try { Thread.sleep(100); } catch (InterruptedException e) { e.printStackTrace(); } } } double clock = clock(); finishSimulation(); runStop(); return clock; } /** * Internal method that allows the entities to terminate. This method should <b>not</b> be used * in user simulations. 
*/ public static void finishSimulation() { // Allow all entities to exit their body method if (!abruptTerminate) { for (SimEntity ent : entities) { if (ent.getState() != SimEntity.FINISHED) { ent.run(); } } } for (SimEntity ent : entities) { ent.shutdownEntity(); } // reset all static variables // Private data members entities = null; entitiesByName = null; future = null; deferred = null; clock = 0L; running = false; waitPredicates = null; paused = false; pauseAt = -1; abruptTerminate = false; } /** * Abruptally terminate. */ public static void abruptallyTerminate() { abruptTerminate = true; } /** * Prints a message about the progress of the simulation. * * @param message the message */ private static void printMessage(String message) { Log.printLine(message); } /** * Checks if is paused. * * @return true, if is paused */ public static boolean isPaused() { return paused; } }
hewolf/VirtualMachinePlacement
src/org/cloudbus/cloudsim/core/CloudSim.java
Java
lgpl-3.0
26,689
// +build !ignore_autogenerated /* Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by deepcopy-gen. DO NOT EDIT. package apiserver import ( runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AdmissionConfiguration) DeepCopyInto(out *AdmissionConfiguration) { *out = *in out.TypeMeta = in.TypeMeta if in.Plugins != nil { in, out := &in.Plugins, &out.Plugins *out = make([]AdmissionPluginConfiguration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionConfiguration. func (in *AdmissionConfiguration) DeepCopy() *AdmissionConfiguration { if in == nil { return nil } out := new(AdmissionConfiguration) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *AdmissionConfiguration) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AdmissionPluginConfiguration) DeepCopyInto(out *AdmissionPluginConfiguration) { *out = *in if in.Configuration != nil { in, out := &in.Configuration, &out.Configuration *out = new(runtime.Unknown) (*in).DeepCopyInto(*out) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPluginConfiguration. func (in *AdmissionPluginConfiguration) DeepCopy() *AdmissionPluginConfiguration { if in == nil { return nil } out := new(AdmissionPluginConfiguration) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Connection) DeepCopyInto(out *Connection) { *out = *in if in.Transport != nil { in, out := &in.Transport, &out.Transport *out = new(Transport) (*in).DeepCopyInto(*out) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Connection. func (in *Connection) DeepCopy() *Connection { if in == nil { return nil } out := new(Connection) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EgressSelection) DeepCopyInto(out *EgressSelection) { *out = *in in.Connection.DeepCopyInto(&out.Connection) return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressSelection. func (in *EgressSelection) DeepCopy() *EgressSelection { if in == nil { return nil } out := new(EgressSelection) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *EgressSelectorConfiguration) DeepCopyInto(out *EgressSelectorConfiguration) { *out = *in out.TypeMeta = in.TypeMeta if in.EgressSelections != nil { in, out := &in.EgressSelections, &out.EgressSelections *out = make([]EgressSelection, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressSelectorConfiguration. func (in *EgressSelectorConfiguration) DeepCopy() *EgressSelectorConfiguration { if in == nil { return nil } out := new(EgressSelectorConfiguration) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *EgressSelectorConfiguration) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TCPTransport) DeepCopyInto(out *TCPTransport) { *out = *in if in.TLSConfig != nil { in, out := &in.TLSConfig, &out.TLSConfig *out = new(TLSConfig) **out = **in } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPTransport. func (in *TCPTransport) DeepCopy() *TCPTransport { if in == nil { return nil } out := new(TCPTransport) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TLSConfig) DeepCopyInto(out *TLSConfig) { *out = *in return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig. func (in *TLSConfig) DeepCopy() *TLSConfig { if in == nil { return nil } out := new(TLSConfig) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TracingConfiguration) DeepCopyInto(out *TracingConfiguration) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	if in.Endpoint != nil {
		// Optional scalar pointer; clone the pointee.
		in, out := &in.Endpoint, &out.Endpoint
		*out = new(string)
		**out = **in
	}
	if in.SamplingRatePerMillion != nil {
		// Optional scalar pointer; clone the pointee.
		in, out := &in.SamplingRatePerMillion, &out.SamplingRatePerMillion
		*out = new(int32)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfiguration.
func (in *TracingConfiguration) DeepCopy() *TracingConfiguration {
	if in == nil {
		return nil
	}
	out := new(TracingConfiguration)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TracingConfiguration) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Transport) DeepCopyInto(out *Transport) {
	*out = *in
	if in.TCP != nil {
		// TCPTransport holds nested pointers; delegate to its deep copy.
		in, out := &in.TCP, &out.TCP
		*out = new(TCPTransport)
		(*in).DeepCopyInto(*out)
	}
	if in.UDS != nil {
		// UDSTransport has no pointer fields (see UDSTransport.DeepCopyInto
		// below), so a value copy of the pointee suffices.
		in, out := &in.UDS, &out.UDS
		*out = new(UDSTransport)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Transport.
func (in *Transport) DeepCopy() *Transport {
	if in == nil {
		return nil
	}
	out := new(Transport)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UDSTransport) DeepCopyInto(out *UDSTransport) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDSTransport.
func (in *UDSTransport) DeepCopy() *UDSTransport {
	if in == nil {
		return nil
	}
	out := new(UDSTransport)
	in.DeepCopyInto(out)
	return out
}
sallyom/origin
vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go
GO
apache-2.0
7,499
// bslmf_islvaluereference.cpp -*-C++-*- #include <bslmf_islvaluereference.h> #include <bsls_ident.h> BSLS_IDENT("$Id$ $CSID$") // ---------------------------------------------------------------------------- // Copyright 2013 Bloomberg Finance L.P. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // ----------------------------- END-OF-FILE ----------------------------------
bowlofstew/bde
groups/bsl/bslmf/bslmf_islvaluereference.cpp
C++
apache-2.0
928
//========= Copyright © 1996-2005, Valve Corporation, All rights reserved. ====
//
//=============================================================================

#ifndef AI_TACTICALSERVICES_H
#define AI_TACTICALSERVICES_H

#include "ai_component.h"

#if defined( _WIN32 )
#pragma once
#endif

class CAI_Network;
class CAI_Pathfinder;

// How FindLos() constrains candidate positions relative to a flank reference.
enum FlankType_t
{
	FLANKTYPE_NONE = 0,
	FLANKTYPE_ARC,		// Stay flFlankParam degrees of arc away from vecFlankRefPos
	FLANKTYPE_RADIUS,	// Stay flFlankParam units away from vecFlankRefPos
};

//-----------------------------------------------------------------------------
// CAI_TacticalServices: per-NPC helper that searches the AI node network (and
// lateral sweeps) for tactically useful positions: line-of-sight spots, cover
// positions, and back-away positions relative to a threat.
//-----------------------------------------------------------------------------

class CAI_TacticalServices : public CAI_Component
{
public:
	// NOTE(review): m_pPathfinder is not initialized here or in the visible
	// Init(); presumably it is assigned elsewhere (e.g. via the datadesc or an
	// overload outside this view) — confirm before relying on it pre-Init.
	CAI_TacticalServices( CAI_BaseNPC *pOuter )
	 :	CAI_Component(pOuter),
		m_pNetwork( NULL )
	{
		m_bAllowFindLateralLos = true;
	}

	// Binds this component to the AI node network it searches.
	void Init( CAI_Network *pNetwork );

	// Find a position with line of sight to the threat, between minThreatDist
	// and maxThreatDist away; the flanking overload additionally constrains
	// the result relative to vecFlankRefPos per FlankType_t. Result is written
	// to *pResult; returns true on success.
	bool FindLos( const Vector &threatPos, const Vector &threatEyePos, float minThreatDist, float maxThreatDist, float blockTime, Vector *pResult );
	bool FindLos( const Vector &threatPos, const Vector &threatEyePos, float minThreatDist, float maxThreatDist, float blockTime, FlankType_t eFlankType, const Vector &VecFlankRefPos, float flFlankParam, Vector *pResult );
	// Sweep sideways for a line-of-sight position to the threat.
	bool FindLateralLos( const Vector &threatPos, Vector *pResult );
	// Find a position that retreats from vecThreat.
	bool FindBackAwayPos( const Vector &vecThreat, Vector *pResult );
	// Find a position hidden from the threat's eye position within the given
	// distance band; the vNearPos overload biases the search near that point.
	bool FindCoverPos( const Vector &vThreatPos, const Vector &vThreatEyePos, float flMinDist, float flMaxDist, Vector *pResult );
	bool FindCoverPos( const Vector &vNearPos, const Vector &vThreatPos, const Vector &vThreatEyePos, float flMinDist, float flMaxDist, Vector *pResult );
	// Lateral (side-step) cover search; the longer overloads control sweep
	// distance and sampling density per direction.
	bool FindLateralCover( const Vector &vecThreat, float flMinDist, Vector *pResult );
	bool FindLateralCover( const Vector &vecThreat, float flMinDist, float distToCheck, int numChecksPerDir, Vector *pResult );
	bool FindLateralCover( const Vector &vNearPos, const Vector &vecThreat, float flMinDist, float distToCheck, int numChecksPerDir, Vector *pResult );

	// Enable/disable the lateral LOS search (see m_bAllowFindLateralLos).
	void AllowFindLateralLos( bool bAllow ) { m_bAllowFindLateralLos = bAllow; }

private:
	// Checks lateral cover
	bool TestLateralCover( const Vector &vecCheckStart, const Vector &vecCheckEnd, float flMinDist );
	bool TestLateralLos( const Vector &vecCheckStart, const Vector &vecCheckEnd );

	// Node-graph searches backing the public Find* methods; each returns a
	// node index into the AI network.
	int FindBackAwayNode( const Vector &vecThreat );
	int FindCoverNode( const Vector &vThreatPos, const Vector &vThreatEyePos, float flMinDist, float flMaxDist );
	int FindCoverNode( const Vector &vNearPos, const Vector &vThreatPos, const Vector &vThreatEyePos, float flMinDist, float flMaxDist );
	int FindLosNode( const Vector &vThreatPos, const Vector &vThreatEyePos, float flMinThreatDist, float flMaxThreatDist, float flBlockTime, FlankType_t eFlankType, const Vector &vThreatFacing, float flFlankParam );

	// World position of the given network node.
	Vector GetNodePos( int );

	CAI_Network *GetNetwork() { return m_pNetwork; }
	const CAI_Network *GetNetwork() const { return m_pNetwork; }

	CAI_Pathfinder *GetPathfinder() { return m_pPathfinder; }
	const CAI_Pathfinder *GetPathfinder() const { return m_pPathfinder; }

	CAI_Network *m_pNetwork;
	CAI_Pathfinder *m_pPathfinder;

	bool m_bAllowFindLateralLos;	// Allows us to turn Lateral LOS checking on/off.

	DECLARE_SIMPLE_DATADESC();
};

//-----------------------------------------------------------------------------

#endif // AI_TACTICALSERVICES_H
ppittle/AlienSwarmDirectorMod
trunk/src/game/server/ai_tacticalservices.h
C
apache-2.0
3,393
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.druid.query.topn; /** */ public class TopNAlgorithmSelector { private final int cardinality; private final int numBytesPerRecord; private volatile boolean hasExtractionFn; private volatile boolean aggregateAllMetrics; private volatile boolean aggregateTopNMetricFirst; public TopNAlgorithmSelector(int cardinality, int numBytesPerRecord) { this.cardinality = cardinality; this.numBytesPerRecord = numBytesPerRecord; } public void setHasExtractionFn(boolean hasExtractionFn) { this.hasExtractionFn = hasExtractionFn; } public void setAggregateAllMetrics(boolean aggregateAllMetrics) { this.aggregateAllMetrics = aggregateAllMetrics; } public void setAggregateTopNMetricFirst(boolean aggregateTopNMetricFirst) { // These are just heuristics based on an analysis of where an inflection point may lie to switch // between different algorithms if (cardinality > 400000 && numBytesPerRecord > 100) { this.aggregateTopNMetricFirst = aggregateTopNMetricFirst; } } public boolean isHasExtractionFn() { return hasExtractionFn; } public boolean isAggregateAllMetrics() { return aggregateAllMetrics; } public boolean isAggregateTopNMetricFirst() { return aggregateTopNMetricFirst; } }
nishantmonu51/druid
processing/src/main/java/org/apache/druid/query/topn/TopNAlgorithmSelector.java
Java
apache-2.0
2,113
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/core/kernels/immutable_constant_op.h" #include <algorithm> #include <tuple> #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/graph/graph_def_builder.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" namespace tensorflow { namespace { // A safe alignment that equal to memmapped page alignment on many modern // architectures. constexpr size_t kTestAlignment = 4096; constexpr size_t kTestTensorSize = 4; constexpr size_t kTestTensorSizeBytes = kTestTensorSize * sizeof(float); // A test ReadOnlyMemoryRegion implementation. 
// In-memory ReadOnlyMemoryRegion backed by a cpu_allocator() buffer; the
// buffer is writable through GetWritableDataStart() so tests can seed it.
class TestReadOnlyMemoryRegion : public ReadOnlyMemoryRegion {
 public:
  TestReadOnlyMemoryRegion() = delete;
  explicit TestReadOnlyMemoryRegion(uint64 length)
      : memptr_(cpu_allocator()->AllocateRaw(kTestAlignment, length)),
        length_(length) {}
  ~TestReadOnlyMemoryRegion() override {
    cpu_allocator()->DeallocateRaw(memptr_);
  }
  const void* data() override { return memptr_; }
  // Test-only escape hatch: exposes the region as mutable floats.
  float* GetWritableDataStart() { return reinterpret_cast<float*>(memptr_); }
  uint64 length() override { return length_; }

 protected:
  void* memptr_;
  uint64 length_;
};

// A mock file system and environment class that creates ReadOnlyMemoryRegion
// from allocated memory.
class TestFileSystem : public NullFileSystem {
 public:
  ~TestFileSystem() override = default;
  Status NewReadOnlyMemoryRegionFromFile(
      const string& fname,
      std::unique_ptr<ReadOnlyMemoryRegion>* result) override {
    float val = 0;
    StringPiece scheme, host, path;
    io::ParseURI(fname, &scheme, &host, &path);
    // For the tests create in-memory regions with float values equal to the
    // region name.
    if (path == "/2") {
      val = 2.0f;
    } else if (path == "/3") {
      val = 3.0f;
    } else {
      val = 0.0f;
    }
    // Region is filled with kTestTensorSize copies of val; ownership passes
    // to the caller via result.
    auto region = new TestReadOnlyMemoryRegion(kTestTensorSizeBytes);
    std::fill_n(region->GetWritableDataStart(), kTestTensorSize, val);
    result->reset(region);
    return Status::OK();
  }
};

REGISTER_FILE_SYSTEM("test", TestFileSystem);

struct ImmutableConstantOpTest {};

// Loads two 4-element immutable constants (values 2 and 3) through the mock
// "test://" file system, multiplies them, and checks every output element.
TEST(ImmutableConstantOpTest, Simple) {
  const TensorShape kTestTensorShape({4, 1});
  const TensorShape kTestTensorShapeT({1, 4});
  auto root = Scope::NewRootScope().ExitOnError();
  auto node1 =
      ops::ImmutableConst(root, DT_FLOAT, kTestTensorShape, "test:///2");
  auto node2 =
      ops::ImmutableConst(root, DT_FLOAT, kTestTensorShapeT, "test:///3");
  auto result = ops::MatMul(root, node1, node2);
  GraphDef graph_def;
  TF_ASSERT_OK(root.ToGraphDef(&graph_def));
  SessionOptions session_options;
  session_options.env = Env::Default();
  // Disable optimizations so the ImmutableConst nodes are executed as-is.
  session_options.config.mutable_graph_options()
      ->mutable_optimizer_options()
      ->set_opt_level(OptimizerOptions_Level_L0);
  std::unique_ptr<Session> session(NewSession(session_options));
  ASSERT_TRUE(session != nullptr) << "Failed to create session";
  TF_ASSERT_OK(session->Create(graph_def)) << "Can't create test graph";
  std::vector<Tensor> outputs;
  TF_ASSERT_OK(session->Run({}, {result.node()->name() + ":0"}, {}, &outputs));
  ASSERT_EQ(outputs.size(), 1);
  // (4,1) x (1,4) => every element of the product is 2*3.
  EXPECT_EQ(outputs.front().flat<float>()(0), 2.0f * 3.0f);
  EXPECT_EQ(outputs.front().flat<float>()(1), 2.0f * 3.0f);
  EXPECT_EQ(outputs.front().flat<float>()(2), 2.0f * 3.0f);
  EXPECT_EQ(outputs.front().flat<float>()(kTestTensorSize - 1), 2.0f * 3.0f);
}

// Creates a test graph with two immutable_const tensors and a simple math
// operation, one of nodes has wrong size, check that error properly reported.
// Shape inference is disabled so the mismatched MatMul operands are only
// detected at run time; the Run() call must fail with INTERNAL.
TEST(ImmutableConstantOpTest, ExecutionError) {
  const TensorShape kBadTensorShape({40, 100});
  const TensorShape kTestTensorShapeT({1, 4});
  auto root = Scope::DisabledShapeInferenceScope().ExitOnError();
  auto node1 =
      ops::ImmutableConst(root, DT_FLOAT, kBadTensorShape, "test:///2");
  auto node2 =
      ops::ImmutableConst(root, DT_FLOAT, kTestTensorShapeT, "test:///3");
  auto result = ops::MatMul(root, node1, node2);
  GraphDef graph_def;
  TF_ASSERT_OK(root.ToGraphDef(&graph_def));
  SessionOptions session_options;
  session_options.env = Env::Default();
  std::unique_ptr<Session> session(NewSession(session_options));
  ASSERT_TRUE(session != nullptr) << "Failed to create session";
  TF_ASSERT_OK(session->Create(graph_def)) << "Can't create test graph";
  std::vector<Tensor> outputs;
  // Check that the run returned error.
  EXPECT_EQ(
      session->Run({}, {result.node()->name() + ":0"}, {}, &outputs).code(),
      error::INTERNAL);
}

// Writes `size` copies of the raw bytes of `value` to a temp file and returns
// its path in *filename.
Status CreateTempFile(Env* env, float value, uint64 size, string* filename) {
  const string dir = testing::TmpDir();
  *filename = io::JoinPath(dir, strings::StrCat("file_", value));
  std::unique_ptr<WritableFile> file;
  TF_RETURN_IF_ERROR(env->NewWritableFile(*filename, &file));
  for (uint64 i = 0; i < size; ++i) {
    // Append the float's in-memory byte representation, one value at a time.
    StringPiece sp;
    sp.set(&value, sizeof(value));
    TF_RETURN_IF_ERROR(file->Append(sp));
  }
  TF_RETURN_IF_ERROR(file->Close());
  return Status::OK();
}

// Same multiply-two-constants check as Simple, but the constants are memmapped
// from real temp files instead of the mock file system.
TEST(ImmutableConstantOpTest, FromFile) {
  const TensorShape kFileTensorShape({1000, 1});
  Env* env = Env::Default();
  auto root = Scope::NewRootScope().ExitOnError();
  string two_file, three_file;
  TF_ASSERT_OK(CreateTempFile(env, 2.0f, 1000, &two_file));
  TF_ASSERT_OK(CreateTempFile(env, 3.0f, 1000, &three_file));
  auto node1 = ops::ImmutableConst(root, DT_FLOAT, kFileTensorShape, two_file);
  auto node2 =
      ops::ImmutableConst(root, DT_FLOAT, kFileTensorShape, three_file);
  auto result = ops::MatMul(root, node1, node2, ops::MatMul::TransposeB(true));
  GraphDef graph_def;
  TF_ASSERT_OK(root.ToGraphDef(&graph_def));
  SessionOptions session_options;
  // Disable optimizations so the ImmutableConst nodes are executed as-is.
  session_options.config.mutable_graph_options()
      ->mutable_optimizer_options()
      ->set_opt_level(OptimizerOptions_Level_L0);
  std::unique_ptr<Session> session(NewSession(session_options));
  ASSERT_TRUE(session != nullptr) << "Failed to create session";
  TF_ASSERT_OK(session->Create(graph_def)) << "Can't create test graph";
  std::vector<Tensor> outputs;
  TF_ASSERT_OK(session->Run({}, {result.node()->name() + ":0"}, {}, &outputs));
  ASSERT_EQ(outputs.size(), 1);
  EXPECT_EQ(outputs.front().flat<float>()(0), 2.0f * 3.0f);
  EXPECT_EQ(outputs.front().flat<float>()(1), 2.0f * 3.0f);
  EXPECT_EQ(outputs.front().flat<float>()(2), 2.0f * 3.0f);
}

}  // namespace
}  // namespace tensorflow
jostep/tensorflow
tensorflow/core/kernels/immutable_constant_op_test.cc
C++
apache-2.0
7,333
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Half Normal distribution class."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import special_math


__all__ = [
    "HalfNormal",
]


class HalfNormal(distribution.Distribution):
  """The Half Normal distribution with scale `scale`.

  #### Mathematical details

  The half normal is a transformation of a centered normal distribution.
  If some random variable `X` has normal distribution,
  ```none
  X ~ Normal(0.0, scale)
  Y = |X|
  ```
  Then `Y` will have half normal distribution. The probability density
  function (pdf) is:

  ```none
  pdf(x; scale, x > 0) = sqrt(2) / (scale * sqrt(pi)) *
    exp(- 1/2 * (x / scale) ** 2)
  )
  ```
  Where `scale = sigma` is the standard deviation of the underlying normal
  distribution.

  #### Examples

  Examples of initialization of one or a batch of distributions.

  ```python
  # Define a single scalar HalfNormal distribution.
  dist = tf.contrib.distributions.HalfNormal(scale=3.0)

  # Evaluate the cdf at 1, returning a scalar.
  dist.cdf(1.)

  # Define a batch of two scalar valued HalfNormals.
  # The first has scale 11.0, the second 22.0
  dist = tf.contrib.distributions.HalfNormal(scale=[11.0, 22.0])

  # Evaluate the pdf of the first distribution on 1.0, and the second on 1.5,
  # returning a length two tensor.
  dist.prob([1.0, 1.5])

  # Get 3 samples, returning a 3 x 2 tensor.
  dist.sample([3])
  ```
  """

  def __init__(self,
               scale,
               validate_args=False,
               allow_nan_stats=True,
               name="HalfNormal"):
    """Construct HalfNormals with scale `scale`.

    Args:
      scale: Floating point tensor; the scales of the distribution(s).
        Must contain only positive values.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    """
    parameters = locals()
    with ops.name_scope(name, values=[scale]):
      # Only attach the positivity assertion when validation is requested;
      # the identity op ties `scale` into this name scope either way.
      with ops.control_dependencies([check_ops.assert_positive(scale)] if
                                    validate_args else []):
        self._scale = array_ops.identity(scale, name="scale")
    super(HalfNormal, self).__init__(
        dtype=self._scale.dtype,
        reparameterization_type=distribution.FULLY_REPARAMETERIZED,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=[self._scale],
        name=name)

  @staticmethod
  def _param_shapes(sample_shape):
    # `scale` broadcasts over the sample shape one-to-one.
    return {"scale": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}

  @property
  def scale(self):
    """Distribution parameter for the scale."""
    return self._scale

  def _batch_shape_tensor(self):
    return array_ops.shape(self.scale)

  def _batch_shape(self):
    return self.scale.shape

  def _event_shape_tensor(self):
    # Scalar event: each draw is a single value.
    return constant_op.constant([], dtype=dtypes.int32)

  def _event_shape(self):
    return tensor_shape.scalar()

  def _sample_n(self, n, seed=None):
    # Sample the underlying centered normal, then fold: Y = |X * scale|.
    shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
    sampled = random_ops.random_normal(
        shape=shape, mean=0., stddev=1., dtype=self.dtype, seed=seed)
    return math_ops.abs(sampled * self.scale)

  def _prob(self, x):
    # sqrt(2/pi)/scale * exp(-x^2 / (2 scale^2)), zeroed out for x < 0.
    coeff = np.sqrt(2) / self.scale / np.sqrt(np.pi)
    pdf = coeff * math_ops.exp(- 0.5 * (x / self.scale) ** 2)
    return pdf * math_ops.cast(x >= 0, self.dtype)

  def _cdf(self, x):
    # relu clamps negative x to 0, where the CDF is 0.
    truncated_x = nn.relu(x)
    return math_ops.erf(truncated_x / self.scale / np.sqrt(2.0))

  def _entropy(self):
    return 0.5 * math_ops.log(np.pi * self.scale ** 2.0 / 2.0) + 0.5

  def _mean(self):
    return self.scale * np.sqrt(2.0) / np.sqrt(np.pi)

  def _quantile(self, p):
    return np.sqrt(2.0) * self.scale * special_math.erfinv(p)

  def _mode(self):
    # Density is maximal at 0 and decreasing for x > 0.
    return array_ops.zeros(self.batch_shape_tensor())

  def _variance(self):
    return self.scale ** 2.0 * (1.0 - 2.0 / np.pi)
Mistobaan/tensorflow
tensorflow/contrib/distributions/python/ops/half_normal.py
Python
apache-2.0
5,678
// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /** * @fileoverview Unifies the bite.project subsystem within the context of a * background script. The constructor as a the initializer for the subsystem * causes the rest of the system to initialize. * * @author jasonstredwick@google.com (Jason Stredwick) */ goog.provide('bite.project.Background'); /** * Constructs an object that manages the project UX within the background. * @constructor * @export */ bite.project.Background = function() { }; goog.addSingletonGetter(bite.project.Background); /** * Handles messages for the project subsystem and redirects as appropriate. * @param {!Object} request The data sent. * @param {MessageSender} sender An object containing information about the * script context that sent the request. * @param {function(!*): void} response Optional function to call when the * request completes; only call when appropriate. * @private */ bite.project.Background.prototype.onRequest_ = function(request, sender, response) { }; /** * Create the content instance to initialize the project subsystem in the * context of a content script. */ bite.project.Background.getInstance();
fighterlyt/bite-project
extension/src/project/test/background.js
JavaScript
apache-2.0
1,765
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.runtime.rest.messages; import org.apache.flink.runtime.rest.HttpMethodWrapper; import org.apache.flink.runtime.rest.handler.job.JobConfigHandler; import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpResponseStatus; /** * Message headers for the {@link JobConfigHandler}. 
*/ public class JobConfigHeaders implements MessageHeaders<EmptyRequestBody, JobConfigInfo, JobMessageParameters> { private static final JobConfigHeaders INSTANCE = new JobConfigHeaders(); public static final String URL = "/jobs/:jobid/config"; private JobConfigHeaders() {} @Override public Class<EmptyRequestBody> getRequestClass() { return EmptyRequestBody.class; } @Override public Class<JobConfigInfo> getResponseClass() { return JobConfigInfo.class; } @Override public HttpResponseStatus getResponseStatusCode() { return HttpResponseStatus.OK; } @Override public JobMessageParameters getUnresolvedMessageParameters() { return new JobMessageParameters(); } @Override public HttpMethodWrapper getHttpMethod() { return HttpMethodWrapper.GET; } @Override public String getTargetRestEndpointURL() { return URL; } public static JobConfigHeaders getInstance() { return INSTANCE; } @Override public String getDescription() { return "Returns the configuration of a job."; } }
jinglining/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/JobConfigHeaders.java
Java
apache-2.0
2,151
/* Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.camunda.bpm.engine.impl.jobexecutor; import java.util.List; import org.camunda.bpm.engine.impl.ProcessEngineImpl; /** * @author Thorben Lindhauer * */ public class NotifyAcquisitionRejectedJobsHandler implements RejectedJobsHandler { @Override public void jobsRejected(List<String> jobIds, ProcessEngineImpl processEngine, JobExecutor jobExecutor) { AcquireJobsRunnable acquireJobsRunnable = jobExecutor.getAcquireJobsRunnable(); if (acquireJobsRunnable instanceof SequentialJobAcquisitionRunnable) { JobAcquisitionContext context = ((SequentialJobAcquisitionRunnable) acquireJobsRunnable).getAcquisitionContext(); context.submitRejectedBatch(processEngine.getName(), jobIds); } else { jobExecutor.getExecuteJobsRunnable(jobIds, processEngine).run(); } } }
subhrajyotim/camunda-bpm-platform
engine/src/main/java/org/camunda/bpm/engine/impl/jobexecutor/NotifyAcquisitionRejectedJobsHandler.java
Java
apache-2.0
1,382
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_CORE_TPU_GRAPH_REWRITE_NODEDEF_BUILDER_H_
#define TENSORFLOW_CORE_TPU_GRAPH_REWRITE_NODEDEF_BUILDER_H_

#include <string>

#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/status.h"

namespace tensorflow {

// Convenience builder to build NodeDefs without specifying the inputs. This is
// similar to NodeDefBuilder except inputs are not specified.
// TODO(jpienaar): Clean up NodeDefBuilder and remove this class.
class IncompleteNodeDefBuilder {
 public:
  // Starts a NodeDef with the given node name, op type, and debug info.
  IncompleteNodeDefBuilder(const string& name, const string& op,
                           const NodeDebugInfo& debug);

  // Attribute setters; each returns *this so calls can be chained.
  IncompleteNodeDefBuilder& AddAttr(const string& attr, const DataType& type);
  IncompleteNodeDefBuilder& AddAttr(const string& attr, int val);

  // Sets the requested device for the node.
  IncompleteNodeDefBuilder& Device(const string& device);

  // Materializes the accumulated NodeDef as a node in `graph`, returning the
  // created node through `n`.
  Status Build(Graph* graph, Node** n);

  // Factory helpers for commonly built ops, preconfigured with the op type
  // and its dtype attribute.
  static IncompleteNodeDefBuilder Identity(const string& name,
                                           const DataType& type,
                                           const NodeDebugInfo& debug);
  static IncompleteNodeDefBuilder Merge(const string& name,
                                        const DataType& type,
                                        const NodeDebugInfo& debug, int n);
  static IncompleteNodeDefBuilder Switch(const string& name,
                                         const DataType& type,
                                         const NodeDebugInfo& debug);

 private:
  NodeDef nodedef_;
};

}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_TPU_GRAPH_REWRITE_NODEDEF_BUILDER_H_
sarvex/tensorflow
tensorflow/core/tpu/graph_rewrite/incomplete_nodedef_builder.h
C
apache-2.0
2,316
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

include(CMakeDependentOption)

# Additional components
option(BUILD_COMPILER "Build Thrift compiler" ON)
option(BUILD_TESTING "Build with unit tests" ON)
option(BUILD_EXAMPLES "Build examples" ON)
option(BUILD_TUTORIALS "Build Thrift tutorials" ON)
option(BUILD_LIBRARIES "Build Thrift libraries" ON)

# Libraries to build
#
# Each language library can be enabled or disabled using the WITH_<LANG> flag.
# By default CMake checks if the required dependencies for a language are present
# and enables the library if all are found. This means the default is to build as
# much as possible but leaving out libraries if their dependencies are not met.

# C++
option(WITH_CPP "Build C++ Thrift library" ON)
find_package(Boost 1.53 QUIET)
CMAKE_DEPENDENT_OPTION(BUILD_CPP "Build C++ library" ON
                       "BUILD_LIBRARIES;WITH_CPP;Boost_FOUND" OFF)
# NOTE: Currently the following options are C++ specific,
# but in future other libraries might reuse them.
# So they are not dependent on WITH_CPP but setting them without WITH_CPP currently
# has no effect.
find_package(ZLIB QUIET)
CMAKE_DEPENDENT_OPTION(WITH_ZLIB "Build with ZLIB support" ON
                       "ZLIB_FOUND" OFF)
find_package(Libevent QUIET)
CMAKE_DEPENDENT_OPTION(WITH_LIBEVENT "Build with libevent support" ON
                       "Libevent_FOUND" OFF)
find_package(Qt4 QUIET COMPONENTS QtCore QtNetwork)
CMAKE_DEPENDENT_OPTION(WITH_QT4 "Build with Qt4 support" ON
                       "QT4_FOUND" OFF)
find_package(Qt5 QUIET COMPONENTS Core Network)
CMAKE_DEPENDENT_OPTION(WITH_QT5 "Build with Qt5 support" ON
                       "Qt5_FOUND" OFF)
if(${WITH_QT4} AND ${WITH_QT5} AND ${CMAKE_MAJOR_VERSION} LESS 3)
  # cmake < 3.0.0 causes conflict when building both Qt4 and Qt5
  set(WITH_QT4 OFF)
endif()
find_package(OpenSSL QUIET)
CMAKE_DEPENDENT_OPTION(WITH_OPENSSL "Build with OpenSSL support" ON
                       "OPENSSL_FOUND" OFF)
option(WITH_STDTHREADS "Build with C++ std::thread support" OFF)
CMAKE_DEPENDENT_OPTION(WITH_BOOSTTHREADS "Build with Boost threads support" OFF
                       "NOT WITH_STDTHREADS;Boost_FOUND" OFF)

# C GLib
option(WITH_C_GLIB "Build C (GLib) Thrift library" ON)
find_package(GLIB QUIET COMPONENTS gobject)
CMAKE_DEPENDENT_OPTION(BUILD_C_GLIB "Build C (GLib) library" ON
                       "BUILD_LIBRARIES;WITH_C_GLIB;GLIB_FOUND" OFF)
# Java
option(WITH_JAVA "Build Java Thrift library" ON)
find_package(Java QUIET)
find_package(Ant QUIET)
CMAKE_DEPENDENT_OPTION(BUILD_JAVA "Build Java library" ON
                       "BUILD_LIBRARIES;WITH_JAVA;JAVA_FOUND;ANT_FOUND" OFF)

# Python
option(WITH_PYTHON "Build Python Thrift library" ON)
find_package(PythonInterp QUIET) # for Python executable
find_package(PythonLibs QUIET) # for Python.h
CMAKE_DEPENDENT_OPTION(BUILD_PYTHON "Build Python library" ON
                       "BUILD_LIBRARIES;WITH_PYTHON;PYTHONLIBS_FOUND" OFF)

# Common library options
option(WITH_SHARED_LIB "Build shared libraries" ON)
option(WITH_STATIC_LIB "Build static libraries" ON)
if (NOT WITH_SHARED_LIB AND NOT WITH_STATIC_LIB)
  message(FATAL_ERROR "Cannot build with both shared and static outputs disabled!")
endif()

#NOTE: C++ compiler options are defined in the lib/cpp/CMakeLists.txt

# Visual Studio only options
if(MSVC)
  option(WITH_MT "Build using MT instead of MD (MSVC only)" OFF)
endif(MSVC)

# Prints an indented explanation when the dependency flag `flag` is unset,
# used below to tell the user why a library was disabled.
macro(MESSAGE_DEP flag summary)
  if(NOT ${flag})
    message(STATUS "   - ${summary}")
  endif()
endmacro(MESSAGE_DEP flag summary)

macro(PRINT_CONFIG_SUMMARY)
  message(STATUS "----------------------------------------------------------")
  message(STATUS "Thrift version:                       ${thrift_VERSION} (${thrift_VERSION_MAJOR}.${thrift_VERSION_MINOR}.${thrift_VERSION_PATCH})")
  message(STATUS "Thrift package version:               ${PACKAGE_VERSION}")
  message(STATUS "Build configuration Summary")
  message(STATUS "  Build Thrift compiler:              ${BUILD_COMPILER}")
  message(STATUS "  Build with unit tests:              ${BUILD_TESTING}")
  message(STATUS "  Build examples:                     ${BUILD_EXAMPLES}")
  message(STATUS "  Build Thrift libraries:             ${BUILD_LIBRARIES}")
  message(STATUS " Language libraries:")
  message(STATUS "  Build C++ library:                  ${BUILD_CPP}")
  # Fixed: message previously named a non-existent WITH_CCP flag and read
  # "Disabled by via"; the option is WITH_CPP.
  MESSAGE_DEP(WITH_CPP "Disabled via WITH_CPP=OFF")
  MESSAGE_DEP(Boost_FOUND "Boost headers missing")
  message(STATUS "  Build C (GLib) library:             ${BUILD_C_GLIB}")
  MESSAGE_DEP(WITH_C_GLIB "Disabled via WITH_C_GLIB=OFF")
  MESSAGE_DEP(GLIB_FOUND "GLib missing")
  message(STATUS "  Build Java library:                 ${BUILD_JAVA}")
  MESSAGE_DEP(WITH_JAVA "Disabled via WITH_JAVA=OFF")
  MESSAGE_DEP(JAVA_FOUND "Java Runtime missing")
  MESSAGE_DEP(ANT_FOUND "Ant missing")
  message(STATUS "  Build Python library:               ${BUILD_PYTHON}")
  MESSAGE_DEP(WITH_PYTHON "Disabled via WITH_PYTHON=OFF")
  MESSAGE_DEP(PYTHONLIBS_FOUND "Python libraries missing")
  message(STATUS " Library features:")
  message(STATUS "  Build shared libraries:             ${WITH_SHARED_LIB}")
  message(STATUS "  Build static libraries:             ${WITH_STATIC_LIB}")
  message(STATUS "  Build with ZLIB support:            ${WITH_ZLIB}")
  message(STATUS "  Build with libevent support:        ${WITH_LIBEVENT}")
  message(STATUS "  Build with Qt4 support:             ${WITH_QT4}")
  message(STATUS "  Build with Qt5 support:             ${WITH_QT5}")
  message(STATUS "  Build with OpenSSL support:         ${WITH_OPENSSL}")
  message(STATUS "  Build with Boost thread support:    ${WITH_BOOSTTHREADS}")
  message(STATUS "  Build with C++ std::thread support: ${WITH_STDTHREADS}")
  message(STATUS "----------------------------------------------------------")
endmacro(PRINT_CONFIG_SUMMARY)
redhat-developer-demos/openshift-next-demo
msa/bonjour/node_modules/zipkin/node_modules/thrift/build/cmake/DefineOptions.cmake
CMake
apache-2.0
6,527
//Copyright © 2014 Sony Computer Entertainment America LLC. See License.txt.

using System;
using System.Collections.Generic;

namespace Sce.Atf.Adaptation
{
    /// <summary>
    /// This class wraps an IList of one type to implement IList of another type</summary>
    /// <typeparam name="T">Underlying list type</typeparam>
    /// <typeparam name="U">Adapted list type</typeparam>
    /// <remarks>This adapter class can be used to simulate interface covariance, where
    /// an IList of Type1 can be made to implement an IList of Type2, as long as Type1
    /// implements or can be adapted to Type2.</remarks>
    public class AdaptableList<T, U> : ListAdapter<T, U>
        where T : class
        where U : class
    {
        /// <summary>
        /// Constructor</summary>
        /// <param name="list">List to adapt</param>
        public AdaptableList(IList<T> list)
            : base(list)
        {
        }

        /// <summary>
        /// Converts the item to the underlying list type; throws an InvalidOperationException
        /// if a non-null item can't be converted (a null item converts to null)</summary>
        /// <param name="item">Item to convert</param>
        /// <returns>Item, converted to the underlying list type</returns>
        protected override T Convert(U item)
        {
            T t = item.As<T>();
            // Only a failed conversion of a real item is an error; null passes through.
            if (t == null && item != null)
                throw new InvalidOperationException("Item of wrong type for underlying collection");
            return t;
        }

        /// <summary>
        /// Converts the item from the underlying list type to the adapted list type;
        /// returns null if the item can't be converted</summary>
        /// <param name="item">Item to convert</param>
        /// <returns>Item, converted to the adapted list type, or null</returns>
        protected override U Convert(T item)
        {
            U u = item.As<U>();
            return u;
        }
    }
}
jethac/ATF
Framework/Atf.Core/Adaptation/AdaptableList.cs
C#
apache-2.0
1,886
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.commons.math3.stat.descriptive.summary; import org.apache.commons.math3.stat.descriptive.StorelessUnivariateStatistic; import org.apache.commons.math3.stat.descriptive.StorelessUnivariateStatisticAbstractTest; import org.apache.commons.math3.stat.descriptive.UnivariateStatistic; import org.junit.Assert; import org.junit.Test; /** * Test cases for the {@link SumOfSquares} class. 
* * @version $Id: SumSqTest.java 1244107 2012-02-14 16:17:55Z erans $ */ public class SumSqTest extends StorelessUnivariateStatisticAbstractTest{ protected SumOfSquares stat; /** * {@inheritDoc} */ @Override public UnivariateStatistic getUnivariateStatistic() { return new SumOfSquares(); } /** * {@inheritDoc} */ @Override public double expectedValue() { return this.sumSq; } @Test public void testSpecialValues() { SumOfSquares sumSq = new SumOfSquares(); Assert.assertEquals(0, sumSq.getResult(), 0); sumSq.increment(2d); Assert.assertEquals(4d, sumSq.getResult(), 0); sumSq.increment(Double.POSITIVE_INFINITY); Assert.assertEquals(Double.POSITIVE_INFINITY, sumSq.getResult(), 0); sumSq.increment(Double.NEGATIVE_INFINITY); Assert.assertEquals(Double.POSITIVE_INFINITY, sumSq.getResult(), 0); sumSq.increment(Double.NaN); Assert.assertTrue(Double.isNaN(sumSq.getResult())); sumSq.increment(1); Assert.assertTrue(Double.isNaN(sumSq.getResult())); } @Override protected void checkClearValue(StorelessUnivariateStatistic statistic){ Assert.assertEquals(0, statistic.getResult(), 0); } }
tknandu/CommonsMath_Modifed
math (trunk)/src/test/java/org/apache/commons/math3/stat/descriptive/summary/SumSqTest.java
Java
apache-2.0
2,507
defineSuite([
        'Widgets/Animation/Animation',
        'Core/defined',
        'Specs/pollToPromise',
        'Widgets/Animation/AnimationViewModel',
        'Widgets/ClockViewModel'
    ], function(
        Animation,
        defined,
        pollToPromise,
        AnimationViewModel,
        ClockViewModel) {
    'use strict';

    // Suite-level handles so afterEach can always clean up DOM and widget state.
    var container;
    var animation;

    afterEach(function() {
        if (defined(animation)) {
            // destroy() returns undefined, which also resets the handle.
            animation = animation.destroy();
        }
        if (defined(container) && defined(container.parentNode)) {
            container.parentNode.removeChild(container);
        }
    });

    it('Can create and destroy', function() {
        var clockViewModel = new ClockViewModel();
        var animationViewModel = new AnimationViewModel(clockViewModel);
        animation = new Animation(document.body, animationViewModel);
    });

    it('Can create with container not in the DOM', function() {
        container = document.createElement('div');
        var clockViewModel = new ClockViewModel();
        var animationViewModel = new AnimationViewModel(clockViewModel);
        // BUGFIX: assign to the suite-level variable (was `var animation = ...`,
        // which shadowed it) so afterEach actually destroys this widget.
        animation = new Animation(container, animationViewModel);

        //Verify applyThemeChanges is called when we add the container to the DOM.
        spyOn(animation, 'applyThemeChanges').and.callThrough();
        document.body.appendChild(container);

        //This is done via polling because we can't control when the DOM decides to
        //fire the Mutation event.
        return pollToPromise(function() {
            return animation.applyThemeChanges.calls.count() === 1;
        });
    });

    it('Can destroy without container ever being in the DOM', function() {
        container = document.createElement('div');
        var clockViewModel = new ClockViewModel();
        var animationViewModel = new AnimationViewModel(clockViewModel);
        animation = new Animation(container, animationViewModel);
    });
});
ian-donaldson/world.exposed
Specs/Widgets/Animation/AnimationSpec.js
JavaScript
apache-2.0
1,967
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta name="generator" content= "HTML Tidy for Linux/x86 (vers 1 September 2005), see www.w3.org" /> <title>Creative Commons Legal Code</title> <meta http-equiv="content-type" content= "text/html; charset=utf-8" /> <link rel="stylesheet" type="text/css" href= "http://creativecommons.org/includes/deed3.css" media="screen" /> <link rel="stylesheet" type="text/css" href= "http://creativecommons.org/includes/deed3-print.css" media= "print" /> <!--[if lt IE 7]><link rel="stylesheet" type="text/css" href="http://creativecommons.org/includes/deed3-ie.css" media="screen" /><![endif]--> <script type="text/javascript" src= "http://creativecommons.org/includes/errata.js"> </script> </head> <body> <p align="center" id="header"><a href= "http://creativecommons.org/">Creative Commons</a></p> <div id="deed" class="green"> <div id="deed-head"> <div id="cc-logo"> <img src= "http://creativecommons.org/images/deed/cc-logo.jpg" alt= "" /> </div> <h1><span>Creative Commons Legal Code</span></h1> <div id="deed-license"> <h2>Attribution-ShareAlike 3.0 Unported</h2> </div> </div> <div id="deed-main"> <div id="deed-main-content"> <img src= "http://creativecommons.org/images/international/unported.png" alt="" /> <blockquote> CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE LEGAL SERVICES. DISTRIBUTION OF THIS LICENSE DOES NOT CREATE AN ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES REGARDING THE INFORMATION PROVIDED, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM ITS USE. </blockquote> <h3><em>License</em></h3> <p>THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER APPLICABLE LAW. 
ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.</p> <p>BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY BE CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS.</p> <p><strong>1. Definitions</strong></p> <ol type="a"> <li><strong>"Adaptation"</strong> means a work based upon the Work, or upon the Work and other pre-existing works, such as a translation, adaptation, derivative work, arrangement of music or other alterations of a literary or artistic work, or phonogram or performance and includes cinematographic adaptations or any other form in which the Work may be recast, transformed, or adapted including in any form recognizably derived from the original, except that a work that constitutes a Collection will not be considered an Adaptation for the purpose of this License. For the avoidance of doubt, where the Work is a musical work, performance or phonogram, the synchronization of the Work in timed-relation with a moving image ("synching") will be considered an Adaptation for the purpose of this License.</li> <li><strong>"Collection"</strong> means a collection of literary or artistic works, such as encyclopedias and anthologies, or performances, phonograms or broadcasts, or other works or subject matter other than works listed in Section 1(f) below, which, by reason of the selection and arrangement of their contents, constitute intellectual creations, in which the Work is included in its entirety in unmodified form along with one or more other contributions, each constituting separate and independent works in themselves, which together are assembled into a collective whole. 
A work that constitutes a Collection will not be considered an Adaptation (as defined below) for the purposes of this License.</li> <li><strong>"Creative Commons Compatible License"</strong> means a license that is listed at http://creativecommons.org/compatiblelicenses that has been approved by Creative Commons as being essentially equivalent to this License, including, at a minimum, because that license: (i) contains terms that have the same purpose, meaning and effect as the License Elements of this License; and, (ii) explicitly permits the relicensing of adaptations of works made available under that license under this License or a Creative Commons jurisdiction license with the same License Elements as this License.</li> <li><strong>"Distribute"</strong> means to make available to the public the original and copies of the Work or Adaptation, as appropriate, through sale or other transfer of ownership.</li> <li><strong>"License Elements"</strong> means the following high-level license attributes as selected by Licensor and indicated in the title of this License: Attribution, ShareAlike.</li> <li><strong>"Licensor"</strong> means the individual, individuals, entity or entities that offer(s) the Work under the terms of this License.</li> <li><strong>"Original Author"</strong> means, in the case of a literary or artistic work, the individual, individuals, entity or entities who created the Work or if no individual or entity can be identified, the publisher; and in addition (i) in the case of a performance the actors, singers, musicians, dancers, and other persons who act, sing, deliver, declaim, play in, interpret or otherwise perform literary or artistic works or expressions of folklore; (ii) in the case of a phonogram the producer being the person or legal entity who first fixes the sounds of a performance or other sounds; and, (iii) in the case of broadcasts, the organization that transmits the broadcast.</li> <li><strong>"Work"</strong> means the literary 
and/or artistic work offered under the terms of this License including without limitation any production in the literary, scientific and artistic domain, whatever may be the mode or form of its expression including digital form, such as a book, pamphlet and other writing; a lecture, address, sermon or other work of the same nature; a dramatic or dramatico-musical work; a choreographic work or entertainment in dumb show; a musical composition with or without words; a cinematographic work to which are assimilated works expressed by a process analogous to cinematography; a work of drawing, painting, architecture, sculpture, engraving or lithography; a photographic work to which are assimilated works expressed by a process analogous to photography; a work of applied art; an illustration, map, plan, sketch or three-dimensional work relative to geography, topography, architecture or science; a performance; a broadcast; a phonogram; a compilation of data to the extent it is protected as a copyrightable work; or a work performed by a variety or circus performer to the extent it is not otherwise considered a literary or artistic work.</li> <li><strong>"You"</strong> means an individual or entity exercising rights under this License who has not previously violated the terms of this License with respect to the Work, or who has received express permission from the Licensor to exercise rights under this License despite a previous violation.</li> <li><strong>"Publicly Perform"</strong> means to perform public recitations of the Work and to communicate to the public those public recitations, by any means or process, including by wire or wireless means or public digital performances; to make available to the public Works in such a way that members of the public may access these Works from a place and at a place individually chosen by them; to perform the Work to the public by any means or process and the communication to the public of the performances of the Work, including by 
public digital performance; to broadcast and rebroadcast the Work by any means including signs, sounds or images.</li> <li><strong>"Reproduce"</strong> means to make copies of the Work by any means including without limitation by sound or visual recordings and the right of fixation and reproducing fixations of the Work, including storage of a protected performance or phonogram in digital form or other electronic medium.</li> </ol> <p><strong>2. Fair Dealing Rights.</strong> Nothing in this License is intended to reduce, limit, or restrict any uses free from copyright or rights arising from limitations or exceptions that are provided for in connection with the copyright protection under copyright law or other applicable laws.</p> <p><strong>3. License Grant.</strong> Subject to the terms and conditions of this License, Licensor hereby grants You a worldwide, royalty-free, non-exclusive, perpetual (for the duration of the applicable copyright) license to exercise the rights in the Work as stated below:</p> <ol type="a"> <li>to Reproduce the Work, to incorporate the Work into one or more Collections, and to Reproduce the Work as incorporated in the Collections;</li> <li>to create and Reproduce Adaptations provided that any such Adaptation, including any translation in any medium, takes reasonable steps to clearly label, demarcate or otherwise identify that changes were made to the original Work. For example, a translation could be marked "The original work was translated from English to Spanish," or a modification could indicate "The original work has been modified.";</li> <li>to Distribute and Publicly Perform the Work including as incorporated in Collections; and,</li> <li>to Distribute and Publicly Perform Adaptations.</li> <li> <p>For the avoidance of doubt:</p> <ol type="i"> <li><strong>Non-waivable Compulsory License Schemes</strong>. 
In those jurisdictions in which the right to collect royalties through any statutory or compulsory licensing scheme cannot be waived, the Licensor reserves the exclusive right to collect such royalties for any exercise by You of the rights granted under this License;</li> <li><strong>Waivable Compulsory License Schemes</strong>. In those jurisdictions in which the right to collect royalties through any statutory or compulsory licensing scheme can be waived, the Licensor waives the exclusive right to collect such royalties for any exercise by You of the rights granted under this License; and,</li> <li><strong>Voluntary License Schemes</strong>. The Licensor waives the right to collect royalties, whether individually or, in the event that the Licensor is a member of a collecting society that administers voluntary licensing schemes, via that society, from any exercise by You of the rights granted under this License.</li> </ol> </li> </ol> <p>The above rights may be exercised in all media and formats whether now known or hereafter devised. The above rights include the right to make such modifications as are technically necessary to exercise the rights in other media and formats. Subject to Section 8(f), all rights not expressly granted by Licensor are hereby reserved.</p> <p><strong>4. Restrictions.</strong> The license granted in Section 3 above is expressly made subject to and limited by the following restrictions:</p> <ol type="a"> <li>You may Distribute or Publicly Perform the Work only under the terms of this License. You must include a copy of, or the Uniform Resource Identifier (URI) for, this License with every copy of the Work You Distribute or Publicly Perform. You may not offer or impose any terms on the Work that restrict the terms of this License or the ability of the recipient of the Work to exercise the rights granted to that recipient under the terms of the License. You may not sublicense the Work. 
You must keep intact all notices that refer to this License and to the disclaimer of warranties with every copy of the Work You Distribute or Publicly Perform. When You Distribute or Publicly Perform the Work, You may not impose any effective technological measures on the Work that restrict the ability of a recipient of the Work from You to exercise the rights granted to that recipient under the terms of the License. This Section 4(a) applies to the Work as incorporated in a Collection, but this does not require the Collection apart from the Work itself to be made subject to the terms of this License. If You create a Collection, upon notice from any Licensor You must, to the extent practicable, remove from the Collection any credit as required by Section 4(c), as requested. If You create an Adaptation, upon notice from any Licensor You must, to the extent practicable, remove from the Adaptation any credit as required by Section 4(c), as requested.</li> <li>You may Distribute or Publicly Perform an Adaptation only under the terms of: (i) this License; (ii) a later version of this License with the same License Elements as this License; (iii) a Creative Commons jurisdiction license (either this or a later license version) that contains the same License Elements as this License (e.g., Attribution-ShareAlike 3.0 US)); (iv) a Creative Commons Compatible License. If you license the Adaptation under one of the licenses mentioned in (iv), you must comply with the terms of that license. 
If you license the Adaptation under the terms of any of the licenses mentioned in (i), (ii) or (iii) (the "Applicable License"), you must comply with the terms of the Applicable License generally and the following provisions: (I) You must include a copy of, or the URI for, the Applicable License with every copy of each Adaptation You Distribute or Publicly Perform; (II) You may not offer or impose any terms on the Adaptation that restrict the terms of the Applicable License or the ability of the recipient of the Adaptation to exercise the rights granted to that recipient under the terms of the Applicable License; (III) You must keep intact all notices that refer to the Applicable License and to the disclaimer of warranties with every copy of the Work as included in the Adaptation You Distribute or Publicly Perform; (IV) when You Distribute or Publicly Perform the Adaptation, You may not impose any effective technological measures on the Adaptation that restrict the ability of a recipient of the Adaptation from You to exercise the rights granted to that recipient under the terms of the Applicable License. 
This Section 4(b) applies to the Adaptation as incorporated in a Collection, but this does not require the Collection apart from the Adaptation itself to be made subject to the terms of the Applicable License.</li> <li>If You Distribute, or Publicly Perform the Work or any Adaptations or Collections, You must, unless a request has been made pursuant to Section 4(a), keep intact all copyright notices for the Work and provide, reasonable to the medium or means You are utilizing: (i) the name of the Original Author (or pseudonym, if applicable) if supplied, and/or if the Original Author and/or Licensor designate another party or parties (e.g., a sponsor institute, publishing entity, journal) for attribution ("Attribution Parties") in Licensor's copyright notice, terms of service or by other reasonable means, the name of such party or parties; (ii) the title of the Work if supplied; (iii) to the extent reasonably practicable, the URI, if any, that Licensor specifies to be associated with the Work, unless such URI does not refer to the copyright notice or licensing information for the Work; and (iv) , consistent with Ssection 3(b), in the case of an Adaptation, a credit identifying the use of the Work in the Adaptation (e.g., "French translation of the Work by Original Author," or "Screenplay based on original Work by Original Author"). The credit required by this Section 4(c) may be implemented in any reasonable manner; provided, however, that in the case of a Adaptation or Collection, at a minimum such credit will appear, if a credit for all contributing authors of the Adaptation or Collection appears, then as part of these credits and in a manner at least as prominent as the credits for the other contributing authors. 
For the avoidance of doubt, You may only use the credit required by this Section for the purpose of attribution in the manner set out above and, by exercising Your rights under this License, You may not implicitly or explicitly assert or imply any connection with, sponsorship or endorsement by the Original Author, Licensor and/or Attribution Parties, as appropriate, of You or Your use of the Work, without the separate, express prior written permission of the Original Author, Licensor and/or Attribution Parties.</li> <li>Except as otherwise agreed in writing by the Licensor or as may be otherwise permitted by applicable law, if You Reproduce, Distribute or Publicly Perform the Work either by itself or as part of any Adaptations or Collections, You must not distort, mutilate, modify or take other derogatory action in relation to the Work which would be prejudicial to the Original Author's honor or reputation. Licensor agrees that in those jurisdictions (e.g. Japan), in which any exercise of the right granted in Section 3(b) of this License (the right to make Adaptations) would be deemed to be a distortion, mutilation, modification or other derogatory action prejudicial to the Original Author's honor and reputation, the Licensor will waive or not assert, as appropriate, this Section, to the fullest extent permitted by the applicable national law, to enable You to reasonably exercise Your right under Section 3(b) of this License (right to make Adaptations) but not otherwise.</li> </ol> <p><strong>5. 
Representations, Warranties and Disclaimer</strong></p> <p>UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU.</p> <p><strong>6. Limitation on Liability.</strong> EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.</p> <p><strong>7. Termination</strong></p> <ol type="a"> <li>This License and the rights granted hereunder will terminate automatically upon any breach by You of the terms of this License. Individuals or entities who have received Adaptations or Collections from You under this License, however, will not have their licenses terminated provided such individuals or entities remain in full compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will survive any termination of this License.</li> <li>Subject to the above terms and conditions, the license granted here is perpetual (for the duration of the applicable copyright in the Work). 
Notwithstanding the above, Licensor reserves the right to release the Work under different license terms or to stop distributing the Work at any time; provided, however that any such election will not serve to withdraw this License (or any other license that has been, or is required to be, granted under the terms of this License), and this License will continue in full force and effect unless terminated as stated above.</li> </ol> <p><strong>8. Miscellaneous</strong></p> <ol type="a"> <li>Each time You Distribute or Publicly Perform the Work or a Collection, the Licensor offers to the recipient a license to the Work on the same terms and conditions as the license granted to You under this License.</li> <li>Each time You Distribute or Publicly Perform an Adaptation, Licensor offers to the recipient a license to the original Work on the same terms and conditions as the license granted to You under this License.</li> <li>If any provision of this License is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this License, and without further action by the parties to this agreement, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable.</li> <li>No term or provision of this License shall be deemed waived and no breach consented to unless such waiver or consent shall be in writing and signed by the party to be charged with such waiver or consent.</li> <li>This License constitutes the entire agreement between the parties with respect to the Work licensed here. There are no understandings, agreements or representations with respect to the Work not specified here. Licensor shall not be bound by any additional provisions that may appear in any communication from You. 
This License may not be modified without the mutual written agreement of the Licensor and You.</li> <li>The rights granted under, and the subject matter referenced, in this License were drafted utilizing the terminology of the Berne Convention for the Protection of Literary and Artistic Works (as amended on September 28, 1979), the Rome Convention of 1961, the WIPO Copyright Treaty of 1996, the WIPO Performances and Phonograms Treaty of 1996 and the Universal Copyright Convention (as revised on July 24, 1971). These rights and subject matter take effect in the relevant jurisdiction in which the License terms are sought to be enforced according to the corresponding provisions of the implementation of those treaty provisions in the applicable national law. If the standard suite of rights granted under applicable copyright law includes additional rights not granted under this License, such additional rights are deemed to be included in the License; this License is not intended to restrict the license of any rights under applicable law.</li> </ol> <!-- BREAKOUT FOR CC NOTICE. NOT A PART OF THE LICENSE --> <blockquote> <h3>Creative Commons Notice</h3> <p>Creative Commons is not a party to this License, and makes no warranty whatsoever in connection with the Work. Creative Commons will not be liable to You or any party on any legal theory for any damages whatsoever, including without limitation any general, special, incidental or consequential damages arising in connection to this license. 
Notwithstanding the foregoing two (2) sentences, if Creative Commons has expressly identified itself as the Licensor hereunder, it shall have all rights and obligations of Licensor.</p> <p>Except for the limited purpose of indicating to the public that the Work is licensed under the CCPL, Creative Commons does not authorize the use by either party of the trademark "Creative Commons" or any related trademark or logo of Creative Commons without the prior written consent of Creative Commons. Any permitted use will be in compliance with Creative Commons' then-current trademark usage guidelines, as may be published on its website or otherwise made available upon request from time to time. For the avoidance of doubt, this trademark restriction does not form part of the License.</p> <p>Creative Commons may be contacted at <a href= "http://creativecommons.org/">http://creativecommons.org/</a>.</p> </blockquote><!-- END CC NOTICE --> </div> </div> <div id="deed-foot"> <p id="footer"><a href="./">« Back to Commons Deed</a></p> </div> </div> </body> </html>
rogers0/namebench
third_party/get_auth_ns/LICENSE.html
HTML
apache-2.0
28,073
package org.apache.lucene.search;

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;

import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.ToStringUtils;

/**
 * A Query that matches documents within a range of terms.
 *
 * <p>This query matches the documents looking for terms that fall into the
 * supplied range according to {@link BytesRef#compareTo(BytesRef)}. It is not
 * intended for numerical ranges; use {@link NumericRangeQuery} instead.
 *
 * <p>This query uses the
 * {@link MultiTermQuery#CONSTANT_SCORE_AUTO_REWRITE_DEFAULT} rewrite method.
 *
 * @since 2.9
 */
public class TermRangeQuery extends MultiTermQuery {
  // Range endpoints and inclusiveness flags; set once in the constructor and
  // never mutated, hence final. A null endpoint means the range is "open" on
  // that side.
  private final BytesRef lowerTerm;
  private final BytesRef upperTerm;
  private final boolean includeLower;
  private final boolean includeUpper;

  /**
   * Constructs a query selecting all terms greater/equal than
   * <code>lowerTerm</code> but less/equal than <code>upperTerm</code>.
   *
   * <p>If an endpoint is null, it is said to be "open". Either or both
   * endpoints may be open. Open endpoints may not be exclusive (you can't
   * select all but the first or last term without explicitly specifying the
   * term to exclude.)
   *
   * @param field The field that holds both lower and upper terms.
   * @param lowerTerm
   *          The term text at the lower end of the range
   * @param upperTerm
   *          The term text at the upper end of the range
   * @param includeLower
   *          If true, the <code>lowerTerm</code> is included in the range.
   * @param includeUpper
   *          If true, the <code>upperTerm</code> is included in the range.
   */
  public TermRangeQuery(String field, BytesRef lowerTerm, BytesRef upperTerm,
      boolean includeLower, boolean includeUpper) {
    super(field);
    this.lowerTerm = lowerTerm;
    this.upperTerm = upperTerm;
    this.includeLower = includeLower;
    this.includeUpper = includeUpper;
  }

  /**
   * Factory that creates a new TermRangeQuery using Strings for term text.
   */
  public static TermRangeQuery newStringRange(String field, String lowerTerm,
      String upperTerm, boolean includeLower, boolean includeUpper) {
    BytesRef lower = lowerTerm == null ? null : new BytesRef(lowerTerm);
    BytesRef upper = upperTerm == null ? null : new BytesRef(upperTerm);
    return new TermRangeQuery(field, lower, upper, includeLower, includeUpper);
  }

  /** Returns the lower value of this range query */
  public BytesRef getLowerTerm() {
    return lowerTerm;
  }

  /** Returns the upper value of this range query */
  public BytesRef getUpperTerm() {
    return upperTerm;
  }

  /** Returns <code>true</code> if the lower endpoint is inclusive */
  public boolean includesLower() {
    return includeLower;
  }

  /** Returns <code>true</code> if the upper endpoint is inclusive */
  public boolean includesUpper() {
    return includeUpper;
  }

  @Override
  protected TermsEnum getTermsEnum(Terms terms, AttributeSource atts) throws IOException {
    // An inverted range (lower > upper) can match nothing at all.
    if (lowerTerm != null && upperTerm != null && lowerTerm.compareTo(upperTerm) > 0) {
      return TermsEnum.EMPTY;
    }

    TermsEnum tenum = terms.iterator(null);

    // A fully open range (or one whose lower bound is an included empty term)
    // matches every term, so no filtering wrapper is needed.
    if ((lowerTerm == null || (includeLower && lowerTerm.length == 0)) && upperTerm == null) {
      return tenum;
    }
    return new TermRangeTermsEnum(tenum, lowerTerm, upperTerm, includeLower, includeUpper);
  }

  /** Prints a user-readable version of this query. */
  @Override
  public String toString(String field) {
    StringBuilder buffer = new StringBuilder();
    if (!getField().equals(field)) {
      buffer.append(getField());
      buffer.append(":");
    }
    buffer.append(includeLower ? '[' : '{');
    // TODO: all these toStrings for queries should just output the bytes, it might not be UTF-8!
    buffer.append(lowerTerm != null
        ? ("*".equals(Term.toString(lowerTerm)) ? "\\*" : Term.toString(lowerTerm))
        : "*");
    buffer.append(" TO ");
    buffer.append(upperTerm != null
        ? ("*".equals(Term.toString(upperTerm)) ? "\\*" : Term.toString(upperTerm))
        : "*");
    buffer.append(includeUpper ? ']' : '}');
    buffer.append(ToStringUtils.boost(getBoost()));
    return buffer.toString();
  }

  @Override
  public int hashCode() {
    final int prime = 31;
    int result = super.hashCode();
    result = prime * result + (includeLower ? 1231 : 1237);
    result = prime * result + (includeUpper ? 1231 : 1237);
    result = prime * result + ((lowerTerm == null) ? 0 : lowerTerm.hashCode());
    result = prime * result + ((upperTerm == null) ? 0 : upperTerm.hashCode());
    return result;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj)
      return true;
    if (!super.equals(obj))
      return false;
    if (getClass() != obj.getClass())
      return false;
    TermRangeQuery other = (TermRangeQuery) obj;
    if (includeLower != other.includeLower)
      return false;
    if (includeUpper != other.includeUpper)
      return false;
    if (lowerTerm == null) {
      if (other.lowerTerm != null)
        return false;
    } else if (!lowerTerm.equals(other.lowerTerm))
      return false;
    if (upperTerm == null) {
      if (other.upperTerm != null)
        return false;
    } else if (!upperTerm.equals(other.upperTerm))
      return false;
    return true;
  }
}
smartan/lucene
src/main/java/org/apache/lucene/search/TermRangeQuery.java
Java
apache-2.0
6,271
/* this ALWAYS GENERATED file contains the definitions for the interfaces */ /* File created by MIDL compiler version 8.01.0622 */ /* @@MIDL_FILE_HEADING( ) */ /* verify that the <rpcndr.h> version is high enough to compile this file*/ #ifndef __REQUIRED_RPCNDR_H_VERSION__ #define __REQUIRED_RPCNDR_H_VERSION__ 500 #endif /* verify that the <rpcsal.h> version is high enough to compile this file*/ #ifndef __REQUIRED_RPCSAL_H_VERSION__ #define __REQUIRED_RPCSAL_H_VERSION__ 100 #endif #include "rpc.h" #include "rpcndr.h" #ifndef __RPCNDR_H_VERSION__ #error this stub requires an updated version of <rpcndr.h> #endif /* __RPCNDR_H_VERSION__ */ #ifndef COM_NO_WINDOWS_H #include "windows.h" #include "ole2.h" #endif /*COM_NO_WINDOWS_H*/ #ifndef __dxgi1_3_h__ #define __dxgi1_3_h__ #if defined(_MSC_VER) && (_MSC_VER >= 1020) #pragma once #endif /* Forward Declarations */ #ifndef __IDXGIDevice3_FWD_DEFINED__ #define __IDXGIDevice3_FWD_DEFINED__ typedef interface IDXGIDevice3 IDXGIDevice3; #endif /* __IDXGIDevice3_FWD_DEFINED__ */ #ifndef __IDXGISwapChain2_FWD_DEFINED__ #define __IDXGISwapChain2_FWD_DEFINED__ typedef interface IDXGISwapChain2 IDXGISwapChain2; #endif /* __IDXGISwapChain2_FWD_DEFINED__ */ #ifndef __IDXGIOutput2_FWD_DEFINED__ #define __IDXGIOutput2_FWD_DEFINED__ typedef interface IDXGIOutput2 IDXGIOutput2; #endif /* __IDXGIOutput2_FWD_DEFINED__ */ #ifndef __IDXGIFactory3_FWD_DEFINED__ #define __IDXGIFactory3_FWD_DEFINED__ typedef interface IDXGIFactory3 IDXGIFactory3; #endif /* __IDXGIFactory3_FWD_DEFINED__ */ #ifndef __IDXGIDecodeSwapChain_FWD_DEFINED__ #define __IDXGIDecodeSwapChain_FWD_DEFINED__ typedef interface IDXGIDecodeSwapChain IDXGIDecodeSwapChain; #endif /* __IDXGIDecodeSwapChain_FWD_DEFINED__ */ #ifndef __IDXGIFactoryMedia_FWD_DEFINED__ #define __IDXGIFactoryMedia_FWD_DEFINED__ typedef interface IDXGIFactoryMedia IDXGIFactoryMedia; #endif /* __IDXGIFactoryMedia_FWD_DEFINED__ */ #ifndef __IDXGISwapChainMedia_FWD_DEFINED__ #define 
__IDXGISwapChainMedia_FWD_DEFINED__ typedef interface IDXGISwapChainMedia IDXGISwapChainMedia; #endif /* __IDXGISwapChainMedia_FWD_DEFINED__ */ #ifndef __IDXGIOutput3_FWD_DEFINED__ #define __IDXGIOutput3_FWD_DEFINED__ typedef interface IDXGIOutput3 IDXGIOutput3; #endif /* __IDXGIOutput3_FWD_DEFINED__ */ /* header files for imported files */ #include "dxgi1_2.h" #ifdef __cplusplus extern "C"{ #endif /* interface __MIDL_itf_dxgi1_3_0000_0000 */ /* [local] */ #include <winapifamily.h> // BK - pragma region App Family #if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) #define DXGI_CREATE_FACTORY_DEBUG 0x1 HRESULT WINAPI CreateDXGIFactory2(UINT Flags, REFIID riid, _COM_Outptr_ void **ppFactory); HRESULT WINAPI DXGIGetDebugInterface1(UINT Flags, REFIID riid, _COM_Outptr_ void **pDebug); extern RPC_IF_HANDLE __MIDL_itf_dxgi1_3_0000_0000_v0_0_c_ifspec; extern RPC_IF_HANDLE __MIDL_itf_dxgi1_3_0000_0000_v0_0_s_ifspec; #ifndef __IDXGIDevice3_INTERFACE_DEFINED__ #define __IDXGIDevice3_INTERFACE_DEFINED__ /* interface IDXGIDevice3 */ /* [unique][local][uuid][object] */ EXTERN_C const IID IID_IDXGIDevice3; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("6007896c-3244-4afd-bf18-a6d3beda5023") IDXGIDevice3 : public IDXGIDevice2 { public: virtual void STDMETHODCALLTYPE Trim( void) = 0; }; #else /* C style interface */ typedef struct IDXGIDevice3Vtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( IDXGIDevice3 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( IDXGIDevice3 * This); ULONG ( STDMETHODCALLTYPE *Release )( IDXGIDevice3 * This); HRESULT ( STDMETHODCALLTYPE *SetPrivateData )( IDXGIDevice3 * This, /* [annotation][in] */ _In_ REFGUID Name, /* [in] */ UINT DataSize, /* [annotation][in] */ _In_reads_bytes_(DataSize) const void *pData); HRESULT ( STDMETHODCALLTYPE *SetPrivateDataInterface )( IDXGIDevice3 * This, /* [annotation][in] */ _In_ REFGUID Name, /* 
[annotation][in] */ _In_opt_ const IUnknown *pUnknown); HRESULT ( STDMETHODCALLTYPE *GetPrivateData )( IDXGIDevice3 * This, /* [annotation][in] */ _In_ REFGUID Name, /* [annotation][out][in] */ _Inout_ UINT *pDataSize, /* [annotation][out] */ _Out_writes_bytes_(*pDataSize) void *pData); HRESULT ( STDMETHODCALLTYPE *GetParent )( IDXGIDevice3 * This, /* [annotation][in] */ _In_ REFIID riid, /* [annotation][retval][out] */ _COM_Outptr_ void **ppParent); HRESULT ( STDMETHODCALLTYPE *GetAdapter )( IDXGIDevice3 * This, /* [annotation][out] */ _COM_Outptr_ IDXGIAdapter **pAdapter); HRESULT ( STDMETHODCALLTYPE *CreateSurface )( IDXGIDevice3 * This, /* [annotation][in] */ _In_ const DXGI_SURFACE_DESC *pDesc, /* [in] */ UINT NumSurfaces, /* [in] */ DXGI_USAGE Usage, /* [annotation][in] */ _In_opt_ const DXGI_SHARED_RESOURCE *pSharedResource, /* [annotation][out] */ _COM_Outptr_ IDXGISurface **ppSurface); HRESULT ( STDMETHODCALLTYPE *QueryResourceResidency )( IDXGIDevice3 * This, /* [annotation][size_is][in] */ _In_reads_(NumResources) IUnknown *const *ppResources, /* [annotation][size_is][out] */ _Out_writes_(NumResources) DXGI_RESIDENCY *pResidencyStatus, /* [in] */ UINT NumResources); HRESULT ( STDMETHODCALLTYPE *SetGPUThreadPriority )( IDXGIDevice3 * This, /* [in] */ INT Priority); HRESULT ( STDMETHODCALLTYPE *GetGPUThreadPriority )( IDXGIDevice3 * This, /* [annotation][retval][out] */ _Out_ INT *pPriority); HRESULT ( STDMETHODCALLTYPE *SetMaximumFrameLatency )( IDXGIDevice3 * This, /* [in] */ UINT MaxLatency); HRESULT ( STDMETHODCALLTYPE *GetMaximumFrameLatency )( IDXGIDevice3 * This, /* [annotation][out] */ _Out_ UINT *pMaxLatency); HRESULT ( STDMETHODCALLTYPE *OfferResources )( IDXGIDevice3 * This, /* [annotation][in] */ _In_ UINT NumResources, /* [annotation][size_is][in] */ _In_reads_(NumResources) IDXGIResource *const *ppResources, /* [annotation][in] */ _In_ DXGI_OFFER_RESOURCE_PRIORITY Priority); HRESULT ( STDMETHODCALLTYPE *ReclaimResources )( IDXGIDevice3 * 
This, /* [annotation][in] */ _In_ UINT NumResources, /* [annotation][size_is][in] */ _In_reads_(NumResources) IDXGIResource *const *ppResources, /* [annotation][size_is][out] */ _Out_writes_all_opt_(NumResources) BOOL *pDiscarded); HRESULT ( STDMETHODCALLTYPE *EnqueueSetEvent )( IDXGIDevice3 * This, /* [annotation][in] */ _In_ HANDLE hEvent); void ( STDMETHODCALLTYPE *Trim )( IDXGIDevice3 * This); END_INTERFACE } IDXGIDevice3Vtbl; interface IDXGIDevice3 { CONST_VTBL struct IDXGIDevice3Vtbl *lpVtbl; }; #ifdef COBJMACROS #define IDXGIDevice3_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define IDXGIDevice3_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define IDXGIDevice3_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define IDXGIDevice3_SetPrivateData(This,Name,DataSize,pData) \ ( (This)->lpVtbl -> SetPrivateData(This,Name,DataSize,pData) ) #define IDXGIDevice3_SetPrivateDataInterface(This,Name,pUnknown) \ ( (This)->lpVtbl -> SetPrivateDataInterface(This,Name,pUnknown) ) #define IDXGIDevice3_GetPrivateData(This,Name,pDataSize,pData) \ ( (This)->lpVtbl -> GetPrivateData(This,Name,pDataSize,pData) ) #define IDXGIDevice3_GetParent(This,riid,ppParent) \ ( (This)->lpVtbl -> GetParent(This,riid,ppParent) ) #define IDXGIDevice3_GetAdapter(This,pAdapter) \ ( (This)->lpVtbl -> GetAdapter(This,pAdapter) ) #define IDXGIDevice3_CreateSurface(This,pDesc,NumSurfaces,Usage,pSharedResource,ppSurface) \ ( (This)->lpVtbl -> CreateSurface(This,pDesc,NumSurfaces,Usage,pSharedResource,ppSurface) ) #define IDXGIDevice3_QueryResourceResidency(This,ppResources,pResidencyStatus,NumResources) \ ( (This)->lpVtbl -> QueryResourceResidency(This,ppResources,pResidencyStatus,NumResources) ) #define IDXGIDevice3_SetGPUThreadPriority(This,Priority) \ ( (This)->lpVtbl -> SetGPUThreadPriority(This,Priority) ) #define IDXGIDevice3_GetGPUThreadPriority(This,pPriority) \ ( (This)->lpVtbl -> GetGPUThreadPriority(This,pPriority) ) #define 
IDXGIDevice3_SetMaximumFrameLatency(This,MaxLatency) \ ( (This)->lpVtbl -> SetMaximumFrameLatency(This,MaxLatency) ) #define IDXGIDevice3_GetMaximumFrameLatency(This,pMaxLatency) \ ( (This)->lpVtbl -> GetMaximumFrameLatency(This,pMaxLatency) ) #define IDXGIDevice3_OfferResources(This,NumResources,ppResources,Priority) \ ( (This)->lpVtbl -> OfferResources(This,NumResources,ppResources,Priority) ) #define IDXGIDevice3_ReclaimResources(This,NumResources,ppResources,pDiscarded) \ ( (This)->lpVtbl -> ReclaimResources(This,NumResources,ppResources,pDiscarded) ) #define IDXGIDevice3_EnqueueSetEvent(This,hEvent) \ ( (This)->lpVtbl -> EnqueueSetEvent(This,hEvent) ) #define IDXGIDevice3_Trim(This) \ ( (This)->lpVtbl -> Trim(This) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __IDXGIDevice3_INTERFACE_DEFINED__ */ /* interface __MIDL_itf_dxgi1_3_0000_0001 */ /* [local] */ typedef struct DXGI_MATRIX_3X2_F { FLOAT _11; FLOAT _12; FLOAT _21; FLOAT _22; FLOAT _31; FLOAT _32; } DXGI_MATRIX_3X2_F; extern RPC_IF_HANDLE __MIDL_itf_dxgi1_3_0000_0001_v0_0_c_ifspec; extern RPC_IF_HANDLE __MIDL_itf_dxgi1_3_0000_0001_v0_0_s_ifspec; #ifndef __IDXGISwapChain2_INTERFACE_DEFINED__ #define __IDXGISwapChain2_INTERFACE_DEFINED__ /* interface IDXGISwapChain2 */ /* [unique][local][uuid][object] */ EXTERN_C const IID IID_IDXGISwapChain2; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("a8be2ac4-199f-4946-b331-79599fb98de7") IDXGISwapChain2 : public IDXGISwapChain1 { public: virtual HRESULT STDMETHODCALLTYPE SetSourceSize( UINT Width, UINT Height) = 0; virtual HRESULT STDMETHODCALLTYPE GetSourceSize( /* [annotation][out] */ _Out_ UINT *pWidth, /* [annotation][out] */ _Out_ UINT *pHeight) = 0; virtual HRESULT STDMETHODCALLTYPE SetMaximumFrameLatency( UINT MaxLatency) = 0; virtual HRESULT STDMETHODCALLTYPE GetMaximumFrameLatency( /* [annotation][out] */ _Out_ UINT *pMaxLatency) = 0; virtual HANDLE STDMETHODCALLTYPE GetFrameLatencyWaitableObject( void) = 0; virtual 
HRESULT STDMETHODCALLTYPE SetMatrixTransform( const DXGI_MATRIX_3X2_F *pMatrix) = 0; virtual HRESULT STDMETHODCALLTYPE GetMatrixTransform( /* [annotation][out] */ _Out_ DXGI_MATRIX_3X2_F *pMatrix) = 0; }; #else /* C style interface */ typedef struct IDXGISwapChain2Vtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( IDXGISwapChain2 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( IDXGISwapChain2 * This); ULONG ( STDMETHODCALLTYPE *Release )( IDXGISwapChain2 * This); HRESULT ( STDMETHODCALLTYPE *SetPrivateData )( IDXGISwapChain2 * This, /* [annotation][in] */ _In_ REFGUID Name, /* [in] */ UINT DataSize, /* [annotation][in] */ _In_reads_bytes_(DataSize) const void *pData); HRESULT ( STDMETHODCALLTYPE *SetPrivateDataInterface )( IDXGISwapChain2 * This, /* [annotation][in] */ _In_ REFGUID Name, /* [annotation][in] */ _In_opt_ const IUnknown *pUnknown); HRESULT ( STDMETHODCALLTYPE *GetPrivateData )( IDXGISwapChain2 * This, /* [annotation][in] */ _In_ REFGUID Name, /* [annotation][out][in] */ _Inout_ UINT *pDataSize, /* [annotation][out] */ _Out_writes_bytes_(*pDataSize) void *pData); HRESULT ( STDMETHODCALLTYPE *GetParent )( IDXGISwapChain2 * This, /* [annotation][in] */ _In_ REFIID riid, /* [annotation][retval][out] */ _COM_Outptr_ void **ppParent); HRESULT ( STDMETHODCALLTYPE *GetDevice )( IDXGISwapChain2 * This, /* [annotation][in] */ _In_ REFIID riid, /* [annotation][retval][out] */ _COM_Outptr_ void **ppDevice); HRESULT ( STDMETHODCALLTYPE *Present )( IDXGISwapChain2 * This, /* [in] */ UINT SyncInterval, /* [in] */ UINT Flags); HRESULT ( STDMETHODCALLTYPE *GetBuffer )( IDXGISwapChain2 * This, /* [in] */ UINT Buffer, /* [annotation][in] */ _In_ REFIID riid, /* [annotation][out][in] */ _COM_Outptr_ void **ppSurface); HRESULT ( STDMETHODCALLTYPE *SetFullscreenState )( IDXGISwapChain2 * This, /* [in] */ BOOL Fullscreen, /* [annotation][in] */ _In_opt_ IDXGIOutput 
*pTarget); HRESULT ( STDMETHODCALLTYPE *GetFullscreenState )( IDXGISwapChain2 * This, /* [annotation][out] */ _Out_opt_ BOOL *pFullscreen, /* [annotation][out] */ _COM_Outptr_opt_result_maybenull_ IDXGIOutput **ppTarget); HRESULT ( STDMETHODCALLTYPE *GetDesc )( IDXGISwapChain2 * This, /* [annotation][out] */ _Out_ DXGI_SWAP_CHAIN_DESC *pDesc); HRESULT ( STDMETHODCALLTYPE *ResizeBuffers )( IDXGISwapChain2 * This, /* [in] */ UINT BufferCount, /* [in] */ UINT Width, /* [in] */ UINT Height, /* [in] */ DXGI_FORMAT NewFormat, /* [in] */ UINT SwapChainFlags); HRESULT ( STDMETHODCALLTYPE *ResizeTarget )( IDXGISwapChain2 * This, /* [annotation][in] */ _In_ const DXGI_MODE_DESC *pNewTargetParameters); HRESULT ( STDMETHODCALLTYPE *GetContainingOutput )( IDXGISwapChain2 * This, /* [annotation][out] */ _COM_Outptr_ IDXGIOutput **ppOutput); HRESULT ( STDMETHODCALLTYPE *GetFrameStatistics )( IDXGISwapChain2 * This, /* [annotation][out] */ _Out_ DXGI_FRAME_STATISTICS *pStats); HRESULT ( STDMETHODCALLTYPE *GetLastPresentCount )( IDXGISwapChain2 * This, /* [annotation][out] */ _Out_ UINT *pLastPresentCount); HRESULT ( STDMETHODCALLTYPE *GetDesc1 )( IDXGISwapChain2 * This, /* [annotation][out] */ _Out_ DXGI_SWAP_CHAIN_DESC1 *pDesc); HRESULT ( STDMETHODCALLTYPE *GetFullscreenDesc )( IDXGISwapChain2 * This, /* [annotation][out] */ _Out_ DXGI_SWAP_CHAIN_FULLSCREEN_DESC *pDesc); HRESULT ( STDMETHODCALLTYPE *GetHwnd )( IDXGISwapChain2 * This, /* [annotation][out] */ _Out_ HWND *pHwnd); HRESULT ( STDMETHODCALLTYPE *GetCoreWindow )( IDXGISwapChain2 * This, /* [annotation][in] */ _In_ REFIID refiid, /* [annotation][out] */ _COM_Outptr_ void **ppUnk); HRESULT ( STDMETHODCALLTYPE *Present1 )( IDXGISwapChain2 * This, /* [in] */ UINT SyncInterval, /* [in] */ UINT PresentFlags, /* [annotation][in] */ _In_ const DXGI_PRESENT_PARAMETERS *pPresentParameters); BOOL ( STDMETHODCALLTYPE *IsTemporaryMonoSupported )( IDXGISwapChain2 * This); HRESULT ( STDMETHODCALLTYPE *GetRestrictToOutput )( 
IDXGISwapChain2 * This, /* [annotation][out] */ _Out_ IDXGIOutput **ppRestrictToOutput); HRESULT ( STDMETHODCALLTYPE *SetBackgroundColor )( IDXGISwapChain2 * This, /* [annotation][in] */ _In_ const DXGI_RGBA *pColor); HRESULT ( STDMETHODCALLTYPE *GetBackgroundColor )( IDXGISwapChain2 * This, /* [annotation][out] */ _Out_ DXGI_RGBA *pColor); HRESULT ( STDMETHODCALLTYPE *SetRotation )( IDXGISwapChain2 * This, /* [annotation][in] */ _In_ DXGI_MODE_ROTATION Rotation); HRESULT ( STDMETHODCALLTYPE *GetRotation )( IDXGISwapChain2 * This, /* [annotation][out] */ _Out_ DXGI_MODE_ROTATION *pRotation); HRESULT ( STDMETHODCALLTYPE *SetSourceSize )( IDXGISwapChain2 * This, UINT Width, UINT Height); HRESULT ( STDMETHODCALLTYPE *GetSourceSize )( IDXGISwapChain2 * This, /* [annotation][out] */ _Out_ UINT *pWidth, /* [annotation][out] */ _Out_ UINT *pHeight); HRESULT ( STDMETHODCALLTYPE *SetMaximumFrameLatency )( IDXGISwapChain2 * This, UINT MaxLatency); HRESULT ( STDMETHODCALLTYPE *GetMaximumFrameLatency )( IDXGISwapChain2 * This, /* [annotation][out] */ _Out_ UINT *pMaxLatency); HANDLE ( STDMETHODCALLTYPE *GetFrameLatencyWaitableObject )( IDXGISwapChain2 * This); HRESULT ( STDMETHODCALLTYPE *SetMatrixTransform )( IDXGISwapChain2 * This, const DXGI_MATRIX_3X2_F *pMatrix); HRESULT ( STDMETHODCALLTYPE *GetMatrixTransform )( IDXGISwapChain2 * This, /* [annotation][out] */ _Out_ DXGI_MATRIX_3X2_F *pMatrix); END_INTERFACE } IDXGISwapChain2Vtbl; interface IDXGISwapChain2 { CONST_VTBL struct IDXGISwapChain2Vtbl *lpVtbl; }; #ifdef COBJMACROS #define IDXGISwapChain2_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define IDXGISwapChain2_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define IDXGISwapChain2_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define IDXGISwapChain2_SetPrivateData(This,Name,DataSize,pData) \ ( (This)->lpVtbl -> SetPrivateData(This,Name,DataSize,pData) ) #define 
IDXGISwapChain2_SetPrivateDataInterface(This,Name,pUnknown) \ ( (This)->lpVtbl -> SetPrivateDataInterface(This,Name,pUnknown) ) #define IDXGISwapChain2_GetPrivateData(This,Name,pDataSize,pData) \ ( (This)->lpVtbl -> GetPrivateData(This,Name,pDataSize,pData) ) #define IDXGISwapChain2_GetParent(This,riid,ppParent) \ ( (This)->lpVtbl -> GetParent(This,riid,ppParent) ) #define IDXGISwapChain2_GetDevice(This,riid,ppDevice) \ ( (This)->lpVtbl -> GetDevice(This,riid,ppDevice) ) #define IDXGISwapChain2_Present(This,SyncInterval,Flags) \ ( (This)->lpVtbl -> Present(This,SyncInterval,Flags) ) #define IDXGISwapChain2_GetBuffer(This,Buffer,riid,ppSurface) \ ( (This)->lpVtbl -> GetBuffer(This,Buffer,riid,ppSurface) ) #define IDXGISwapChain2_SetFullscreenState(This,Fullscreen,pTarget) \ ( (This)->lpVtbl -> SetFullscreenState(This,Fullscreen,pTarget) ) #define IDXGISwapChain2_GetFullscreenState(This,pFullscreen,ppTarget) \ ( (This)->lpVtbl -> GetFullscreenState(This,pFullscreen,ppTarget) ) #define IDXGISwapChain2_GetDesc(This,pDesc) \ ( (This)->lpVtbl -> GetDesc(This,pDesc) ) #define IDXGISwapChain2_ResizeBuffers(This,BufferCount,Width,Height,NewFormat,SwapChainFlags) \ ( (This)->lpVtbl -> ResizeBuffers(This,BufferCount,Width,Height,NewFormat,SwapChainFlags) ) #define IDXGISwapChain2_ResizeTarget(This,pNewTargetParameters) \ ( (This)->lpVtbl -> ResizeTarget(This,pNewTargetParameters) ) #define IDXGISwapChain2_GetContainingOutput(This,ppOutput) \ ( (This)->lpVtbl -> GetContainingOutput(This,ppOutput) ) #define IDXGISwapChain2_GetFrameStatistics(This,pStats) \ ( (This)->lpVtbl -> GetFrameStatistics(This,pStats) ) #define IDXGISwapChain2_GetLastPresentCount(This,pLastPresentCount) \ ( (This)->lpVtbl -> GetLastPresentCount(This,pLastPresentCount) ) #define IDXGISwapChain2_GetDesc1(This,pDesc) \ ( (This)->lpVtbl -> GetDesc1(This,pDesc) ) #define IDXGISwapChain2_GetFullscreenDesc(This,pDesc) \ ( (This)->lpVtbl -> GetFullscreenDesc(This,pDesc) ) #define 
IDXGISwapChain2_GetHwnd(This,pHwnd) \ ( (This)->lpVtbl -> GetHwnd(This,pHwnd) ) #define IDXGISwapChain2_GetCoreWindow(This,refiid,ppUnk) \ ( (This)->lpVtbl -> GetCoreWindow(This,refiid,ppUnk) ) #define IDXGISwapChain2_Present1(This,SyncInterval,PresentFlags,pPresentParameters) \ ( (This)->lpVtbl -> Present1(This,SyncInterval,PresentFlags,pPresentParameters) ) #define IDXGISwapChain2_IsTemporaryMonoSupported(This) \ ( (This)->lpVtbl -> IsTemporaryMonoSupported(This) ) #define IDXGISwapChain2_GetRestrictToOutput(This,ppRestrictToOutput) \ ( (This)->lpVtbl -> GetRestrictToOutput(This,ppRestrictToOutput) ) #define IDXGISwapChain2_SetBackgroundColor(This,pColor) \ ( (This)->lpVtbl -> SetBackgroundColor(This,pColor) ) #define IDXGISwapChain2_GetBackgroundColor(This,pColor) \ ( (This)->lpVtbl -> GetBackgroundColor(This,pColor) ) #define IDXGISwapChain2_SetRotation(This,Rotation) \ ( (This)->lpVtbl -> SetRotation(This,Rotation) ) #define IDXGISwapChain2_GetRotation(This,pRotation) \ ( (This)->lpVtbl -> GetRotation(This,pRotation) ) #define IDXGISwapChain2_SetSourceSize(This,Width,Height) \ ( (This)->lpVtbl -> SetSourceSize(This,Width,Height) ) #define IDXGISwapChain2_GetSourceSize(This,pWidth,pHeight) \ ( (This)->lpVtbl -> GetSourceSize(This,pWidth,pHeight) ) #define IDXGISwapChain2_SetMaximumFrameLatency(This,MaxLatency) \ ( (This)->lpVtbl -> SetMaximumFrameLatency(This,MaxLatency) ) #define IDXGISwapChain2_GetMaximumFrameLatency(This,pMaxLatency) \ ( (This)->lpVtbl -> GetMaximumFrameLatency(This,pMaxLatency) ) #define IDXGISwapChain2_GetFrameLatencyWaitableObject(This) \ ( (This)->lpVtbl -> GetFrameLatencyWaitableObject(This) ) #define IDXGISwapChain2_SetMatrixTransform(This,pMatrix) \ ( (This)->lpVtbl -> SetMatrixTransform(This,pMatrix) ) #define IDXGISwapChain2_GetMatrixTransform(This,pMatrix) \ ( (This)->lpVtbl -> GetMatrixTransform(This,pMatrix) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __IDXGISwapChain2_INTERFACE_DEFINED__ */ #ifndef 
__IDXGIOutput2_INTERFACE_DEFINED__ #define __IDXGIOutput2_INTERFACE_DEFINED__ /* interface IDXGIOutput2 */ /* [unique][local][uuid][object] */ EXTERN_C const IID IID_IDXGIOutput2; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("595e39d1-2724-4663-99b1-da969de28364") IDXGIOutput2 : public IDXGIOutput1 { public: virtual BOOL STDMETHODCALLTYPE SupportsOverlays( void) = 0; }; #else /* C style interface */ typedef struct IDXGIOutput2Vtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( IDXGIOutput2 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( IDXGIOutput2 * This); ULONG ( STDMETHODCALLTYPE *Release )( IDXGIOutput2 * This); HRESULT ( STDMETHODCALLTYPE *SetPrivateData )( IDXGIOutput2 * This, /* [annotation][in] */ _In_ REFGUID Name, /* [in] */ UINT DataSize, /* [annotation][in] */ _In_reads_bytes_(DataSize) const void *pData); HRESULT ( STDMETHODCALLTYPE *SetPrivateDataInterface )( IDXGIOutput2 * This, /* [annotation][in] */ _In_ REFGUID Name, /* [annotation][in] */ _In_opt_ const IUnknown *pUnknown); HRESULT ( STDMETHODCALLTYPE *GetPrivateData )( IDXGIOutput2 * This, /* [annotation][in] */ _In_ REFGUID Name, /* [annotation][out][in] */ _Inout_ UINT *pDataSize, /* [annotation][out] */ _Out_writes_bytes_(*pDataSize) void *pData); HRESULT ( STDMETHODCALLTYPE *GetParent )( IDXGIOutput2 * This, /* [annotation][in] */ _In_ REFIID riid, /* [annotation][retval][out] */ _COM_Outptr_ void **ppParent); HRESULT ( STDMETHODCALLTYPE *GetDesc )( IDXGIOutput2 * This, /* [annotation][out] */ _Out_ DXGI_OUTPUT_DESC *pDesc); HRESULT ( STDMETHODCALLTYPE *GetDisplayModeList )( IDXGIOutput2 * This, /* [in] */ DXGI_FORMAT EnumFormat, /* [in] */ UINT Flags, /* [annotation][out][in] */ _Inout_ UINT *pNumModes, /* [annotation][out] */ _Out_writes_to_opt_(*pNumModes,*pNumModes) DXGI_MODE_DESC *pDesc); HRESULT ( STDMETHODCALLTYPE *FindClosestMatchingMode )( IDXGIOutput2 * This, 
/* [annotation][in] */ _In_ const DXGI_MODE_DESC *pModeToMatch, /* [annotation][out] */ _Out_ DXGI_MODE_DESC *pClosestMatch, /* [annotation][in] */ _In_opt_ IUnknown *pConcernedDevice); HRESULT ( STDMETHODCALLTYPE *WaitForVBlank )( IDXGIOutput2 * This); HRESULT ( STDMETHODCALLTYPE *TakeOwnership )( IDXGIOutput2 * This, /* [annotation][in] */ _In_ IUnknown *pDevice, BOOL Exclusive); void ( STDMETHODCALLTYPE *ReleaseOwnership )( IDXGIOutput2 * This); HRESULT ( STDMETHODCALLTYPE *GetGammaControlCapabilities )( IDXGIOutput2 * This, /* [annotation][out] */ _Out_ DXGI_GAMMA_CONTROL_CAPABILITIES *pGammaCaps); HRESULT ( STDMETHODCALLTYPE *SetGammaControl )( IDXGIOutput2 * This, /* [annotation][in] */ _In_ const DXGI_GAMMA_CONTROL *pArray); HRESULT ( STDMETHODCALLTYPE *GetGammaControl )( IDXGIOutput2 * This, /* [annotation][out] */ _Out_ DXGI_GAMMA_CONTROL *pArray); HRESULT ( STDMETHODCALLTYPE *SetDisplaySurface )( IDXGIOutput2 * This, /* [annotation][in] */ _In_ IDXGISurface *pScanoutSurface); HRESULT ( STDMETHODCALLTYPE *GetDisplaySurfaceData )( IDXGIOutput2 * This, /* [annotation][in] */ _In_ IDXGISurface *pDestination); HRESULT ( STDMETHODCALLTYPE *GetFrameStatistics )( IDXGIOutput2 * This, /* [annotation][out] */ _Out_ DXGI_FRAME_STATISTICS *pStats); HRESULT ( STDMETHODCALLTYPE *GetDisplayModeList1 )( IDXGIOutput2 * This, /* [in] */ DXGI_FORMAT EnumFormat, /* [in] */ UINT Flags, /* [annotation][out][in] */ _Inout_ UINT *pNumModes, /* [annotation][out] */ _Out_writes_to_opt_(*pNumModes,*pNumModes) DXGI_MODE_DESC1 *pDesc); HRESULT ( STDMETHODCALLTYPE *FindClosestMatchingMode1 )( IDXGIOutput2 * This, /* [annotation][in] */ _In_ const DXGI_MODE_DESC1 *pModeToMatch, /* [annotation][out] */ _Out_ DXGI_MODE_DESC1 *pClosestMatch, /* [annotation][in] */ _In_opt_ IUnknown *pConcernedDevice); HRESULT ( STDMETHODCALLTYPE *GetDisplaySurfaceData1 )( IDXGIOutput2 * This, /* [annotation][in] */ _In_ IDXGIResource *pDestination); HRESULT ( STDMETHODCALLTYPE *DuplicateOutput )( 
IDXGIOutput2 * This, /* [annotation][in] */ _In_ IUnknown *pDevice, /* [annotation][out] */ _COM_Outptr_ IDXGIOutputDuplication **ppOutputDuplication); BOOL ( STDMETHODCALLTYPE *SupportsOverlays )( IDXGIOutput2 * This); END_INTERFACE } IDXGIOutput2Vtbl; interface IDXGIOutput2 { CONST_VTBL struct IDXGIOutput2Vtbl *lpVtbl; }; #ifdef COBJMACROS #define IDXGIOutput2_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define IDXGIOutput2_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define IDXGIOutput2_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define IDXGIOutput2_SetPrivateData(This,Name,DataSize,pData) \ ( (This)->lpVtbl -> SetPrivateData(This,Name,DataSize,pData) ) #define IDXGIOutput2_SetPrivateDataInterface(This,Name,pUnknown) \ ( (This)->lpVtbl -> SetPrivateDataInterface(This,Name,pUnknown) ) #define IDXGIOutput2_GetPrivateData(This,Name,pDataSize,pData) \ ( (This)->lpVtbl -> GetPrivateData(This,Name,pDataSize,pData) ) #define IDXGIOutput2_GetParent(This,riid,ppParent) \ ( (This)->lpVtbl -> GetParent(This,riid,ppParent) ) #define IDXGIOutput2_GetDesc(This,pDesc) \ ( (This)->lpVtbl -> GetDesc(This,pDesc) ) #define IDXGIOutput2_GetDisplayModeList(This,EnumFormat,Flags,pNumModes,pDesc) \ ( (This)->lpVtbl -> GetDisplayModeList(This,EnumFormat,Flags,pNumModes,pDesc) ) #define IDXGIOutput2_FindClosestMatchingMode(This,pModeToMatch,pClosestMatch,pConcernedDevice) \ ( (This)->lpVtbl -> FindClosestMatchingMode(This,pModeToMatch,pClosestMatch,pConcernedDevice) ) #define IDXGIOutput2_WaitForVBlank(This) \ ( (This)->lpVtbl -> WaitForVBlank(This) ) #define IDXGIOutput2_TakeOwnership(This,pDevice,Exclusive) \ ( (This)->lpVtbl -> TakeOwnership(This,pDevice,Exclusive) ) #define IDXGIOutput2_ReleaseOwnership(This) \ ( (This)->lpVtbl -> ReleaseOwnership(This) ) #define IDXGIOutput2_GetGammaControlCapabilities(This,pGammaCaps) \ ( (This)->lpVtbl -> GetGammaControlCapabilities(This,pGammaCaps) ) #define 
IDXGIOutput2_SetGammaControl(This,pArray) \ ( (This)->lpVtbl -> SetGammaControl(This,pArray) ) #define IDXGIOutput2_GetGammaControl(This,pArray) \ ( (This)->lpVtbl -> GetGammaControl(This,pArray) ) #define IDXGIOutput2_SetDisplaySurface(This,pScanoutSurface) \ ( (This)->lpVtbl -> SetDisplaySurface(This,pScanoutSurface) ) #define IDXGIOutput2_GetDisplaySurfaceData(This,pDestination) \ ( (This)->lpVtbl -> GetDisplaySurfaceData(This,pDestination) ) #define IDXGIOutput2_GetFrameStatistics(This,pStats) \ ( (This)->lpVtbl -> GetFrameStatistics(This,pStats) ) #define IDXGIOutput2_GetDisplayModeList1(This,EnumFormat,Flags,pNumModes,pDesc) \ ( (This)->lpVtbl -> GetDisplayModeList1(This,EnumFormat,Flags,pNumModes,pDesc) ) #define IDXGIOutput2_FindClosestMatchingMode1(This,pModeToMatch,pClosestMatch,pConcernedDevice) \ ( (This)->lpVtbl -> FindClosestMatchingMode1(This,pModeToMatch,pClosestMatch,pConcernedDevice) ) #define IDXGIOutput2_GetDisplaySurfaceData1(This,pDestination) \ ( (This)->lpVtbl -> GetDisplaySurfaceData1(This,pDestination) ) #define IDXGIOutput2_DuplicateOutput(This,pDevice,ppOutputDuplication) \ ( (This)->lpVtbl -> DuplicateOutput(This,pDevice,ppOutputDuplication) ) #define IDXGIOutput2_SupportsOverlays(This) \ ( (This)->lpVtbl -> SupportsOverlays(This) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __IDXGIOutput2_INTERFACE_DEFINED__ */ #ifndef __IDXGIFactory3_INTERFACE_DEFINED__ #define __IDXGIFactory3_INTERFACE_DEFINED__ /* interface IDXGIFactory3 */ /* [unique][local][uuid][object] */ EXTERN_C const IID IID_IDXGIFactory3; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("25483823-cd46-4c7d-86ca-47aa95b837bd") IDXGIFactory3 : public IDXGIFactory2 { public: virtual UINT STDMETHODCALLTYPE GetCreationFlags( void) = 0; }; #else /* C style interface */ typedef struct IDXGIFactory3Vtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( IDXGIFactory3 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ 
_COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( IDXGIFactory3 * This); ULONG ( STDMETHODCALLTYPE *Release )( IDXGIFactory3 * This); HRESULT ( STDMETHODCALLTYPE *SetPrivateData )( IDXGIFactory3 * This, /* [annotation][in] */ _In_ REFGUID Name, /* [in] */ UINT DataSize, /* [annotation][in] */ _In_reads_bytes_(DataSize) const void *pData); HRESULT ( STDMETHODCALLTYPE *SetPrivateDataInterface )( IDXGIFactory3 * This, /* [annotation][in] */ _In_ REFGUID Name, /* [annotation][in] */ _In_opt_ const IUnknown *pUnknown); HRESULT ( STDMETHODCALLTYPE *GetPrivateData )( IDXGIFactory3 * This, /* [annotation][in] */ _In_ REFGUID Name, /* [annotation][out][in] */ _Inout_ UINT *pDataSize, /* [annotation][out] */ _Out_writes_bytes_(*pDataSize) void *pData); HRESULT ( STDMETHODCALLTYPE *GetParent )( IDXGIFactory3 * This, /* [annotation][in] */ _In_ REFIID riid, /* [annotation][retval][out] */ _COM_Outptr_ void **ppParent); HRESULT ( STDMETHODCALLTYPE *EnumAdapters )( IDXGIFactory3 * This, /* [in] */ UINT Adapter, /* [annotation][out] */ _COM_Outptr_ IDXGIAdapter **ppAdapter); HRESULT ( STDMETHODCALLTYPE *MakeWindowAssociation )( IDXGIFactory3 * This, HWND WindowHandle, UINT Flags); HRESULT ( STDMETHODCALLTYPE *GetWindowAssociation )( IDXGIFactory3 * This, /* [annotation][out] */ _Out_ HWND *pWindowHandle); HRESULT ( STDMETHODCALLTYPE *CreateSwapChain )( IDXGIFactory3 * This, /* [annotation][in] */ _In_ IUnknown *pDevice, /* [annotation][in] */ _In_ DXGI_SWAP_CHAIN_DESC *pDesc, /* [annotation][out] */ _COM_Outptr_ IDXGISwapChain **ppSwapChain); HRESULT ( STDMETHODCALLTYPE *CreateSoftwareAdapter )( IDXGIFactory3 * This, /* [in] */ HMODULE Module, /* [annotation][out] */ _COM_Outptr_ IDXGIAdapter **ppAdapter); HRESULT ( STDMETHODCALLTYPE *EnumAdapters1 )( IDXGIFactory3 * This, /* [in] */ UINT Adapter, /* [annotation][out] */ _COM_Outptr_ IDXGIAdapter1 **ppAdapter); BOOL ( STDMETHODCALLTYPE *IsCurrent )( IDXGIFactory3 * This); BOOL ( STDMETHODCALLTYPE 
*IsWindowedStereoEnabled )( IDXGIFactory3 * This); HRESULT ( STDMETHODCALLTYPE *CreateSwapChainForHwnd )( IDXGIFactory3 * This, /* [annotation][in] */ _In_ IUnknown *pDevice, /* [annotation][in] */ _In_ HWND hWnd, /* [annotation][in] */ _In_ const DXGI_SWAP_CHAIN_DESC1 *pDesc, /* [annotation][in] */ _In_opt_ const DXGI_SWAP_CHAIN_FULLSCREEN_DESC *pFullscreenDesc, /* [annotation][in] */ _In_opt_ IDXGIOutput *pRestrictToOutput, /* [annotation][out] */ _COM_Outptr_ IDXGISwapChain1 **ppSwapChain); HRESULT ( STDMETHODCALLTYPE *CreateSwapChainForCoreWindow )( IDXGIFactory3 * This, /* [annotation][in] */ _In_ IUnknown *pDevice, /* [annotation][in] */ _In_ IUnknown *pWindow, /* [annotation][in] */ _In_ const DXGI_SWAP_CHAIN_DESC1 *pDesc, /* [annotation][in] */ _In_opt_ IDXGIOutput *pRestrictToOutput, /* [annotation][out] */ _COM_Outptr_ IDXGISwapChain1 **ppSwapChain); HRESULT ( STDMETHODCALLTYPE *GetSharedResourceAdapterLuid )( IDXGIFactory3 * This, /* [annotation] */ _In_ HANDLE hResource, /* [annotation] */ _Out_ LUID *pLuid); HRESULT ( STDMETHODCALLTYPE *RegisterStereoStatusWindow )( IDXGIFactory3 * This, /* [annotation][in] */ _In_ HWND WindowHandle, /* [annotation][in] */ _In_ UINT wMsg, /* [annotation][out] */ _Out_ DWORD *pdwCookie); HRESULT ( STDMETHODCALLTYPE *RegisterStereoStatusEvent )( IDXGIFactory3 * This, /* [annotation][in] */ _In_ HANDLE hEvent, /* [annotation][out] */ _Out_ DWORD *pdwCookie); void ( STDMETHODCALLTYPE *UnregisterStereoStatus )( IDXGIFactory3 * This, /* [annotation][in] */ _In_ DWORD dwCookie); HRESULT ( STDMETHODCALLTYPE *RegisterOcclusionStatusWindow )( IDXGIFactory3 * This, /* [annotation][in] */ _In_ HWND WindowHandle, /* [annotation][in] */ _In_ UINT wMsg, /* [annotation][out] */ _Out_ DWORD *pdwCookie); HRESULT ( STDMETHODCALLTYPE *RegisterOcclusionStatusEvent )( IDXGIFactory3 * This, /* [annotation][in] */ _In_ HANDLE hEvent, /* [annotation][out] */ _Out_ DWORD *pdwCookie); void ( STDMETHODCALLTYPE *UnregisterOcclusionStatus )( 
IDXGIFactory3 * This, /* [annotation][in] */ _In_ DWORD dwCookie); HRESULT ( STDMETHODCALLTYPE *CreateSwapChainForComposition )( IDXGIFactory3 * This, /* [annotation][in] */ _In_ IUnknown *pDevice, /* [annotation][in] */ _In_ const DXGI_SWAP_CHAIN_DESC1 *pDesc, /* [annotation][in] */ _In_opt_ IDXGIOutput *pRestrictToOutput, /* [annotation][out] */ _COM_Outptr_ IDXGISwapChain1 **ppSwapChain); UINT ( STDMETHODCALLTYPE *GetCreationFlags )( IDXGIFactory3 * This); END_INTERFACE } IDXGIFactory3Vtbl; interface IDXGIFactory3 { CONST_VTBL struct IDXGIFactory3Vtbl *lpVtbl; }; #ifdef COBJMACROS #define IDXGIFactory3_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define IDXGIFactory3_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define IDXGIFactory3_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define IDXGIFactory3_SetPrivateData(This,Name,DataSize,pData) \ ( (This)->lpVtbl -> SetPrivateData(This,Name,DataSize,pData) ) #define IDXGIFactory3_SetPrivateDataInterface(This,Name,pUnknown) \ ( (This)->lpVtbl -> SetPrivateDataInterface(This,Name,pUnknown) ) #define IDXGIFactory3_GetPrivateData(This,Name,pDataSize,pData) \ ( (This)->lpVtbl -> GetPrivateData(This,Name,pDataSize,pData) ) #define IDXGIFactory3_GetParent(This,riid,ppParent) \ ( (This)->lpVtbl -> GetParent(This,riid,ppParent) ) #define IDXGIFactory3_EnumAdapters(This,Adapter,ppAdapter) \ ( (This)->lpVtbl -> EnumAdapters(This,Adapter,ppAdapter) ) #define IDXGIFactory3_MakeWindowAssociation(This,WindowHandle,Flags) \ ( (This)->lpVtbl -> MakeWindowAssociation(This,WindowHandle,Flags) ) #define IDXGIFactory3_GetWindowAssociation(This,pWindowHandle) \ ( (This)->lpVtbl -> GetWindowAssociation(This,pWindowHandle) ) #define IDXGIFactory3_CreateSwapChain(This,pDevice,pDesc,ppSwapChain) \ ( (This)->lpVtbl -> CreateSwapChain(This,pDevice,pDesc,ppSwapChain) ) #define IDXGIFactory3_CreateSoftwareAdapter(This,Module,ppAdapter) \ ( (This)->lpVtbl -> 
CreateSoftwareAdapter(This,Module,ppAdapter) ) #define IDXGIFactory3_EnumAdapters1(This,Adapter,ppAdapter) \ ( (This)->lpVtbl -> EnumAdapters1(This,Adapter,ppAdapter) ) #define IDXGIFactory3_IsCurrent(This) \ ( (This)->lpVtbl -> IsCurrent(This) ) #define IDXGIFactory3_IsWindowedStereoEnabled(This) \ ( (This)->lpVtbl -> IsWindowedStereoEnabled(This) ) #define IDXGIFactory3_CreateSwapChainForHwnd(This,pDevice,hWnd,pDesc,pFullscreenDesc,pRestrictToOutput,ppSwapChain) \ ( (This)->lpVtbl -> CreateSwapChainForHwnd(This,pDevice,hWnd,pDesc,pFullscreenDesc,pRestrictToOutput,ppSwapChain) ) #define IDXGIFactory3_CreateSwapChainForCoreWindow(This,pDevice,pWindow,pDesc,pRestrictToOutput,ppSwapChain) \ ( (This)->lpVtbl -> CreateSwapChainForCoreWindow(This,pDevice,pWindow,pDesc,pRestrictToOutput,ppSwapChain) ) #define IDXGIFactory3_GetSharedResourceAdapterLuid(This,hResource,pLuid) \ ( (This)->lpVtbl -> GetSharedResourceAdapterLuid(This,hResource,pLuid) ) #define IDXGIFactory3_RegisterStereoStatusWindow(This,WindowHandle,wMsg,pdwCookie) \ ( (This)->lpVtbl -> RegisterStereoStatusWindow(This,WindowHandle,wMsg,pdwCookie) ) #define IDXGIFactory3_RegisterStereoStatusEvent(This,hEvent,pdwCookie) \ ( (This)->lpVtbl -> RegisterStereoStatusEvent(This,hEvent,pdwCookie) ) #define IDXGIFactory3_UnregisterStereoStatus(This,dwCookie) \ ( (This)->lpVtbl -> UnregisterStereoStatus(This,dwCookie) ) #define IDXGIFactory3_RegisterOcclusionStatusWindow(This,WindowHandle,wMsg,pdwCookie) \ ( (This)->lpVtbl -> RegisterOcclusionStatusWindow(This,WindowHandle,wMsg,pdwCookie) ) #define IDXGIFactory3_RegisterOcclusionStatusEvent(This,hEvent,pdwCookie) \ ( (This)->lpVtbl -> RegisterOcclusionStatusEvent(This,hEvent,pdwCookie) ) #define IDXGIFactory3_UnregisterOcclusionStatus(This,dwCookie) \ ( (This)->lpVtbl -> UnregisterOcclusionStatus(This,dwCookie) ) #define IDXGIFactory3_CreateSwapChainForComposition(This,pDevice,pDesc,pRestrictToOutput,ppSwapChain) \ ( (This)->lpVtbl -> 
CreateSwapChainForComposition(This,pDevice,pDesc,pRestrictToOutput,ppSwapChain) ) #define IDXGIFactory3_GetCreationFlags(This) \ ( (This)->lpVtbl -> GetCreationFlags(This) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __IDXGIFactory3_INTERFACE_DEFINED__ */ /* interface __MIDL_itf_dxgi1_3_0000_0004 */ /* [local] */ #endif /* WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) */ // BK - pragma endregion // BK - pragma region App Family #if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) typedef struct DXGI_DECODE_SWAP_CHAIN_DESC { UINT Flags; } DXGI_DECODE_SWAP_CHAIN_DESC; typedef enum DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAGS { DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAG_NOMINAL_RANGE = 0x1, DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAG_BT709 = 0x2, DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAG_xvYCC = 0x4 } DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAGS; extern RPC_IF_HANDLE __MIDL_itf_dxgi1_3_0000_0004_v0_0_c_ifspec; extern RPC_IF_HANDLE __MIDL_itf_dxgi1_3_0000_0004_v0_0_s_ifspec; #ifndef __IDXGIDecodeSwapChain_INTERFACE_DEFINED__ #define __IDXGIDecodeSwapChain_INTERFACE_DEFINED__ /* interface IDXGIDecodeSwapChain */ /* [unique][local][uuid][object] */ EXTERN_C const IID IID_IDXGIDecodeSwapChain; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("2633066b-4514-4c7a-8fd8-12ea98059d18") IDXGIDecodeSwapChain : public IUnknown { public: virtual HRESULT STDMETHODCALLTYPE PresentBuffer( UINT BufferToPresent, UINT SyncInterval, UINT Flags) = 0; virtual HRESULT STDMETHODCALLTYPE SetSourceRect( const RECT *pRect) = 0; virtual HRESULT STDMETHODCALLTYPE SetTargetRect( const RECT *pRect) = 0; virtual HRESULT STDMETHODCALLTYPE SetDestSize( UINT Width, UINT Height) = 0; virtual HRESULT STDMETHODCALLTYPE GetSourceRect( /* [annotation][out] */ _Out_ RECT *pRect) = 0; virtual HRESULT STDMETHODCALLTYPE GetTargetRect( /* [annotation][out] */ _Out_ RECT *pRect) = 0; virtual HRESULT STDMETHODCALLTYPE GetDestSize( /* [annotation][out] */ _Out_ UINT *pWidth, /* [annotation][out] */ _Out_ UINT *pHeight) 
= 0; virtual HRESULT STDMETHODCALLTYPE SetColorSpace( DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAGS ColorSpace) = 0; virtual DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAGS STDMETHODCALLTYPE GetColorSpace( void) = 0; }; #else /* C style interface */ typedef struct IDXGIDecodeSwapChainVtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( IDXGIDecodeSwapChain * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( IDXGIDecodeSwapChain * This); ULONG ( STDMETHODCALLTYPE *Release )( IDXGIDecodeSwapChain * This); HRESULT ( STDMETHODCALLTYPE *PresentBuffer )( IDXGIDecodeSwapChain * This, UINT BufferToPresent, UINT SyncInterval, UINT Flags); HRESULT ( STDMETHODCALLTYPE *SetSourceRect )( IDXGIDecodeSwapChain * This, const RECT *pRect); HRESULT ( STDMETHODCALLTYPE *SetTargetRect )( IDXGIDecodeSwapChain * This, const RECT *pRect); HRESULT ( STDMETHODCALLTYPE *SetDestSize )( IDXGIDecodeSwapChain * This, UINT Width, UINT Height); HRESULT ( STDMETHODCALLTYPE *GetSourceRect )( IDXGIDecodeSwapChain * This, /* [annotation][out] */ _Out_ RECT *pRect); HRESULT ( STDMETHODCALLTYPE *GetTargetRect )( IDXGIDecodeSwapChain * This, /* [annotation][out] */ _Out_ RECT *pRect); HRESULT ( STDMETHODCALLTYPE *GetDestSize )( IDXGIDecodeSwapChain * This, /* [annotation][out] */ _Out_ UINT *pWidth, /* [annotation][out] */ _Out_ UINT *pHeight); HRESULT ( STDMETHODCALLTYPE *SetColorSpace )( IDXGIDecodeSwapChain * This, DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAGS ColorSpace); DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAGS ( STDMETHODCALLTYPE *GetColorSpace )( IDXGIDecodeSwapChain * This); END_INTERFACE } IDXGIDecodeSwapChainVtbl; interface IDXGIDecodeSwapChain { CONST_VTBL struct IDXGIDecodeSwapChainVtbl *lpVtbl; }; #ifdef COBJMACROS #define IDXGIDecodeSwapChain_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define IDXGIDecodeSwapChain_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define 
IDXGIDecodeSwapChain_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define IDXGIDecodeSwapChain_PresentBuffer(This,BufferToPresent,SyncInterval,Flags) \ ( (This)->lpVtbl -> PresentBuffer(This,BufferToPresent,SyncInterval,Flags) ) #define IDXGIDecodeSwapChain_SetSourceRect(This,pRect) \ ( (This)->lpVtbl -> SetSourceRect(This,pRect) ) #define IDXGIDecodeSwapChain_SetTargetRect(This,pRect) \ ( (This)->lpVtbl -> SetTargetRect(This,pRect) ) #define IDXGIDecodeSwapChain_SetDestSize(This,Width,Height) \ ( (This)->lpVtbl -> SetDestSize(This,Width,Height) ) #define IDXGIDecodeSwapChain_GetSourceRect(This,pRect) \ ( (This)->lpVtbl -> GetSourceRect(This,pRect) ) #define IDXGIDecodeSwapChain_GetTargetRect(This,pRect) \ ( (This)->lpVtbl -> GetTargetRect(This,pRect) ) #define IDXGIDecodeSwapChain_GetDestSize(This,pWidth,pHeight) \ ( (This)->lpVtbl -> GetDestSize(This,pWidth,pHeight) ) #define IDXGIDecodeSwapChain_SetColorSpace(This,ColorSpace) \ ( (This)->lpVtbl -> SetColorSpace(This,ColorSpace) ) #define IDXGIDecodeSwapChain_GetColorSpace(This) \ ( (This)->lpVtbl -> GetColorSpace(This) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __IDXGIDecodeSwapChain_INTERFACE_DEFINED__ */ #ifndef __IDXGIFactoryMedia_INTERFACE_DEFINED__ #define __IDXGIFactoryMedia_INTERFACE_DEFINED__ /* interface IDXGIFactoryMedia */ /* [unique][local][uuid][object] */ EXTERN_C const IID IID_IDXGIFactoryMedia; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("41e7d1f2-a591-4f7b-a2e5-fa9c843e1c12") IDXGIFactoryMedia : public IUnknown { public: virtual HRESULT STDMETHODCALLTYPE CreateSwapChainForCompositionSurfaceHandle( /* [annotation][in] */ _In_ IUnknown *pDevice, /* [annotation][in] */ _In_opt_ HANDLE hSurface, /* [annotation][in] */ _In_ const DXGI_SWAP_CHAIN_DESC1 *pDesc, /* [annotation][in] */ _In_opt_ IDXGIOutput *pRestrictToOutput, /* [annotation][out] */ _COM_Outptr_ IDXGISwapChain1 **ppSwapChain) = 0; virtual HRESULT STDMETHODCALLTYPE 
CreateDecodeSwapChainForCompositionSurfaceHandle( /* [annotation][in] */ _In_ IUnknown *pDevice, /* [annotation][in] */ _In_opt_ HANDLE hSurface, /* [annotation][in] */ _In_ DXGI_DECODE_SWAP_CHAIN_DESC *pDesc, /* [annotation][in] */ _In_ IDXGIResource *pYuvDecodeBuffers, /* [annotation][in] */ _In_opt_ IDXGIOutput *pRestrictToOutput, /* [annotation][out] */ _COM_Outptr_ IDXGIDecodeSwapChain **ppSwapChain) = 0; }; #else /* C style interface */ typedef struct IDXGIFactoryMediaVtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( IDXGIFactoryMedia * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( IDXGIFactoryMedia * This); ULONG ( STDMETHODCALLTYPE *Release )( IDXGIFactoryMedia * This); HRESULT ( STDMETHODCALLTYPE *CreateSwapChainForCompositionSurfaceHandle )( IDXGIFactoryMedia * This, /* [annotation][in] */ _In_ IUnknown *pDevice, /* [annotation][in] */ _In_opt_ HANDLE hSurface, /* [annotation][in] */ _In_ const DXGI_SWAP_CHAIN_DESC1 *pDesc, /* [annotation][in] */ _In_opt_ IDXGIOutput *pRestrictToOutput, /* [annotation][out] */ _COM_Outptr_ IDXGISwapChain1 **ppSwapChain); HRESULT ( STDMETHODCALLTYPE *CreateDecodeSwapChainForCompositionSurfaceHandle )( IDXGIFactoryMedia * This, /* [annotation][in] */ _In_ IUnknown *pDevice, /* [annotation][in] */ _In_opt_ HANDLE hSurface, /* [annotation][in] */ _In_ DXGI_DECODE_SWAP_CHAIN_DESC *pDesc, /* [annotation][in] */ _In_ IDXGIResource *pYuvDecodeBuffers, /* [annotation][in] */ _In_opt_ IDXGIOutput *pRestrictToOutput, /* [annotation][out] */ _COM_Outptr_ IDXGIDecodeSwapChain **ppSwapChain); END_INTERFACE } IDXGIFactoryMediaVtbl; interface IDXGIFactoryMedia { CONST_VTBL struct IDXGIFactoryMediaVtbl *lpVtbl; }; #ifdef COBJMACROS #define IDXGIFactoryMedia_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define IDXGIFactoryMedia_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define 
IDXGIFactoryMedia_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define IDXGIFactoryMedia_CreateSwapChainForCompositionSurfaceHandle(This,pDevice,hSurface,pDesc,pRestrictToOutput,ppSwapChain) \ ( (This)->lpVtbl -> CreateSwapChainForCompositionSurfaceHandle(This,pDevice,hSurface,pDesc,pRestrictToOutput,ppSwapChain) ) #define IDXGIFactoryMedia_CreateDecodeSwapChainForCompositionSurfaceHandle(This,pDevice,hSurface,pDesc,pYuvDecodeBuffers,pRestrictToOutput,ppSwapChain) \ ( (This)->lpVtbl -> CreateDecodeSwapChainForCompositionSurfaceHandle(This,pDevice,hSurface,pDesc,pYuvDecodeBuffers,pRestrictToOutput,ppSwapChain) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __IDXGIFactoryMedia_INTERFACE_DEFINED__ */ /* interface __MIDL_itf_dxgi1_3_0000_0006 */ /* [local] */ typedef enum DXGI_FRAME_PRESENTATION_MODE { DXGI_FRAME_PRESENTATION_MODE_COMPOSED = 0, DXGI_FRAME_PRESENTATION_MODE_OVERLAY = 1, DXGI_FRAME_PRESENTATION_MODE_NONE = 2, DXGI_FRAME_PRESENTATION_MODE_COMPOSITION_FAILURE = 3 } DXGI_FRAME_PRESENTATION_MODE; typedef struct DXGI_FRAME_STATISTICS_MEDIA { UINT PresentCount; UINT PresentRefreshCount; UINT SyncRefreshCount; LARGE_INTEGER SyncQPCTime; LARGE_INTEGER SyncGPUTime; DXGI_FRAME_PRESENTATION_MODE CompositionMode; UINT ApprovedPresentDuration; } DXGI_FRAME_STATISTICS_MEDIA; extern RPC_IF_HANDLE __MIDL_itf_dxgi1_3_0000_0006_v0_0_c_ifspec; extern RPC_IF_HANDLE __MIDL_itf_dxgi1_3_0000_0006_v0_0_s_ifspec; #ifndef __IDXGISwapChainMedia_INTERFACE_DEFINED__ #define __IDXGISwapChainMedia_INTERFACE_DEFINED__ /* interface IDXGISwapChainMedia */ /* [unique][local][uuid][object] */ EXTERN_C const IID IID_IDXGISwapChainMedia; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("dd95b90b-f05f-4f6a-bd65-25bfb264bd84") IDXGISwapChainMedia : public IUnknown { public: virtual HRESULT STDMETHODCALLTYPE GetFrameStatisticsMedia( /* [annotation][out] */ _Out_ DXGI_FRAME_STATISTICS_MEDIA *pStats) = 0; virtual HRESULT STDMETHODCALLTYPE 
SetPresentDuration( UINT Duration) = 0; virtual HRESULT STDMETHODCALLTYPE CheckPresentDurationSupport( UINT DesiredPresentDuration, /* [annotation][out] */ _Out_ UINT *pClosestSmallerPresentDuration, /* [annotation][out] */ _Out_ UINT *pClosestLargerPresentDuration) = 0; }; #else /* C style interface */ typedef struct IDXGISwapChainMediaVtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( IDXGISwapChainMedia * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( IDXGISwapChainMedia * This); ULONG ( STDMETHODCALLTYPE *Release )( IDXGISwapChainMedia * This); HRESULT ( STDMETHODCALLTYPE *GetFrameStatisticsMedia )( IDXGISwapChainMedia * This, /* [annotation][out] */ _Out_ DXGI_FRAME_STATISTICS_MEDIA *pStats); HRESULT ( STDMETHODCALLTYPE *SetPresentDuration )( IDXGISwapChainMedia * This, UINT Duration); HRESULT ( STDMETHODCALLTYPE *CheckPresentDurationSupport )( IDXGISwapChainMedia * This, UINT DesiredPresentDuration, /* [annotation][out] */ _Out_ UINT *pClosestSmallerPresentDuration, /* [annotation][out] */ _Out_ UINT *pClosestLargerPresentDuration); END_INTERFACE } IDXGISwapChainMediaVtbl; interface IDXGISwapChainMedia { CONST_VTBL struct IDXGISwapChainMediaVtbl *lpVtbl; }; #ifdef COBJMACROS #define IDXGISwapChainMedia_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define IDXGISwapChainMedia_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define IDXGISwapChainMedia_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define IDXGISwapChainMedia_GetFrameStatisticsMedia(This,pStats) \ ( (This)->lpVtbl -> GetFrameStatisticsMedia(This,pStats) ) #define IDXGISwapChainMedia_SetPresentDuration(This,Duration) \ ( (This)->lpVtbl -> SetPresentDuration(This,Duration) ) #define IDXGISwapChainMedia_CheckPresentDurationSupport(This,DesiredPresentDuration,pClosestSmallerPresentDuration,pClosestLargerPresentDuration) \ ( (This)->lpVtbl -> 
CheckPresentDurationSupport(This,DesiredPresentDuration,pClosestSmallerPresentDuration,pClosestLargerPresentDuration) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __IDXGISwapChainMedia_INTERFACE_DEFINED__ */ /* interface __MIDL_itf_dxgi1_3_0000_0007 */ /* [local] */ typedef enum DXGI_OVERLAY_SUPPORT_FLAG { DXGI_OVERLAY_SUPPORT_FLAG_DIRECT = 0x1, DXGI_OVERLAY_SUPPORT_FLAG_SCALING = 0x2 } DXGI_OVERLAY_SUPPORT_FLAG; #endif /* WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) */ // BK - pragma endregion // BK - pragma region App Family #if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) extern RPC_IF_HANDLE __MIDL_itf_dxgi1_3_0000_0007_v0_0_c_ifspec; extern RPC_IF_HANDLE __MIDL_itf_dxgi1_3_0000_0007_v0_0_s_ifspec; #ifndef __IDXGIOutput3_INTERFACE_DEFINED__ #define __IDXGIOutput3_INTERFACE_DEFINED__ /* interface IDXGIOutput3 */ /* [unique][local][uuid][object] */ EXTERN_C const IID IID_IDXGIOutput3; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("8a6bb301-7e7e-41F4-a8e0-5b32f7f99b18") IDXGIOutput3 : public IDXGIOutput2 { public: virtual HRESULT STDMETHODCALLTYPE CheckOverlaySupport( /* [annotation][in] */ _In_ DXGI_FORMAT EnumFormat, /* [annotation][out] */ _In_ IUnknown *pConcernedDevice, /* [annotation][out] */ _Out_ UINT *pFlags) = 0; }; #else /* C style interface */ typedef struct IDXGIOutput3Vtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( IDXGIOutput3 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( IDXGIOutput3 * This); ULONG ( STDMETHODCALLTYPE *Release )( IDXGIOutput3 * This); HRESULT ( STDMETHODCALLTYPE *SetPrivateData )( IDXGIOutput3 * This, /* [annotation][in] */ _In_ REFGUID Name, /* [in] */ UINT DataSize, /* [annotation][in] */ _In_reads_bytes_(DataSize) const void *pData); HRESULT ( STDMETHODCALLTYPE *SetPrivateDataInterface )( IDXGIOutput3 * This, /* [annotation][in] */ _In_ REFGUID Name, /* [annotation][in] */ 
_In_opt_ const IUnknown *pUnknown); HRESULT ( STDMETHODCALLTYPE *GetPrivateData )( IDXGIOutput3 * This, /* [annotation][in] */ _In_ REFGUID Name, /* [annotation][out][in] */ _Inout_ UINT *pDataSize, /* [annotation][out] */ _Out_writes_bytes_(*pDataSize) void *pData); HRESULT ( STDMETHODCALLTYPE *GetParent )( IDXGIOutput3 * This, /* [annotation][in] */ _In_ REFIID riid, /* [annotation][retval][out] */ _COM_Outptr_ void **ppParent); HRESULT ( STDMETHODCALLTYPE *GetDesc )( IDXGIOutput3 * This, /* [annotation][out] */ _Out_ DXGI_OUTPUT_DESC *pDesc); HRESULT ( STDMETHODCALLTYPE *GetDisplayModeList )( IDXGIOutput3 * This, /* [in] */ DXGI_FORMAT EnumFormat, /* [in] */ UINT Flags, /* [annotation][out][in] */ _Inout_ UINT *pNumModes, /* [annotation][out] */ _Out_writes_to_opt_(*pNumModes,*pNumModes) DXGI_MODE_DESC *pDesc); HRESULT ( STDMETHODCALLTYPE *FindClosestMatchingMode )( IDXGIOutput3 * This, /* [annotation][in] */ _In_ const DXGI_MODE_DESC *pModeToMatch, /* [annotation][out] */ _Out_ DXGI_MODE_DESC *pClosestMatch, /* [annotation][in] */ _In_opt_ IUnknown *pConcernedDevice); HRESULT ( STDMETHODCALLTYPE *WaitForVBlank )( IDXGIOutput3 * This); HRESULT ( STDMETHODCALLTYPE *TakeOwnership )( IDXGIOutput3 * This, /* [annotation][in] */ _In_ IUnknown *pDevice, BOOL Exclusive); void ( STDMETHODCALLTYPE *ReleaseOwnership )( IDXGIOutput3 * This); HRESULT ( STDMETHODCALLTYPE *GetGammaControlCapabilities )( IDXGIOutput3 * This, /* [annotation][out] */ _Out_ DXGI_GAMMA_CONTROL_CAPABILITIES *pGammaCaps); HRESULT ( STDMETHODCALLTYPE *SetGammaControl )( IDXGIOutput3 * This, /* [annotation][in] */ _In_ const DXGI_GAMMA_CONTROL *pArray); HRESULT ( STDMETHODCALLTYPE *GetGammaControl )( IDXGIOutput3 * This, /* [annotation][out] */ _Out_ DXGI_GAMMA_CONTROL *pArray); HRESULT ( STDMETHODCALLTYPE *SetDisplaySurface )( IDXGIOutput3 * This, /* [annotation][in] */ _In_ IDXGISurface *pScanoutSurface); HRESULT ( STDMETHODCALLTYPE *GetDisplaySurfaceData )( IDXGIOutput3 * This, /* [annotation][in] 
*/ _In_ IDXGISurface *pDestination); HRESULT ( STDMETHODCALLTYPE *GetFrameStatistics )( IDXGIOutput3 * This, /* [annotation][out] */ _Out_ DXGI_FRAME_STATISTICS *pStats); HRESULT ( STDMETHODCALLTYPE *GetDisplayModeList1 )( IDXGIOutput3 * This, /* [in] */ DXGI_FORMAT EnumFormat, /* [in] */ UINT Flags, /* [annotation][out][in] */ _Inout_ UINT *pNumModes, /* [annotation][out] */ _Out_writes_to_opt_(*pNumModes,*pNumModes) DXGI_MODE_DESC1 *pDesc); HRESULT ( STDMETHODCALLTYPE *FindClosestMatchingMode1 )( IDXGIOutput3 * This, /* [annotation][in] */ _In_ const DXGI_MODE_DESC1 *pModeToMatch, /* [annotation][out] */ _Out_ DXGI_MODE_DESC1 *pClosestMatch, /* [annotation][in] */ _In_opt_ IUnknown *pConcernedDevice); HRESULT ( STDMETHODCALLTYPE *GetDisplaySurfaceData1 )( IDXGIOutput3 * This, /* [annotation][in] */ _In_ IDXGIResource *pDestination); HRESULT ( STDMETHODCALLTYPE *DuplicateOutput )( IDXGIOutput3 * This, /* [annotation][in] */ _In_ IUnknown *pDevice, /* [annotation][out] */ _COM_Outptr_ IDXGIOutputDuplication **ppOutputDuplication); BOOL ( STDMETHODCALLTYPE *SupportsOverlays )( IDXGIOutput3 * This); HRESULT ( STDMETHODCALLTYPE *CheckOverlaySupport )( IDXGIOutput3 * This, /* [annotation][in] */ _In_ DXGI_FORMAT EnumFormat, /* [annotation][out] */ _In_ IUnknown *pConcernedDevice, /* [annotation][out] */ _Out_ UINT *pFlags); END_INTERFACE } IDXGIOutput3Vtbl; interface IDXGIOutput3 { CONST_VTBL struct IDXGIOutput3Vtbl *lpVtbl; }; #ifdef COBJMACROS #define IDXGIOutput3_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define IDXGIOutput3_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define IDXGIOutput3_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define IDXGIOutput3_SetPrivateData(This,Name,DataSize,pData) \ ( (This)->lpVtbl -> SetPrivateData(This,Name,DataSize,pData) ) #define IDXGIOutput3_SetPrivateDataInterface(This,Name,pUnknown) \ ( (This)->lpVtbl -> SetPrivateDataInterface(This,Name,pUnknown) ) #define 
IDXGIOutput3_GetPrivateData(This,Name,pDataSize,pData) \ ( (This)->lpVtbl -> GetPrivateData(This,Name,pDataSize,pData) ) #define IDXGIOutput3_GetParent(This,riid,ppParent) \ ( (This)->lpVtbl -> GetParent(This,riid,ppParent) ) #define IDXGIOutput3_GetDesc(This,pDesc) \ ( (This)->lpVtbl -> GetDesc(This,pDesc) ) #define IDXGIOutput3_GetDisplayModeList(This,EnumFormat,Flags,pNumModes,pDesc) \ ( (This)->lpVtbl -> GetDisplayModeList(This,EnumFormat,Flags,pNumModes,pDesc) ) #define IDXGIOutput3_FindClosestMatchingMode(This,pModeToMatch,pClosestMatch,pConcernedDevice) \ ( (This)->lpVtbl -> FindClosestMatchingMode(This,pModeToMatch,pClosestMatch,pConcernedDevice) ) #define IDXGIOutput3_WaitForVBlank(This) \ ( (This)->lpVtbl -> WaitForVBlank(This) ) #define IDXGIOutput3_TakeOwnership(This,pDevice,Exclusive) \ ( (This)->lpVtbl -> TakeOwnership(This,pDevice,Exclusive) ) #define IDXGIOutput3_ReleaseOwnership(This) \ ( (This)->lpVtbl -> ReleaseOwnership(This) ) #define IDXGIOutput3_GetGammaControlCapabilities(This,pGammaCaps) \ ( (This)->lpVtbl -> GetGammaControlCapabilities(This,pGammaCaps) ) #define IDXGIOutput3_SetGammaControl(This,pArray) \ ( (This)->lpVtbl -> SetGammaControl(This,pArray) ) #define IDXGIOutput3_GetGammaControl(This,pArray) \ ( (This)->lpVtbl -> GetGammaControl(This,pArray) ) #define IDXGIOutput3_SetDisplaySurface(This,pScanoutSurface) \ ( (This)->lpVtbl -> SetDisplaySurface(This,pScanoutSurface) ) #define IDXGIOutput3_GetDisplaySurfaceData(This,pDestination) \ ( (This)->lpVtbl -> GetDisplaySurfaceData(This,pDestination) ) #define IDXGIOutput3_GetFrameStatistics(This,pStats) \ ( (This)->lpVtbl -> GetFrameStatistics(This,pStats) ) #define IDXGIOutput3_GetDisplayModeList1(This,EnumFormat,Flags,pNumModes,pDesc) \ ( (This)->lpVtbl -> GetDisplayModeList1(This,EnumFormat,Flags,pNumModes,pDesc) ) #define IDXGIOutput3_FindClosestMatchingMode1(This,pModeToMatch,pClosestMatch,pConcernedDevice) \ ( (This)->lpVtbl -> 
FindClosestMatchingMode1(This,pModeToMatch,pClosestMatch,pConcernedDevice) ) #define IDXGIOutput3_GetDisplaySurfaceData1(This,pDestination) \ ( (This)->lpVtbl -> GetDisplaySurfaceData1(This,pDestination) ) #define IDXGIOutput3_DuplicateOutput(This,pDevice,ppOutputDuplication) \ ( (This)->lpVtbl -> DuplicateOutput(This,pDevice,ppOutputDuplication) ) #define IDXGIOutput3_SupportsOverlays(This) \ ( (This)->lpVtbl -> SupportsOverlays(This) ) #define IDXGIOutput3_CheckOverlaySupport(This,EnumFormat,pConcernedDevice,pFlags) \ ( (This)->lpVtbl -> CheckOverlaySupport(This,EnumFormat,pConcernedDevice,pFlags) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __IDXGIOutput3_INTERFACE_DEFINED__ */ /* interface __MIDL_itf_dxgi1_3_0000_0008 */ /* [local] */ #endif /* WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) */ // BK - pragma endregion DEFINE_GUID(IID_IDXGIDevice3,0x6007896c,0x3244,0x4afd,0xbf,0x18,0xa6,0xd3,0xbe,0xda,0x50,0x23); DEFINE_GUID(IID_IDXGISwapChain2,0xa8be2ac4,0x199f,0x4946,0xb3,0x31,0x79,0x59,0x9f,0xb9,0x8d,0xe7); DEFINE_GUID(IID_IDXGIOutput2,0x595e39d1,0x2724,0x4663,0x99,0xb1,0xda,0x96,0x9d,0xe2,0x83,0x64); DEFINE_GUID(IID_IDXGIFactory3,0x25483823,0xcd46,0x4c7d,0x86,0xca,0x47,0xaa,0x95,0xb8,0x37,0xbd); DEFINE_GUID(IID_IDXGIDecodeSwapChain,0x2633066b,0x4514,0x4c7a,0x8f,0xd8,0x12,0xea,0x98,0x05,0x9d,0x18); DEFINE_GUID(IID_IDXGIFactoryMedia,0x41e7d1f2,0xa591,0x4f7b,0xa2,0xe5,0xfa,0x9c,0x84,0x3e,0x1c,0x12); DEFINE_GUID(IID_IDXGISwapChainMedia,0xdd95b90b,0xf05f,0x4f6a,0xbd,0x65,0x25,0xbf,0xb2,0x64,0xbd,0x84); DEFINE_GUID(IID_IDXGIOutput3,0x8a6bb301,0x7e7e,0x41F4,0xa8,0xe0,0x5b,0x32,0xf7,0xf9,0x9b,0x18); extern RPC_IF_HANDLE __MIDL_itf_dxgi1_3_0000_0008_v0_0_c_ifspec; extern RPC_IF_HANDLE __MIDL_itf_dxgi1_3_0000_0008_v0_0_s_ifspec; /* Additional Prototypes for ALL interfaces */ /* end of Additional Prototypes */ #ifdef __cplusplus } #endif #endif
fluffyfreak/bgfx
3rdparty/dxsdk/include/dxgi1_3.h
C
bsd-2-clause
71,234
#ifndef SHA_H #define SHA_H /* NIST Secure Hash Algorithm */ /* heavily modified from Peter C. Gutmann's implementation */ /* Useful defines & typedefs */ typedef unsigned char BYTE; typedef unsigned long LONG; #define SHA_BLOCKSIZE 64 #define SHA_DIGESTSIZE 20 typedef struct { LONG digest[5]; /* message digest */ LONG count_lo, count_hi; /* 64-bit bit count */ LONG data[16]; /* SHA data buffer */ } SHA_INFO; void sha_init(SHA_INFO *); void sha_update(SHA_INFO *, BYTE *, int); void sha_final(SHA_INFO *); void sha_stream(SHA_INFO *, FILE *); void sha_print(SHA_INFO *); #endif /* SHA_H */
JianpingZeng/xcc
xcc/test/mediabench4/sha/sha.h
C
bsd-3-clause
618
// RUN: %clang_cc1 %s -verify -fsyntax-only void good() { int dont_initialize_me __attribute((uninitialized)); } void bad() { int im_bad __attribute((uninitialized("zero"))); // expected-error {{'uninitialized' attribute takes no arguments}} static int im_baaad __attribute((uninitialized)); // expected-warning {{'uninitialized' attribute only applies to local variables}} } extern int come_on __attribute((uninitialized)); // expected-warning {{'uninitialized' attribute only applies to local variables}} int you_know __attribute((uninitialized)); // expected-warning {{'uninitialized' attribute only applies to local variables}} static int and_the_whole_world_has_to __attribute((uninitialized)); // expected-warning {{'uninitialized' attribute only applies to local variables}} void answer_right_now() __attribute((uninitialized)) {} // expected-warning {{'uninitialized' attribute only applies to local variables}} void just_to_tell_you_once_again(__attribute((uninitialized)) int whos_bad) {} // expected-warning {{'uninitialized' attribute only applies to local variables}} struct TheWordIsOut { __attribute((uninitialized)) int youre_doin_wrong; // expected-warning {{'uninitialized' attribute only applies to local variables}} } __attribute((uninitialized)); // expected-warning {{'uninitialized' attribute only applies to local variables}}
endlessm/chromium-browser
third_party/llvm/clang/test/Sema/attr-uninitialized.c
C
bsd-3-clause
1,452
// Protocol Buffers - Google's data interchange format // Copyright 2008 Google Inc. All rights reserved. // https://developers.google.com/protocol-buffers/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include <sstream> #include <algorithm> #include <map> #include <google/protobuf/compiler/code_generator.h> #include <google/protobuf/compiler/plugin.h> #include <google/protobuf/descriptor.h> #include <google/protobuf/descriptor.pb.h> #include <google/protobuf/io/printer.h> #include <google/protobuf/io/zero_copy_stream.h> #include <google/protobuf/stubs/strutil.h> #include <google/protobuf/wire_format.h> #include <google/protobuf/wire_format_lite.h> #include <google/protobuf/compiler/csharp/csharp_doc_comment.h> #include <google/protobuf/compiler/csharp/csharp_enum.h> #include <google/protobuf/compiler/csharp/csharp_field_base.h> #include <google/protobuf/compiler/csharp/csharp_helpers.h> #include <google/protobuf/compiler/csharp/csharp_message.h> #include <google/protobuf/compiler/csharp/csharp_names.h> using google::protobuf::internal::scoped_ptr; namespace google { namespace protobuf { namespace compiler { namespace csharp { bool CompareFieldNumbers(const FieldDescriptor* d1, const FieldDescriptor* d2) { return d1->number() < d2->number(); } MessageGenerator::MessageGenerator(const Descriptor* descriptor, const Options* options) : SourceGeneratorBase(descriptor->file(), options), descriptor_(descriptor) { // sorted field names for (int i = 0; i < descriptor_->field_count(); i++) { field_names_.push_back(descriptor_->field(i)->name()); } std::sort(field_names_.begin(), field_names_.end()); // fields by number for (int i = 0; i < descriptor_->field_count(); i++) { fields_by_number_.push_back(descriptor_->field(i)); } std::sort(fields_by_number_.begin(), fields_by_number_.end(), CompareFieldNumbers); } MessageGenerator::~MessageGenerator() { } std::string MessageGenerator::class_name() { return descriptor_->name(); } std::string MessageGenerator::full_class_name() { return GetClassName(descriptor_); } const std::vector<std::string>& MessageGenerator::field_names() { return field_names_; } const std::vector<const FieldDescriptor*>& 
MessageGenerator::fields_by_number() { return fields_by_number_; } void MessageGenerator::Generate(io::Printer* printer) { map<string, string> vars; vars["class_name"] = class_name(); vars["access_level"] = class_access_level(); WriteMessageDocComment(printer, descriptor_); printer->Print( "[global::System.Diagnostics.DebuggerNonUserCodeAttribute()]\n"); WriteGeneratedCodeAttributes(printer); printer->Print( vars, "$access_level$ sealed partial class $class_name$ : pb::IMessage<$class_name$> {\n"); printer->Indent(); // All static fields and properties printer->Print( vars, "private static readonly pb::MessageParser<$class_name$> _parser = new pb::MessageParser<$class_name$>(() => new $class_name$());\n" "public static pb::MessageParser<$class_name$> Parser { get { return _parser; } }\n\n"); // Access the message descriptor via the relevant file descriptor or containing message descriptor. if (!descriptor_->containing_type()) { vars["descriptor_accessor"] = GetReflectionClassName(descriptor_->file()) + ".Descriptor.MessageTypes[" + SimpleItoa(descriptor_->index()) + "]"; } else { vars["descriptor_accessor"] = GetClassName(descriptor_->containing_type()) + ".Descriptor.NestedTypes[" + SimpleItoa(descriptor_->index()) + "]"; } printer->Print( vars, "public static pbr::MessageDescriptor Descriptor {\n" " get { return $descriptor_accessor$; }\n" "}\n" "\n" "pbr::MessageDescriptor pb::IMessage.Descriptor {\n" " get { return Descriptor; }\n" "}\n" "\n"); // Parameterless constructor and partial OnConstruction method. 
printer->Print( vars, "public $class_name$() {\n" " OnConstruction();\n" "}\n\n" "partial void OnConstruction();\n\n"); GenerateCloningCode(printer); GenerateFreezingCode(printer); // Fields/properties for (int i = 0; i < descriptor_->field_count(); i++) { const FieldDescriptor* fieldDescriptor = descriptor_->field(i); // Rats: we lose the debug comment here :( printer->Print( "/// <summary>Field number for the \"$field_name$\" field.</summary>\n" "public const int $field_constant_name$ = $index$;\n", "field_name", fieldDescriptor->name(), "field_constant_name", GetFieldConstantName(fieldDescriptor), "index", SimpleItoa(fieldDescriptor->number())); scoped_ptr<FieldGeneratorBase> generator( CreateFieldGeneratorInternal(fieldDescriptor)); generator->GenerateMembers(printer); printer->Print("\n"); } // oneof properties for (int i = 0; i < descriptor_->oneof_decl_count(); i++) { vars["name"] = UnderscoresToCamelCase(descriptor_->oneof_decl(i)->name(), false); vars["property_name"] = UnderscoresToCamelCase(descriptor_->oneof_decl(i)->name(), true); vars["original_name"] = descriptor_->oneof_decl(i)->name(); printer->Print( vars, "private object $name$_;\n" "/// <summary>Enum of possible cases for the \"$original_name$\" oneof.</summary>\n" "public enum $property_name$OneofCase {\n"); printer->Indent(); printer->Print("None = 0,\n"); for (int j = 0; j < descriptor_->oneof_decl(i)->field_count(); j++) { const FieldDescriptor* field = descriptor_->oneof_decl(i)->field(j); printer->Print("$field_property_name$ = $index$,\n", "field_property_name", GetPropertyName(field), "index", SimpleItoa(field->number())); } printer->Outdent(); printer->Print("}\n"); // TODO: Should we put the oneof .proto comments here? // It's unclear exactly where they should go. 
printer->Print( vars, "private $property_name$OneofCase $name$Case_ = $property_name$OneofCase.None;\n" "public $property_name$OneofCase $property_name$Case {\n" " get { return $name$Case_; }\n" "}\n\n" "public void Clear$property_name$() {\n" " $name$Case_ = $property_name$OneofCase.None;\n" " $name$_ = null;\n" "}\n\n"); } // Standard methods GenerateFrameworkMethods(printer); GenerateMessageSerializationMethods(printer); GenerateMergingMethods(printer); // Nested messages and enums if (HasNestedGeneratedTypes()) { printer->Print( vars, "#region Nested types\n" "/// <summary>Container for nested types declared in the $class_name$ message type.</summary>\n" "[global::System.Diagnostics.DebuggerNonUserCodeAttribute()]\n"); WriteGeneratedCodeAttributes(printer); printer->Print("public static partial class Types {\n"); printer->Indent(); for (int i = 0; i < descriptor_->enum_type_count(); i++) { EnumGenerator enumGenerator(descriptor_->enum_type(i), this->options()); enumGenerator.Generate(printer); } for (int i = 0; i < descriptor_->nested_type_count(); i++) { // Don't generate nested types for maps... if (!IsMapEntryMessage(descriptor_->nested_type(i))) { MessageGenerator messageGenerator( descriptor_->nested_type(i), this->options()); messageGenerator.Generate(printer); } } printer->Outdent(); printer->Print("}\n" "#endregion\n" "\n"); } printer->Outdent(); printer->Print("}\n"); printer->Print("\n"); } // Helper to work out whether we need to generate a class to hold nested types/enums. // Only tricky because we don't want to generate map entry types. 
bool MessageGenerator::HasNestedGeneratedTypes() { if (descriptor_->enum_type_count() > 0) { return true; } for (int i = 0; i < descriptor_->nested_type_count(); i++) { if (!IsMapEntryMessage(descriptor_->nested_type(i))) { return true; } } return false; } void MessageGenerator::GenerateCloningCode(io::Printer* printer) { map<string, string> vars; vars["class_name"] = class_name(); printer->Print( vars, "public $class_name$($class_name$ other) : this() {\n"); printer->Indent(); // Clone non-oneof fields first for (int i = 0; i < descriptor_->field_count(); i++) { if (!descriptor_->field(i)->containing_oneof()) { scoped_ptr<FieldGeneratorBase> generator( CreateFieldGeneratorInternal(descriptor_->field(i))); generator->GenerateCloningCode(printer); } } // Clone just the right field for each oneof for (int i = 0; i < descriptor_->oneof_decl_count(); ++i) { vars["name"] = UnderscoresToCamelCase(descriptor_->oneof_decl(i)->name(), false); vars["property_name"] = UnderscoresToCamelCase( descriptor_->oneof_decl(i)->name(), true); printer->Print(vars, "switch (other.$property_name$Case) {\n"); printer->Indent(); for (int j = 0; j < descriptor_->oneof_decl(i)->field_count(); j++) { const FieldDescriptor* field = descriptor_->oneof_decl(i)->field(j); scoped_ptr<FieldGeneratorBase> generator(CreateFieldGeneratorInternal(field)); vars["field_property_name"] = GetPropertyName(field); printer->Print( vars, "case $property_name$OneofCase.$field_property_name$:\n"); printer->Indent(); generator->GenerateCloningCode(printer); printer->Print("break;\n"); printer->Outdent(); } printer->Outdent(); printer->Print("}\n\n"); } printer->Outdent(); printer->Print("}\n\n"); printer->Print( vars, "public $class_name$ Clone() {\n" " return new $class_name$(this);\n" "}\n\n"); } void MessageGenerator::GenerateFreezingCode(io::Printer* printer) { } void MessageGenerator::GenerateFrameworkMethods(io::Printer* printer) { map<string, string> vars; vars["class_name"] = class_name(); // Equality 
printer->Print( vars, "public override bool Equals(object other) {\n" " return Equals(other as $class_name$);\n" "}\n\n" "public bool Equals($class_name$ other) {\n" " if (ReferenceEquals(other, null)) {\n" " return false;\n" " }\n" " if (ReferenceEquals(other, this)) {\n" " return true;\n" " }\n"); printer->Indent(); for (int i = 0; i < descriptor_->field_count(); i++) { scoped_ptr<FieldGeneratorBase> generator( CreateFieldGeneratorInternal(descriptor_->field(i))); generator->WriteEquals(printer); } for (int i = 0; i < descriptor_->oneof_decl_count(); i++) { printer->Print("if ($property_name$Case != other.$property_name$Case) return false;\n", "property_name", UnderscoresToCamelCase(descriptor_->oneof_decl(i)->name(), true)); } printer->Outdent(); printer->Print( " return true;\n" "}\n\n"); // GetHashCode // Start with a non-zero value to easily distinguish between null and "empty" messages. printer->Print( "public override int GetHashCode() {\n" " int hash = 1;\n"); printer->Indent(); for (int i = 0; i < descriptor_->field_count(); i++) { scoped_ptr<FieldGeneratorBase> generator( CreateFieldGeneratorInternal(descriptor_->field(i))); generator->WriteHash(printer); } for (int i = 0; i < descriptor_->oneof_decl_count(); i++) { printer->Print("hash ^= (int) $name$Case_;\n", "name", UnderscoresToCamelCase(descriptor_->oneof_decl(i)->name(), false)); } printer->Print("return hash;\n"); printer->Outdent(); printer->Print("}\n\n"); printer->Print( "public override string ToString() {\n" " return pb::JsonFormatter.ToDiagnosticString(this);\n" "}\n\n"); } void MessageGenerator::GenerateMessageSerializationMethods(io::Printer* printer) { printer->Print( "public void WriteTo(pb::CodedOutputStream output) {\n"); printer->Indent(); // Serialize all the fields for (int i = 0; i < fields_by_number().size(); i++) { scoped_ptr<FieldGeneratorBase> generator( CreateFieldGeneratorInternal(fields_by_number()[i])); generator->GenerateSerializationCode(printer); } // TODO(jonskeet): 
Memoize size of frozen messages? printer->Outdent(); printer->Print( "}\n" "\n" "public int CalculateSize() {\n"); printer->Indent(); printer->Print("int size = 0;\n"); for (int i = 0; i < descriptor_->field_count(); i++) { scoped_ptr<FieldGeneratorBase> generator( CreateFieldGeneratorInternal(descriptor_->field(i))); generator->GenerateSerializedSizeCode(printer); } printer->Print("return size;\n"); printer->Outdent(); printer->Print("}\n\n"); } void MessageGenerator::GenerateMergingMethods(io::Printer* printer) { // Note: These are separate from GenerateMessageSerializationMethods() // because they need to be generated even for messages that are optimized // for code size. map<string, string> vars; vars["class_name"] = class_name(); printer->Print( vars, "public void MergeFrom($class_name$ other) {\n"); printer->Indent(); printer->Print( "if (other == null) {\n" " return;\n" "}\n"); // Merge non-oneof fields for (int i = 0; i < descriptor_->field_count(); i++) { if (!descriptor_->field(i)->containing_oneof()) { scoped_ptr<FieldGeneratorBase> generator( CreateFieldGeneratorInternal(descriptor_->field(i))); generator->GenerateMergingCode(printer); } } // Merge oneof fields for (int i = 0; i < descriptor_->oneof_decl_count(); ++i) { vars["name"] = UnderscoresToCamelCase(descriptor_->oneof_decl(i)->name(), false); vars["property_name"] = UnderscoresToCamelCase(descriptor_->oneof_decl(i)->name(), true); printer->Print(vars, "switch (other.$property_name$Case) {\n"); printer->Indent(); for (int j = 0; j < descriptor_->oneof_decl(i)->field_count(); j++) { const FieldDescriptor* field = descriptor_->oneof_decl(i)->field(j); vars["field_property_name"] = GetPropertyName(field); printer->Print( vars, "case $property_name$OneofCase.$field_property_name$:\n" " $field_property_name$ = other.$field_property_name$;\n" " break;\n"); } printer->Outdent(); printer->Print("}\n\n"); } printer->Outdent(); printer->Print("}\n\n"); printer->Print("public void 
MergeFrom(pb::CodedInputStream input) {\n"); printer->Indent(); printer->Print( "uint tag;\n" "while ((tag = input.ReadTag()) != 0) {\n" " switch(tag) {\n"); printer->Indent(); printer->Indent(); printer->Print( "default:\n" " input.SkipLastField();\n" // We're not storing the data, but we still need to consume it. " break;\n"); for (int i = 0; i < fields_by_number().size(); i++) { const FieldDescriptor* field = fields_by_number()[i]; internal::WireFormatLite::WireType wt = internal::WireFormat::WireTypeForFieldType(field->type()); uint32 tag = internal::WireFormatLite::MakeTag(field->number(), wt); // Handle both packed and unpacked repeated fields with the same Read*Array call; // the two generated cases are the packed and unpacked tags. // TODO(jonskeet): Check that is_packable is equivalent to // is_repeated && wt in { VARINT, FIXED32, FIXED64 }. // It looks like it is... if (field->is_packable()) { printer->Print( "case $packed_tag$:\n", "packed_tag", SimpleItoa( internal::WireFormatLite::MakeTag( field->number(), internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED))); } printer->Print("case $tag$: {\n", "tag", SimpleItoa(tag)); printer->Indent(); scoped_ptr<FieldGeneratorBase> generator( CreateFieldGeneratorInternal(field)); generator->GenerateParsingCode(printer); printer->Print("break;\n"); printer->Outdent(); printer->Print("}\n"); } printer->Outdent(); printer->Print("}\n"); // switch printer->Outdent(); printer->Print("}\n"); // while printer->Outdent(); printer->Print("}\n\n"); // method } int MessageGenerator::GetFieldOrdinal(const FieldDescriptor* descriptor) { for (int i = 0; i < field_names().size(); i++) { if (field_names()[i] == descriptor->name()) { return i; } } GOOGLE_LOG(DFATAL)<< "Could not find ordinal for field " << descriptor->name(); return -1; } FieldGeneratorBase* MessageGenerator::CreateFieldGeneratorInternal( const FieldDescriptor* descriptor) { return CreateFieldGenerator(descriptor, GetFieldOrdinal(descriptor), this->options()); } } 
// namespace csharp } // namespace compiler } // namespace protobuf } // namespace google
danakj/chromium
third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_message.cc
C++
bsd-3-clause
18,364
/* * Copyright (c) 2006 The Regents of The University of Michigan * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer; * redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution; * neither the name of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Authors: Kevin Lim */ #include <string> #include "cpu/checker/cpu_impl.hh" #include "cpu/ozone/dyn_inst.hh" #include "cpu/ozone/ozone_impl.hh" #include "cpu/inst_seq.hh" #include "params/OzoneChecker.hh" #include "sim/process.hh" #include "sim/sim_object.hh" class MemObject; template class Checker<RefCountingPtr<OzoneDynInst<OzoneImpl> > >; /** * Specific non-templated derived class used for SimObject configuration. 
*/ class OzoneChecker : public Checker<RefCountingPtr<OzoneDynInst<OzoneImpl> > > { public: OzoneChecker(Params *p) : Checker<RefCountingPtr<OzoneDynInst<OzoneImpl> > >(p) { } }; //////////////////////////////////////////////////////////////////////// // // CheckerCPU Simulation Object // OzoneChecker * OzoneCheckerParams::create() { OzoneChecker::Params *params = new OzoneChecker::Params(); params->name = name; params->numberOfThreads = 1; params->max_insts_any_thread = 0; params->max_insts_all_threads = 0; params->max_loads_any_thread = 0; params->max_loads_all_threads = 0; params->exitOnError = exitOnError; params->updateOnError = updateOnError; params->warnOnlyOnLoadError = warnOnlyOnLoadError; params->deferRegistration = defer_registration; params->functionTrace = function_trace; params->functionTraceStart = function_trace_start; params->clock = clock; // Hack to touch all parameters. Consider not deriving Checker // from BaseCPU..it's not really a CPU in the end. Counter temp; temp = max_insts_any_thread; temp = max_insts_all_threads; temp = max_loads_any_thread; temp = max_loads_all_threads; Tick temp2 = progress_interval; temp2++; params->progress_interval = 0; params->itb = itb; params->dtb = dtb; params->system = system; params->cpu_id = cpu_id; params->profile = profile; params->process = workload; OzoneChecker *cpu = new OzoneChecker(params); return cpu; }
aferr/LatticeMemCtl
src/cpu/ozone/checker_builder.cc
C++
bsd-3-clause
3,555
/* Test diagnostics for old-style definition not matching prior prototype are present and give correct location for that prototype (bug 15698). Unprototyped built-in function with user prototype at inner scope. */ /* Origin: Joseph Myers <joseph@codesourcery.com> */ /* { dg-do compile } */ /* { dg-options "-std=gnu99" } */ void f(void) { int isnan(void); } /* { dg-error "error: prototype declaration" } */ int isnan(a) int a; {} /* { dg-error "error: number of arguments doesn't match prototype" } */
shaotuanchen/sunflower_exp
tools/source/gcc-4.2.4/gcc/testsuite/gcc.dg/pr15698-5.c
C
bsd-3-clause
516
using System; using System.Threading.Tasks; namespace PrivacyPrompts { public interface IPrivacyManager { Task RequestAccess(); string CheckAccess(); } }
nervevau2/monotouch-samples
PrivacyPrompts/PrivacyPrompts/PrivacyManager/IPrivacyManager.cs
C#
mit
166
<?php namespace Test; use Doctrine\ORM\Mapping as ORM; use Doctrine\Common\Collections\ArrayCollection; /** * Test\Bureau * * This is a long comment for the bureaus table. It will appear in the doctrine * class and long lines will be wrapped. * * Multiple lines can be entered as well. * * @ORM\Entity() * @ORM\Table(name="bureaus", indexes={@ORM\Index(name="fk_bureaus_foo1", columns={"foo_id"})}, uniqueConstraints={@ORM\UniqueConstraint(name="testIndex", columns={"room"})}) */ class Bureau { /** * @ORM\Id * @ORM\Column(type="integer") * @ORM\GeneratedValue(strategy="AUTO") */ protected $id; /** * Comment for the room field. This comment will be used for the field in the * doctrine class and long lines will wrap with the correct indentation. * * New Lines are supported as well. * * @ORM\Column(type="string", length=45, nullable=true) */ protected $room; /** * @ORM\OneToOne(targetEntity="Foo", inversedBy="bureau") * @ORM\JoinColumn(name="foo_id", referencedColumnName="id", onDelete="CASCADE", nullable=false) */ protected $foo; /** * @ORM\ManyToMany(targetEntity="User", mappedBy="bureaus") */ protected $users; public function __construct() { $this->users = new ArrayCollection(); } /** * Set the value of id. * * @param integer $id * @return \Test\Bureau */ public function setId($id) { $this->id = $id; return $this; } /** * Get the value of id. * * @return integer */ public function getId() { return $this->id; } /** * Set the value of room. * * @param string $room * @return \Test\Bureau */ public function setRoom($room) { $this->room = $room; return $this; } /** * Get the value of room. * * @return string */ public function getRoom() { return $this->room; } /** * Set Foo entity (one to one). * * @param \Test\Foo $foo * @return \Test\Bureau */ public function setFoo(Foo $foo = null) { $foo->setBureau($this); $this->foo = $foo; return $this; } /** * Get Foo entity (one to one). 
* * @return \Test\Foo */ public function getFoo() { return $this->foo; } /** * Add User entity to collection. * * @param \Test\User $user * @return \Test\Bureau */ public function addUser(User $user) { $this->users[] = $user; return $this; } /** * Get User entity collection. * * @return \Doctrine\Common\Collections\Collection */ public function getUsers() { return $this->users; } public function __sleep() { return array('id', 'room', 'foo_id'); } }
manzolo/formazione
src/Wb/SchemaExporterBundle/example/diff/Model/Bureau.php
PHP
mit
2,567
/************************************************************* * * MathJax/localization/cdo/TeX.js * * Copyright (c) 2009-2016 The MathJax Consortium * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ MathJax.Localization.addTranslation("cdo","TeX",{ version: "2.7.2", isLoaded: true, strings: { } }); MathJax.Ajax.loadComplete("[MathJax]/localization/cdo/TeX.js");
MarkUsProject/Markus
vendor/assets/javascripts/MathJax_lib/localization/cdo/TeX.js
JavaScript
mit
928
<!-- Copyright (c) Microsoft. All rights reserved. Licensed under the MIT license. See LICENSE in the project root for license information. --> <div class="ms-MessageBanner"> <div class="ms-MessageBanner-content"> <div class="ms-MessageBanner-text"> <div class="ms-MessageBanner-clipper"> You've reached your total storage on OneDrive. Please upgrade your storage plan if you need more storage. </div> </div> <button class="ms-MessageBanner-expand"> <i class="ms-Icon ms-Icon--chevronsDown"></i> </button> <div class="ms-MessageBanner-action"> <button class="ms-Button ms-Button--primary ms-fontColor-neutralLight">Get More Storage</button> </div> </div> <button class="ms-MessageBanner-close"> <i class="ms-Icon ms-Icon--x"></i> </button> </div>
zkoehne/Office-UI-Fabric
src/components/MessageBanner/MessageBanner.html
HTML
mit
816
#ifndef _MOTOROLA_PGTABLE_H #define _MOTOROLA_PGTABLE_H /* * Definitions for MMU descriptors */ #define _PAGE_PRESENT 0x001 #define _PAGE_SHORT 0x002 #define _PAGE_RONLY 0x004 #define _PAGE_ACCESSED 0x008 #define _PAGE_DIRTY 0x010 #define _PAGE_SUPER 0x080 /* 68040 supervisor only */ #define _PAGE_GLOBAL040 0x400 /* 68040 global bit, used for kva descs */ #define _PAGE_NOCACHE030 0x040 /* 68030 no-cache mode */ #define _PAGE_NOCACHE 0x060 /* 68040 cache mode, non-serialized */ #define _PAGE_NOCACHE_S 0x040 /* 68040 no-cache mode, serialized */ #define _PAGE_CACHE040 0x020 /* 68040 cache mode, cachable, copyback */ #define _PAGE_CACHE040W 0x000 /* 68040 cache mode, cachable, write-through */ #define _DESCTYPE_MASK 0x003 #define _CACHEMASK040 (~0x060) #define _TABLE_MASK (0xfffffe00) #define _PAGE_TABLE (_PAGE_SHORT) #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NOCACHE) #define _PAGE_PROTNONE 0x004 #define _PAGE_FILE 0x008 /* pagecache or swap? */ #ifndef __ASSEMBLY__ /* This is the cache mode to be used for pages containing page descriptors for * processors >= '040. It is in pte_mknocache(), and the variable is defined * and initialized in head.S */ extern int m68k_pgtable_cachemode; /* This is the cache mode for normal pages, for supervisor access on * processors >= '040. 
It is used in pte_mkcache(), and the variable is * defined and initialized in head.S */ #if defined(CPU_M68060_ONLY) && defined(CONFIG_060_WRITETHROUGH) #define m68k_supervisor_cachemode _PAGE_CACHE040W #elif defined(CPU_M68040_OR_M68060_ONLY) #define m68k_supervisor_cachemode _PAGE_CACHE040 #elif defined(CPU_M68020_OR_M68030_ONLY) #define m68k_supervisor_cachemode 0 #else extern int m68k_supervisor_cachemode; #endif #if defined(CPU_M68040_OR_M68060_ONLY) #define mm_cachebits _PAGE_CACHE040 #elif defined(CPU_M68020_OR_M68030_ONLY) #define mm_cachebits 0 #else extern unsigned long mm_cachebits; #endif #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED | mm_cachebits) #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | mm_cachebits) #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | mm_cachebits) #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | mm_cachebits) #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | mm_cachebits) /* Alternate definitions that are compile time constants, for initializing protection_map. The cachebits are fixed later. */ #define PAGE_NONE_C __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) #define PAGE_SHARED_C __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED) #define PAGE_COPY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED) #define PAGE_READONLY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED) /* * The m68k can't do page protection for execute, and considers that the same are read. * Also, write permissions imply read permissions. This is the closest we can get.. 
*/ #define __P000 PAGE_NONE_C #define __P001 PAGE_READONLY_C #define __P010 PAGE_COPY_C #define __P011 PAGE_COPY_C #define __P100 PAGE_READONLY_C #define __P101 PAGE_READONLY_C #define __P110 PAGE_COPY_C #define __P111 PAGE_COPY_C #define __S000 PAGE_NONE_C #define __S001 PAGE_READONLY_C #define __S010 PAGE_SHARED_C #define __S011 PAGE_SHARED_C #define __S100 PAGE_READONLY_C #define __S101 PAGE_READONLY_C #define __S110 PAGE_SHARED_C #define __S111 PAGE_SHARED_C /* * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. */ #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; } static inline void pmd_set(pmd_t *pmdp, pte_t *ptep) { unsigned long ptbl = virt_to_phys(ptep) | _PAGE_TABLE | _PAGE_ACCESSED; unsigned long *ptr = pmdp->pmd; short i = 16; while (--i >= 0) { *ptr++ = ptbl; ptbl += (sizeof(pte_t)*PTRS_PER_PTE/16); } } static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp) { pgd_val(*pgdp) = _PAGE_TABLE | _PAGE_ACCESSED | __pa(pmdp); } #define __pte_page(pte) ((unsigned long)__va(pte_val(pte) & PAGE_MASK)) #define __pmd_page(pmd) ((unsigned long)__va(pmd_val(pmd) & _TABLE_MASK)) #define __pgd_page(pgd) ((unsigned long)__va(pgd_val(pgd) & _TABLE_MASK)) #define pte_none(pte) (!pte_val(pte)) #define pte_present(pte) (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE)) #define pte_clear(mm,addr,ptep) ({ pte_val(*(ptep)) = 0; }) #define pte_page(pte) (mem_map + ((unsigned long)(__va(pte_val(pte)) - PAGE_OFFSET) >> PAGE_SHIFT)) #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) #define pmd_none(pmd) (!pmd_val(pmd)) #define pmd_bad(pmd) ((pmd_val(pmd) & _DESCTYPE_MASK) != _PAGE_TABLE) #define pmd_present(pmd) (pmd_val(pmd) & _PAGE_TABLE) #define pmd_clear(pmdp) ({ \ 
unsigned long *__ptr = pmdp->pmd; \ short __i = 16; \ while (--__i >= 0) \ *__ptr++ = 0; \ }) #define pmd_page(pmd) (mem_map + ((unsigned long)(__va(pmd_val(pmd)) - PAGE_OFFSET) >> PAGE_SHIFT)) #define pgd_none(pgd) (!pgd_val(pgd)) #define pgd_bad(pgd) ((pgd_val(pgd) & _DESCTYPE_MASK) != _PAGE_TABLE) #define pgd_present(pgd) (pgd_val(pgd) & _PAGE_TABLE) #define pgd_clear(pgdp) ({ pgd_val(*pgdp) = 0; }) #define pgd_page(pgd) (mem_map + ((unsigned long)(__va(pgd_val(pgd)) - PAGE_OFFSET) >> PAGE_SHIFT)) #define pte_ERROR(e) \ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) #define pmd_ERROR(e) \ printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e)) #define pgd_ERROR(e) \ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) /* * The following only work if pte_present() is true. * Undefined behaviour if not.. */ static inline int pte_read(pte_t pte) { return 1; } static inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_RONLY); } static inline int pte_exec(pte_t pte) { return 1; } static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_RONLY; return pte; } static inline pte_t pte_rdprotect(pte_t pte) { return pte; } static inline pte_t pte_exprotect(pte_t pte) { return pte; } static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; } static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_RONLY; return pte; } static inline pte_t pte_mkread(pte_t pte) { return pte; } static inline pte_t pte_mkexec(pte_t pte) { return pte; } static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; } static inline pte_t 
pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; } static inline pte_t pte_mknocache(pte_t pte) { pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_pgtable_cachemode; return pte; } static inline pte_t pte_mkcache(pte_t pte) { pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_supervisor_cachemode; return pte; } #define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address)) #define pgd_index(address) ((address) >> PGDIR_SHIFT) /* to find an entry in a page-table-directory */ static inline pgd_t *pgd_offset(struct mm_struct *mm, unsigned long address) { return mm->pgd + pgd_index(address); } #define swapper_pg_dir kernel_pg_dir extern pgd_t kernel_pg_dir[128]; static inline pgd_t *pgd_offset_k(unsigned long address) { return kernel_pg_dir + (address >> PGDIR_SHIFT); } /* Find an entry in the second-level page table.. */ static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address) { return (pmd_t *)__pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PMD-1)); } /* Find an entry in the third-level page table.. */ static inline pte_t *pte_offset_kernel(pmd_t *pmdp, unsigned long address) { return (pte_t *)__pmd_page(*pmdp) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)); } #define pte_offset_map(pmdp,address) ((pte_t *)kmap(pmd_page(*pmdp)) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))) #define pte_offset_map_nested(pmdp, address) pte_offset_map(pmdp, address) #define pte_unmap(pte) kunmap(pte) #define pte_unmap_nested(pte) kunmap(pte) /* * Allocate and free page tables. The xxx_kernel() versions are * used to allocate a kernel page table - this turns on ASN bits * if any. */ /* Prior to calling these routines, the page should have been flushed * from both the cache and ATC, or the CPU might not notice that the * cache setting for the page has been changed. 
-jskov */ static inline void nocache_page(void *vaddr) { unsigned long addr = (unsigned long)vaddr; if (CPU_IS_040_OR_060) { pgd_t *dir; pmd_t *pmdp; pte_t *ptep; dir = pgd_offset_k(addr); pmdp = pmd_offset(dir, addr); ptep = pte_offset_kernel(pmdp, addr); *ptep = pte_mknocache(*ptep); } } static inline void cache_page(void *vaddr) { unsigned long addr = (unsigned long)vaddr; if (CPU_IS_040_OR_060) { pgd_t *dir; pmd_t *pmdp; pte_t *ptep; dir = pgd_offset_k(addr); pmdp = pmd_offset(dir, addr); ptep = pte_offset_kernel(pmdp, addr); *ptep = pte_mkcache(*ptep); } } #define PTE_FILE_MAX_BITS 28 static inline unsigned long pte_to_pgoff(pte_t pte) { return pte.pte >> 4; } static inline pte_t pgoff_to_pte(unsigned off) { pte_t pte = { (off << 4) + _PAGE_FILE }; return pte; } /* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */ #define __swp_type(x) (((x).val >> 4) & 0xff) #define __swp_offset(x) ((x).val >> 12) #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 4) | ((offset) << 12) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) #endif /* !__ASSEMBLY__ */ #endif /* _MOTOROLA_PGTABLE_H */
impedimentToProgress/UCI-BlueChip
snapgear_linux/linux-2.6.21.1/include/asm-m68k/motorola_pgtable.h
C
mit
10,118
using System; using UIKit; using Foundation; using ListerKit; namespace ListerToday { [Register("CheckBoxCell")] public class CheckBoxCell : UITableViewCell { [Outlet("label")] public UILabel Label { get; private set; } [Outlet("checkBox")] public CheckBox CheckBox { get; private set; } public CheckBoxCell (IntPtr handle) : base(handle) { } public override void PrepareForReuse () { TextLabel.Text = string.Empty; TextLabel.TextColor = UIColor.White; CheckBox.Checked = false; CheckBox.Hidden = false; CheckBox.TintColor = UIColor.Clear; } } }
davidrynn/monotouch-samples
ios8/Lister/ListerToday/CheckBoxCell.cs
C#
mit
598
/* * MDSS MDP Interface (used by framebuffer core) * * Copyright (c) 2007-2016, The Linux Foundation. All rights reserved. * Copyright (C) 2007 Google Incorporated * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/clk.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/hrtimer.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iommu.h> #include <linux/iopoll.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/regulator/consumer.h> #include <linux/regulator/rpm-smd-regulator.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/spinlock.h> #include <linux/semaphore.h> #include <linux/uaccess.h> #include <linux/clk/msm-clk.h> #include <linux/irqdomain.h> #include <linux/irq.h> #include <linux/msm-bus.h> #include <linux/msm-bus-board.h> #include <soc/qcom/scm.h> #include <soc/qcom/rpm-smd.h> #include "mdss.h" #include "mdss_fb.h" #include "mdss_mdp.h" #include "mdss_panel.h" #include "mdss_debug.h" #include "mdss_mdp_debug.h" #include "mdss_smmu.h" #include "mdss_mdp_trace.h" #define AXI_HALT_TIMEOUT_US 0x4000 #define AUTOSUSPEND_TIMEOUT_MS 200 #define DEFAULT_MDP_PIPE_WIDTH 2048 #define RES_1080p (1088*1920) #define RES_UHD (3840*2160) struct mdss_data_type *mdss_res; static u32 mem_protect_sd_ctrl_id; static int mdss_fb_mem_get_iommu_domain(void) { return 
mdss_smmu_get_domain_id(MDSS_IOMMU_DOMAIN_UNSECURE); } struct msm_mdp_interface mdp5 = { .init_fnc = mdss_mdp_overlay_init, .fb_mem_get_iommu_domain = mdss_fb_mem_get_iommu_domain, .fb_stride = mdss_mdp_fb_stride, .check_dsi_status = mdss_check_dsi_ctrl_status, .get_format_params = mdss_mdp_get_format_params, }; #define IB_QUOTA 2000000000 #define AB_QUOTA 2000000000 #define MAX_AXI_PORT_COUNT 3 #define MEM_PROTECT_SD_CTRL 0xF #define MEM_PROTECT_SD_CTRL_FLAT 0x14 static DEFINE_SPINLOCK(mdp_lock); static DEFINE_SPINLOCK(mdss_mdp_intr_lock); static DEFINE_MUTEX(mdp_clk_lock); static DEFINE_MUTEX(mdp_iommu_ref_cnt_lock); static DEFINE_MUTEX(mdp_fs_idle_pc_lock); static struct mdss_panel_intf pan_types[] = { {"dsi", MDSS_PANEL_INTF_DSI}, {"edp", MDSS_PANEL_INTF_EDP}, {"hdmi", MDSS_PANEL_INTF_HDMI}, }; static char mdss_mdp_panel[MDSS_MAX_PANEL_LEN]; struct mdss_hw mdss_mdp_hw = { .hw_ndx = MDSS_HW_MDP, .ptr = NULL, .irq_handler = mdss_mdp_isr, }; /* define for h/w block with external driver */ struct mdss_hw mdss_misc_hw = { .hw_ndx = MDSS_HW_MISC, .ptr = NULL, .irq_handler = NULL, }; #ifdef CONFIG_MSM_BUS_SCALING #define MDP_REG_BUS_VECTOR_ENTRY(ab_val, ib_val) \ { \ .src = MSM_BUS_MASTER_AMPSS_M0, \ .dst = MSM_BUS_SLAVE_DISPLAY_CFG, \ .ab = (ab_val), \ .ib = (ib_val), \ } #define BUS_VOTE_19_MHZ 153600000 #define BUS_VOTE_40_MHZ 320000000 #define BUS_VOTE_80_MHZ 640000000 static struct msm_bus_vectors mdp_reg_bus_vectors[] = { MDP_REG_BUS_VECTOR_ENTRY(0, 0), MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_19_MHZ), MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_40_MHZ), MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_80_MHZ), }; static struct msm_bus_paths mdp_reg_bus_usecases[ARRAY_SIZE( mdp_reg_bus_vectors)]; static struct msm_bus_scale_pdata mdp_reg_bus_scale_table = { .usecase = mdp_reg_bus_usecases, .num_usecases = ARRAY_SIZE(mdp_reg_bus_usecases), .name = "mdss_reg", .active_only = true, }; #endif u32 invalid_mdp107_wb_output_fmts[] = { MDP_XRGB_8888, MDP_RGBX_8888, MDP_BGRX_8888, }; /* * 
struct intr_call - array of intr handlers * @func: intr handler * @arg: requested argument to the handler */ struct intr_callback { void (*func)(void *); void *arg; }; /* * struct mdss_mdp_intr_reg - array of MDP intr register sets * @clr_off: offset to CLEAR reg * @en_off: offset to ENABLE reg * @status_off: offset to STATUS reg */ struct mdss_mdp_intr_reg { u32 clr_off; u32 en_off; u32 status_off; }; /* * struct mdss_mdp_irq - maps each irq with i/f * @intr_type: type of interface * @intf_num: i/f the irq is associated with * @irq_mask: corresponding bit in the reg set * @reg_idx: which reg set to program */ struct mdss_mdp_irq { u32 intr_type; u32 intf_num; u32 irq_mask; u32 reg_idx; }; static struct mdss_mdp_intr_reg mdp_intr_reg[] = { { MDSS_MDP_REG_INTR_CLEAR, MDSS_MDP_REG_INTR_EN, MDSS_MDP_REG_INTR_STATUS }, { MDSS_MDP_REG_INTR2_CLEAR, MDSS_MDP_REG_INTR2_EN, MDSS_MDP_REG_INTR2_STATUS } }; static struct mdss_mdp_irq mdp_irq_map[] = { { MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, 1, MDSS_MDP_INTR_INTF_0_UNDERRUN, 0}, { MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, 2, MDSS_MDP_INTR_INTF_1_UNDERRUN, 0}, { MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, 3, MDSS_MDP_INTR_INTF_2_UNDERRUN, 0}, { MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, 4, MDSS_MDP_INTR_INTF_3_UNDERRUN, 0}, { MDSS_MDP_IRQ_TYPE_INTF_VSYNC, 1, MDSS_MDP_INTR_INTF_0_VSYNC, 0}, { MDSS_MDP_IRQ_TYPE_INTF_VSYNC, 2, MDSS_MDP_INTR_INTF_1_VSYNC, 0}, { MDSS_MDP_IRQ_TYPE_INTF_VSYNC, 3, MDSS_MDP_INTR_INTF_2_VSYNC, 0}, { MDSS_MDP_IRQ_TYPE_INTF_VSYNC, 4, MDSS_MDP_INTR_INTF_3_VSYNC, 0}, { MDSS_MDP_IRQ_TYPE_PING_PONG_COMP, 0, MDSS_MDP_INTR_PING_PONG_0_DONE, 0}, { MDSS_MDP_IRQ_TYPE_PING_PONG_COMP, 1, MDSS_MDP_INTR_PING_PONG_1_DONE, 0}, { MDSS_MDP_IRQ_TYPE_PING_PONG_COMP, 2, MDSS_MDP_INTR_PING_PONG_2_DONE, 0}, { MDSS_MDP_IRQ_TYPE_PING_PONG_COMP, 3, MDSS_MDP_INTR_PING_PONG_3_DONE, 0}, { MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR, 0, MDSS_MDP_INTR_PING_PONG_0_RD_PTR, 0}, { MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR, 1, MDSS_MDP_INTR_PING_PONG_1_RD_PTR, 0}, { 
MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR, 2, MDSS_MDP_INTR_PING_PONG_2_RD_PTR, 0}, { MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR, 3, MDSS_MDP_INTR_PING_PONG_3_RD_PTR, 0}, { MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR, 0, MDSS_MDP_INTR_PING_PONG_0_WR_PTR, 0}, { MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR, 1, MDSS_MDP_INTR_PING_PONG_1_WR_PTR, 0}, { MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR, 2, MDSS_MDP_INTR_PING_PONG_2_WR_PTR, 0}, { MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR, 3, MDSS_MDP_INTR_PING_PONG_3_WR_PTR, 0}, { MDSS_MDP_IRQ_TYPE_WB_ROT_COMP, 0, MDSS_MDP_INTR_WB_0_DONE, 0}, { MDSS_MDP_IRQ_TYPE_WB_ROT_COMP, 1, MDSS_MDP_INTR_WB_1_DONE, 0}, { MDSS_MDP_IRQ_TYPE_WB_WFD_COMP, 0, MDSS_MDP_INTR_WB_2_DONE, 0}, { MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 0, MDSS_MDP_INTR_PING_PONG_0_AUTOREFRESH_DONE, 0}, { MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 1, MDSS_MDP_INTR_PING_PONG_1_AUTOREFRESH_DONE, 0}, { MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 2, MDSS_MDP_INTR_PING_PONG_2_AUTOREFRESH_DONE, 0}, { MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 3, MDSS_MDP_INTR_PING_PONG_3_AUTOREFRESH_DONE, 0}, { MDSS_MDP_IRQ_TYPE_CWB_OVERFLOW, 2, MDSS_MDP_INTR2_PING_PONG_2_CWB_OVERFLOW, 1}, { MDSS_MDP_IRQ_TYPE_CWB_OVERFLOW, 3, MDSS_MDP_INTR2_PING_PONG_2_CWB_OVERFLOW, 1} }; static struct intr_callback *mdp_intr_cb; static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on); static int mdss_mdp_parse_dt(struct platform_device *pdev); static int mdss_mdp_parse_dt_pipe(struct platform_device *pdev); static int mdss_mdp_parse_dt_mixer(struct platform_device *pdev); static int mdss_mdp_parse_dt_wb(struct platform_device *pdev); static int mdss_mdp_parse_dt_ctl(struct platform_device *pdev); static int mdss_mdp_parse_dt_video_intf(struct platform_device *pdev); static int mdss_mdp_parse_dt_handler(struct platform_device *pdev, char *prop_name, u32 *offsets, int len); static int mdss_mdp_parse_dt_prop_len(struct platform_device *pdev, char *prop_name); static int mdss_mdp_parse_dt_smp(struct platform_device *pdev); static int 
mdss_mdp_parse_dt_prefill(struct platform_device *pdev); static int mdss_mdp_parse_dt_misc(struct platform_device *pdev); static int mdss_mdp_parse_dt_ad_cfg(struct platform_device *pdev); static int mdss_mdp_parse_dt_bus_scale(struct platform_device *pdev); static int mdss_mdp_parse_dt_ppb_off(struct platform_device *pdev); static int mdss_mdp_parse_dt_cdm(struct platform_device *pdev); static int mdss_mdp_parse_dt_dsc(struct platform_device *pdev); static inline u32 is_mdp_irq_enabled(void) { struct mdss_data_type *mdata = mdss_mdp_get_mdata(); int i; for (i = 0; i < ARRAY_SIZE(mdp_intr_reg); i++) if (mdata->mdp_irq_mask[i] != 0) return 1; if (mdata->mdp_hist_irq_mask) return 1; if (mdata->mdp_intf_irq_mask) return 1; return 0; } u32 mdss_mdp_fb_stride(u32 fb_index, u32 xres, int bpp) { /* The adreno GPU hardware requires that the pitch be aligned to 32 pixels for color buffers, so for the cases where the GPU is writing directly to fb0, the framebuffer pitch also needs to be 32 pixel aligned */ if (fb_index == 0) return ALIGN(xres, 32) * bpp; else return xres * bpp; } static void mdss_irq_mask(struct irq_data *data) { struct mdss_data_type *mdata = irq_data_get_irq_chip_data(data); unsigned long irq_flags; if (!mdata) return; pr_debug("irq_domain_mask %lu\n", data->hwirq); if (data->hwirq < 32) { spin_lock_irqsave(&mdp_lock, irq_flags); mdata->mdss_util->disable_irq(&mdss_misc_hw); spin_unlock_irqrestore(&mdp_lock, irq_flags); } } static void mdss_irq_unmask(struct irq_data *data) { struct mdss_data_type *mdata = irq_data_get_irq_chip_data(data); unsigned long irq_flags; if (!mdata) return; pr_debug("irq_domain_unmask %lu\n", data->hwirq); if (data->hwirq < 32) { spin_lock_irqsave(&mdp_lock, irq_flags); mdata->mdss_util->enable_irq(&mdss_misc_hw); spin_unlock_irqrestore(&mdp_lock, irq_flags); } } static struct irq_chip mdss_irq_chip = { .name = "mdss", .irq_mask = mdss_irq_mask, .irq_unmask = mdss_irq_unmask, }; static int mdss_irq_domain_map(struct irq_domain 
*d, unsigned int virq, irq_hw_number_t hw) { struct mdss_data_type *mdata = d->host_data; /* check here if virq is a valid interrupt line */ irq_set_chip_and_handler(virq, &mdss_irq_chip, handle_level_irq); irq_set_chip_data(virq, mdata); set_irq_flags(virq, IRQF_VALID); return 0; } static struct irq_domain_ops mdss_irq_domain_ops = { .map = mdss_irq_domain_map, .xlate = irq_domain_xlate_onecell, }; static irqreturn_t mdss_irq_handler(int irq, void *ptr) { struct mdss_data_type *mdata = ptr; u32 intr; if (!mdata) return IRQ_NONE; else if (!mdss_get_irq_enable_state(&mdss_mdp_hw)) return IRQ_HANDLED; intr = MDSS_REG_READ(mdata, MDSS_REG_HW_INTR_STATUS); mdss_mdp_hw.irq_info->irq_buzy = true; if (intr & MDSS_INTR_MDP) { spin_lock(&mdp_lock); mdata->mdss_util->irq_dispatch(MDSS_HW_MDP, irq, ptr); spin_unlock(&mdp_lock); intr &= ~MDSS_INTR_MDP; } if (intr & MDSS_INTR_DSI0) { mdata->mdss_util->irq_dispatch(MDSS_HW_DSI0, irq, ptr); intr &= ~MDSS_INTR_DSI0; } if (intr & MDSS_INTR_DSI1) { mdata->mdss_util->irq_dispatch(MDSS_HW_DSI1, irq, ptr); intr &= ~MDSS_INTR_DSI1; } if (intr & MDSS_INTR_EDP) { mdata->mdss_util->irq_dispatch(MDSS_HW_EDP, irq, ptr); intr &= ~MDSS_INTR_EDP; } if (intr & MDSS_INTR_HDMI) { mdata->mdss_util->irq_dispatch(MDSS_HW_HDMI, irq, ptr); intr &= ~MDSS_INTR_HDMI; } /* route misc. 
interrupts to external drivers */ while (intr) { irq_hw_number_t hwirq = fls(intr) - 1; generic_handle_irq(irq_find_mapping( mdata->irq_domain, hwirq)); intr &= ~(1 << hwirq); } mdss_mdp_hw.irq_info->irq_buzy = false; return IRQ_HANDLED; } #ifdef CONFIG_MSM_BUS_SCALING static int mdss_mdp_bus_scale_register(struct mdss_data_type *mdata) { struct msm_bus_scale_pdata *reg_bus_pdata; int i, rc; if (!mdata->bus_hdl) { rc = mdss_mdp_parse_dt_bus_scale(mdata->pdev); if (rc) { pr_err("Error in device tree : bus scale\n"); return rc; } mdata->bus_hdl = msm_bus_scale_register_client(mdata->bus_scale_table); if (!mdata->bus_hdl) { pr_err("bus_client register failed\n"); return -EINVAL; } pr_debug("register bus_hdl=%x\n", mdata->bus_hdl); } if (!mdata->reg_bus_scale_table) { reg_bus_pdata = &mdp_reg_bus_scale_table; for (i = 0; i < reg_bus_pdata->num_usecases; i++) { mdp_reg_bus_usecases[i].num_paths = 1; mdp_reg_bus_usecases[i].vectors = &mdp_reg_bus_vectors[i]; } mdata->reg_bus_scale_table = reg_bus_pdata; } if (!mdata->reg_bus_hdl) { mdata->reg_bus_hdl = msm_bus_scale_register_client( mdata->reg_bus_scale_table); if (!mdata->reg_bus_hdl) /* Continue without reg_bus scaling */ pr_warn("reg_bus_client register failed\n"); else pr_debug("register reg_bus_hdl=%x\n", mdata->reg_bus_hdl); } if (mdata->hw_rt_bus_scale_table && !mdata->hw_rt_bus_hdl) { mdata->hw_rt_bus_hdl = msm_bus_scale_register_client( mdata->hw_rt_bus_scale_table); if (!mdata->hw_rt_bus_hdl) /* Continue without reg_bus scaling */ pr_warn("hw_rt_bus client register failed\n"); else pr_debug("register hw_rt_bus=%x\n", mdata->hw_rt_bus_hdl); } /* * Following call will not result in actual vote rather update the * current index and ab/ib value. When continuous splash is enabled, * actual vote will happen when splash handoff is done. 
*/ return mdss_bus_scale_set_quota(MDSS_MDP_RT, AB_QUOTA, IB_QUOTA); } static void mdss_mdp_bus_scale_unregister(struct mdss_data_type *mdata) { pr_debug("unregister bus_hdl=%x\n", mdata->bus_hdl); if (mdata->bus_hdl) msm_bus_scale_unregister_client(mdata->bus_hdl); pr_debug("unregister reg_bus_hdl=%x\n", mdata->reg_bus_hdl); if (mdata->reg_bus_hdl) { msm_bus_scale_unregister_client(mdata->reg_bus_hdl); mdata->reg_bus_hdl = 0; } if (mdata->hw_rt_bus_hdl) { msm_bus_scale_unregister_client(mdata->hw_rt_bus_hdl); mdata->hw_rt_bus_hdl = 0; } } /* * Caller needs to hold mdata->bus_lock lock before calling this function. */ static int mdss_mdp_bus_scale_set_quota(u64 ab_quota_rt, u64 ab_quota_nrt, u64 ib_quota_rt, u64 ib_quota_nrt) { int new_uc_idx; u64 ab_quota[MAX_AXI_PORT_COUNT] = {0, 0}; u64 ib_quota[MAX_AXI_PORT_COUNT] = {0, 0}; int rc; if (mdss_res->bus_hdl < 1) { pr_err("invalid bus handle %d\n", mdss_res->bus_hdl); return -EINVAL; } if (!ab_quota_rt && !ab_quota_nrt && !ib_quota_rt && !ib_quota_nrt) { new_uc_idx = 0; } else { int i; struct msm_bus_vectors *vect = NULL; struct msm_bus_scale_pdata *bw_table = mdss_res->bus_scale_table; u32 nrt_axi_port_cnt = mdss_res->nrt_axi_port_cnt; u32 total_axi_port_cnt = mdss_res->axi_port_cnt; u32 rt_axi_port_cnt = total_axi_port_cnt - nrt_axi_port_cnt; int match_cnt = 0; if (!bw_table || !total_axi_port_cnt || total_axi_port_cnt > MAX_AXI_PORT_COUNT) { pr_err("invalid input\n"); return -EINVAL; } if (mdss_res->bus_channels) { ib_quota_rt = div_u64(ib_quota_rt, mdss_res->bus_channels); ib_quota_nrt = div_u64(ib_quota_nrt, mdss_res->bus_channels); } if (mdss_res->has_fixed_qos_arbiter_enabled || nrt_axi_port_cnt) { ab_quota_rt = div_u64(ab_quota_rt, rt_axi_port_cnt); ab_quota_nrt = div_u64(ab_quota_nrt, nrt_axi_port_cnt); for (i = 0; i < total_axi_port_cnt; i++) { if (i < rt_axi_port_cnt) { ab_quota[i] = ab_quota_rt; ib_quota[i] = ib_quota_rt; } else { ab_quota[i] = ab_quota_nrt; ib_quota[i] = ib_quota_nrt; } } } else { 
ab_quota[0] = div_u64(ab_quota_rt + ab_quota_nrt, total_axi_port_cnt); ib_quota[0] = ib_quota_rt + ib_quota_nrt; for (i = 1; i < total_axi_port_cnt; i++) { ab_quota[i] = ab_quota[0]; ib_quota[i] = ib_quota[0]; } } for (i = 0; i < total_axi_port_cnt; i++) { vect = &bw_table->usecase [mdss_res->curr_bw_uc_idx].vectors[i]; /* avoid performing updates for small changes */ if ((ab_quota[i] == vect->ab) && (ib_quota[i] == vect->ib)) match_cnt++; } if (match_cnt == total_axi_port_cnt) { pr_debug("skip BW vote\n"); return 0; } new_uc_idx = (mdss_res->curr_bw_uc_idx % (bw_table->num_usecases - 1)) + 1; for (i = 0; i < total_axi_port_cnt; i++) { vect = &bw_table->usecase[new_uc_idx].vectors[i]; vect->ab = ab_quota[i]; vect->ib = ib_quota[i]; pr_debug("uc_idx=%d %s path idx=%d ab=%llu ib=%llu\n", new_uc_idx, (i < rt_axi_port_cnt) ? "rt" : "nrt" , i, vect->ab, vect->ib); } } mdss_res->curr_bw_uc_idx = new_uc_idx; mdss_res->ao_bw_uc_idx = new_uc_idx; if ((mdss_res->bus_ref_cnt == 0) && mdss_res->curr_bw_uc_idx) { rc = 0; } else { /* vote BW if bus_bw_cnt > 0 or uc_idx is zero */ ATRACE_BEGIN("msm_bus_scale_req"); rc = msm_bus_scale_client_update_request(mdss_res->bus_hdl, new_uc_idx); ATRACE_END("msm_bus_scale_req"); } return rc; } struct reg_bus_client *mdss_reg_bus_vote_client_create(char *client_name) { struct reg_bus_client *client; static u32 id; if (client_name == NULL) { pr_err("client name is null\n"); return ERR_PTR(-EINVAL); } client = kzalloc(sizeof(struct reg_bus_client), GFP_KERNEL); if (!client) return ERR_PTR(-ENOMEM); mutex_lock(&mdss_res->reg_bus_lock); strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN); client->usecase_ndx = VOTE_INDEX_DISABLE; client->id = id; pr_debug("bus vote client %s created:%pK id :%d\n", client_name, client, id); id++; list_add(&client->list, &mdss_res->reg_bus_clist); mutex_unlock(&mdss_res->reg_bus_lock); return client; } void mdss_reg_bus_vote_client_destroy(struct reg_bus_client *client) { if (!client) { pr_err("reg bus vote: 
invalid client handle\n"); } else { pr_debug("bus vote client %s destroyed:%pK id:%u\n", client->name, client, client->id); mutex_lock(&mdss_res->reg_bus_lock); list_del_init(&client->list); mutex_unlock(&mdss_res->reg_bus_lock); kfree(client); } } int mdss_update_reg_bus_vote(struct reg_bus_client *bus_client, u32 usecase_ndx) { int ret = 0; bool changed = false; u32 max_usecase_ndx = VOTE_INDEX_DISABLE; struct reg_bus_client *client, *temp_client; if (!mdss_res || !mdss_res->reg_bus_hdl || !bus_client) return 0; mutex_lock(&mdss_res->reg_bus_lock); bus_client->usecase_ndx = usecase_ndx; list_for_each_entry_safe(client, temp_client, &mdss_res->reg_bus_clist, list) { if (client->usecase_ndx < VOTE_INDEX_MAX && client->usecase_ndx > max_usecase_ndx) max_usecase_ndx = client->usecase_ndx; } if (mdss_res->reg_bus_usecase_ndx != max_usecase_ndx) { changed = true; mdss_res->reg_bus_usecase_ndx = max_usecase_ndx; } pr_debug("%pS: changed=%d current idx=%d request client %s id:%u idx:%d\n", __builtin_return_address(0), changed, max_usecase_ndx, bus_client->name, bus_client->id, usecase_ndx); MDSS_XLOG(changed, max_usecase_ndx, bus_client->id, usecase_ndx); if (changed) ret = msm_bus_scale_client_update_request(mdss_res->reg_bus_hdl, max_usecase_ndx); mutex_unlock(&mdss_res->reg_bus_lock); return ret; } int mdss_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota) { int rc = 0; int i; u64 total_ab_rt = 0, total_ib_rt = 0; u64 total_ab_nrt = 0, total_ib_nrt = 0; mutex_lock(&mdss_res->bus_lock); mdss_res->ab[client] = ab_quota; mdss_res->ib[client] = ib_quota; trace_mdp_perf_update_bus(client, ab_quota, ib_quota); for (i = 0; i < MDSS_MAX_BUS_CLIENTS; i++) { if (i == MDSS_MDP_NRT) { total_ab_nrt = mdss_res->ab[i]; total_ib_nrt = mdss_res->ib[i]; } else { total_ab_rt += mdss_res->ab[i]; total_ib_rt = max(total_ib_rt, mdss_res->ib[i]); } } rc = mdss_mdp_bus_scale_set_quota(total_ab_rt, total_ab_nrt, total_ib_rt, total_ib_nrt); mutex_unlock(&mdss_res->bus_lock); return 
rc; } #else static int mdss_mdp_bus_scale_register(struct mdss_data_type *mdata) { return 0; } static void mdss_mdp_bus_scale_unregister(struct mdss_data_type *mdata) { } int mdss_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota) { pr_debug("No bus scaling! client=%d ab=%llu ib=%llu\n", client, ab_quota, ib_quota); return 0; } struct reg_bus_client *mdss_reg_bus_vote_client_create(char *client_name) { return NULL; } void mdss_reg_bus_vote_client_destroy(struct reg_bus_client *client) { } int mdss_update_reg_bus_vote(struct reg_bus_client *bus_client, u32 usecase_ndx) { pr_debug("%pS: No reg scaling! usecase=%u\n", __builtin_return_address(0), usecase_ndx); return 0; } #endif static int mdss_mdp_intr2index(u32 intr_type, u32 intf_num) { int i; for (i = 0; i < ARRAY_SIZE(mdp_irq_map); i++) { if (intr_type == mdp_irq_map[i].intr_type && intf_num == mdp_irq_map[i].intf_num) return i; } return -EINVAL; } u32 mdss_mdp_get_irq_mask(u32 intr_type, u32 intf_num) { int idx = mdss_mdp_intr2index(intr_type, intf_num); return (idx < 0) ? 
0 : mdp_irq_map[idx].irq_mask; } void mdss_mdp_enable_hw_irq(struct mdss_data_type *mdata) { mdata->mdss_util->enable_irq(&mdss_mdp_hw); } void mdss_mdp_disable_hw_irq(struct mdss_data_type *mdata) { if (!is_mdp_irq_enabled()) mdata->mdss_util->disable_irq(&mdss_mdp_hw); } /* function assumes that mdp is clocked to access hw registers */ void mdss_mdp_irq_clear(struct mdss_data_type *mdata, u32 intr_type, u32 intf_num) { unsigned long irq_flags; int irq_idx; struct mdss_mdp_intr_reg reg; struct mdss_mdp_irq irq; irq_idx = mdss_mdp_intr2index(intr_type, intf_num); if (irq_idx < 0) { pr_err("invalid irq request\n"); return; } irq = mdp_irq_map[irq_idx]; reg = mdp_intr_reg[irq.reg_idx]; pr_debug("clearing mdp irq mask=%x\n", irq.irq_mask); spin_lock_irqsave(&mdp_lock, irq_flags); writel_relaxed(irq.irq_mask, mdata->mdp_base + reg.clr_off); spin_unlock_irqrestore(&mdp_lock, irq_flags); } int mdss_mdp_irq_enable(u32 intr_type, u32 intf_num) { int irq_idx, idx; unsigned long irq_flags; int ret = 0; struct mdss_data_type *mdata = mdss_mdp_get_mdata(); struct mdss_mdp_intr_reg reg; struct mdss_mdp_irq irq; irq_idx = mdss_mdp_intr2index(intr_type, intf_num); if (irq_idx < 0) { pr_err("invalid irq request\n"); return -EINVAL; } irq = mdp_irq_map[irq_idx]; reg = mdp_intr_reg[irq.reg_idx]; spin_lock_irqsave(&mdp_lock, irq_flags); if (mdata->mdp_irq_mask[irq.reg_idx] & irq.irq_mask) { pr_warn("MDSS MDP IRQ-0x%x is already set, mask=%x\n", irq.irq_mask, mdata->mdp_irq_mask[idx]); ret = -EBUSY; } else { pr_debug("MDP IRQ mask old=%x new=%x\n", mdata->mdp_irq_mask[irq.reg_idx], irq.irq_mask); mdata->mdp_irq_mask[irq.reg_idx] |= irq.irq_mask; writel_relaxed(irq.irq_mask, mdata->mdp_base + reg.clr_off); writel_relaxed(mdata->mdp_irq_mask[irq.reg_idx], mdata->mdp_base + reg.en_off); mdata->mdss_util->enable_irq(&mdss_mdp_hw); } spin_unlock_irqrestore(&mdp_lock, irq_flags); return ret; } int mdss_mdp_hist_irq_enable(u32 irq) { int ret = 0; struct mdss_data_type *mdata = 
mdss_mdp_get_mdata(); if (mdata->mdp_hist_irq_mask & irq) { pr_warn("MDSS MDP Hist IRQ-0x%x is already set, mask=%x\n", irq, mdata->mdp_hist_irq_mask); ret = -EBUSY; } else { pr_debug("mask old=%x new=%x\n", mdata->mdp_hist_irq_mask, irq); mdata->mdp_hist_irq_mask |= irq; writel_relaxed(irq, mdata->mdp_base + MDSS_MDP_REG_HIST_INTR_CLEAR); writel_relaxed(mdata->mdp_hist_irq_mask, mdata->mdp_base + MDSS_MDP_REG_HIST_INTR_EN); mdata->mdss_util->enable_irq(&mdss_mdp_hw); } return ret; } void mdss_mdp_irq_disable(u32 intr_type, u32 intf_num) { int irq_idx; unsigned long irq_flags; struct mdss_data_type *mdata = mdss_mdp_get_mdata(); struct mdss_mdp_intr_reg reg; struct mdss_mdp_irq irq; irq_idx = mdss_mdp_intr2index(intr_type, intf_num); if (irq_idx < 0) { pr_err("invalid irq request\n"); return; } irq = mdp_irq_map[irq_idx]; reg = mdp_intr_reg[irq.reg_idx]; spin_lock_irqsave(&mdp_lock, irq_flags); if (!(mdata->mdp_irq_mask[irq.reg_idx] & irq.irq_mask)) { pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n", irq.irq_mask, mdata->mdp_irq_mask[irq.reg_idx]); } else { mdata->mdp_irq_mask[irq.reg_idx] &= ~irq.irq_mask; writel_relaxed(mdata->mdp_irq_mask[irq.reg_idx], mdata->mdp_base + reg.en_off); if (!is_mdp_irq_enabled()) mdata->mdss_util->disable_irq(&mdss_mdp_hw); } spin_unlock_irqrestore(&mdp_lock, irq_flags); } /* This function is used to check and clear the status of MDP interrupts */ void mdss_mdp_intr_check_and_clear(u32 intr_type, u32 intf_num) { u32 status; int irq_idx; unsigned long irq_flags; struct mdss_data_type *mdata = mdss_mdp_get_mdata(); struct mdss_mdp_intr_reg reg; struct mdss_mdp_irq irq; irq_idx = mdss_mdp_intr2index(intr_type, intf_num); if (irq_idx < 0) { pr_err("invalid irq request\n"); return; } irq = mdp_irq_map[irq_idx]; reg = mdp_intr_reg[irq.reg_idx]; spin_lock_irqsave(&mdp_lock, irq_flags); status = irq.irq_mask & readl_relaxed(mdata->mdp_base + reg.status_off); if (status) { pr_debug("clearing irq: intr_type:%d, intf_num:%d\n", intr_type, 
intf_num); writel_relaxed(irq.irq_mask, mdata->mdp_base + reg.clr_off); } spin_unlock_irqrestore(&mdp_lock, irq_flags); } void mdss_mdp_hist_irq_disable(u32 irq) { struct mdss_data_type *mdata = mdss_mdp_get_mdata(); if (!(mdata->mdp_hist_irq_mask & irq)) { pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n", irq, mdata->mdp_hist_irq_mask); } else { mdata->mdp_hist_irq_mask &= ~irq; writel_relaxed(mdata->mdp_hist_irq_mask, mdata->mdp_base + MDSS_MDP_REG_HIST_INTR_EN); if (!is_mdp_irq_enabled()) mdata->mdss_util->disable_irq(&mdss_mdp_hw); } } /** * mdss_mdp_irq_disable_nosync() - disable mdp irq * @intr_type: mdp interface type * @intf_num: mdp interface num * * This fucntion is called from interrupt context * mdp_lock is already held at up stream (mdss_irq_handler) * therefore spin_lock(&mdp_lock) is not allowed here * */ void mdss_mdp_irq_disable_nosync(u32 intr_type, u32 intf_num) { int irq_idx; struct mdss_data_type *mdata = mdss_mdp_get_mdata(); struct mdss_mdp_intr_reg reg; struct mdss_mdp_irq irq; irq_idx = mdss_mdp_intr2index(intr_type, intf_num); if (irq_idx < 0) { pr_err("invalid irq request\n"); return; } irq = mdp_irq_map[irq_idx]; reg = mdp_intr_reg[irq.reg_idx]; if (!(mdata->mdp_irq_mask[irq.reg_idx] & irq.irq_mask)) { pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n", irq.irq_mask, mdata->mdp_irq_mask[irq.reg_idx]); } else { mdata->mdp_irq_mask[irq.reg_idx] &= ~irq.irq_mask; writel_relaxed(mdata->mdp_irq_mask[irq.reg_idx], mdata->mdp_base + reg.en_off); if (!is_mdp_irq_enabled()) mdata->mdss_util->disable_irq_nosync(&mdss_mdp_hw); } } int mdss_mdp_set_intr_callback(u32 intr_type, u32 intf_num, void (*fnc_ptr)(void *), void *arg) { unsigned long flags; int index; index = mdss_mdp_intr2index(intr_type, intf_num); if (index < 0) { pr_warn("invalid intr type=%u intf_numf_num=%u\n", intr_type, intf_num); return -EINVAL; } spin_lock_irqsave(&mdss_mdp_intr_lock, flags); WARN(mdp_intr_cb[index].func && fnc_ptr, "replacing current intr callback for ndx=%d\n", 
index); mdp_intr_cb[index].func = fnc_ptr; mdp_intr_cb[index].arg = arg; spin_unlock_irqrestore(&mdss_mdp_intr_lock, flags); return 0; } int mdss_mdp_set_intr_callback_nosync(u32 intr_type, u32 intf_num, void (*fnc_ptr)(void *), void *arg) { int index; index = mdss_mdp_intr2index(intr_type, intf_num); if (index < 0) { pr_warn("invalid intr Typee=%u intf_num=%u\n", intr_type, intf_num); return -EINVAL; } WARN(mdp_intr_cb[index].func && fnc_ptr, "replacing current intr callbackack for ndx=%d\n", index); mdp_intr_cb[index].func = fnc_ptr; mdp_intr_cb[index].arg = arg; return 0; } static inline void mdss_mdp_intr_done(int index) { void (*fnc)(void *); void *arg; spin_lock(&mdss_mdp_intr_lock); fnc = mdp_intr_cb[index].func; arg = mdp_intr_cb[index].arg; spin_unlock(&mdss_mdp_intr_lock); if (fnc) fnc(arg); } irqreturn_t mdss_mdp_isr(int irq, void *ptr) { struct mdss_data_type *mdata = ptr; u32 isr, mask, hist_isr, hist_mask; int i, j; if (!mdata->clk_ena) return IRQ_HANDLED; for (i = 0; i < ARRAY_SIZE(mdp_intr_reg); i++) { struct mdss_mdp_intr_reg reg = mdp_intr_reg[i]; isr = readl_relaxed(mdata->mdp_base + reg.status_off); if (isr == 0) continue; mask = readl_relaxed(mdata->mdp_base + reg.en_off); writel_relaxed(isr, mdata->mdp_base + reg.clr_off); pr_debug("%s: reg:%d isr=%x mask=%x\n", __func__, i+1, isr, mask); isr &= mask; if (isr == 0) continue; for (j = 0; j < ARRAY_SIZE(mdp_irq_map); j++) if (mdp_irq_map[j].reg_idx == i && (isr & mdp_irq_map[j].irq_mask)) mdss_mdp_intr_done(j); if (!i) { if (isr & MDSS_MDP_INTR_PING_PONG_0_DONE) mdss_misr_crc_collect(mdata, DISPLAY_MISR_DSI0, false); if (isr & MDSS_MDP_INTR_PING_PONG_1_DONE) mdss_misr_crc_collect(mdata, DISPLAY_MISR_DSI1, false); if (isr & MDSS_MDP_INTR_INTF_0_VSYNC) mdss_misr_crc_collect(mdata, DISPLAY_MISR_EDP, true); if (isr & MDSS_MDP_INTR_INTF_1_VSYNC) mdss_misr_crc_collect(mdata, DISPLAY_MISR_DSI0, true); if (isr & MDSS_MDP_INTR_INTF_2_VSYNC) mdss_misr_crc_collect(mdata, DISPLAY_MISR_DSI1, true); if (isr & 
MDSS_MDP_INTR_INTF_3_VSYNC)
                mdss_misr_crc_collect(mdata, DISPLAY_MISR_HDMI, true);
            if (isr & MDSS_MDP_INTR_WB_0_DONE)
                mdss_misr_crc_collect(mdata, DISPLAY_MISR_MDP, true);
            if (isr & MDSS_MDP_INTR_WB_1_DONE)
                mdss_misr_crc_collect(mdata, DISPLAY_MISR_MDP, true);
            if (isr & MDSS_MDP_INTR_WB_2_DONE)
                mdss_misr_crc_collect(mdata, DISPLAY_MISR_MDP, true);
        }
    }

    /* histogram block has its own status/enable/clear registers */
    hist_isr = readl_relaxed(mdata->mdp_base +
            MDSS_MDP_REG_HIST_INTR_STATUS);
    if (hist_isr != 0) {
        hist_mask = readl_relaxed(mdata->mdp_base +
                MDSS_MDP_REG_HIST_INTR_EN);
        writel_relaxed(hist_isr, mdata->mdp_base +
                MDSS_MDP_REG_HIST_INTR_CLEAR);
        hist_isr &= hist_mask;
        if (hist_isr != 0)
            mdss_mdp_hist_intr_done(hist_isr);
    }

    mdss_mdp_video_isr(mdata->video_intf, mdata->nintf);
    return IRQ_HANDLED;
}

/*
 * Enable/disable one MDSS clock by index. Returns 0 on success, -ENODEV
 * when the clock was never registered. The vsync clock is pinned to
 * 19.2 MHz before enabling.
 */
static int mdss_mdp_clk_update(u32 clk_idx, u32 enable)
{
    int ret = -ENODEV;
    struct clk *clk = mdss_mdp_get_clk(clk_idx);

    if (clk) {
        pr_debug("clk=%d en=%d\n", clk_idx, enable);
        if (enable) {
            if (clk_idx == MDSS_CLK_MDP_VSYNC)
                clk_set_rate(clk, 19200000);
            ret = clk_prepare_enable(clk);
        } else {
            clk_disable_unprepare(clk);
            ret = 0;
        }
    }
    return ret;
}

/*
 * Toggle the vsync clock, tracking state in mdss_res->vsync_ena so
 * repeated calls with the same value are no-ops. @locked tells us the
 * caller already holds mdp_clk_lock.
 */
int mdss_mdp_vsync_clk_enable(int enable, bool locked)
{
    int ret = 0;

    pr_debug("clk enable=%d\n", enable);

    if (!locked)
        mutex_lock(&mdp_clk_lock);

    if (mdss_res->vsync_ena != enable) {
        mdss_res->vsync_ena = enable;
        ret = mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);
    }

    if (!locked)
        mutex_unlock(&mdp_clk_lock);
    return ret;
}

/*
 * Set the MDP core clock rate, clamped below to perf_tune.min_mdp_clk and
 * above to max_mdp_clk_rate; only touches hardware if the rounded rate
 * differs from the current one.
 */
void mdss_mdp_set_clk_rate(unsigned long rate)
{
    struct mdss_data_type *mdata = mdss_res;
    unsigned long clk_rate;
    struct clk *clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
    unsigned long min_clk_rate;

    min_clk_rate = max(rate, mdata->perf_tune.min_mdp_clk);

    if (clk) {
        mutex_lock(&mdp_clk_lock);
        if (min_clk_rate < mdata->max_mdp_clk_rate)
            clk_rate = clk_round_rate(clk, min_clk_rate);
        else
            clk_rate = mdata->max_mdp_clk_rate;
        if (IS_ERR_VALUE(clk_rate)) {
            pr_err("unable to round rate err=%ld\n", clk_rate);
        } else if (clk_rate != clk_get_rate(clk)) {
            if
(IS_ERR_VALUE(clk_set_rate(clk, clk_rate)))
                pr_err("clk_set_rate failed\n");
            else
                pr_debug("mdp clk rate=%lu\n", clk_rate);
        }
        mutex_unlock(&mdp_clk_lock);
    } else {
        pr_err("mdp src clk not setup properly\n");
    }
}

/* Read the current rate of one MDSS clock; 0 if the clock is absent.
 * @locked: caller already holds mdp_clk_lock. */
unsigned long mdss_mdp_get_clk_rate(u32 clk_idx, bool locked)
{
    unsigned long clk_rate = 0;
    struct clk *clk = mdss_mdp_get_clk(clk_idx);

    if (clk) {
        if (!locked)
            mutex_lock(&mdp_clk_lock);
        clk_rate = clk_get_rate(clk);
        if (!locked)
            mutex_unlock(&mdp_clk_lock);
    }
    return clk_rate;
}

/**
 * mdss_bus_rt_bw_vote() -- place bus bandwidth request
 * @enable: value of enable or disable
 *
 * hw_rt table has two entries, 0 and Min Vote (1Mhz)
 * while attaching SMMU and for few TZ operations which
 * happen at very early stage, we will request Min Vote
 * through this handle.
 */
static int mdss_bus_rt_bw_vote(bool enable)
{
    struct mdss_data_type *mdata = mdss_mdp_get_mdata();
    int rc = 0;
    bool changed = false;

    /* no handle, or splash handoff still owns the bus: nothing to vote */
    if (!mdata->hw_rt_bus_hdl || mdata->handoff_pending)
        return 0;

    /* reference counted: only the 0->1 and 1->0 edges touch the bus */
    if (enable) {
        if (mdata->hw_rt_bus_ref_cnt == 0)
            changed = true;
        mdata->hw_rt_bus_ref_cnt++;
    } else {
        if (mdata->hw_rt_bus_ref_cnt != 0) {
            mdata->hw_rt_bus_ref_cnt--;
            if (mdata->hw_rt_bus_ref_cnt == 0)
                changed = true;
        } else {
            pr_warn("%s: bus bw votes are not balanced\n",
                    __func__);
        }
    }

    pr_debug("%pS: task:%s bw_cnt=%d changed=%d enable=%d\n",
            __builtin_return_address(0), current->group_leader->comm,
            mdata->hw_rt_bus_ref_cnt, changed, enable);

    if (changed) {
        rc = msm_bus_scale_client_update_request(mdata->hw_rt_bus_hdl,
                enable ?
1 : 0);
        if (rc)
            pr_err("%s: Bus bandwidth vote failed\n", __func__);
    }

    return rc;
}

/**
 * __mdss_mdp_reg_access_clk_enable - Enable minimum MDSS clocks required
 * for register access
 */
static inline void __mdss_mdp_reg_access_clk_enable(
        struct mdss_data_type *mdata, bool enable)
{
    if (enable) {
        /* vote bus/bw first, then gate clocks on */
        mdss_update_reg_bus_vote(mdata->reg_bus_clt,
                VOTE_INDEX_LOW);
        mdss_bus_rt_bw_vote(true);
        mdss_mdp_clk_update(MDSS_CLK_AHB, 1);
        mdss_mdp_clk_update(MDSS_CLK_AXI, 1);
        mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, 1);
    } else {
        /* reverse order on the way down */
        mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, 0);
        mdss_mdp_clk_update(MDSS_CLK_AXI, 0);
        mdss_mdp_clk_update(MDSS_CLK_AHB, 0);
        mdss_bus_rt_bw_vote(false);
        mdss_update_reg_bus_vote(mdata->reg_bus_clt,
                VOTE_INDEX_DISABLE);
    }
}

/*
 * Halt the AXI port of one VBIF (real-time or non-real-time) and poll for
 * the halt acknowledge bit. Returns 0 on success, -ETIMEDOUT if the ack
 * never arrives. Returns 0 silently when the target has no NRT port.
 */
int __mdss_mdp_vbif_halt(struct mdss_data_type *mdata, bool is_nrt)
{
    int rc = 0;
    void __iomem *base;
    u32 halt_ack_mask = BIT(0), status;

    /* if not real time vbif */
    if (is_nrt)
        base = mdata->vbif_nrt_io.base;
    else
        base = mdata->vbif_io.base;

    if (!base) {
        /* some targets might not have a nrt port */
        goto vbif_done;
    }

    /* force vbif clock on */
    MDSS_VBIF_WRITE(mdata, MMSS_VBIF_CLKON, 1, is_nrt);

    /* request halt */
    MDSS_VBIF_WRITE(mdata, MMSS_VBIF_AXI_HALT_CTRL0, 1, is_nrt);

    rc = readl_poll_timeout(base + MMSS_VBIF_AXI_HALT_CTRL1,
        status, (status & halt_ack_mask),
        1000, AXI_HALT_TIMEOUT_US);
    if (rc == -ETIMEDOUT) {
        pr_err("VBIF axi is not halting. TIMEDOUT.\n");
        goto vbif_done;
    }

    pr_debug("VBIF axi is halted\n");

vbif_done:
    return rc;
}

/**
 * mdss_mdp_vbif_axi_halt() - Halt MDSS AXI ports
 * @mdata: pointer to the global mdss data structure.
 *
 * This function can be called during deep suspend, display off or for
 * debugging purposes. On success it should be assumed that AXI ports connected
 * to RT VBIF are in idle state and would not fetch any more data.
 */
static void mdss_mdp_vbif_axi_halt(struct mdss_data_type *mdata)
{
    __mdss_mdp_reg_access_clk_enable(mdata, true);

    /* real time ports */
    __mdss_mdp_vbif_halt(mdata, false);
    /* non-real time ports */
    __mdss_mdp_vbif_halt(mdata, true);

    __mdss_mdp_reg_access_clk_enable(mdata, false);
}

/*
 * Reference-counted IOMMU attach/detach. The first enable attaches the
 * SMMU (unless splash handoff is still pending), the last disable
 * detaches it. Returns the new ref count on success, negative errno on
 * attach failure.
 */
int mdss_iommu_ctrl(int enable)
{
    struct mdss_data_type *mdata = mdss_mdp_get_mdata();
    int rc = 0;

    mutex_lock(&mdp_iommu_ref_cnt_lock);
    pr_debug("%pS: enable:%d ref_cnt:%d attach:%d hoff:%d\n",
        __builtin_return_address(0), enable, mdata->iommu_ref_cnt,
        mdata->iommu_attached, mdata->handoff_pending);

    if (enable) {
        /*
         * delay iommu attach until continous splash screen has
         * finished handoff, as it may still be working with phys addr
         */
        if (!mdata->iommu_attached && !mdata->handoff_pending) {
            mdss_bus_rt_bw_vote(true);
            rc = mdss_smmu_attach(mdata);
        }
        mdata->iommu_ref_cnt++;
    } else {
        if (mdata->iommu_ref_cnt) {
            mdata->iommu_ref_cnt--;
            if (mdata->iommu_ref_cnt == 0) {
                rc = mdss_smmu_detach(mdata);
                mdss_bus_rt_bw_vote(false);
            }
        } else {
            pr_err("unbalanced iommu ref\n");
        }
    }
    mutex_unlock(&mdp_iommu_ref_cnt_lock);

    if (IS_ERR_VALUE(rc))
        return rc;
    else
        return mdata->iommu_ref_cnt;
}

/* Configure the MDP core clock's parent for idle power collapse: retain
 * memory, power off periphery. */
static void mdss_mdp_memory_retention_enter(void)
{
    struct clk *mdss_mdp_clk = NULL;
    struct clk *mdp_vote_clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);

    if (mdp_vote_clk) {
        mdss_mdp_clk = clk_get_parent(mdp_vote_clk);
        if (mdss_mdp_clk) {
            clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_MEM);
            clk_set_flags(mdss_mdp_clk, CLKFLAG_PERIPH_OFF_SET);
            clk_set_flags(mdss_mdp_clk, CLKFLAG_NORETAIN_PERIPH);
        }
    }
}

/* Undo mdss_mdp_memory_retention_enter(): retain memory and periphery,
 * clear the periphery-off request. */
static void mdss_mdp_memory_retention_exit(void)
{
    struct clk *mdss_mdp_clk = NULL;
    struct clk *mdp_vote_clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);

    if (mdp_vote_clk) {
        mdss_mdp_clk = clk_get_parent(mdp_vote_clk);
        if (mdss_mdp_clk) {
            clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_MEM);
            clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_PERIPH);
            clk_set_flags(mdss_mdp_clk, CLKFLAG_PERIPH_OFF_CLEAR);
        }
    }
}

/**
 * mdss_mdp_idle_pc_restore() -
Restore MDSS settings when exiting idle pc
 *
 * MDSS GDSC can be voted off during idle-screen usecase for MIPI DSI command
 * mode displays, referred to as MDSS idle power collapse. Upon subsequent
 * frame update, MDSS GDSC needs to turned back on and hw state needs to be
 * restored.
 */
static int mdss_mdp_idle_pc_restore(void)
{
    struct mdss_data_type *mdata = mdss_mdp_get_mdata();
    int rc = 0;

    mutex_lock(&mdp_fs_idle_pc_lock);
    if (!mdata->idle_pc) {
        pr_debug("no idle pc, no need to restore\n");
        goto end;
    }

    pr_debug("called from %pS\n", __builtin_return_address(0));
    rc = mdss_iommu_ctrl(1);
    if (IS_ERR_VALUE(rc)) {
        pr_err("mdss iommu attach failed rc=%d\n", rc);
        goto end;
    }
    mdss_hw_init(mdata);
    mdss_iommu_ctrl(0);

    /**
     * sleep 10 microseconds to make sure AD auto-reinitialization
     * is done
     */
    udelay(10);
    mdss_mdp_memory_retention_exit();

    mdss_mdp_ctl_restore(true);
    mdata->idle_pc = false;

end:
    mutex_unlock(&mdp_fs_idle_pc_lock);
    return rc;
}

/**
 * mdss_bus_bandwidth_ctrl() -- place bus bandwidth request
 * @enable: value of enable or disable
 *
 * Function place bus bandwidth request to allocate saved bandwidth
 * if enabled or free bus bandwidth allocation if disabled.
 * Bus bandwidth is required by mdp. For dsi, it only requires to send
 * dcs command. It returns error if bandwidth request fails.
*/ void mdss_bus_bandwidth_ctrl(int enable) { struct mdss_data_type *mdata = mdss_mdp_get_mdata(); int changed = 0; mutex_lock(&mdata->bus_lock); if (enable) { if (mdata->bus_ref_cnt == 0) changed++; mdata->bus_ref_cnt++; } else { if (mdata->bus_ref_cnt) { mdata->bus_ref_cnt--; if (mdata->bus_ref_cnt == 0) changed++; } else { pr_err("Can not be turned off\n"); } } pr_debug("%pS: task:%s bw_cnt=%d changed=%d enable=%d\n", __builtin_return_address(0), current->group_leader->comm, mdata->bus_ref_cnt, changed, enable); if (changed) { MDSS_XLOG(mdata->bus_ref_cnt, enable); if (!enable) { if (!mdata->handoff_pending) { msm_bus_scale_client_update_request( mdata->bus_hdl, 0); mdata->ao_bw_uc_idx = 0; } pm_runtime_mark_last_busy(&mdata->pdev->dev); pm_runtime_put_autosuspend(&mdata->pdev->dev); } else { pm_runtime_get_sync(&mdata->pdev->dev); msm_bus_scale_client_update_request( mdata->bus_hdl, mdata->curr_bw_uc_idx); } } mutex_unlock(&mdata->bus_lock); } EXPORT_SYMBOL(mdss_bus_bandwidth_ctrl); void mdss_mdp_clk_ctrl(int enable) { struct mdss_data_type *mdata = mdss_mdp_get_mdata(); static int mdp_clk_cnt; unsigned long flags; int changed = 0; int rc = 0; mutex_lock(&mdp_clk_lock); if (enable) { if (mdp_clk_cnt == 0) changed++; mdp_clk_cnt++; } else { if (mdp_clk_cnt) { mdp_clk_cnt--; if (mdp_clk_cnt == 0) changed++; } else { pr_err("Can not be turned off\n"); } } if (changed) MDSS_XLOG(mdp_clk_cnt, enable, current->pid); pr_debug("%pS: task:%s clk_cnt=%d changed=%d enable=%d\n", __builtin_return_address(0), current->group_leader->comm, mdata->bus_ref_cnt, changed, enable); if (changed) { if (enable) { pm_runtime_get_sync(&mdata->pdev->dev); mdss_update_reg_bus_vote(mdata->reg_bus_clt, VOTE_INDEX_LOW); rc = mdss_iommu_ctrl(1); if (IS_ERR_VALUE(rc)) pr_err("IOMMU attach failed\n"); /* Active+Sleep */ msm_bus_scale_client_update_context(mdata->bus_hdl, false, mdata->curr_bw_uc_idx); } spin_lock_irqsave(&mdp_lock, flags); mdata->clk_ena = enable; 
spin_unlock_irqrestore(&mdp_lock, flags);

        mdss_mdp_clk_update(MDSS_CLK_AHB, enable);
        mdss_mdp_clk_update(MDSS_CLK_AXI, enable);
        mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, enable);
        mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, enable);
        if (mdata->vsync_ena)
            mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);

        if (!enable) {
            /* release iommu control */
            mdss_iommu_ctrl(0);

            /* Active-Only */
            msm_bus_scale_client_update_context(mdata->bus_hdl,
                true, mdata->ao_bw_uc_idx);

            mdss_update_reg_bus_vote(mdata->reg_bus_clt,
                    VOTE_INDEX_DISABLE);

            pm_runtime_mark_last_busy(&mdata->pdev->dev);
            pm_runtime_put_autosuspend(&mdata->pdev->dev);
        }
    }

    /* exiting idle power collapse requires hw state restore */
    if (enable && changed)
        mdss_mdp_idle_pc_restore();

    mutex_unlock(&mdp_clk_lock);
}

/* Look up @clk_name via devm_clk_get and store it at mdp_clk[clk_idx]. */
static inline int mdss_mdp_irq_clk_register(struct mdss_data_type *mdata,
                        char *clk_name, int clk_idx)
{
    struct clk *tmp;

    if (clk_idx >= MDSS_MAX_CLK) {
        pr_err("invalid clk index %d\n", clk_idx);
        return -EINVAL;
    }

    tmp = devm_clk_get(&mdata->pdev->dev, clk_name);
    if (IS_ERR(tmp)) {
        pr_err("unable to get clk: %s\n", clk_name);
        return PTR_ERR(tmp);
    }

    mdata->mdp_clk[clk_idx] = tmp;
    return 0;
}

/* TZ device id for the MDSS block, passed to scm_restore_sec_cfg() */
#define SEC_DEVICE_MDSS		1

/* Re-apply the secure (TZ) configuration after the GDSC comes back up,
 * unless this target declares it unnecessary. */
static void __mdss_restore_sec_cfg(struct mdss_data_type *mdata)
{
    int ret, scm_ret = 0;

    if (test_bit(MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED,
                mdata->mdss_caps_map))
        return;

    pr_debug("restoring mdss secure config\n");

    __mdss_mdp_reg_access_clk_enable(mdata, true);

    ret = scm_restore_sec_cfg(SEC_DEVICE_MDSS, 0, &scm_ret);
    if (ret || scm_ret)
        pr_warn("scm_restore_sec_cfg failed %d %d\n",
                ret, scm_ret);

    __mdss_mdp_reg_access_clk_enable(mdata, false);
}

/*
 * Regulator notifier for the MDSS GDSC: restore secure config on enable,
 * halt VBIF AXI traffic just before the rail is disabled.
 */
static int mdss_mdp_gdsc_notifier_call(struct notifier_block *self,
        unsigned long event, void *data)
{
    struct mdss_data_type *mdata;

    mdata = container_of(self, struct mdss_data_type, gdsc_cb);

    if (event & REGULATOR_EVENT_ENABLE) {
        __mdss_restore_sec_cfg(mdata);
    } else if (event & REGULATOR_EVENT_PRE_DISABLE) {
        pr_debug("mdss gdsc is getting disabled\n");
        /* halt the vbif transactions */
        mdss_mdp_vbif_axi_halt(mdata);
    }

    return
NOTIFY_OK;
}

/*
 * One-time setup of the MDP IRQ, regulators (gdsc/venus/cx), register-bus
 * client and all MDSS clocks. Called from probe; returns 0 on success.
 */
static int mdss_mdp_irq_clk_setup(struct mdss_data_type *mdata)
{
    int ret;

    ret = of_property_read_u32(mdata->pdev->dev.of_node,
            "qcom,max-clk-rate", &mdata->max_mdp_clk_rate);
    if (ret) {
        pr_err("failed to get max mdp clock rate\n");
        return ret;
    }

    pr_debug("max mdp clk rate=%d\n", mdata->max_mdp_clk_rate);

    ret = devm_request_irq(&mdata->pdev->dev,
            mdss_mdp_hw.irq_info->irq, mdss_irq_handler,
            IRQF_DISABLED, "MDSS", mdata);
    if (ret) {
        pr_err("mdp request_irq() failed!\n");
        return ret;
    }
    /* stays disabled until the first clock enable */
    disable_irq(mdss_mdp_hw.irq_info->irq);

    mdata->fs = devm_regulator_get(&mdata->pdev->dev, "vdd");
    if (IS_ERR_OR_NULL(mdata->fs)) {
        mdata->fs = NULL;
        pr_err("unable to get gdsc regulator\n");
        return -EINVAL;
    }

    /* optional on most targets */
    mdata->venus = devm_regulator_get_optional(&mdata->pdev->dev,
        "gdsc-venus");
    if (IS_ERR_OR_NULL(mdata->venus)) {
        mdata->venus = NULL;
        pr_debug("unable to get venus gdsc regulator\n");
    }

    mdata->fs_ena = false;

    mdata->gdsc_cb.notifier_call = mdss_mdp_gdsc_notifier_call;
    mdata->gdsc_cb.priority = 5;
    if (regulator_register_notifier(mdata->fs, &(mdata->gdsc_cb)))
        pr_warn("GDSC notification registration failed!\n");
    else
        mdata->regulator_notif_register = true;

    mdata->vdd_cx = devm_regulator_get_optional(&mdata->pdev->dev,
                "vdd-cx");
    if (IS_ERR_OR_NULL(mdata->vdd_cx)) {
        pr_debug("unable to get CX reg. rc=%d\n",
                    PTR_RET(mdata->vdd_cx));
        mdata->vdd_cx = NULL;
    }

    mdata->reg_bus_clt = mdss_reg_bus_vote_client_create("mdp\0");
    if (IS_ERR(mdata->reg_bus_clt)) {
        pr_err("bus client register failed\n");
        return PTR_ERR(mdata->reg_bus_clt);
    }

    if (mdss_mdp_irq_clk_register(mdata, "bus_clk", MDSS_CLK_AXI) ||
        mdss_mdp_irq_clk_register(mdata, "iface_clk", MDSS_CLK_AHB) ||
        mdss_mdp_irq_clk_register(mdata, "core_clk",
                      MDSS_CLK_MDP_CORE))
        return -EINVAL;

    /* lut_clk is not present on all MDSS revisions */
    mdss_mdp_irq_clk_register(mdata, "lut_clk", MDSS_CLK_MDP_LUT);

    /* vsync_clk is optional for non-smart panels */
    mdss_mdp_irq_clk_register(mdata, "vsync_clk", MDSS_CLK_MDP_VSYNC);

    /* Setting the default clock rate to the max supported.*/
    mdss_mdp_set_clk_rate(mdata->max_mdp_clk_rate);
    pr_debug("mdp clk rate=%ld\n",
        mdss_mdp_get_clk_rate(MDSS_CLK_MDP_CORE, false));

    return 0;
}

/* debugfs hook: translate on/off into the MDP block power calls */
static void mdss_debug_enable_clock(int on)
{
    if (on)
        mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
    else
        mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
}

/* Register debugfs entries and register-dump ranges for mdp/vbif. */
static int mdss_mdp_debug_init(struct platform_device *pdev,
    struct mdss_data_type *mdata)
{
    int rc;
    struct mdss_debug_base *dbg_blk;

    mdata->debug_inf.debug_enable_clock = mdss_debug_enable_clock;

    rc = mdss_debugfs_init(mdata);
    if (rc)
        return rc;

    rc = mdss_mdp_debugfs_init(mdata);
    if (rc) {
        mdss_debugfs_remove(mdata);
        return rc;
    }

    mdss_debug_register_io("mdp", &mdata->mdss_io, &dbg_blk);
    mdss_debug_register_dump_range(pdev, dbg_blk, "qcom,regs-dump-mdp",
        "qcom,regs-dump-names-mdp", "qcom,regs-dump-xin-id-mdp");

    if (mdata->vbif_io.base)
        mdss_debug_register_io("vbif", &mdata->vbif_io, NULL);
    if (mdata->vbif_nrt_io.base)
        mdss_debug_register_io("vbif_nrt", &mdata->vbif_nrt_io, NULL);

    return 0;
}

/* Read the hw efuse/props word from a fixed physical address.
 * NOTE(review): hard-coded 0xFC4B8114 — presumably target-specific; 0 is
 * returned if the ioremap fails. */
static u32 mdss_get_props(void)
{
    u32 props = 0;
    void __iomem *props_base = ioremap(0xFC4B8114, 4);

    if (props_base) {
        props = readl_relaxed(props_base);
        iounmap(props_base);
    }
    return props;
}

/* Populate the default prefill tuning factors for this target. */
void mdss_mdp_init_default_prefill_factors(struct mdss_data_type *mdata)
{
mdata->prefill_data.prefill_factors.fmt_mt_nv12_factor = 8;
    mdata->prefill_data.prefill_factors.fmt_mt_factor = 4;
    mdata->prefill_data.prefill_factors.fmt_linear_factor = 1;
    mdata->prefill_data.prefill_factors.scale_factor = 1;
    mdata->prefill_data.prefill_factors.xtra_ff_factor = 2;

    if (test_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map)) {
        mdata->prefill_data.ts_threshold = 25;
        mdata->prefill_data.ts_end = 8;
        mdata->prefill_data.ts_rate.numer = 1;
        mdata->prefill_data.ts_rate.denom = 4;
        mdata->prefill_data.ts_overhead = 2;
    }
}

/*
 * Per-hardware-revision capability/quirk table. Several cases fall
 * through deliberately: newer sub-revisions inherit the settings of the
 * case below them.
 */
static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
{
    mdata->per_pipe_ib_factor.numer = 0;
    mdata->per_pipe_ib_factor.denom = 0;
    mdata->apply_post_scale_bytes = true;
    mdata->hflip_buffer_reused = true;
    /* prevent disable of prefill calculations */
    mdata->min_prefill_lines = 0xffff;
    /* clock gating feature is enabled by default */
    mdata->enable_gate = true;
    mdata->pixel_ram_size = 0;
    mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL_FLAT;

    mdss_mdp_hw_rev_debug_caps_init(mdata);

    switch (mdata->mdp_rev) {
    case MDSS_MDP_HW_REV_107:
        mdss_set_quirk(mdata, MDSS_QUIRK_ROTCDP);
        /* fall through to 107_1 */
    case MDSS_MDP_HW_REV_107_1:
        mdss_mdp_format_flag_removal(invalid_mdp107_wb_output_fmts,
            ARRAY_SIZE(invalid_mdp107_wb_output_fmts),
            VALID_MDP_WB_INTF_FORMAT);
        /* fall through to 107_2 */
    case MDSS_MDP_HW_REV_107_2:
        mdata->max_target_zorder = 7; /* excluding base layer */
        mdata->max_cursor_size = 128;
        mdata->per_pipe_ib_factor.numer = 8;
        mdata->per_pipe_ib_factor.denom = 5;
        mdata->apply_post_scale_bytes = false;
        mdata->hflip_buffer_reused = false;
        mdata->min_prefill_lines = 21;
        mdata->has_ubwc = true;
        mdata->pixel_ram_size = 50 * 1024;
        set_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map);
        set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
        set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map);
        set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
        set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
        set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
        set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
set_bit(MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED,
            mdata->mdss_caps_map);
        set_bit(MDSS_CAPS_3D_MUX_UNDERRUN_RECOVERY_SUPPORTED,
            mdata->mdss_caps_map);
        mdss_mdp_init_default_prefill_factors(mdata);
        mdss_set_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU);
        mdss_set_quirk(mdata, MDSS_QUIRK_DSC_2SLICE_PU_THRPUT);
        mdss_set_quirk(mdata, MDSS_QUIRK_HDR_SUPPORT_ENABLED);
        break;
    case MDSS_MDP_HW_REV_105:
    case MDSS_MDP_HW_REV_109:
        mdss_set_quirk(mdata, MDSS_QUIRK_BWCPANIC);
        mdata->max_target_zorder = 7; /* excluding base layer */
        mdata->max_cursor_size = 128;
        set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
        set_bit(MDSS_CAPS_3D_MUX_UNDERRUN_RECOVERY_SUPPORTED,
            mdata->mdss_caps_map);
        break;
    case MDSS_MDP_HW_REV_110:
        mdss_set_quirk(mdata, MDSS_QUIRK_BWCPANIC);
        mdata->max_target_zorder = 4; /* excluding base layer */
        mdata->max_cursor_size = 128;
        set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
        mdata->min_prefill_lines = 12;
        mdata->props = mdss_get_props();
        break;
    case MDSS_MDP_HW_REV_112:
        mdata->max_target_zorder = 4; /* excluding base layer */
        mdata->max_cursor_size = 64;
        mdata->min_prefill_lines = 12;
        set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
        break;
    case MDSS_MDP_HW_REV_114:
        /* disable ECG for 28nm PHY platform */
        mdata->enable_gate = false;
        /* fall through to 116 */
    case MDSS_MDP_HW_REV_116:
        mdata->max_target_zorder = 4; /* excluding base layer */
        mdata->max_cursor_size = 128;
        mdata->min_prefill_lines = 14;
        mdata->has_ubwc = true;
        mdata->pixel_ram_size = 40 * 1024;
        mdata->apply_post_scale_bytes = false;
        mdata->hflip_buffer_reused = false;
        mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL;
        set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
        set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
        set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
        set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
        mdss_mdp_init_default_prefill_factors(mdata);
        set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
        mdss_set_quirk(mdata, MDSS_QUIRK_DMA_BI_DIR);
        mdss_set_quirk(mdata, MDSS_QUIRK_NEED_SECURE_MAP);
        break;
    case
MDSS_MDP_HW_REV_115:
        mdata->max_target_zorder = 4; /* excluding base layer */
        mdata->max_cursor_size = 128;
        mdata->min_prefill_lines = 14;
        mdata->has_ubwc = false;
        mdata->pixel_ram_size = 16 * 1024;
        mdata->apply_post_scale_bytes = false;
        mdata->hflip_buffer_reused = false;
        /* disable ECG for 28nm PHY platform */
        mdata->enable_gate = false;
        mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL;
        set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
        set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
        set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
        set_bit(MDSS_CAPS_MIXER_1_FOR_WB, mdata->mdss_caps_map);
        mdss_mdp_init_default_prefill_factors(mdata);
        set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
        mdss_set_quirk(mdata, MDSS_QUIRK_DMA_BI_DIR);
        mdss_set_quirk(mdata, MDSS_QUIRK_NEED_SECURE_MAP);
        break;
    case MDSS_MDP_HW_REV_300:
    case MDSS_MDP_HW_REV_301:
        mdata->max_target_zorder = 7; /* excluding base layer */
        mdata->max_cursor_size = 384;
        mdata->per_pipe_ib_factor.numer = 8;
        mdata->per_pipe_ib_factor.denom = 5;
        mdata->apply_post_scale_bytes = false;
        mdata->hflip_buffer_reused = false;
        mdata->min_prefill_lines = 25;
        mdata->has_ubwc = true;
        mdata->pixel_ram_size = 50 * 1024;
        /* DMA pipes support two rectangles on this hw */
        mdata->rects_per_sspp[MDSS_MDP_PIPE_TYPE_DMA] = 2;
        set_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map);
        set_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map);
        set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
        set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map);
        set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
        set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
        set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
        set_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map);
        set_bit(MDSS_QOS_IB_NOCR, mdata->mdss_qos_map);
        set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
        set_bit(MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED,
            mdata->mdss_caps_map);
        set_bit(MDSS_CAPS_3D_MUX_UNDERRUN_RECOVERY_SUPPORTED,
            mdata->mdss_caps_map);
        set_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map);
        set_bit(MDSS_CAPS_DEST_SCALER, mdata->mdss_caps_map);
mdss_mdp_init_default_prefill_factors(mdata);
        mdss_set_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU);
        mdss_set_quirk(mdata, MDSS_QUIRK_DSC_2SLICE_PU_THRPUT);
        mdss_set_quirk(mdata, MDSS_QUIRK_SRC_SPLIT_ALWAYS);
        mdata->has_wb_ubwc = true;
        set_bit(MDSS_CAPS_10_BIT_SUPPORTED, mdata->mdss_caps_map);
        break;
    default:
        mdata->max_target_zorder = 4; /* excluding base layer */
        mdata->max_cursor_size = 64;
    }

    if (mdata->mdp_rev < MDSS_MDP_HW_REV_103)
        mdss_set_quirk(mdata, MDSS_QUIRK_DOWNSCALE_HANG);

    if (mdata->mdp_rev < MDSS_MDP_HW_REV_102 ||
            mdata->mdp_rev == MDSS_MDP_HW_REV_200)
        mdss_set_quirk(mdata, MDSS_QUIRK_FMT_PACK_PATTERN);
}

/* Read the hw revision register once and derive the capability map;
 * idempotent after the first call. */
static void mdss_hw_rev_init(struct mdss_data_type *mdata)
{
    if (mdata->mdp_rev)
        return;

    mdata->mdp_rev = MDSS_REG_READ(mdata, MDSS_REG_HW_VERSION);
    mdss_mdp_hw_rev_caps_init(mdata);
}

/**
 * mdss_hw_init() - Initialize MDSS target specific register settings
 * @mdata: MDP private data
 *
 * Initialize basic MDSS hardware settings based on the board specific
 * parameters. This function does not explicitly turn on the MDP clocks
 * and so it must be called with the MDP clocks already enabled.
 */
void mdss_hw_init(struct mdss_data_type *mdata)
{
    struct mdss_mdp_pipe *vig;

    mdss_hw_rev_init(mdata);

    /* Disable hw underrun recovery only for older mdp reversions. */
    if (mdata->mdp_rev < MDSS_MDP_HW_REV_105)
        writel_relaxed(0x0, mdata->mdp_base +
            MDSS_MDP_REG_VIDEO_INTF_UNDERFLOW_CTL);

    if (mdata->hw_settings) {
        struct mdss_hw_settings *hws = mdata->hw_settings;

        while (hws->reg) {
            writel_relaxed(hws->val, hws->reg);
            hws++;
        }
    }

    /* NOTE(review): 'vig' is assigned but not otherwise used here */
    vig = mdata->vig_pipes;

    mdata->nmax_concurrent_ad_hw =
        (mdata->mdp_rev < MDSS_MDP_HW_REV_103) ?
1 : 2;

    pr_debug("MDP hw init done\n");
}

/*
 * One-time resource init: interrupt mask defaults, clocks/IRQ, histogram
 * interrupt state, and the ION client. Returns 0 on success.
 * NOTE(review): declared u32 but returns negative errnos (-EPERM and the
 * mdss_mdp_irq_clk_setup() result) — callers presumably treat it as int.
 */
static u32 mdss_mdp_res_init(struct mdss_data_type *mdata)
{
    u32 rc = 0;

    if (mdata->res_init) {
        pr_err("mdss resources already initialized\n");
        return -EPERM;
    }

    mdata->res_init = true;
    mdata->clk_ena = false;
    mdss_mdp_hw.irq_info->irq_mask = MDSS_MDP_DEFAULT_INTR_MASK;
    mdss_mdp_hw.irq_info->irq_ena = false;

    rc = mdss_mdp_irq_clk_setup(mdata);
    if (rc)
        return rc;

    mdata->hist_intr.req = 0;
    mdata->hist_intr.curr = 0;
    mdata->hist_intr.state = 0;
    spin_lock_init(&mdata->hist_intr.lock);

    mdata->iclient = msm_ion_client_create(mdata->pdev->name);
    if (IS_ERR_OR_NULL(mdata->iclient)) {
        pr_err("msm_ion_client_create() return error (%pK)\n",
                mdata->iclient);
        mdata->iclient = NULL;
    }

    return rc;
}

/*
 * Parse the optional "qcom,mdss-scaler-offsets" DT node into
 * mdata->scaler_off (QSEED3 and, if present, destination scalers).
 * Returns 0 when the node is absent; -EFAULT if already parsed.
 */
static u32 mdss_mdp_scaler_init(struct mdss_data_type *mdata,
                struct device *dev)
{
    int ret;
    struct device_node *node;
    u32 prop_val;

    if (!dev)
        return -EPERM;

    node = of_get_child_by_name(dev->of_node, "qcom,mdss-scaler-offsets");
    if (!node)
        return 0;

    if (mdata->scaler_off)
        return -EFAULT;

    mdata->scaler_off = devm_kzalloc(&mdata->pdev->dev,
            sizeof(*mdata->scaler_off), GFP_KERNEL);
    if (!mdata->scaler_off)
        return -ENOMEM;

    ret = of_property_read_u32(node,
            "qcom,mdss-vig-scaler-off",
            &prop_val);
    if (ret) {
        pr_err("read property %s failed ret %d\n",
                "qcom,mdss-vig-scaler-off", ret);
        return -EINVAL;
    }
    mdata->scaler_off->vig_scaler_off = prop_val;

    ret = of_property_read_u32(node,
            "qcom,mdss-vig-scaler-lut-off",
            &prop_val);
    if (ret) {
        pr_err("read property %s failed ret %d\n",
                "qcom,mdss-vig-scaler-lut-off", ret);
        return -EINVAL;
    }
    mdata->scaler_off->vig_scaler_lut_off = prop_val;

    mdata->scaler_off->has_dest_scaler =
        of_property_read_bool(mdata->pdev->dev.of_node,
                "qcom,mdss-has-dest-scaler");
    if (mdata->scaler_off->has_dest_scaler) {
        ret = of_property_read_u32(node,
                "qcom,mdss-dest-block-off",
                &prop_val);
        if (ret) {
            pr_err("read property %s failed ret %d\n",
                    "qcom,mdss-dest-block-off", ret);
            return -EINVAL;
        }
        mdata->scaler_off->dest_base =
mdata->mdss_io.base + prop_val;
        mdata->scaler_off->ndest_scalers =
            mdss_mdp_parse_dt_prop_len(mdata->pdev,
                    "qcom,mdss-dest-scalers-off");
        mdata->scaler_off->dest_scaler_off =
            devm_kzalloc(&mdata->pdev->dev,
                    sizeof(u32) *
                    mdata->scaler_off->ndest_scalers,
                    GFP_KERNEL);
        /* NOTE(review): kfree of a NULL pointer here is a no-op;
         * devm allocation failed, nothing to free */
        if (!mdata->scaler_off->dest_scaler_off) {
            kfree(mdata->scaler_off->dest_scaler_off);
            return -ENOMEM;
        }
        ret = mdss_mdp_parse_dt_handler(mdata->pdev,
                "qcom,mdss-dest-scaler-off",
                mdata->scaler_off->dest_scaler_off,
                mdata->scaler_off->ndest_scalers);
        if (ret)
            return -EINVAL;
        mdata->scaler_off->dest_scaler_lut_off =
            devm_kzalloc(&mdata->pdev->dev,
                    sizeof(u32) *
                    mdata->scaler_off->ndest_scalers,
                    GFP_KERNEL);
        /* NOTE(review): same dead kfree pattern as above */
        if (!mdata->scaler_off->dest_scaler_lut_off) {
            kfree(mdata->scaler_off->dest_scaler_lut_off);
            return -ENOMEM;
        }
        ret = mdss_mdp_parse_dt_handler(mdata->pdev,
                "qcom,mdss-dest-scalers-lut-off",
                mdata->scaler_off->dest_scaler_lut_off,
                mdata->scaler_off->ndest_scalers);
        if (ret)
            return -EINVAL;
    }

    return 0;
}

/**
 * mdss_mdp_footswitch_ctrl_splash() - clocks handoff for cont. splash screen
 * @on: 1 to start handoff, 0 to complete the handoff after first frame update
 *
 * MDSS Clocks and GDSC are already on during continous splash screen, but
 * increasing ref count will keep clocks from being turned off until handoff
 * has properly happened after frame update.
 */
void mdss_mdp_footswitch_ctrl_splash(int on)
{
    int ret;
    struct mdss_data_type *mdata = mdss_mdp_get_mdata();

    if (mdata != NULL) {
        if (on) {
            mdata->handoff_pending = true;
            pr_debug("Enable MDP FS for splash.\n");
            if (mdata->venus) {
                ret = regulator_enable(mdata->venus);
                if (ret)
                    pr_err("venus failed to enable\n");
            }
            ret = regulator_enable(mdata->fs);
            if (ret)
                pr_err("Footswitch failed to enable\n");
            mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
            mdss_bus_bandwidth_ctrl(true);
        } else {
            pr_debug("Disable MDP FS for splash.\n");
            mdss_bus_bandwidth_ctrl(false);
            mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
            regulator_disable(mdata->fs);
            if (mdata->venus)
                regulator_disable(mdata->venus);
            mdata->handoff_pending = false;
        }
    } else {
        pr_warn("mdss mdata not initialized\n");
    }
}

/* Map a panel interface name ("dsi", "edp", ...) to its type id, or
 * MDSS_PANEL_INTF_INVALID when unknown. */
static int mdss_mdp_get_pan_intf(const char *pan_intf)
{
    int i, rc = MDSS_PANEL_INTF_INVALID;

    if (!pan_intf)
        return rc;

    for (i = 0; i < ARRAY_SIZE(pan_types); i++) {
        if (!strcmp(pan_intf, pan_types[i].name)) {
            rc = pan_types[i].type;
            break;
        }
    }
    return rc;
}

/*
 * Parse the boot-argument panel string mdss_mdp_panel, expected format
 * "<lk_cfg>:<intf>:<panel name>" (e.g. "1:dsi:..."), into @pan_cfg.
 * Returns -EINVAL when the string is absent or malformed.
 */
static int mdss_mdp_get_pan_cfg(struct mdss_panel_cfg *pan_cfg)
{
    char *t = NULL;
    char pan_intf_str[MDSS_MAX_PANEL_LEN];
    int rc, i, panel_len;
    char pan_name[MDSS_MAX_PANEL_LEN] = {'\0'};

    if (!pan_cfg)
        return -EINVAL;

    if (mdss_mdp_panel[0] == '0') {
        pr_debug("panel name is not set\n");
        pan_cfg->lk_cfg = false;
        pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
        return -EINVAL;
    } else if (mdss_mdp_panel[0] == '1') {
        pan_cfg->lk_cfg = true;
    } else {
        /* read from dt */
        pan_cfg->lk_cfg = true;
        pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
        return -EINVAL;
    }

    /* skip lk cfg and delimiter; ex: "1:" */
    strlcpy(pan_name, &mdss_mdp_panel[2], MDSS_MAX_PANEL_LEN);
    t = strnstr(pan_name, ":", MDSS_MAX_PANEL_LEN);
    if (!t) {
        pr_err("pan_name=[%s] invalid\n", pan_name);
        pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
        return -EINVAL;
    }

    /* copy at most 4 chars of the interface token before ':' */
    for (i = 0; ((pan_name + i) < t) && (i < 4); i++)
        pan_intf_str[i] = *(pan_name + i);
    pan_intf_str[i] = 0;
    pr_debug("%d panel intf %s\n", __LINE__,
pan_intf_str);
    /* point to the start of panel name */
    t = t + 1;
    strlcpy(&pan_cfg->arg_cfg[0], t, sizeof(pan_cfg->arg_cfg));
    pr_debug("%d: t=[%s] panel name=[%s]\n", __LINE__,
        t, pan_cfg->arg_cfg);

    panel_len = strlen(pan_cfg->arg_cfg);
    if (!panel_len) {
        pr_err("Panel name is invalid\n");
        pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
        return -EINVAL;
    }

    rc = mdss_mdp_get_pan_intf(pan_intf_str);
    pan_cfg->pan_intf = (rc < 0) ?  MDSS_PANEL_INTF_INVALID : rc;
    return 0;
}

/* Read the preferred primary interface from DT ("qcom,mdss-pref-prim-intf")
 * into mdata->pan_cfg.pan_intf. */
static int mdss_mdp_parse_dt_pan_intf(struct platform_device *pdev)
{
    int rc;
    struct mdss_data_type *mdata = platform_get_drvdata(pdev);
    const char *prim_intf = NULL;

    rc = of_property_read_string(pdev->dev.of_node,
                "qcom,mdss-pref-prim-intf", &prim_intf);
    if (rc)
        return -ENODEV;

    rc = mdss_mdp_get_pan_intf(prim_intf);
    if (rc < 0) {
        mdata->pan_cfg.pan_intf = MDSS_PANEL_INTF_INVALID;
    } else {
        mdata->pan_cfg.pan_intf = rc;
        rc = 0;
    }
    return rc;
}

/* Resolve panel configuration: command line first, then DT fallback. */
static int mdss_mdp_get_cmdline_config(struct platform_device *pdev)
{
    int rc, len = 0;
    int *intf_type;
    char *panel_name;
    struct mdss_panel_cfg *pan_cfg;
    struct mdss_data_type *mdata = platform_get_drvdata(pdev);

    mdata->pan_cfg.arg_cfg[MDSS_MAX_PANEL_LEN] = 0;
    pan_cfg = &mdata->pan_cfg;
    panel_name = &pan_cfg->arg_cfg[0];
    intf_type = &pan_cfg->pan_intf;

    /* reads from dt by default */
    pan_cfg->lk_cfg = true;

    len = strlen(mdss_mdp_panel);

    if (len > 0) {
        rc = mdss_mdp_get_pan_cfg(pan_cfg);
        if (!rc) {
            pan_cfg->init_done = true;
            return rc;
        }
    }

    rc = mdss_mdp_parse_dt_pan_intf(pdev);
    /* if pref pan intf is not present */
    if (rc)
        pr_warn("unable to parse device tree for pan intf\n");

    pan_cfg->init_done = true;

    return 0;
}

/* Append one sysfs line per pipe (and its supported formats) to @buf.
 * @pipe advances by max_rects so multirect siblings are reported once. */
static void __update_sspp_info(struct mdss_mdp_pipe *pipe,
    int pipe_cnt, char *type, char *buf, int *cnt)
{
    int i;
    int j;
    size_t len = PAGE_SIZE;
    int num_bytes = BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1);

#define SPRINT(fmt, ...) \
    (*cnt += scnprintf(buf + *cnt, len - *cnt, fmt, ##__VA_ARGS__))

    for (i = 0; i < pipe_cnt && pipe; i++) {
        SPRINT("pipe_num:%d pipe_type:%s pipe_ndx:%d rects:%d pipe_is_handoff:%d display_id:%d ",
            pipe->num, type, pipe->ndx, pipe->multirect.max_rects,
            pipe->is_handed_off, mdss_mdp_get_display_id(pipe));
        SPRINT("fmts_supported:");
        for (j = 0; j < num_bytes; j++)
            SPRINT("%d,", pipe->supported_formats[j]);
        SPRINT("\n");
        pipe += pipe->multirect.max_rects;
    }
#undef SPRINT
}

/* Emit pipe capability lines for all pipe classes into @buf. */
static void mdss_mdp_update_sspp_info(struct mdss_data_type *mdata,
    char *buf, int *cnt)
{
    __update_sspp_info(mdata->vig_pipes, mdata->nvig_pipes,
        "vig", buf, cnt);
    __update_sspp_info(mdata->rgb_pipes, mdata->nrgb_pipes,
        "rgb", buf, cnt);
    __update_sspp_info(mdata->dma_pipes, mdata->ndma_pipes,
        "dma", buf, cnt);
    __update_sspp_info(mdata->cursor_pipes, mdata->ncursor_pipes,
        "cursor", buf, cnt);
}

/* Emit writeback/rotator format capability lines into @buf.
 * NOTE(review): rot_output_fmts intentionally reports the rotator INPUT
 * format list — presumably rotator output formats match its inputs;
 * confirm before "fixing". */
static void mdss_mdp_update_wb_info(struct mdss_data_type *mdata,
    char *buf, int *cnt)
{
#define SPRINT(fmt, ...) \
    (*cnt += scnprintf(buf + *cnt, len - *cnt, fmt, ##__VA_ARGS__))
    size_t len = PAGE_SIZE;
    int i;
    int num_bytes = BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1);

    SPRINT("rot_input_fmts=");
    for (i = 0; i < num_bytes && mdata->wb; i++)
        SPRINT("%d ", mdata->wb->supported_input_formats[i]);
    SPRINT("\nrot_output_fmts=");
    for (i = 0; i < num_bytes && mdata->wb; i++)
        SPRINT("%d ", mdata->wb->supported_input_formats[i]);
    SPRINT("\nwb_output_fmts=");
    for (i = 0; i < num_bytes && mdata->wb; i++)
        SPRINT("%d ", mdata->wb->supported_output_formats[i]);
    SPRINT("\n");
#undef SPRINT
}

/* sysfs "caps" show handler: one key=value line per capability. */
ssize_t mdss_mdp_show_capabilities(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct mdss_data_type *mdata = dev_get_drvdata(dev);
    size_t len = PAGE_SIZE;
    int cnt = 0;

#define SPRINT(fmt, ...) \
    (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))

    SPRINT("mdp_version=5\n");
    SPRINT("hw_rev=%d\n", mdata->mdp_rev);
    SPRINT("pipe_count:%d\n", mdata->nvig_pipes + mdata->nrgb_pipes +
        mdata->ndma_pipes + mdata->ncursor_pipes);
    mdss_mdp_update_sspp_info(mdata, buf, &cnt);
    mdss_mdp_update_wb_info(mdata, buf, &cnt);
    /* TODO : need to remove num pipes info */
    SPRINT("rgb_pipes=%d\n", mdata->nrgb_pipes);
    SPRINT("vig_pipes=%d\n", mdata->nvig_pipes);
    SPRINT("dma_pipes=%d\n", mdata->ndma_pipes);
    SPRINT("blending_stages=%d\n", mdata->max_target_zorder);
    SPRINT("cursor_pipes=%d\n", mdata->ncursor_pipes);
    SPRINT("max_cursor_size=%d\n", mdata->max_cursor_size);
    SPRINT("smp_count=%d\n", mdata->smp_mb_cnt);
    SPRINT("smp_size=%d\n", mdata->smp_mb_size);
    SPRINT("smp_mb_per_pipe=%d\n", mdata->smp_mb_per_pipe);
    SPRINT("max_downscale_ratio=%d\n", MAX_DOWNSCALE_RATIO);
    SPRINT("max_upscale_ratio=%d\n", MAX_UPSCALE_RATIO);

    if (mdata->nwb)
        SPRINT("wb_intf_index=%d\n", mdata->nwb - 1);

    if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map)) {
        SPRINT("fmt_mt_nv12_factor=%d\n",
            mdata->prefill_data.prefill_factors.fmt_mt_nv12_factor);
        SPRINT("fmt_mt_factor=%d\n",
            mdata->prefill_data.prefill_factors.fmt_mt_factor);
        SPRINT("fmt_linear_factor=%d\n",
            mdata->prefill_data.prefill_factors.fmt_linear_factor);
        SPRINT("scale_factor=%d\n",
            mdata->prefill_data.prefill_factors.scale_factor);
        SPRINT("xtra_ff_factor=%d\n",
            mdata->prefill_data.prefill_factors.xtra_ff_factor);
    }

    if (test_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map)) {
        SPRINT("amortizable_threshold=%d\n",
            mdata->prefill_data.ts_threshold);
        SPRINT("system_overhead_lines=%d\n",
            mdata->prefill_data.ts_overhead);
    }

    if (mdata->props)
        SPRINT("props=%d\n", mdata->props);
    if (mdata->max_bw_low)
        SPRINT("max_bandwidth_low=%u\n", mdata->max_bw_low);
    if (mdata->max_bw_high)
        SPRINT("max_bandwidth_high=%u\n", mdata->max_bw_high);
    if (mdata->max_pipe_width)
        SPRINT("max_pipe_width=%d\n", mdata->max_pipe_width);
    if (mdata->max_mixer_width)
		SPRINT("max_mixer_width=%d\n", mdata->max_mixer_width);
	if (mdata->max_bw_per_pipe)
		SPRINT("max_pipe_bw=%u\n", mdata->max_bw_per_pipe);
	if (mdata->max_mdp_clk_rate)
		SPRINT("max_mdp_clk=%u\n", mdata->max_mdp_clk_rate);
	if (mdata->clk_factor.numer)
		SPRINT("clk_fudge_factor=%u,%u\n", mdata->clk_factor.numer,
			mdata->clk_factor.denom);

	/* space-separated feature-flag list, one token per capability */
	SPRINT("features=");
	if (mdata->has_bwc)
		SPRINT(" bwc");
	if (mdata->has_ubwc)
		SPRINT(" ubwc");
	if (mdata->has_wb_ubwc)
		SPRINT(" wb_ubwc");
	if (mdata->has_decimation)
		SPRINT(" decimation");
	if (mdata->highest_bank_bit && !mdss_mdp_is_ubwc_supported(mdata))
		SPRINT(" tile_format");
	if (mdata->has_non_scalar_rgb)
		SPRINT(" non_scalar_rgb");
	if (mdata->has_src_split)
		SPRINT(" src_split");
	if (mdata->has_rot_dwnscale)
		SPRINT(" rotator_downscale");
	if (mdata->max_bw_settings_cnt)
		SPRINT(" dynamic_bw_limit");
	if (test_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map))
		SPRINT(" qseed3");
	if (test_bit(MDSS_CAPS_DEST_SCALER, mdata->mdss_caps_map))
		SPRINT(" dest_scaler");
	if (mdata->has_separate_rotator)
		SPRINT(" separate_rotator");
	if (mdss_has_quirk(mdata, MDSS_QUIRK_HDR_SUPPORT_ENABLED))
		SPRINT(" hdr");
	SPRINT("\n");
#undef SPRINT

	return cnt;
}

/*
 * sysfs "bw_mode_bitmap" show handler: report the current bandwidth
 * mode bitmap and the per-use-case MDP/pipe bandwidth limits.
 */
static ssize_t mdss_mdp_read_max_limit_bw(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mdss_data_type *mdata = dev_get_drvdata(dev);
	size_t len = PAGE_SIZE;
	u32 cnt = 0;
	int i;

	char bw_names[4][8] = {"default", "camera", "hflip", "vflip"};
	char pipe_bw_names[4][16] = {"default_pipe", "camera_pipe",
				"hflip_pipe", "vflip_pipe"};
	struct mdss_max_bw_settings *bw_settings;
	struct mdss_max_bw_settings *pipe_bw_settings;

	bw_settings = mdata->max_bw_settings;
	pipe_bw_settings = mdata->max_per_pipe_bw_settings;

/* NOTE(review): this SPRINT is never #undef'd, unlike the other users */
#define SPRINT(fmt, ...) \
	(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))

	SPRINT("bw_mode_bitmap=%d\n", mdata->bw_mode_bitmap);
	SPRINT("bw_limit_pending=%d\n", mdata->bw_limit_pending);

	for (i = 0; i < mdata->max_bw_settings_cnt; i++) {
		SPRINT("%s=%d\n", bw_names[i], bw_settings->mdss_max_bw_val);
		bw_settings++;
	}

	for (i = 0; i < mdata->mdss_per_pipe_bw_cnt; i++) {
		SPRINT("%s=%d\n", pipe_bw_names[i],
					pipe_bw_settings->mdss_max_bw_val);
		pipe_bw_settings++;
	}

	return cnt;
}

/*
 * sysfs "bw_mode_bitmap" store handler: accept a new bandwidth mode
 * bitmap from userspace and mark the limit change as pending. A parse
 * failure is logged but still returns success (len) to the caller.
 */
static ssize_t mdss_mdp_store_max_limit_bw(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct mdss_data_type *mdata = dev_get_drvdata(dev);
	u32 data = 0;

	if (kstrtouint(buf, 0, &data)) {
		pr_info("Not able scan to bw_mode_bitmap\n");
	} else {
		mdata->bw_mode_bitmap = data;
		mdata->bw_limit_pending = true;
		pr_debug("limit use case, bw_mode_bitmap = %d\n", data);
	}
	return len;
}

static DEVICE_ATTR(caps, S_IRUGO, mdss_mdp_show_capabilities, NULL);
static DEVICE_ATTR(bw_mode_bitmap, S_IRUGO | S_IWUSR | S_IWGRP,
		mdss_mdp_read_max_limit_bw, mdss_mdp_store_max_limit_bw);

static struct attribute *mdp_fs_attrs[] = {
	&dev_attr_caps.attr,
	&dev_attr_bw_mode_bitmap.attr,
	NULL
};

static struct attribute_group mdp_fs_attr_group = {
	.attrs = mdp_fs_attrs
};

/* Create the MDP sysfs attribute group under the platform device. */
static int mdss_mdp_register_sysfs(struct mdss_data_type *mdata)
{
	struct device *dev = &mdata->pdev->dev;
	int rc;

	rc = sysfs_create_group(&dev->kobj, &mdp_fs_attr_group);

	return rc;
}

/*
 * Query whether the bootloader left the given interface enabled for the
 * given display by reading DISP_INTF_SEL. Returns -EPROBE_DEFER until
 * panel config parsing is done, nonzero if the interface is selected,
 * 0 otherwise.
 */
int mdss_panel_get_intf_status(u32 disp_num, u32 intf_type)
{
	int rc, intf_status = 0;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (!mdss_res || !mdss_res->pan_cfg.init_done)
		return -EPROBE_DEFER;

	if (mdss_res->handoff_pending) {
		/* clocks must be on to read the interface-select register */
		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);

		intf_status = readl_relaxed(mdata->mdp_base +
			MDSS_MDP_REG_DISP_INTF_SEL);

		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);

		if (intf_type == MDSS_PANEL_INTF_DSI) {
			if (disp_num == DISPLAY_1)
				rc = (intf_status & MDSS_MDP_INTF_DSI0_SEL);
			else if (disp_num == DISPLAY_2)
				rc = (intf_status & MDSS_MDP_INTF_DSI1_SEL);
			else
				rc = 0;
		} else if (intf_type == MDSS_PANEL_INTF_EDP) {
			intf_status &= MDSS_MDP_INTF_EDP_SEL;
			rc = (intf_status == MDSS_MDP_INTF_EDP_SEL);
		} else if (intf_type == MDSS_PANEL_INTF_HDMI) {
			intf_status &= MDSS_MDP_INTF_HDMI_SEL;
			rc = (intf_status == MDSS_MDP_INTF_HDMI_SEL);
		} else {
			rc = 0;
		}
	} else {
		rc = 0;
	}

	return rc;
}

/*
 * Platform probe: map register spaces, wire up IRQs, initialize
 * resources/clocks/bus scaling, parse the device tree and detect any
 * bootloader-enabled (splash) displays for handoff.
 */
static int mdss_mdp_probe(struct platform_device *pdev)
{
	struct resource *res;
	int rc;
	struct mdss_data_type *mdata;
	uint32_t intf_sel = 0;
	uint32_t split_display = 0;
	int num_of_display_on = 0;
	int i = 0;

	if (!pdev->dev.of_node) {
		pr_err("MDP driver only supports device tree probe\n");
		return -ENOTSUPP;
	}

	if (mdss_res) {
		pr_err("MDP already initialized\n");
		return -EINVAL;
	}

	mdata = devm_kzalloc(&pdev->dev, sizeof(*mdata), GFP_KERNEL);
	if (mdata == NULL)
		return -ENOMEM;

	pdev->id = 0;
	mdata->pdev = pdev;
	platform_set_drvdata(pdev, mdata);
	mdss_res = mdata;
	mutex_init(&mdata->reg_lock);
	mutex_init(&mdata->reg_bus_lock);
	mutex_init(&mdata->bus_lock);
	INIT_LIST_HEAD(&mdata->reg_bus_clist);
	atomic_set(&mdata->sd_client_count, 0);
	atomic_set(&mdata->active_intf_cnt, 0);

	mdss_res->mdss_util = mdss_get_util_intf();
	if (mdss_res->mdss_util == NULL) {
		pr_err("Failed to get mdss utility functions\n");
		return -ENODEV;
	}

	/* publish MDP helpers through the shared utility interface */
	mdss_res->mdss_util->get_iommu_domain = mdss_smmu_get_domain_id;
	mdss_res->mdss_util->iommu_attached = is_mdss_iommu_attached;
	mdss_res->mdss_util->iommu_ctrl = mdss_iommu_ctrl;
	mdss_res->mdss_util->bus_scale_set_quota = mdss_bus_scale_set_quota;
	mdss_res->mdss_util->bus_bandwidth_ctrl = mdss_bus_bandwidth_ctrl;
	mdss_res->mdss_util->panel_intf_type = mdss_panel_intf_type;
	mdss_res->mdss_util->panel_intf_status = mdss_panel_get_intf_status;

	rc = msm_dss_ioremap_byname(pdev, &mdata->mdss_io, "mdp_phys");
	if (rc) {
		pr_err("unable to map MDP base\n");
		goto probe_done;
	}
	pr_debug("MDSS HW Base addr=0x%x len=0x%x\n",
		(int) (unsigned long) mdata->mdss_io.base,
		mdata->mdss_io.len);

	rc = msm_dss_ioremap_byname(pdev,
		&mdata->vbif_io, "vbif_phys");
	if (rc) {
		pr_err("unable to map MDSS VBIF base\n");
		goto probe_done;
	}
	pr_debug("MDSS VBIF HW Base addr=0x%x len=0x%x\n",
		(int) (unsigned long) mdata->vbif_io.base,
		mdata->vbif_io.len);

	/* the non-realtime VBIF is optional on some targets */
	rc = msm_dss_ioremap_byname(pdev, &mdata->vbif_nrt_io,
		"vbif_nrt_phys");
	if (rc)
		pr_debug("unable to map MDSS VBIF non-realtime base\n");
	else
		pr_debug("MDSS VBIF NRT HW Base addr=%pK len=0x%x\n",
			mdata->vbif_nrt_io.base, mdata->vbif_nrt_io.len);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		pr_err("unable to get MDSS irq\n");
		rc = -ENOMEM;
		goto probe_done;
	}

	/*
	 * NOTE(review): this failure path (and the kcalloc ones later)
	 * returns directly instead of going through probe_done cleanup —
	 * confirm the intermediate state is acceptable to leave behind.
	 */
	mdss_mdp_hw.irq_info = kzalloc(sizeof(struct irq_info), GFP_KERNEL);
	if (!mdss_mdp_hw.irq_info) {
		pr_err("no mem to save irq info: kzalloc fail\n");
		return -ENOMEM;
	}
	mdss_mdp_hw.irq_info->irq = res->start;
	mdss_mdp_hw.ptr = mdata;

	/* export misc. interrupts to external driver */
	mdata->irq_domain = irq_domain_add_linear(pdev->dev.of_node, 32,
			&mdss_irq_domain_ops, mdata);
	if (!mdata->irq_domain) {
		pr_err("unable to add linear domain\n");
		rc = -ENOMEM;
		goto probe_done;
	}

	mdss_misc_hw.irq_info = mdss_intr_line();
	rc = mdss_res->mdss_util->register_irq(&mdss_misc_hw);
	if (rc)
		pr_err("mdss_register_irq failed.\n");

	rc = mdss_mdp_res_init(mdata);
	if (rc) {
		pr_err("unable to initialize mdss mdp resources\n");
		goto probe_done;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT_MS);
	if (mdata->idle_pc_enabled)
		pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev))
		mdss_mdp_footswitch_ctrl(mdata, true);

	rc = mdss_mdp_bus_scale_register(mdata);
	if (rc) {
		pr_err("unable to register bus scaling\n");
		goto probe_done;
	}

	/*
	 * enable clocks and read mdp_rev as soon as possible once
	 * kernel is up.
	 */
	mdss_mdp_footswitch_ctrl_splash(true);
	mdss_hw_rev_init(mdata);

	/*populate hw iomem base info from device tree*/
	rc = mdss_mdp_parse_dt(pdev);
	if (rc) {
		pr_err("unable to parse device tree\n");
		goto probe_done;
	}

	rc = mdss_mdp_get_cmdline_config(pdev);
	if (rc) {
		pr_err("Error in panel override:rc=[%d]\n", rc);
		goto probe_done;
	}

	rc = mdss_mdp_debug_init(pdev, mdata);
	if (rc) {
		pr_err("unable to initialize mdp debugging\n");
		goto probe_done;
	}

	rc = mdss_mdp_scaler_init(mdata, &pdev->dev);
	if (rc)
		goto probe_done;

	rc = mdss_mdp_register_sysfs(mdata);
	if (rc)
		pr_err("unable to register mdp sysfs nodes\n");

	rc = mdss_fb_register_mdp_instance(&mdp5);
	if (rc)
		pr_err("unable to register mdp instance\n");

	rc = mdss_res->mdss_util->register_irq(&mdss_mdp_hw);
	if (rc)
		pr_err("mdss_register_irq failed.\n");

	rc = mdss_smmu_init(mdata, &pdev->dev);
	if (rc)
		pr_err("mdss smmu init failed\n");

	mdss_mdp_set_supported_formats(mdata);

	mdss_res->mdss_util->mdp_probe_done = true;

	mdss_hw_init(mdata);

	rc = mdss_mdp_pp_init(&pdev->dev);
	if (rc)
		pr_err("unable to initialize mdss pp resources\n");

	/* Restoring Secure configuration during boot-up */
	if (mdss_mdp_req_init_restore_cfg(mdata))
		__mdss_restore_sec_cfg(mdata);

	/* snapshot reset-default panic/robust LUTs for the BWC quirk */
	if (mdss_has_quirk(mdata, MDSS_QUIRK_BWCPANIC)) {
		mdata->default_panic_lut0 = readl_relaxed(mdata->mdp_base +
			MMSS_MDP_PANIC_LUT0);
		mdata->default_panic_lut1 = readl_relaxed(mdata->mdp_base +
			MMSS_MDP_PANIC_LUT1);
		mdata->default_robust_lut = readl_relaxed(mdata->mdp_base +
			MMSS_MDP_ROBUST_LUT);
	}

	/*
	 * Read the DISP_INTF_SEL register to check if display was enabled in
	 * bootloader or not. If yes, let handoff handle removing the extra
	 * clk/regulator votes else turn off clk/regulators because purpose
	 * here is to get mdp_rev.
	 */
	intf_sel = readl_relaxed(mdata->mdp_base +
		MDSS_MDP_REG_DISP_INTF_SEL);
	split_display = readl_relaxed(mdata->mdp_base +
		MDSS_MDP_REG_SPLIT_DISPLAY_EN);
	if (intf_sel != 0) {
		/* one byte of INTF_SEL per interface slot */
		for (i = 0; i < 4; i++)
			num_of_display_on += ((intf_sel >> i*8) & 0x000000FF);

		/*
		 * For split display enabled - DSI0, DSI1 interfaces are
		 * considered as single display. So decrement
		 * 'num_of_display_on' by 1
		 */
		if (split_display)
			num_of_display_on--;
	}
	if (!num_of_display_on)
		mdss_mdp_footswitch_ctrl_splash(false);
	else {
		mdata->handoff_pending = true;
		/*
		 * If multiple displays are enabled in LK, ctrl_splash off will
		 * be called multiple times during splash_cleanup. Need to
		 * enable it symmetrically*/
		for (i = 1; i < num_of_display_on; i++)
			mdss_mdp_footswitch_ctrl_splash(true);
	}

	/* NOTE(review): returns here skip the probe_done cleanup — confirm */
	mdp_intr_cb = kcalloc(ARRAY_SIZE(mdp_irq_map),
			sizeof(struct intr_callback), GFP_KERNEL);
	if (mdp_intr_cb == NULL)
		return -ENOMEM;

	mdss_res->mdp_irq_mask = kcalloc(ARRAY_SIZE(mdp_intr_reg),
			sizeof(u32), GFP_KERNEL);
	if (mdss_res->mdp_irq_mask == NULL)
		return -ENOMEM;

	pr_info("mdss version = 0x%x, bootloader display is %s, num %d, intf_sel=0x%08x\n",
		mdata->mdp_rev, num_of_display_on ? "on" : "off",
		num_of_display_on, intf_sel);

probe_done:
	if (IS_ERR_VALUE(rc)) {
		if (!num_of_display_on)
			mdss_mdp_footswitch_ctrl_splash(false);
		if (mdata->regulator_notif_register)
			regulator_unregister_notifier(mdata->fs,
						&(mdata->gdsc_cb));
		mdss_mdp_hw.ptr = NULL;
		mdss_mdp_pp_term(&pdev->dev);
		mutex_destroy(&mdata->reg_lock);
		mdss_res = NULL;
	}

	return rc;
}

/*
 * Decode a DT <offset, value> pair array into hw_settings entries.
 * Offsets beyond the iospace length are skipped. 'count' is the number
 * of pairs, so the raw array holds count * 2 big-endian u32 cells.
 */
static void mdss_mdp_parse_dt_regs_array(const u32 *arr,
	struct dss_io_data *io, struct mdss_hw_settings *hws, int count)
{
	u32 len, reg;
	int i;

	if (!arr)
		return;

	for (i = 0, len = count * 2; i < len; i += 2) {
		reg = be32_to_cpu(arr[i]);
		if (reg >= io->len)
			continue;

		hws->reg = io->base + reg;
		hws->val = be32_to_cpu(arr[i + 1]);
		pr_debug("reg: 0x%04x=0x%08x\n", reg, hws->val);
		hws++;
	}
}

/*
 * Collect optional VBIF / VBIF-NRT / MDP register override tables from
 * the device tree into a single zero-terminated hw_settings array.
 */
int mdss_mdp_parse_dt_hw_settings(struct platform_device *pdev)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
	struct mdss_hw_settings *hws;
	const u32 *vbif_arr, *mdp_arr, *vbif_nrt_arr;
	int vbif_len, mdp_len, vbif_nrt_len;

	vbif_arr = of_get_property(pdev->dev.of_node, "qcom,vbif-settings",
			&vbif_len);
	if (!vbif_arr || (vbif_len & 1)) {
		pr_debug("MDSS VBIF settings not found\n");
		vbif_len = 0;
	}
	vbif_len /= 2 * sizeof(u32);	/* bytes -> number of pairs */

	vbif_nrt_arr = of_get_property(pdev->dev.of_node,
				"qcom,vbif-nrt-settings", &vbif_nrt_len);
	if (!vbif_nrt_arr || (vbif_nrt_len & 1)) {
		pr_debug("MDSS VBIF non-realtime settings not found\n");
		vbif_nrt_len = 0;
	}
	vbif_nrt_len /= 2 * sizeof(u32);

	mdp_arr = of_get_property(pdev->dev.of_node, "qcom,mdp-settings",
			&mdp_len);
	if (!mdp_arr || (mdp_len & 1)) {
		pr_debug("MDSS MDP settings not found\n");
		mdp_len = 0;
	}
	mdp_len /= 2 * sizeof(u32);

	if (!(mdp_len + vbif_len + vbif_nrt_len))
		return 0;

	hws = devm_kzalloc(&pdev->dev, sizeof(*hws) * (vbif_len + mdp_len +
			vbif_nrt_len + 1), GFP_KERNEL);
	if (!hws)
		return -ENOMEM;

	/*
	 * NOTE(review): the VBIF-NRT entries are written starting at the
	 * same 'hws' base as the VBIF entries (overwriting them), and the
	 * MDP entries start at hws + vbif_len only — confirm whether the
	 * NRT table is meant to share slots or should use its own range.
	 */
	mdss_mdp_parse_dt_regs_array(vbif_arr, &mdata->vbif_io,
			hws, vbif_len);
	mdss_mdp_parse_dt_regs_array(vbif_nrt_arr, &mdata->vbif_nrt_io,
			hws, vbif_nrt_len);
	mdss_mdp_parse_dt_regs_array(mdp_arr, &mdata->mdss_io,
		hws + vbif_len, mdp_len);
	mdata->hw_settings = hws;

	return 0;
}

/*
 * Top-level device-tree parse: runs all sub-parsers in dependency order
 * (hw settings, pipes, mixers, misc, wb, ctl, intf, smp, prefill, AD),
 * then resolves the MDP register base offset. CDM/DSC are optional.
 */
static int mdss_mdp_parse_dt(struct platform_device *pdev)
{
	int rc, data;
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);

	rc = mdss_mdp_parse_dt_hw_settings(pdev);
	if (rc) {
		pr_err("Error in device tree : hw settings\n");
		return rc;
	}

	rc = mdss_mdp_parse_dt_pipe(pdev);
	if (rc) {
		pr_err("Error in device tree : pipes\n");
		return rc;
	}

	rc = mdss_mdp_parse_dt_mixer(pdev);
	if (rc) {
		pr_err("Error in device tree : mixers\n");
		return rc;
	}

	rc = mdss_mdp_parse_dt_misc(pdev);
	if (rc) {
		pr_err("Error in device tree : misc\n");
		return rc;
	}

	rc = mdss_mdp_parse_dt_wb(pdev);
	if (rc) {
		pr_err("Error in device tree : wb\n");
		return rc;
	}

	rc = mdss_mdp_parse_dt_ctl(pdev);
	if (rc) {
		pr_err("Error in device tree : ctl\n");
		return rc;
	}

	rc = mdss_mdp_parse_dt_video_intf(pdev);
	if (rc) {
		/* NOTE(review): message says "ctl" for the video intf parser */
		pr_err("Error in device tree : ctl\n");
		return rc;
	}

	rc = mdss_mdp_parse_dt_smp(pdev);
	if (rc) {
		pr_err("Error in device tree : smp\n");
		return rc;
	}

	rc = mdss_mdp_parse_dt_prefill(pdev);
	if (rc) {
		pr_err("Error in device tree : prefill\n");
		return rc;
	}

	rc = mdss_mdp_parse_dt_ad_cfg(pdev);
	if (rc) {
		pr_err("Error in device tree : ad\n");
		return rc;
	}

	rc = mdss_mdp_parse_dt_cdm(pdev);
	if (rc)
		pr_debug("CDM offset not found in device tree\n");

	rc = mdss_mdp_parse_dt_dsc(pdev);
	if (rc)
		pr_debug("DSC offset not found in device tree\n");

	/* Parse the mdp specific register base offset*/
	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,mdss-mdp-reg-offset", &data);
	if (rc) {
		pr_err("Error in device tree : mdp reg base\n");
		return rc;
	}
	mdata->mdp_base = mdata->mdss_io.base + data;
	return 0;
}

/*
 * Apply a per-pipe sw-reset bit map from the given DT property. The
 * property must contain exactly one bit offset per pipe; all entries
 * share the same sw-reset register offset.
 */
static void mdss_mdp_parse_dt_pipe_sw_reset(struct platform_device *pdev,
	u32 reg_off, char *prop_name, struct mdss_mdp_pipe *pipe_list,
	u32 npipes)
{
	int len;
	const u32 *arr;

	arr = of_get_property(pdev->dev.of_node, prop_name, &len);
	if (arr) {
		int i;

		len /= sizeof(u32);
		if (len != npipes) {
			/*
			 * NOTE(review): 'len' is the found count and
			 * 'npipes' is the required count, so the req/found
			 * arguments appear swapped — confirm.
			 */
			pr_err("%s: invalid sw_reset entries req:%d found:%d\n",
				prop_name, len, npipes);
			return;
		}

		for (i = 0; i < len; i++) {
			pipe_list[i].sw_reset.reg_off = reg_off;
			pipe_list[i].sw_reset.bit_off = be32_to_cpu(arr[i]);
			pr_debug("%s[%d]: sw_reset: reg_off:0x%x bit_off:%d\n",
				prop_name, i, reg_off, be32_to_cpu(arr[i]));
		}
	}
}

/*
 * Parse per-pipe clock-control register/bit offsets (mandatory). Each
 * pipe consumes three u32 cells: ctrl reg offset, ctrl bit offset, and
 * status bit offset; the status register is assumed to follow the ctrl
 * register by 4 bytes. On a malformed table all entries are cleared.
 */
static int mdss_mdp_parse_dt_pipe_clk_ctrl(struct platform_device *pdev,
	char *prop_name, struct mdss_mdp_pipe *pipe_list, u32 npipes)
{
	int rc = 0, len;
	const u32 *arr;

	arr = of_get_property(pdev->dev.of_node, prop_name, &len);
	if (arr) {
		int i, j;

		len /= sizeof(u32);
		for (i = 0, j = 0; i < len; j++) {
			struct mdss_mdp_pipe *pipe = NULL;

			if (j >= npipes) {
				pr_err("invalid clk ctrl enries for prop: %s\n",
					prop_name);
				return -EINVAL;
			}

			pipe = &pipe_list[j];

			pipe->clk_ctrl.reg_off = be32_to_cpu(arr[i++]);
			pipe->clk_ctrl.bit_off = be32_to_cpu(arr[i++]);

			/* status register is next in line to ctrl register */
			pipe->clk_status.reg_off = pipe->clk_ctrl.reg_off + 4;
			pipe->clk_status.bit_off = be32_to_cpu(arr[i++]);

			pr_debug("%s[%d]: ctrl: reg_off: 0x%x bit_off: %d\n",
				prop_name, j, pipe->clk_ctrl.reg_off,
				pipe->clk_ctrl.bit_off);
			pr_debug("%s[%d]: status: reg_off: 0x%x bit_off: %d\n",
				prop_name, j, pipe->clk_status.reg_off,
				pipe->clk_status.bit_off);
		}
		if (j != npipes) {
			pr_err("%s: %d entries found. required %d\n",
				prop_name, j, npipes);

			for (i = 0; i < npipes; i++) {
				memset(&pipe_list[i].clk_ctrl, 0,
					sizeof(pipe_list[i].clk_ctrl));
				memset(&pipe_list[i].clk_status, 0,
					sizeof(pipe_list[i].clk_status));
			}

			rc = -EINVAL;
		}
	} else {
		pr_err("error mandatory property '%s' not found\n", prop_name);
		rc = -EINVAL;
	}

	return rc;
}

/*
 * Parse optional per-pipe panic-control indices; one u32 per pipe.
 * Mismatched counts are logged but partial assignments are kept.
 */
static void mdss_mdp_parse_dt_pipe_panic_ctrl(struct platform_device *pdev,
	char *prop_name, struct mdss_mdp_pipe *pipe_list, u32 npipes)
{
	int i, j;
	int len;
	const u32 *arr;
	struct mdss_mdp_pipe *pipe = NULL;

	arr = of_get_property(pdev->dev.of_node, prop_name, &len);
	if (arr) {
		len /= sizeof(u32);
		for (i = 0, j = 0; i < len; j++) {
			if (j >= npipes) {
				pr_err("invalid panic ctrl enries for prop: %s\n",
					prop_name);
				return;
			}

			pipe = &pipe_list[j];
			pipe->panic_ctrl_ndx = be32_to_cpu(arr[i++]);
		}
		if (j != npipes)
			pr_err("%s: %d entries found. required %d\n",
				prop_name, j, npipes);
	} else {
		pr_debug("panic ctrl enabled but property '%s' not found\n",
			prop_name);
	}
}

/*
 * Common per-type pipe parser: counts pipes of 'ptype', reads their
 * fetch/xin/register offsets from DT, allocates the pipe array and runs
 * address + clock-control setup. Returns the pipe count on success or a
 * negative errno; *out_plist receives the allocated list (NULL if none).
 */
static int mdss_mdp_parse_dt_pipe_helper(struct platform_device *pdev,
		u32 ptype, char *ptypestr,
		struct mdss_mdp_pipe **out_plist,
		size_t len, u8 priority_base)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
	u32 offsets[MDSS_MDP_MAX_SSPP];
	u32 ftch_id[MDSS_MDP_MAX_SSPP];
	u32 xin_id[MDSS_MDP_MAX_SSPP];
	u32 pnums[MDSS_MDP_MAX_SSPP];
	struct mdss_mdp_pipe *pipe_list;
	char prop_name[64];
	int i, cnt, rc;
	u32 rects_per_sspp;

	if (!out_plist)
		return -EINVAL;

	/* enumerate the hardware pipe numbers belonging to this type */
	for (i = 0, cnt = 0; i < MDSS_MDP_MAX_SSPP && cnt < len; i++) {
		if (ptype == get_pipe_type_from_num(i)) {
			pnums[cnt] = i;
			cnt++;
		}
	}

	if (cnt < len)
		pr_warn("Invalid %s pipe count: %zu, max supported: %d\n",
				ptypestr, len, cnt);
	if (cnt == 0) {
		*out_plist = NULL;
		return 0;
	}

	/* by default works in single rect mode unless otherwise noted */
	rects_per_sspp = mdata->rects_per_sspp[ptype] ?
		: 1;

	pipe_list = devm_kzalloc(&pdev->dev,
		(sizeof(struct mdss_mdp_pipe) * cnt * rects_per_sspp),
		GFP_KERNEL);
	if (!pipe_list)
		return -ENOMEM;

	/* targets with pixel RAM (and cursors) have no SMP fetch ids */
	if (mdata->has_pixel_ram || (ptype == MDSS_MDP_PIPE_TYPE_CURSOR)) {
		for (i = 0; i < cnt; i++)
			ftch_id[i] = -1;
	} else {
		snprintf(prop_name, sizeof(prop_name),
			"qcom,mdss-pipe-%s-fetch-id", ptypestr);
		rc = mdss_mdp_parse_dt_handler(pdev, prop_name, ftch_id,
				cnt);
		if (rc)
			goto parse_fail;
	}

	snprintf(prop_name, sizeof(prop_name),
		"qcom,mdss-pipe-%s-xin-id", ptypestr);
	rc = mdss_mdp_parse_dt_handler(pdev, prop_name, xin_id, cnt);
	if (rc)
		goto parse_fail;

	snprintf(prop_name, sizeof(prop_name),
		"qcom,mdss-pipe-%s-off", ptypestr);
	rc = mdss_mdp_parse_dt_handler(pdev, prop_name, offsets, cnt);
	if (rc)
		goto parse_fail;

	rc = mdss_mdp_pipe_addr_setup(mdata, pipe_list, offsets, ftch_id,
			xin_id, ptype, pnums, cnt, rects_per_sspp,
			priority_base);
	if (rc)
		goto parse_fail;

	snprintf(prop_name, sizeof(prop_name),
		"qcom,mdss-pipe-%s-clk-ctrl-offsets", ptypestr);
	rc = mdss_mdp_parse_dt_pipe_clk_ctrl(pdev, prop_name,
			pipe_list, cnt);
	if (rc)
		goto parse_fail;

	*out_plist = pipe_list;

	return cnt;

parse_fail:
	devm_kfree(&pdev->dev, pipe_list);

	return rc;
}

/*
 * Parse all SSPP (pipe) descriptions from the device tree: counts,
 * fetch/xin id cross-checks, per-type setup via the helper, and the
 * optional sw-reset map. Cursor pipes are excluded from the SMP/xin
 * consistency checks.
 */
static int mdss_mdp_parse_dt_pipe(struct platform_device *pdev)
{
	int rc = 0;
	u32 nfids = 0, len, nxids = 0, npipes = 0;
	u32 sw_reset_offset = 0;
	u32 data[4];
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);

	/* no SMP data property implies the target has internal pixel RAM */
	mdata->has_pixel_ram = !mdss_mdp_parse_dt_prop_len(pdev,
		"qcom,mdss-smp-data");

	mdata->nvig_pipes = mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-pipe-vig-off");
	mdata->nrgb_pipes = mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-pipe-rgb-off");
	mdata->ndma_pipes = mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-pipe-dma-off");
	mdata->ncursor_pipes = mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-pipe-cursor-off");

	npipes = mdata->nvig_pipes + mdata->nrgb_pipes + mdata->ndma_pipes;

	if (!mdata->has_pixel_ram) {
		nfids  += mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-pipe-vig-fetch-id");
		nfids  += mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-pipe-rgb-fetch-id");
		nfids  += mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-pipe-dma-fetch-id");
		if (npipes != nfids) {
			pr_err("device tree err: unequal number of pipes and smp ids");
			return -EINVAL;
		}
	}

	if (mdata->nvig_pipes)
		nxids += mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-pipe-vig-xin-id");
	if (mdata->nrgb_pipes)
		nxids += mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-pipe-rgb-xin-id");
	if (mdata->ndma_pipes)
		nxids += mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-pipe-dma-xin-id");
	if (npipes != nxids) {
		pr_err("device tree err: unequal number of pipes and xin ids\n");
		return -EINVAL;
	}

	/* helper returns the actual count; pipe priorities stack per type */
	rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_VIG, "vig",
			&mdata->vig_pipes, mdata->nvig_pipes, 0);
	if (IS_ERR_VALUE(rc))
		goto parse_fail;
	mdata->nvig_pipes = rc;

	rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_RGB, "rgb",
			&mdata->rgb_pipes, mdata->nrgb_pipes,
			mdata->nvig_pipes);
	if (IS_ERR_VALUE(rc))
		goto parse_fail;
	mdata->nrgb_pipes = rc;

	rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_DMA, "dma",
			&mdata->dma_pipes, mdata->ndma_pipes,
			mdata->nvig_pipes + mdata->nrgb_pipes);
	if (IS_ERR_VALUE(rc))
		goto parse_fail;
	mdata->ndma_pipes = rc;

	rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_CURSOR,
			"cursor", &mdata->cursor_pipes, mdata->ncursor_pipes,
			0);
	if (IS_ERR_VALUE(rc))
		goto parse_fail;
	mdata->ncursor_pipes = rc;

	rc = 0;

	mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-sw-reset-off",
		&sw_reset_offset, 1);
	if (sw_reset_offset) {
		if (mdata->vig_pipes)
			mdss_mdp_parse_dt_pipe_sw_reset(pdev, sw_reset_offset,
				"qcom,mdss-pipe-vig-sw-reset-map",
				mdata->vig_pipes, mdata->nvig_pipes);
		if (mdata->rgb_pipes)
			mdss_mdp_parse_dt_pipe_sw_reset(pdev, sw_reset_offset,
				"qcom,mdss-pipe-rgb-sw-reset-map",
				mdata->rgb_pipes, mdata->nrgb_pipes);
		if (mdata->dma_pipes)
			mdss_mdp_parse_dt_pipe_sw_reset(pdev, sw_reset_offset,
				"qcom,mdss-pipe-dma-sw-reset-map",
				mdata->dma_pipes, mdata->ndma_pipes);
	}
	mdata->has_panic_ctrl = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-has-panic-ctrl");
	if (mdata->has_panic_ctrl) {
		if (mdata->vig_pipes)
			mdss_mdp_parse_dt_pipe_panic_ctrl(pdev,
				"qcom,mdss-pipe-vig-panic-ctrl-offsets",
					mdata->vig_pipes, mdata->nvig_pipes);
		if (mdata->rgb_pipes)
			mdss_mdp_parse_dt_pipe_panic_ctrl(pdev,
				"qcom,mdss-pipe-rgb-panic-ctrl-offsets",
					mdata->rgb_pipes, mdata->nrgb_pipes);
		if (mdata->dma_pipes)
			mdss_mdp_parse_dt_pipe_panic_ctrl(pdev,
				"qcom,mdss-pipe-dma-panic-ctrl-offsets",
					mdata->dma_pipes, mdata->ndma_pipes);
	}

	len = mdss_mdp_parse_dt_prop_len(pdev,
			"qcom,mdss-per-pipe-panic-luts");
	if (len != 4) {
		pr_debug("Unable to read per-pipe-panic-luts\n");
	} else {
		/*
		 * NOTE(review): rc from the handler is not checked before
		 * data[] is consumed — confirm a partial read is benign.
		 */
		rc = mdss_mdp_parse_dt_handler(pdev,
			"qcom,mdss-per-pipe-panic-luts", data, len);
		mdata->default_panic_lut_per_pipe_linear = data[0];
		mdata->default_panic_lut_per_pipe_tile = data[1];
		mdata->default_robust_lut_per_pipe_linear = data[2];
		mdata->default_robust_lut_per_pipe_tile = data[3];
		pr_debug("per pipe panic lut [0]:0x%x [1]:0x%x [2]:0x%x [3]:0x%x\n",
			data[0], data[1], data[2], data[3]);
	}

parse_fail:
	return rc;
}

/*
 * Parse layer-mixer, DSPP and ping-pong block offsets and set them up.
 * When no writeback mixers exist and there is no separate rotator,
 * virtual writeback mixers are replicated from the last interface
 * mixers, one per DMA pipe.
 */
static int mdss_mdp_parse_dt_mixer(struct platform_device *pdev)
{

	u32 nmixers, npingpong;
	int rc = 0;
	u32 *mixer_offsets = NULL, *dspp_offsets = NULL,
	    *pingpong_offsets = NULL;
	u32 is_virtual_mixer_req = false;

	struct mdss_data_type *mdata = platform_get_drvdata(pdev);

	mdata->nmixers_intf = mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-mixer-intf-off");
	mdata->nmixers_wb = mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-mixer-wb-off");
	mdata->ndspp = mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-dspp-off");
	npingpong = mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-pingpong-off");
	nmixers = mdata->nmixers_intf + mdata->nmixers_wb;

	rc = of_property_read_u32(pdev->dev.of_node,
			"qcom,max-mixer-width", &mdata->max_mixer_width);
	if (rc) {
		pr_err("device tree err: failed to get max mixer width\n");
		return -EINVAL;
	}

	if (mdata->nmixers_intf < mdata->ndspp) {
		pr_err("device tree err: no of dspp are greater than intf mixers\n");
		return -EINVAL;
	}

	if (mdata->nmixers_intf != npingpong) {
		pr_err("device tree err: unequal no of pingpong and intf mixers\n");
		return -EINVAL;
	}

	mixer_offsets = kzalloc(sizeof(u32) * nmixers, GFP_KERNEL);
	if (!mixer_offsets) {
		pr_err("no mem assigned: kzalloc fail\n");
		return -ENOMEM;
	}
	dspp_offsets = kzalloc(sizeof(u32) * mdata->ndspp, GFP_KERNEL);
	if (!dspp_offsets) {
		pr_err("no mem assigned: kzalloc fail\n");
		rc = -ENOMEM;
		goto dspp_alloc_fail;
	}
	pingpong_offsets = kzalloc(sizeof(u32) * npingpong, GFP_KERNEL);
	if (!pingpong_offsets) {
		pr_err("no mem assigned: kzalloc fail\n");
		rc = -ENOMEM;
		goto pingpong_alloc_fail;
	}

	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-mixer-intf-off",
		mixer_offsets, mdata->nmixers_intf);
	if (rc)
		goto parse_done;

	mdata->has_separate_rotator = of_property_read_bool(pdev->dev.of_node,
			"qcom,mdss-has-separate-rotator");
	if (mdata->nmixers_wb) {
		rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-mixer-wb-off",
			mixer_offsets + mdata->nmixers_intf,
			mdata->nmixers_wb);
		if (rc)
			goto parse_done;
	} else if (!mdata->has_separate_rotator) {
		/*
		 * If writeback mixers are not available, put the number of
		 * writeback mixers equal to number of DMA pipes so that
		 * later same number of virtual writeback mixers can be
		 * allocated.
		 */
		mdata->nmixers_wb = mdata->ndma_pipes;
		is_virtual_mixer_req = true;
	}

	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-dspp-off",
		dspp_offsets, mdata->ndspp);
	if (rc)
		goto parse_done;

	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pingpong-off",
		pingpong_offsets, npingpong);
	if (rc)
		goto parse_done;

	rc = mdss_mdp_mixer_addr_setup(mdata, mixer_offsets,
			dspp_offsets, pingpong_offsets,
			MDSS_MDP_MIXER_TYPE_INTF, mdata->nmixers_intf);
	if (rc)
		goto parse_done;

	if (mdata->nmixers_wb) {
		if (is_virtual_mixer_req) {
			/*
			 * Replicate last interface mixers based on number of
			 * dma pipes available as virtual writeback mixers.
			 */
			rc = mdss_mdp_mixer_addr_setup(mdata, mixer_offsets +
					mdata->nmixers_intf - mdata->ndma_pipes,
					NULL, NULL,
					MDSS_MDP_MIXER_TYPE_WRITEBACK,
					mdata->nmixers_wb);
			if (rc)
				goto parse_done;
		} else {
			rc = mdss_mdp_mixer_addr_setup(mdata, mixer_offsets +
					mdata->nmixers_intf, NULL, NULL,
					MDSS_MDP_MIXER_TYPE_WRITEBACK,
					mdata->nmixers_wb);
			if (rc)
				goto parse_done;
		}
	}

parse_done:
	kfree(pingpong_offsets);
pingpong_alloc_fail:
	kfree(dspp_offsets);
dspp_alloc_fail:
	kfree(mixer_offsets);

	return rc;
}

/*
 * Allocate and initialize the CDM (chroma-down) block descriptors from
 * their register offsets.
 */
static int mdss_mdp_cdm_addr_setup(struct mdss_data_type *mdata,
	u32 *cdm_offsets, u32 len)
{
	struct mdss_mdp_cdm *head;
	u32 i = 0;

	head = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_cdm) *
				len, GFP_KERNEL);
	if (!head) {
		pr_err("%s: no memory for CDM info\n", __func__);
		return -ENOMEM;
	}

	for (i = 0; i < len; i++) {
		head[i].num = i;
		head[i].base = (mdata->mdss_io.base) + cdm_offsets[i];
		atomic_set(&head[i].kref.refcount, 0);
		mutex_init(&head[i].lock);
		init_completion(&head[i].free_comp);
		pr_debug("%s: cdm off (%d) = %pK\n", __func__, i,
			head[i].base);
	}

	mdata->cdm_off = head;
	mutex_init(&mdata->cdm_lock);
	return 0;
}

/*
 * Parse optional CDM block offsets from DT. ncdm stays 0 on any
 * failure so the rest of the driver treats CDM as absent.
 */
static int mdss_mdp_parse_dt_cdm(struct platform_device *pdev)
{
	int rc = 0;
	u32 *cdm_offsets = NULL;
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);

	mdata->ncdm = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-cdm-off");
	if (!mdata->ncdm) {
		rc = 0;
		pr_debug("%s: No CDM offsets present in DT\n", __func__);
		goto end;
	}
	pr_debug("%s: cdm len == %d\n", __func__, mdata->ncdm);

	cdm_offsets = kzalloc(sizeof(u32) * mdata->ncdm, GFP_KERNEL);
	if (!cdm_offsets) {
		pr_err("no more memory for cdm offsets\n");
		rc = -ENOMEM;
		mdata->ncdm = 0;
		goto end;
	}

	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-cdm-off", cdm_offsets,
		mdata->ncdm);
	if (rc) {
		pr_err("device tree err: failed to get cdm offsets\n");
		goto fail;
	}

	rc = mdss_mdp_cdm_addr_setup(mdata, cdm_offsets, mdata->ncdm);
	if (rc) {
		pr_err("%s: CDM address setup failed\n", __func__);
		goto fail;
	}

fail:
	kfree(cdm_offsets);
	if (rc)
		mdata->ncdm = 0;
end:
	return rc;
}

/*
 * Allocate and initialize DSC (display stream compression) block
 * descriptors from their register offsets.
 */
static int mdss_mdp_dsc_addr_setup(struct mdss_data_type *mdata,
	u32 *dsc_offsets, u32 len)
{
	struct mdss_mdp_dsc *head;
	u32 i = 0;

	head = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_dsc) *
				len, GFP_KERNEL);
	if (!head) {
		pr_err("no memory for DSC info\n");
		return -ENOMEM;
	}

	for (i = 0; i < len; i++) {
		head[i].num = i;
		head[i].base = (mdata->mdss_io.base) + dsc_offsets[i];
		pr_debug("dsc off (%d) = %pK\n", i, head[i].base);
	}

	mdata->dsc_off = head;
	return 0;
}

/*
 * Parse optional DSC block offsets from DT; ndsc stays 0 on failure so
 * DSC is treated as absent.
 */
static int mdss_mdp_parse_dt_dsc(struct platform_device *pdev)
{
	int rc = 0;
	u32 *dsc_offsets = NULL;
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);

	mdata->ndsc = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-dsc-off");
	if (!mdata->ndsc) {
		rc = 0;
		pr_debug("No DSC offsets present in DT\n");
		goto end;
	}
	pr_debug("dsc len == %d\n", mdata->ndsc);

	dsc_offsets = kzalloc(sizeof(u32) * mdata->ndsc, GFP_KERNEL);
	if (!dsc_offsets) {
		pr_err("no more memory for dsc offsets\n");
		rc = -ENOMEM;
		mdata->ndsc = 0;
		goto end;
	}

	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-dsc-off", dsc_offsets,
		mdata->ndsc);
	if (rc) {
		/* NOTE(review): message says "cdm offsets" in the DSC parser */
		pr_err("device tree err: failed to get cdm offsets\n");
		goto fail;
	}

	rc = mdss_mdp_dsc_addr_setup(mdata, dsc_offsets, mdata->ndsc);
	if (rc) {
		pr_err("%s: DSC address setup failed\n", __func__);
		goto fail;
	}

fail:
	kfree(dsc_offsets);
	if (rc)
		mdata->ndsc = 0;
end:
	return rc;
}

/*
 * Parse writeback block offsets; an extra "intf" writeback is counted
 * unless the WFD mode is shared with the rotator.
 */
static int mdss_mdp_parse_dt_wb(struct platform_device *pdev)
{
	int rc = 0;
	u32 *wb_offsets = NULL;
	u32 num_wb_mixer, nwb_offsets, num_intf_wb = 0;
	const char *wfd_data;
	struct mdss_data_type *mdata;

	mdata = platform_get_drvdata(pdev);

	num_wb_mixer = mdata->nmixers_wb;

	wfd_data = of_get_property(pdev->dev.of_node,
					"qcom,mdss-wfd-mode", NULL);
	if (wfd_data && strcmp(wfd_data, "shared") != 0)
		num_intf_wb = 1;

	nwb_offsets =  mdss_mdp_parse_dt_prop_len(pdev,
			"qcom,mdss-wb-off");

	wb_offsets = kzalloc(sizeof(u32) * nwb_offsets, GFP_KERNEL);
	if (!wb_offsets) {
		pr_err("no more mem for writeback offsets\n");
		return -ENOMEM;
	}

	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-wb-off",
		wb_offsets, nwb_offsets);
	if (rc)
		goto wb_parse_done;

	rc = mdss_mdp_wb_addr_setup(mdata, num_wb_mixer, num_intf_wb);
	if (rc)
		goto wb_parse_done;

	mdata->nwb_offsets = nwb_offsets;
	mdata->wb_offsets = wb_offsets;

	return 0;

wb_parse_done:
	kfree(wb_offsets);
	return rc;
}

/*
 * Parse CTL (control path) block offsets; there must be at least as
 * many CTLs as writeback blocks.
 */
static int mdss_mdp_parse_dt_ctl(struct platform_device *pdev)
{
	int rc = 0;
	u32 *ctl_offsets = NULL;

	struct mdss_data_type *mdata = platform_get_drvdata(pdev);

	mdata->nctl = mdss_mdp_parse_dt_prop_len(pdev,
			"qcom,mdss-ctl-off");
	if (mdata->nctl < mdata->nwb) {
		pr_err("device tree err: number of ctl greater than wb\n");
		rc = -EINVAL;
		goto parse_done;
	}

	ctl_offsets = kzalloc(sizeof(u32) * mdata->nctl, GFP_KERNEL);
	if (!ctl_offsets) {
		pr_err("no more mem for ctl offsets\n");
		return -ENOMEM;
	}

	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-ctl-off",
		ctl_offsets, mdata->nctl);
	if (rc)
		goto parse_done;

	rc = mdss_mdp_ctl_addr_setup(mdata, ctl_offsets, mdata->nctl);
	if (rc)
		goto parse_done;

parse_done:
	kfree(ctl_offsets);

	return rc;
}

/* Parse video interface (INTF) block offsets and set them up. */
static int mdss_mdp_parse_dt_video_intf(struct platform_device *pdev)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
	u32 count;
	u32 *offsets;
	int rc;

	count = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-intf-off");
	if (count == 0)
		return -EINVAL;

	offsets = kzalloc(sizeof(u32) * count, GFP_KERNEL);
	if (!offsets) {
		pr_err("no mem assigned for video intf\n");
		return -ENOMEM;
	}

	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-intf-off",
			offsets, count);
	if (rc)
		goto parse_fail;

	rc = mdss_mdp_video_addr_setup(mdata, offsets, count);
	if (rc)
		pr_err("unable to setup video interfaces\n");
parse_fail:
	kfree(offsets);

	return rc;
}

/*
 * Apply a DT table of fixed SMP memory-block assignments to pipes.
 * The data is a sequence of (count, mmb...) groups, one group per pipe;
 * fixed blocks must not overlap between pipes.
 */
static int mdss_mdp_update_smp_map(struct platform_device *pdev,
		const u32 *data, int len, int pipe_cnt,
		struct mdss_mdp_pipe *pipes)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
	int i, j, k;
	u32 cnt, mmb;

	len /=
sizeof(u32); for (i = 0, k = 0; i < len; k++) { struct mdss_mdp_pipe *pipe = NULL; if (k >= pipe_cnt) { pr_err("invalid fixed mmbs\n"); return -EINVAL; } pipe = &pipes[k]; cnt = be32_to_cpu(data[i++]); if (cnt == 0) continue; for (j = 0; j < cnt; j++) { mmb = be32_to_cpu(data[i++]); if (mmb > mdata->smp_mb_cnt) { pr_err("overflow mmb:%d pipe:%d: max:%d\n", mmb, k, mdata->smp_mb_cnt); return -EINVAL; } set_bit(mmb, pipe->smp_map[0].fixed); } if (bitmap_intersects(pipe->smp_map[0].fixed, mdata->mmb_alloc_map, mdata->smp_mb_cnt)) { pr_err("overlapping fixed mmb map\n"); return -EINVAL; } bitmap_or(mdata->mmb_alloc_map, pipe->smp_map[0].fixed, mdata->mmb_alloc_map, mdata->smp_mb_cnt); } return 0; } static int mdss_mdp_parse_dt_smp(struct platform_device *pdev) { struct mdss_data_type *mdata = platform_get_drvdata(pdev); u32 num; u32 data[2]; int rc, len; const u32 *arr; num = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-smp-data"); /* * This property is optional for targets with fix pixel ram. Rest * must provide no. of smp and size of each block. */ if (!num) return 0; else if (num != 2) return -EINVAL; rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-smp-data", data, num); if (rc) return rc; rc = mdss_mdp_smp_setup(mdata, data[0], data[1]); if (rc) { pr_err("unable to setup smp data\n"); return rc; } rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-smp-mb-per-pipe", data); mdata->smp_mb_per_pipe = (!rc ? 
data[0] : 0); rc = 0; arr = of_get_property(pdev->dev.of_node, "qcom,mdss-pipe-rgb-fixed-mmb", &len); if (arr) { rc = mdss_mdp_update_smp_map(pdev, arr, len, mdata->nrgb_pipes, mdata->rgb_pipes); if (rc) pr_warn("unable to update smp map for RGB pipes\n"); } arr = of_get_property(pdev->dev.of_node, "qcom,mdss-pipe-vig-fixed-mmb", &len); if (arr) { rc = mdss_mdp_update_smp_map(pdev, arr, len, mdata->nvig_pipes, mdata->vig_pipes); if (rc) pr_warn("unable to update smp map for VIG pipes\n"); } return rc; } static void mdss_mdp_parse_dt_fudge_factors(struct platform_device *pdev, char *prop_name, struct mult_factor *ff) { int rc; u32 data[2] = {1, 1}; rc = mdss_mdp_parse_dt_handler(pdev, prop_name, data, 2); if (rc) { pr_debug("err reading %s\n", prop_name); } else { ff->numer = data[0]; ff->denom = data[1]; } } static int mdss_mdp_parse_dt_prefill(struct platform_device *pdev) { struct mdss_data_type *mdata = platform_get_drvdata(pdev); struct mdss_prefill_data *prefill = &mdata->prefill_data; int rc; rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-prefill-outstanding-buffer-bytes", &prefill->ot_bytes); if (rc) { pr_err("prefill outstanding buffer bytes not specified\n"); return rc; } rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-prefill-y-buffer-bytes", &prefill->y_buf_bytes); if (rc) { pr_err("prefill y buffer bytes not specified\n"); return rc; } rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-prefill-scaler-buffer-lines-bilinear", &prefill->y_scaler_lines_bilinear); if (rc) { pr_err("prefill scaler lines for bilinear not specified\n"); return rc; } rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-prefill-scaler-buffer-lines-caf", &prefill->y_scaler_lines_caf); if (rc) { pr_debug("prefill scaler lines for caf not specified\n"); return rc; } rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-prefill-post-scaler-buffer-pixels", &prefill->post_scaler_pixels); if (rc) { pr_err("prefill post scaler buffer pixels not specified\n"); 
return rc; } rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-prefill-pingpong-buffer-pixels", &prefill->pp_pixels); if (rc) { pr_err("prefill pingpong buffer lines not specified\n"); return rc; } rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-prefill-fbc-lines", &prefill->fbc_lines); if (rc) pr_debug("prefill FBC lines not specified\n"); return 0; } static void mdss_mdp_parse_vbif_qos(struct platform_device *pdev) { struct mdss_data_type *mdata = platform_get_drvdata(pdev); int rc; mdata->npriority_lvl = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-vbif-qos-rt-setting"); if (mdata->npriority_lvl == MDSS_VBIF_QOS_REMAP_ENTRIES) { mdata->vbif_rt_qos = kzalloc(sizeof(u32) * mdata->npriority_lvl, GFP_KERNEL); if (!mdata->vbif_rt_qos) { pr_err("no memory for real time qos_priority\n"); return; } rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-vbif-qos-rt-setting", mdata->vbif_rt_qos, mdata->npriority_lvl); if (rc) { pr_debug("rt setting not found\n"); return; } } else { mdata->npriority_lvl = 0; pr_debug("Invalid or no vbif qos rt setting\n"); return; } mdata->npriority_lvl = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-vbif-qos-nrt-setting"); if (mdata->npriority_lvl == MDSS_VBIF_QOS_REMAP_ENTRIES) { mdata->vbif_nrt_qos = kzalloc(sizeof(u32) * mdata->npriority_lvl, GFP_KERNEL); if (!mdata->vbif_nrt_qos) { pr_err("no memory for non real time qos_priority\n"); return; } rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-vbif-qos-nrt-setting", mdata->vbif_nrt_qos, mdata->npriority_lvl); if (rc) { pr_debug("nrt setting not found\n"); return; } } else { mdata->npriority_lvl = 0; pr_debug("Invalid or no vbif qos nrt seting\n"); } } static void mdss_mdp_parse_max_bw_array(const u32 *arr, struct mdss_max_bw_settings *max_bw_settings, int count) { int i; for (i = 0; i < count; i++) { max_bw_settings->mdss_max_bw_mode = be32_to_cpu(arr[i*2]); max_bw_settings->mdss_max_bw_val = be32_to_cpu(arr[(i*2)+1]); max_bw_settings++; } } static void 
mdss_mdp_parse_max_bandwidth(struct platform_device *pdev) { struct mdss_data_type *mdata = platform_get_drvdata(pdev); struct mdss_max_bw_settings *max_bw_settings; int max_bw_settings_cnt = 0; const u32 *max_bw; max_bw = of_get_property(pdev->dev.of_node, "qcom,max-bw-settings", &max_bw_settings_cnt); if (!max_bw || !max_bw_settings_cnt) { pr_debug("MDSS max bandwidth settings not found\n"); return; } max_bw_settings_cnt /= 2 * sizeof(u32); max_bw_settings = devm_kzalloc(&pdev->dev, sizeof(*max_bw_settings) * max_bw_settings_cnt, GFP_KERNEL); if (!max_bw_settings) { pr_err("Memory allocation failed for max_bw_settings\n"); return; } mdss_mdp_parse_max_bw_array(max_bw, max_bw_settings, max_bw_settings_cnt); mdata->max_bw_settings = max_bw_settings; mdata->max_bw_settings_cnt = max_bw_settings_cnt; } static void mdss_mdp_parse_per_pipe_bandwidth(struct platform_device *pdev) { struct mdss_data_type *mdata = platform_get_drvdata(pdev); struct mdss_max_bw_settings *max_bw_per_pipe_settings; int max_bw_settings_cnt = 0; const u32 *max_bw_settings; u32 max_bw, min_bw, threshold, i = 0; max_bw_settings = of_get_property(pdev->dev.of_node, "qcom,max-bandwidth-per-pipe-kbps", &max_bw_settings_cnt); if (!max_bw_settings || !max_bw_settings_cnt) { pr_debug("MDSS per pipe max bandwidth settings not found\n"); return; } /* Support targets where a common per pipe max bw is provided */ if ((max_bw_settings_cnt / sizeof(u32)) == 1) { mdata->max_bw_per_pipe = be32_to_cpu(max_bw_settings[0]); mdata->max_per_pipe_bw_settings = NULL; pr_debug("Common per pipe max bandwidth provided\n"); return; } max_bw_settings_cnt /= 2 * sizeof(u32); max_bw_per_pipe_settings = devm_kzalloc(&pdev->dev, sizeof(struct mdss_max_bw_settings) * max_bw_settings_cnt, GFP_KERNEL); if (!max_bw_per_pipe_settings) { pr_err("Memory allocation failed for max_bw_settings\n"); return; } mdss_mdp_parse_max_bw_array(max_bw_settings, max_bw_per_pipe_settings, max_bw_settings_cnt); mdata->max_per_pipe_bw_settings = 
max_bw_per_pipe_settings; mdata->mdss_per_pipe_bw_cnt = max_bw_settings_cnt; /* Calculate min and max allowed per pipe BW */ min_bw = mdata->max_bw_high; max_bw = 0; while (i < max_bw_settings_cnt) { threshold = mdata->max_per_pipe_bw_settings[i].mdss_max_bw_val; if (threshold > max_bw) max_bw = threshold; if (threshold < min_bw) min_bw = threshold; ++i; } mdata->max_bw_per_pipe = max_bw; mdata->min_bw_per_pipe = min_bw; } static int mdss_mdp_parse_dt_misc(struct platform_device *pdev) { struct mdss_data_type *mdata = platform_get_drvdata(pdev); u32 data, slave_pingpong_off; const char *wfd_data; int rc; struct property *prop = NULL; rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-rot-block-size", &data); mdata->rot_block_size = (!rc ? data : 128); rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-default-ot-rd-limit", &data); mdata->default_ot_rd_limit = (!rc ? data : 0); rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-default-ot-wr-limit", &data); mdata->default_ot_wr_limit = (!rc ? 
data : 0); mdata->has_non_scalar_rgb = of_property_read_bool(pdev->dev.of_node, "qcom,mdss-has-non-scalar-rgb"); mdata->has_bwc = of_property_read_bool(pdev->dev.of_node, "qcom,mdss-has-bwc"); mdata->has_decimation = of_property_read_bool(pdev->dev.of_node, "qcom,mdss-has-decimation"); mdata->has_no_lut_read = of_property_read_bool(pdev->dev.of_node, "qcom,mdss-no-lut-read"); mdata->needs_hist_vote = !(of_property_read_bool(pdev->dev.of_node, "qcom,mdss-no-hist-vote")); wfd_data = of_get_property(pdev->dev.of_node, "qcom,mdss-wfd-mode", NULL); if (wfd_data) { pr_debug("wfd mode: %s\n", wfd_data); if (!strcmp(wfd_data, "intf")) { mdata->wfd_mode = MDSS_MDP_WFD_INTERFACE; } else if (!strcmp(wfd_data, "shared")) { mdata->wfd_mode = MDSS_MDP_WFD_SHARED; } else if (!strcmp(wfd_data, "dedicated")) { mdata->wfd_mode = MDSS_MDP_WFD_DEDICATED; } else { pr_debug("wfd default mode: Shared\n"); mdata->wfd_mode = MDSS_MDP_WFD_SHARED; } } else { pr_warn("wfd mode not configured. Set to default: Shared\n"); mdata->wfd_mode = MDSS_MDP_WFD_SHARED; } mdata->has_src_split = of_property_read_bool(pdev->dev.of_node, "qcom,mdss-has-source-split"); mdata->has_fixed_qos_arbiter_enabled = of_property_read_bool(pdev->dev.of_node, "qcom,mdss-has-fixed-qos-arbiter-enabled"); mdata->idle_pc_enabled = of_property_read_bool(pdev->dev.of_node, "qcom,mdss-idle-power-collapse-enabled"); prop = of_find_property(pdev->dev.of_node, "batfet-supply", NULL); mdata->batfet_required = prop ? 
true : false; mdata->en_svs_high = of_property_read_bool(pdev->dev.of_node, "qcom,mdss-en-svs-high"); if (!mdata->en_svs_high) pr_debug("%s: svs_high is not enabled\n", __func__); rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-highest-bank-bit", &(mdata->highest_bank_bit)); if (rc) pr_debug("Could not read optional property: highest bank bit\n"); mdata->has_pingpong_split = of_property_read_bool(pdev->dev.of_node, "qcom,mdss-has-pingpong-split"); if (mdata->has_pingpong_split) { rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-slave-pingpong-off", &slave_pingpong_off); if (rc) { pr_err("Error in device tree: slave pingpong offset\n"); return rc; } mdata->slave_pingpong_base = mdata->mdss_io.base + slave_pingpong_off; rc = mdss_mdp_parse_dt_ppb_off(pdev); if (rc) { pr_err("Error in device tree: ppb offset not configured\n"); return rc; } } /* * 2x factor on AB because bus driver will divide by 2 * due to 2x ports to BIMC */ mdata->ab_factor.numer = 2; mdata->ab_factor.denom = 1; mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-ab-factor", &mdata->ab_factor); /* * 1.2 factor on ib as default value. This value is * experimentally determined and should be tuned in device * tree. */ mdata->ib_factor.numer = 6; mdata->ib_factor.denom = 5; mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-ib-factor", &mdata->ib_factor); /* * Set overlap ib value equal to ib by default. This value can * be tuned in device tree to be different from ib. * This factor apply when the max bandwidth per pipe * is the overlap BW. 
*/ mdata->ib_factor_overlap.numer = mdata->ib_factor.numer; mdata->ib_factor_overlap.denom = mdata->ib_factor.denom; mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-ib-factor-overlap", &mdata->ib_factor_overlap); mdata->clk_factor.numer = 1; mdata->clk_factor.denom = 1; mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-clk-factor", &mdata->clk_factor); rc = of_property_read_u32(pdev->dev.of_node, "qcom,max-bandwidth-low-kbps", &mdata->max_bw_low); if (rc) pr_debug("max bandwidth (low) property not specified\n"); rc = of_property_read_u32(pdev->dev.of_node, "qcom,max-bandwidth-high-kbps", &mdata->max_bw_high); if (rc) pr_debug("max bandwidth (high) property not specified\n"); mdss_mdp_parse_per_pipe_bandwidth(pdev); mdss_mdp_parse_max_bandwidth(pdev); mdata->nclk_lvl = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-clk-levels"); if (mdata->nclk_lvl) { mdata->clock_levels = kzalloc(sizeof(u32) * mdata->nclk_lvl, GFP_KERNEL); if (!mdata->clock_levels) { pr_err("no mem assigned for mdata clock_levels\n"); return -ENOMEM; } rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-clk-levels", mdata->clock_levels, mdata->nclk_lvl); if (rc) pr_debug("clock levels not found\n"); } mdss_mdp_parse_vbif_qos(pdev); mdata->traffic_shaper_en = of_property_read_bool(pdev->dev.of_node, "qcom,mdss-traffic-shaper-enabled"); mdata->has_rot_dwnscale = of_property_read_bool(pdev->dev.of_node, "qcom,mdss-has-rotator-downscale"); rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-dram-channels", &mdata->bus_channels); if (rc) pr_debug("number of channels property not specified\n"); rc = of_property_read_u32(pdev->dev.of_node, "qcom,max-pipe-width", &mdata->max_pipe_width); if (rc) { pr_debug("max pipe width not specified. 
Using default value\n"); mdata->max_pipe_width = DEFAULT_MDP_PIPE_WIDTH; } return 0; } static int mdss_mdp_parse_dt_ad_cfg(struct platform_device *pdev) { struct mdss_data_type *mdata = platform_get_drvdata(pdev); u32 *ad_offsets = NULL; int rc; mdata->nad_cfgs = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-ad-off"); if (mdata->nad_cfgs == 0) { mdata->ad_cfgs = NULL; return 0; } if (mdata->nad_cfgs > mdata->nmixers_intf) return -EINVAL; mdata->has_wb_ad = of_property_read_bool(pdev->dev.of_node, "qcom,mdss-has-wb-ad"); ad_offsets = kzalloc(sizeof(u32) * mdata->nad_cfgs, GFP_KERNEL); if (!ad_offsets) { pr_err("no mem assigned: kzalloc fail\n"); return -ENOMEM; } rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-ad-off", ad_offsets, mdata->nad_cfgs); if (rc) goto parse_done; rc = mdss_mdp_ad_addr_setup(mdata, ad_offsets); if (rc) pr_err("unable to setup assertive display\n"); parse_done: kfree(ad_offsets); return rc; } static int mdss_mdp_parse_dt_ppb_off(struct platform_device *pdev) { struct mdss_data_type *mdata = platform_get_drvdata(pdev); u32 len, index; const u32 *arr; arr = of_get_property(pdev->dev.of_node, "qcom,mdss-ppb-ctl-off", &len); if (arr) { mdata->nppb_ctl = len / sizeof(u32); mdata->ppb_ctl = devm_kzalloc(&mdata->pdev->dev, sizeof(u32) * mdata->nppb_ctl, GFP_KERNEL); if (mdata->ppb_ctl == NULL) return -ENOMEM; for (index = 0; index < mdata->nppb_ctl; index++) mdata->ppb_ctl[index] = be32_to_cpu(arr[index]); } arr = of_get_property(pdev->dev.of_node, "qcom,mdss-ppb-cfg-off", &len); if (arr) { mdata->nppb_cfg = len / sizeof(u32); mdata->ppb_cfg = devm_kzalloc(&mdata->pdev->dev, sizeof(u32) * mdata->nppb_cfg, GFP_KERNEL); if (mdata->ppb_cfg == NULL) return -ENOMEM; for (index = 0; index < mdata->nppb_cfg; index++) mdata->ppb_cfg[index] = be32_to_cpu(arr[index]); } return 0; } #ifdef CONFIG_MSM_BUS_SCALING static int mdss_mdp_parse_dt_bus_scale(struct platform_device *pdev) { int rc, paths; struct device_node *node; struct mdss_data_type *mdata = 
platform_get_drvdata(pdev); rc = of_property_read_u32(pdev->dev.of_node, "qcom,msm-bus,num-paths", &paths); if (rc) { pr_err("Error. qcom,msm-bus,num-paths prop not found.rc=%d\n", rc); return rc; } mdss_res->axi_port_cnt = paths; rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-num-nrt-paths", &mdata->nrt_axi_port_cnt); if (rc && mdata->has_fixed_qos_arbiter_enabled) { pr_err("Error. qcom,mdss-num-nrt-paths prop not found.rc=%d\n", rc); return rc; } else { rc = 0; } mdata->bus_scale_table = msm_bus_cl_get_pdata(pdev); if (IS_ERR_OR_NULL(mdata->bus_scale_table)) { rc = PTR_ERR(mdata->bus_scale_table); if (!rc) rc = -EINVAL; pr_err("msm_bus_cl_get_pdata failed. rc=%d\n", rc); mdata->bus_scale_table = NULL; return rc; } /* * if mdss-reg-bus is not found then default table is picked * hence below code wont return error. */ node = of_get_child_by_name(pdev->dev.of_node, "qcom,mdss-reg-bus"); if (node) { mdata->reg_bus_scale_table = msm_bus_pdata_from_node(pdev, node); if (IS_ERR_OR_NULL(mdata->reg_bus_scale_table)) { rc = PTR_ERR(mdata->reg_bus_scale_table); if (!rc) pr_err("bus_pdata reg_bus failed rc=%d\n", rc); rc = 0; mdata->reg_bus_scale_table = NULL; } } else { rc = 0; mdata->reg_bus_scale_table = NULL; pr_debug("mdss-reg-bus not found\n"); } node = of_get_child_by_name(pdev->dev.of_node, "qcom,mdss-hw-rt-bus"); if (node) { mdata->hw_rt_bus_scale_table = msm_bus_pdata_from_node(pdev, node); if (IS_ERR_OR_NULL(mdata->hw_rt_bus_scale_table)) { rc = PTR_ERR(mdata->hw_rt_bus_scale_table); if (!rc) pr_err("hw_rt_bus_scale failed rc=%d\n", rc); rc = 0; mdata->hw_rt_bus_scale_table = NULL; } } else { rc = 0; mdata->hw_rt_bus_scale_table = NULL; pr_debug("mdss-hw-rt-bus not found\n"); } return rc; } #else static int mdss_mdp_parse_dt_bus_scale(struct platform_device *pdev) { return 0; } #endif static int mdss_mdp_parse_dt_handler(struct platform_device *pdev, char *prop_name, u32 *offsets, int len) { int rc; rc = of_property_read_u32_array(pdev->dev.of_node, 
prop_name, offsets, len); if (rc) { pr_err("Error from prop %s : u32 array read\n", prop_name); return -EINVAL; } return 0; } static int mdss_mdp_parse_dt_prop_len(struct platform_device *pdev, char *prop_name) { int len = 0; of_find_property(pdev->dev.of_node, prop_name, &len); if (len < 1) { pr_debug("prop %s : doesn't exist in device tree\n", prop_name); return 0; } len = len/sizeof(u32); return len; } struct mdss_data_type *mdss_mdp_get_mdata(void) { return mdss_res; } void mdss_mdp_batfet_ctrl(struct mdss_data_type *mdata, int enable) { int ret; if (!mdata->batfet_required) return; if (!mdata->batfet) { if (enable) { mdata->batfet = devm_regulator_get(&mdata->pdev->dev, "batfet"); if (IS_ERR_OR_NULL(mdata->batfet)) { pr_debug("unable to get batfet reg. rc=%d\n", PTR_RET(mdata->batfet)); mdata->batfet = NULL; return; } } else { pr_debug("Batfet regulator disable w/o enable\n"); return; } } if (enable) { ret = regulator_enable(mdata->batfet); if (ret) pr_err("regulator_enable failed\n"); } else { regulator_disable(mdata->batfet); } } /** * mdss_is_ready() - checks if mdss is probed and ready * * Checks if mdss resources have been initialized * * returns true if mdss is ready, else returns false */ bool mdss_is_ready(void) { return mdss_mdp_get_mdata() ? true : false; } EXPORT_SYMBOL(mdss_mdp_get_mdata); /** * mdss_panel_intf_type() - checks if a given intf type is primary * @intf_val: panel interface type of the individual controller * * Individual controller queries with MDP to check if it is * configured as the primary interface. * * returns a pointer to the configured structure mdss_panel_cfg * to the controller that's configured as the primary panel interface. * returns NULL on error or if @intf_val is not the configured * controller. 
*/ struct mdss_panel_cfg *mdss_panel_intf_type(int intf_val) { if (!mdss_res || !mdss_res->pan_cfg.init_done) return ERR_PTR(-EPROBE_DEFER); if (mdss_res->pan_cfg.pan_intf == intf_val) return &mdss_res->pan_cfg; else return NULL; } EXPORT_SYMBOL(mdss_panel_intf_type); struct irq_info *mdss_intr_line() { return mdss_mdp_hw.irq_info; } EXPORT_SYMBOL(mdss_intr_line); int mdss_mdp_wait_for_xin_halt(u32 xin_id, bool is_vbif_nrt) { void __iomem *vbif_base; u32 status; struct mdss_data_type *mdata = mdss_mdp_get_mdata(); u32 idle_mask = BIT(xin_id); int rc; vbif_base = is_vbif_nrt ? mdata->vbif_nrt_io.base : mdata->vbif_io.base; rc = readl_poll_timeout(vbif_base + MMSS_VBIF_XIN_HALT_CTRL1, status, (status & idle_mask), 1000, XIN_HALT_TIMEOUT_US); if (rc == -ETIMEDOUT) { pr_err("VBIF client %d not halting. TIMEDOUT.\n", xin_id); MDSS_XLOG_TOUT_HANDLER("mdp", "vbif", "vbif_nrt", "dbg_bus", "vbif_dbg_bus", "panic"); } else { pr_debug("VBIF client %d is halted\n", xin_id); } return rc; } /** * force_on_xin_clk() - enable/disable the force-on for the pipe clock * @bit_off: offset of the bit to enable/disable the force-on. * @reg_off: register offset for the clock control. * @enable: boolean to indicate if the force-on of the clock needs to be * enabled or disabled. * * This function returns: * true - if the clock is forced-on by this function * false - if the clock was already forced on * It is the caller responsibility to check if this function is forcing * the clock on; if so, it will need to remove the force of the clock, * otherwise it should avoid to remove the force-on. * Clocks must be on when calling this function. 
*/ bool force_on_xin_clk(u32 bit_off, u32 clk_ctl_reg_off, bool enable) { u32 val; u32 force_on_mask; struct mdss_data_type *mdata = mdss_mdp_get_mdata(); bool clk_forced_on = false; force_on_mask = BIT(bit_off); val = readl_relaxed(mdata->mdp_base + clk_ctl_reg_off); clk_forced_on = !(force_on_mask & val); if (true == enable) val |= force_on_mask; else val &= ~force_on_mask; writel_relaxed(val, mdata->mdp_base + clk_ctl_reg_off); return clk_forced_on; } static void apply_dynamic_ot_limit(u32 *ot_lim, struct mdss_mdp_set_ot_params *params) { struct mdss_data_type *mdata = mdss_mdp_get_mdata(); u32 res, read_vbif_ot; u32 rot_ot = 4; if (false == test_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map)) return; /* Dynamic OT setting done only for rotator and WFD */ if (!((params->is_rot && params->is_yuv) || params->is_wb)) return; res = params->width * params->height; pr_debug("w:%d h:%d rot:%d yuv:%d wb:%d res:%d fps:%d\n", params->width, params->height, params->is_rot, params->is_yuv, params->is_wb, res, params->frame_rate); switch (mdata->mdp_rev) { case MDSS_MDP_HW_REV_114: /* * MDP rev is same for msm8937 and msm8940, but rotator OT * recommendations are different. Setting it based on AXI OT. */ read_vbif_ot = MDSS_VBIF_READ(mdata, MMSS_VBIF_OUT_RD_LIM_CONF0, false); rot_ot = (read_vbif_ot == 0x10) ? 
4 : 8; case MDSS_MDP_HW_REV_115: case MDSS_MDP_HW_REV_116: if ((res <= RES_1080p) && (params->frame_rate <= 30)) *ot_lim = 2; else if (params->is_rot && params->is_yuv) *ot_lim = rot_ot; else *ot_lim = 6; break; default: if (res <= RES_1080p) { *ot_lim = 2; } else if (res <= RES_UHD) { if (params->is_rot && params->is_yuv) *ot_lim = 8; else *ot_lim = 16; } break; } } static u32 get_ot_limit(u32 reg_off, u32 bit_off, struct mdss_mdp_set_ot_params *params) { struct mdss_data_type *mdata = mdss_mdp_get_mdata(); u32 ot_lim = 0; u32 is_vbif_nrt, val; if (mdata->default_ot_wr_limit && (params->reg_off_vbif_lim_conf == MMSS_VBIF_WR_LIM_CONF)) ot_lim = mdata->default_ot_wr_limit; else if (mdata->default_ot_rd_limit && (params->reg_off_vbif_lim_conf == MMSS_VBIF_RD_LIM_CONF)) ot_lim = mdata->default_ot_rd_limit; /* * If default ot is not set from dt, * then do not configure it. */ if (ot_lim == 0) goto exit; /* Modify the limits if the target and the use case requires it */ apply_dynamic_ot_limit(&ot_lim, params); is_vbif_nrt = params->is_vbif_nrt; val = MDSS_VBIF_READ(mdata, reg_off, is_vbif_nrt); val &= (0xFF << bit_off); val = val >> bit_off; if (val == ot_lim) ot_lim = 0; exit: pr_debug("ot_lim=%d\n", ot_lim); return ot_lim; } void mdss_mdp_set_ot_limit(struct mdss_mdp_set_ot_params *params) { struct mdss_data_type *mdata = mdss_mdp_get_mdata(); u32 ot_lim; u32 reg_off_vbif_lim_conf = (params->xin_id / 4) * 4 + params->reg_off_vbif_lim_conf; u32 bit_off_vbif_lim_conf = (params->xin_id % 4) * 8; bool is_vbif_nrt = params->is_vbif_nrt; u32 reg_val; bool forced_on; ot_lim = get_ot_limit( reg_off_vbif_lim_conf, bit_off_vbif_lim_conf, params) & 0xFF; if (ot_lim == 0) goto exit; trace_mdp_perf_set_ot(params->num, params->xin_id, ot_lim, is_vbif_nrt); mutex_lock(&mdata->reg_lock); forced_on = force_on_xin_clk(params->bit_off_mdp_clk_ctrl, params->reg_off_mdp_clk_ctrl, true); reg_val = MDSS_VBIF_READ(mdata, reg_off_vbif_lim_conf, is_vbif_nrt); reg_val &= ~(0xFF << 
bit_off_vbif_lim_conf); reg_val |= (ot_lim) << bit_off_vbif_lim_conf; MDSS_VBIF_WRITE(mdata, reg_off_vbif_lim_conf, reg_val, is_vbif_nrt); reg_val = MDSS_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0, is_vbif_nrt); MDSS_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0, reg_val | BIT(params->xin_id), is_vbif_nrt); mutex_unlock(&mdata->reg_lock); mdss_mdp_wait_for_xin_halt(params->xin_id, is_vbif_nrt); mutex_lock(&mdata->reg_lock); reg_val = MDSS_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0, is_vbif_nrt); MDSS_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0, reg_val & ~BIT(params->xin_id), is_vbif_nrt); if (forced_on) force_on_xin_clk(params->bit_off_mdp_clk_ctrl, params->reg_off_mdp_clk_ctrl, false); mutex_unlock(&mdata->reg_lock); exit: return; } #define RPM_MISC_REQ_TYPE 0x6373696d #define RPM_MISC_REQ_SVS_PLUS_KEY 0x2B737673 static void mdss_mdp_config_cx_voltage(struct mdss_data_type *mdata, int enable) { int ret = 0; static struct msm_rpm_kvp rpm_kvp; static uint8_t svs_en; if (!mdata->en_svs_high) return; if (!rpm_kvp.key) { rpm_kvp.key = RPM_MISC_REQ_SVS_PLUS_KEY; rpm_kvp.length = sizeof(unsigned); pr_debug("%s: Initialized rpm_kvp structure\n", __func__); } if (enable) { svs_en = 1; rpm_kvp.data = &svs_en; pr_debug("%s: voting for svs high\n", __func__); ret = msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET, RPM_MISC_REQ_TYPE, 0, &rpm_kvp, 1); if (ret) pr_err("vote for active_set svs high failed: %d\n", ret); ret = msm_rpm_send_message(MSM_RPM_CTX_SLEEP_SET, RPM_MISC_REQ_TYPE, 0, &rpm_kvp, 1); if (ret) pr_err("vote for sleep_set svs high failed: %d\n", ret); } else { svs_en = 0; rpm_kvp.data = &svs_en; pr_debug("%s: Removing vote for svs high\n", __func__); ret = msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET, RPM_MISC_REQ_TYPE, 0, &rpm_kvp, 1); if (ret) pr_err("Remove vote:active_set svs high failed: %d\n", ret); ret = msm_rpm_send_message(MSM_RPM_CTX_SLEEP_SET, RPM_MISC_REQ_TYPE, 0, &rpm_kvp, 1); if (ret) pr_err("Remove vote:sleep_set svs high failed: %d\n", ret); } } static int 
mdss_mdp_cx_ctrl(struct mdss_data_type *mdata, int enable) { int rc = 0; if (!mdata->vdd_cx) return rc; if (enable) { rc = regulator_set_voltage( mdata->vdd_cx, RPM_REGULATOR_CORNER_SVS_SOC, RPM_REGULATOR_CORNER_SUPER_TURBO); if (rc < 0) goto vreg_set_voltage_fail; pr_debug("Enabling CX power rail\n"); rc = regulator_enable(mdata->vdd_cx); if (rc) { pr_err("Failed to enable regulator.\n"); return rc; } } else { pr_debug("Disabling CX power rail\n"); rc = regulator_disable(mdata->vdd_cx); if (rc) { pr_err("Failed to disable regulator.\n"); return rc; } rc = regulator_set_voltage( mdata->vdd_cx, RPM_REGULATOR_CORNER_NONE, RPM_REGULATOR_CORNER_SUPER_TURBO); if (rc < 0) goto vreg_set_voltage_fail; } return rc; vreg_set_voltage_fail: pr_err("Set vltg fail\n"); return rc; } /** * mdss_mdp_footswitch_ctrl() - Disable/enable MDSS GDSC and CX/Batfet rails * @mdata: MDP private data * @on: 1 to turn on footswitch, 0 to turn off footswitch * * When no active references to the MDP device node and it's child nodes are * held, MDSS GDSC can be turned off. However, any any panels are still * active (but likely in an idle state), the vote for the CX and the batfet * rails should not be released. 
*/ static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on) { int ret; int active_cnt = 0; if (!mdata->fs) return; MDSS_XLOG(on, mdata->fs_ena, mdata->idle_pc, mdata->en_svs_high, atomic_read(&mdata->active_intf_cnt)); if (on) { if (!mdata->fs_ena) { pr_debug("Enable MDP FS\n"); if (mdata->venus) { ret = regulator_enable(mdata->venus); if (ret) pr_err("venus failed to enable\n"); } ret = regulator_enable(mdata->fs); if (ret) pr_warn("Footswitch failed to enable\n"); if (!mdata->idle_pc) { mdss_mdp_cx_ctrl(mdata, true); mdss_mdp_batfet_ctrl(mdata, true); } } if (mdata->en_svs_high) mdss_mdp_config_cx_voltage(mdata, true); mdata->fs_ena = true; } else { if (mdata->fs_ena) { pr_debug("Disable MDP FS\n"); active_cnt = atomic_read(&mdata->active_intf_cnt); if (active_cnt != 0) { /* * Turning off GDSC while overlays are still * active. */ mdata->idle_pc = true; pr_debug("idle pc. active overlays=%d\n", active_cnt); mdss_mdp_memory_retention_enter(); } else { mdss_mdp_cx_ctrl(mdata, false); mdss_mdp_batfet_ctrl(mdata, false); } if (mdata->en_svs_high) mdss_mdp_config_cx_voltage(mdata, false); regulator_disable(mdata->fs); if (mdata->venus) regulator_disable(mdata->venus); } mdata->fs_ena = false; } } int mdss_mdp_secure_display_ctrl(struct mdss_data_type *mdata, unsigned int enable) { struct sd_ctrl_req { unsigned int enable; } __attribute__ ((__packed__)) request; unsigned int resp = -1; int ret = 0; struct scm_desc desc; if ((enable && (mdss_get_sd_client_cnt() > 0)) || (!enable && (mdss_get_sd_client_cnt() > 1))) { mdss_update_sd_client(mdata, enable); return ret; } desc.args[0] = request.enable = enable; desc.arginfo = SCM_ARGS(1); if (!is_scm_armv8()) { ret = scm_call(SCM_SVC_MP, MEM_PROTECT_SD_CTRL, &request, sizeof(request), &resp, sizeof(resp)); } else { ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP, mem_protect_sd_ctrl_id), &desc); resp = desc.ret[0]; } pr_debug("scm_call MEM_PROTECT_SD_CTRL(%u): ret=%d, resp=%x", enable, ret, resp); if (ret) return 
ret; mdss_update_sd_client(mdata, enable); return resp; } static inline int mdss_mdp_suspend_sub(struct mdss_data_type *mdata) { mdata->suspend_fs_ena = mdata->fs_ena; mdss_mdp_footswitch_ctrl(mdata, false); pr_debug("suspend done fs=%d\n", mdata->suspend_fs_ena); return 0; } static inline int mdss_mdp_resume_sub(struct mdss_data_type *mdata) { if (mdata->suspend_fs_ena) mdss_mdp_footswitch_ctrl(mdata, true); pr_debug("resume done fs=%d\n", mdata->suspend_fs_ena); return 0; } #ifdef CONFIG_PM_SLEEP static int mdss_mdp_pm_suspend(struct device *dev) { struct mdss_data_type *mdata; mdata = dev_get_drvdata(dev); if (!mdata) return -ENODEV; dev_dbg(dev, "display pm suspend\n"); return mdss_mdp_suspend_sub(mdata); } static int mdss_mdp_pm_resume(struct device *dev) { struct mdss_data_type *mdata; mdata = dev_get_drvdata(dev); if (!mdata) return -ENODEV; dev_dbg(dev, "display pm resume\n"); /* * It is possible that the runtime status of the mdp device may * have been active when the system was suspended. Reset the runtime * status to suspended state after a complete system resume. 
*/ pm_runtime_disable(dev); pm_runtime_set_suspended(dev); pm_runtime_enable(dev); return mdss_mdp_resume_sub(mdata); } #endif #if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP) static int mdss_mdp_suspend(struct platform_device *pdev, pm_message_t state) { struct mdss_data_type *mdata = platform_get_drvdata(pdev); if (!mdata) return -ENODEV; dev_dbg(&pdev->dev, "display suspend\n"); return mdss_mdp_suspend_sub(mdata); } static int mdss_mdp_resume(struct platform_device *pdev) { struct mdss_data_type *mdata = platform_get_drvdata(pdev); if (!mdata) return -ENODEV; dev_dbg(&pdev->dev, "display resume\n"); return mdss_mdp_resume_sub(mdata); } #else #define mdss_mdp_suspend NULL #define mdss_mdp_resume NULL #endif #ifdef CONFIG_PM_RUNTIME static int mdss_mdp_runtime_resume(struct device *dev) { struct mdss_data_type *mdata = dev_get_drvdata(dev); bool device_on = true; if (!mdata) return -ENODEV; dev_dbg(dev, "pm_runtime: resuming. active overlay cnt=%d\n", atomic_read(&mdata->active_intf_cnt)); /* do not resume panels when coming out of idle power collapse */ if (!mdata->idle_pc) device_for_each_child(dev, &device_on, mdss_fb_suspres_panel); mdss_mdp_footswitch_ctrl(mdata, true); return 0; } static int mdss_mdp_runtime_idle(struct device *dev) { struct mdss_data_type *mdata = dev_get_drvdata(dev); if (!mdata) return -ENODEV; dev_dbg(dev, "pm_runtime: idling...\n"); return 0; } static int mdss_mdp_runtime_suspend(struct device *dev) { struct mdss_data_type *mdata = dev_get_drvdata(dev); bool device_on = false; if (!mdata) return -ENODEV; dev_dbg(dev, "pm_runtime: suspending. 
active overlay cnt=%d\n", atomic_read(&mdata->active_intf_cnt)); if (mdata->clk_ena) { pr_err("MDP suspend failed\n"); return -EBUSY; } mdss_mdp_footswitch_ctrl(mdata, false); /* do not suspend panels when going in to idle power collapse */ if (!mdata->idle_pc) device_for_each_child(dev, &device_on, mdss_fb_suspres_panel); return 0; } #endif static const struct dev_pm_ops mdss_mdp_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(mdss_mdp_pm_suspend, mdss_mdp_pm_resume) SET_RUNTIME_PM_OPS(mdss_mdp_runtime_suspend, mdss_mdp_runtime_resume, mdss_mdp_runtime_idle) }; static int mdss_mdp_remove(struct platform_device *pdev) { struct mdss_data_type *mdata = platform_get_drvdata(pdev); if (!mdata) return -ENODEV; pm_runtime_disable(&pdev->dev); mdss_mdp_pp_term(&pdev->dev); mdss_mdp_bus_scale_unregister(mdata); mdss_debugfs_remove(mdata); if (mdata->regulator_notif_register) regulator_unregister_notifier(mdata->fs, &(mdata->gdsc_cb)); return 0; } static const struct of_device_id mdss_mdp_dt_match[] = { { .compatible = "qcom,mdss_mdp",}, {} }; MODULE_DEVICE_TABLE(of, mdss_mdp_dt_match); static struct platform_driver mdss_mdp_driver = { .probe = mdss_mdp_probe, .remove = mdss_mdp_remove, .suspend = mdss_mdp_suspend, .resume = mdss_mdp_resume, .shutdown = NULL, .driver = { /* * Driver name must match the device name added in * platform.c. 
*/ .name = "mdp", .of_match_table = mdss_mdp_dt_match, .pm = &mdss_mdp_pm_ops, }, }; static int mdss_mdp_register_driver(void) { return platform_driver_register(&mdss_mdp_driver); } static int __init mdss_mdp_driver_init(void) { int ret; ret = mdss_mdp_register_driver(); if (ret) { pr_err("mdp_register_driver() failed!\n"); return ret; } return 0; } module_param_string(panel, mdss_mdp_panel, MDSS_MAX_PANEL_LEN, 0); MODULE_PARM_DESC(panel, "panel=<lk_cfg>:<pan_intf>:<pan_intf_cfg>:<panel_topology_cfg> " "where <lk_cfg> is "1"-lk/gcdb config or "0" non-lk/non-gcdb " "config; <pan_intf> is dsi:<ctrl_id> or hdmi or edp " "<pan_intf_cfg> is panel interface specific string " "Ex: This string is panel's device node name from DT " "for DSI interface " "hdmi/edp interface does not use this string " "<panel_topology_cfg> is an optional string. Currently it is " "only valid for DSI panels. In dual-DSI case, it needs to be" "used on both panels or none. When used, format is config%d " "where %d is one of the configuration found in device node of " "panel selected by <pan_intf_cfg>"); module_init(mdss_mdp_driver_init);
RenderBroken/OP3-kernel
drivers/video/msm/mdss/mdss_mdp.c
C
gpl-2.0
135,578
/* ---------------------------------------------------------------------------- NOTE: If you edit this file, you should make sure that the CSS rules for buttons in the following files are updated. * jquery-ui-dialog.css * editor.css WordPress-style Buttons ======================= Create a button by adding the `.button` class to an element. For backward compatibility, we support several other classes (such as `.button-secondary`), but these will *not* work with the stackable classes described below. Button Styles ------------- To display a primary button style, add the `.button-primary` class to a button. Button Sizes ------------ Adjust a button's size by adding the `.button-large` or `.button-small` class. Button States ------------- Lock the state of a button by adding the name of the pseudoclass as an actual class (e.g. `.hover` for `:hover`). TABLE OF CONTENTS: ------------------ 1.0 - Button Layouts 2.0 - Default Button Style 3.0 - Primary Button Style 4.0 - Button Groups 5.0 - Responsive Button Styles ---------------------------------------------------------------------------- */ /* ---------------------------------------------------------------------------- 1.0 - Button Layouts ---------------------------------------------------------------------------- */ .wp-core-ui .button, .wp-core-ui .button-primary, .wp-core-ui .button-secondary { display: inline-block; text-decoration: none; font-size: 13px; line-height: 26px; height: 28px; margin: 0; padding: 0 10px 1px; cursor: pointer; border-width: 1px; border-style: solid; -webkit-appearance: none; border-radius: 3px; white-space: nowrap; box-sizing: border-box; } /* Remove the dotted border on :focus and the extra padding in Firefox */ .wp-core-ui button::-moz-focus-inner, .wp-core-ui input[type="reset"]::-moz-focus-inner, .wp-core-ui input[type="button"]::-moz-focus-inner, .wp-core-ui input[type="submit"]::-moz-focus-inner { border-width: 0; border-style: none; padding: 0; } .wp-core-ui .button.button-large, 
.wp-core-ui .button-group.button-large .button { height: 30px; line-height: 28px; padding: 0 12px 2px; } .wp-core-ui .button.button-small, .wp-core-ui .button-group.button-small .button { height: 24px; line-height: 22px; padding: 0 8px 1px; font-size: 11px; } .wp-core-ui .button.button-hero, .wp-core-ui .button-group.button-hero .button { font-size: 14px; height: 46px; line-height: 44px; padding: 0 36px; } .wp-core-ui .button:active, .wp-core-ui .button:focus { outline: none; } .wp-core-ui .button.hidden { display: none; } /* Style Reset buttons as simple text links */ .wp-core-ui input[type="reset"], .wp-core-ui input[type="reset"]:hover, .wp-core-ui input[type="reset"]:active, .wp-core-ui input[type="reset"]:focus { background: none; border: none; box-shadow: none; padding: 0 2px 1px; width: auto; } /* ---------------------------------------------------------------------------- 2.0 - Default Button Style ---------------------------------------------------------------------------- */ .wp-core-ui .button, .wp-core-ui .button-secondary { color: #555; border-color: #cccccc; background: #f7f7f7; box-shadow: 0 1px 0 #cccccc; vertical-align: top; } .wp-core-ui p .button { vertical-align: baseline; } .wp-core-ui .button.hover, .wp-core-ui .button:hover, .wp-core-ui .button-secondary:hover, .wp-core-ui .button.focus, .wp-core-ui .button:focus, .wp-core-ui .button-secondary:focus { background: #fafafa; border-color: #999; color: #23282d; } .wp-core-ui .button.focus, .wp-core-ui .button:focus, .wp-core-ui .button-secondary:focus { border-color: #5b9dd9; box-shadow: 0 0 3px rgba( 0, 115, 170, .8 ); } .wp-core-ui .button.active, .wp-core-ui .button.active:hover, .wp-core-ui .button:active, .wp-core-ui .button-secondary:active { background: #eee; border-color: #999; box-shadow: inset 0 2px 5px -3px rgba( 0, 0, 0, 0.5 ); transform: translateY(1px); } .wp-core-ui .button.active:focus { border-color: #5b9dd9; box-shadow: inset 0 2px 5px -3px rgba( 0, 0, 0, 0.5 ), 0 0 3px rgba( 0, 
115, 170, .8 ); } .wp-core-ui .button[disabled], .wp-core-ui .button:disabled, .wp-core-ui .button.disabled, .wp-core-ui .button-secondary[disabled], .wp-core-ui .button-secondary:disabled, .wp-core-ui .button-secondary.disabled, .wp-core-ui .button-disabled { color: #a0a5aa !important; border-color: #ddd !important; background: #f7f7f7 !important; box-shadow: none !important; text-shadow: 0 1px 0 #fff !important; cursor: default; transform: none !important; } /* Buttons that look like links, for a cross of good semantics with the visual */ .wp-core-ui .button-link { margin: 0; padding: 0; box-shadow: none; border: 0; border-radius: 0; background: none; outline: none; cursor: pointer; text-align: left; /* Mimics the default link style in common.css */ color: #0073aa; text-decoration: underline; transition-property: border, background, color; transition-duration: .05s; transition-timing-function: ease-in-out; } .wp-core-ui .button-link:hover, .wp-core-ui .button-link:active { color: #00a0d2; } .wp-core-ui .button-link:focus { color: #124964; box-shadow: 0 0 0 1px #5b9dd9, 0 0 2px 1px rgba(30, 140, 190, .8); } .wp-core-ui .button-link-delete { color: #a00; } .wp-core-ui .button-link-delete:hover, .wp-core-ui .button-link-delete:focus { color: #dc3232; } .ie8 .wp-core-ui .button-link:focus { outline: #5b9dd9 solid 1px; } /* ---------------------------------------------------------------------------- 3.0 - Primary Button Style ---------------------------------------------------------------------------- */ .wp-core-ui .button-primary { background: #0085ba; border-color: #0073aa #006799 #006799; box-shadow: 0 1px 0 #006799; color: #fff; text-decoration: none; text-shadow: 0 -1px 1px #006799, 1px 0 1px #006799, 0 1px 1px #006799, -1px 0 1px #006799; } .wp-core-ui .button-primary.hover, .wp-core-ui .button-primary:hover, .wp-core-ui .button-primary.focus, .wp-core-ui .button-primary:focus { background: #008ec2; border-color: #006799; color: #fff; } .wp-core-ui 
.button-primary.focus, .wp-core-ui .button-primary:focus { box-shadow: 0 1px 0 #0073aa, 0 0 2px 1px #33b3db; } .wp-core-ui .button-primary.active, .wp-core-ui .button-primary.active:hover, .wp-core-ui .button-primary.active:focus, .wp-core-ui .button-primary:active { background: #0073aa; border-color: #006799; box-shadow: inset 0 2px 0 #006799; vertical-align: top; } .wp-core-ui .button-primary[disabled], .wp-core-ui .button-primary:disabled, .wp-core-ui .button-primary-disabled, .wp-core-ui .button-primary.disabled { color: #66c6e4 !important; background: #008ec2 !important; border-color: #007cb2 !important; box-shadow: none !important; text-shadow: 0 -1px 0 rgba( 0, 0, 0, 0.1 ) !important; cursor: default; } .wp-core-ui .button.button-primary.button-hero { box-shadow: 0 2px 0 #006799; } .wp-core-ui .button.button-primary.button-hero.active, .wp-core-ui .button.button-primary.button-hero.active:hover, .wp-core-ui .button.button-primary.button-hero.active:focus, .wp-core-ui .button.button-primary.button-hero:active { box-shadow: inset 0 3px 0 #006799; } /* ---------------------------------------------------------------------------- 4.0 - Button Groups ---------------------------------------------------------------------------- */ .wp-core-ui .button-group { position: relative; display: inline-block; white-space: nowrap; font-size: 0; vertical-align: middle; } .wp-core-ui .button-group > .button { display: inline-block; border-radius: 0; margin-right: -1px; z-index: 10; } .wp-core-ui .button-group > .button-primary { z-index: 100; } .wp-core-ui .button-group > .button:hover { z-index: 20; } .wp-core-ui .button-group > .button:first-child { border-radius: 3px 0 0 3px; } .wp-core-ui .button-group > .button:last-child { border-radius: 0 3px 3px 0; } .wp-core-ui .button-group > .button:focus { position: relative; z-index: 1; } /* ---------------------------------------------------------------------------- 5.0 - Responsive Button Styles 
---------------------------------------------------------------------------- */ @media screen and ( max-width: 782px ) { .wp-core-ui .button, .wp-core-ui .button.button-large, .wp-core-ui .button.button-small, input#publish, input#save-post, a.preview { padding: 6px 14px; line-height: normal; font-size: 14px; vertical-align: middle; height: auto; margin-bottom: 4px; } #media-upload.wp-core-ui .button { padding: 0 10px 1px; height: 24px; line-height: 22px; font-size: 13px; } .media-frame.mode-grid .bulk-select .button { margin-bottom: 0; } /* Publish Metabox Options */ .wp-core-ui .save-post-status.button { position: relative; margin: 0 14px 0 10px; /* 14px right margin to match all other buttons */ } /* Reset responsive styles in Press This, Customizer */ .wp-core-ui.wp-customizer .button { padding: 0 10px 1px; font-size: 13px; line-height: 26px; height: 28px; margin: 0; vertical-align: inherit; } .media-modal-content .media-toolbar-primary .media-button { margin-top: 10px; margin-left: 5px; } /* Reset responsive styles on Log in button on iframed login form */ .interim-login .button.button-large { height: 30px; line-height: 28px; padding: 0 12px 2px; } }
pcutler/stoneopen
wp-includes/css/buttons.css
CSS
gpl-2.0
9,409
/* * linux/mm/vmscan.c * * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * Swap reorganised 29.12.95, Stephen Tweedie. * kswapd added: 7.1.96 sct * Removed kswapd_ctl limits, and swap out as many pages as needed * to bring the system back to freepages.high: 2.4.97, Rik van Riel. * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com). * Multiqueue VM started 5.8.00, Rik van Riel. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/mm.h> #include <linux/module.h> #include <linux/gfp.h> #include <linux/kernel_stat.h> #include <linux/swap.h> #include <linux/pagemap.h> #include <linux/init.h> #include <linux/highmem.h> #include <linux/vmpressure.h> #include <linux/vmstat.h> #include <linux/file.h> #include <linux/writeback.h> #include <linux/blkdev.h> #include <linux/buffer_head.h> /* for try_to_release_page(), buffer_heads_over_limit */ #include <linux/mm_inline.h> #include <linux/backing-dev.h> #include <linux/rmap.h> #include <linux/topology.h> #include <linux/cpu.h> #include <linux/cpuset.h> #include <linux/compaction.h> #include <linux/notifier.h> #include <linux/rwsem.h> #include <linux/delay.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/memcontrol.h> #include <linux/delayacct.h> #include <linux/sysctl.h> #include <linux/oom.h> #include <linux/prefetch.h> #include <linux/printk.h> #include <linux/dax.h> #include <asm/tlbflush.h> #include <asm/div64.h> #include <linux/swapops.h> #include <linux/balloon_compaction.h> #include "internal.h" #define CREATE_TRACE_POINTS #include <trace/events/vmscan.h> struct scan_control { /* How many pages shrink_list() should reclaim */ unsigned long nr_to_reclaim; /* This context's GFP mask */ gfp_t gfp_mask; /* Allocation order */ int order; /* * Nodemask of nodes allowed by the caller. If NULL, all nodes * are scanned. */ nodemask_t *nodemask; /* * The memory cgroup that hit its limit and as a result is the * primary target of this reclaim invocation. 
*/ struct mem_cgroup *target_mem_cgroup; /* Scan (total_size >> priority) pages at once */ int priority; unsigned int may_writepage:1; /* Can mapped pages be reclaimed? */ unsigned int may_unmap:1; /* Can pages be swapped as part of reclaim? */ unsigned int may_swap:1; /* Can cgroups be reclaimed below their normal consumption range? */ unsigned int may_thrash:1; unsigned int hibernation_mode:1; /* One of the zones is ready for compaction */ unsigned int compaction_ready:1; /* Incremented by the number of inactive pages that were scanned */ unsigned long nr_scanned; /* Number of pages freed so far during a call to shrink_zones() */ unsigned long nr_reclaimed; }; #ifdef ARCH_HAS_PREFETCH #define prefetch_prev_lru_page(_page, _base, _field) \ do { \ if ((_page)->lru.prev != _base) { \ struct page *prev; \ \ prev = lru_to_page(&(_page->lru)); \ prefetch(&prev->_field); \ } \ } while (0) #else #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0) #endif #ifdef ARCH_HAS_PREFETCHW #define prefetchw_prev_lru_page(_page, _base, _field) \ do { \ if ((_page)->lru.prev != _base) { \ struct page *prev; \ \ prev = lru_to_page(&(_page->lru)); \ prefetchw(&prev->_field); \ } \ } while (0) #else #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0) #endif /* * From 0 .. 100. Higher means more swappy. */ int vm_swappiness = 60; /* * The total number of pages which are beyond the high watermark within all * zones. */ unsigned long vm_total_pages; static LIST_HEAD(shrinker_list); static DECLARE_RWSEM(shrinker_rwsem); #ifdef CONFIG_MEMCG static bool global_reclaim(struct scan_control *sc) { return !sc->target_mem_cgroup; } /** * sane_reclaim - is the usual dirty throttling mechanism operational? 
* @sc: scan_control in question * * The normal page dirty throttling mechanism in balance_dirty_pages() is * completely broken with the legacy memcg and direct stalling in * shrink_page_list() is used for throttling instead, which lacks all the * niceties such as fairness, adaptive pausing, bandwidth proportional * allocation and configurability. * * This function tests whether the vmscan currently in progress can assume * that the normal dirty throttling mechanism is operational. */ static bool sane_reclaim(struct scan_control *sc) { struct mem_cgroup *memcg = sc->target_mem_cgroup; if (!memcg) return true; #ifdef CONFIG_CGROUP_WRITEBACK if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) return true; #endif return false; } #else static bool global_reclaim(struct scan_control *sc) { return true; } static bool sane_reclaim(struct scan_control *sc) { return true; } #endif static unsigned long zone_reclaimable_pages(struct zone *zone) { unsigned long nr; nr = zone_page_state_snapshot(zone, NR_ACTIVE_FILE) + zone_page_state_snapshot(zone, NR_INACTIVE_FILE) + zone_page_state_snapshot(zone, NR_ISOLATED_FILE); if (get_nr_swap_pages() > 0) nr += zone_page_state_snapshot(zone, NR_ACTIVE_ANON) + zone_page_state_snapshot(zone, NR_INACTIVE_ANON) + zone_page_state_snapshot(zone, NR_ISOLATED_ANON); return nr; } bool zone_reclaimable(struct zone *zone) { return zone_page_state_snapshot(zone, NR_PAGES_SCANNED) < zone_reclaimable_pages(zone) * 6; } unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru) { if (!mem_cgroup_disabled()) return mem_cgroup_get_lru_size(lruvec, lru); return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru); } /* * Add a shrinker callback to be called from the vm. 
*/ int register_shrinker(struct shrinker *shrinker) { size_t size = sizeof(*shrinker->nr_deferred); if (shrinker->flags & SHRINKER_NUMA_AWARE) size *= nr_node_ids; shrinker->nr_deferred = kzalloc(size, GFP_KERNEL); if (!shrinker->nr_deferred) return -ENOMEM; down_write(&shrinker_rwsem); list_add_tail(&shrinker->list, &shrinker_list); up_write(&shrinker_rwsem); return 0; } EXPORT_SYMBOL(register_shrinker); /* * Remove one */ void unregister_shrinker(struct shrinker *shrinker) { down_write(&shrinker_rwsem); list_del(&shrinker->list); up_write(&shrinker_rwsem); kfree(shrinker->nr_deferred); } EXPORT_SYMBOL(unregister_shrinker); #define SHRINK_BATCH 128 static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, struct shrinker *shrinker, unsigned long nr_scanned, unsigned long nr_eligible) { unsigned long freed = 0; unsigned long long delta; long total_scan; long freeable; long nr; long new_nr; int nid = shrinkctl->nid; long batch_size = shrinker->batch ? shrinker->batch : SHRINK_BATCH; freeable = shrinker->count_objects(shrinker, shrinkctl); if (freeable == 0) return 0; /* * copy the current shrinker scan count into a local variable * and zero it so that other concurrent shrinker invocations * don't also do this scanning work. */ nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0); total_scan = nr; delta = (4 * nr_scanned) / shrinker->seeks; delta *= freeable; do_div(delta, nr_eligible + 1); total_scan += delta; if (total_scan < 0) { pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n", shrinker->scan_objects, total_scan); total_scan = freeable; } /* * We need to avoid excessive windup on filesystem shrinkers * due to large numbers of GFP_NOFS allocations causing the * shrinkers to return -1 all the time. This results in a large * nr being built up so when a shrink that can do some work * comes along it empties the entire cache due to nr >>> * freeable. This is bad for sustaining a working set in * memory. 
* * Hence only allow the shrinker to scan the entire cache when * a large delta change is calculated directly. */ if (delta < freeable / 4) total_scan = min(total_scan, freeable / 2); /* * Avoid risking looping forever due to too large nr value: * never try to free more than twice the estimate number of * freeable entries. */ if (total_scan > freeable * 2) total_scan = freeable * 2; trace_mm_shrink_slab_start(shrinker, shrinkctl, nr, nr_scanned, nr_eligible, freeable, delta, total_scan); /* * Normally, we should not scan less than batch_size objects in one * pass to avoid too frequent shrinker calls, but if the slab has less * than batch_size objects in total and we are really tight on memory, * we will try to reclaim all available objects, otherwise we can end * up failing allocations although there are plenty of reclaimable * objects spread over several slabs with usage less than the * batch_size. * * We detect the "tight on memory" situations by looking at the total * number of objects we want to scan (total_scan). If it is greater * than the total number of objects on slab (freeable), we must be * scanning at high prio and therefore should try to reclaim as much as * possible. */ while (total_scan >= batch_size || total_scan >= freeable) { unsigned long ret; unsigned long nr_to_scan = min(batch_size, total_scan); shrinkctl->nr_to_scan = nr_to_scan; ret = shrinker->scan_objects(shrinker, shrinkctl); if (ret == SHRINK_STOP) break; freed += ret; count_vm_events(SLABS_SCANNED, nr_to_scan); total_scan -= nr_to_scan; cond_resched(); } /* * move the unused scan count back into the shrinker in a * manner that handles concurrent updates. If we exhausted the * scan, there is no need to do an update. 
*/ if (total_scan > 0) new_nr = atomic_long_add_return(total_scan, &shrinker->nr_deferred[nid]); else new_nr = atomic_long_read(&shrinker->nr_deferred[nid]); trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan); return freed; } /** * shrink_slab - shrink slab caches * @gfp_mask: allocation context * @nid: node whose slab caches to target * @memcg: memory cgroup whose slab caches to target * @nr_scanned: pressure numerator * @nr_eligible: pressure denominator * * Call the shrink functions to age shrinkable caches. * * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set, * unaware shrinkers will receive a node id of 0 instead. * * @memcg specifies the memory cgroup to target. If it is not NULL, * only shrinkers with SHRINKER_MEMCG_AWARE set will be called to scan * objects from the memory cgroup specified. Otherwise, only unaware * shrinkers are called. * * @nr_scanned and @nr_eligible form a ratio that indicate how much of * the available objects should be scanned. Page reclaim for example * passes the number of pages scanned and the number of pages on the * LRU lists that it considered on @nid, plus a bias in @nr_scanned * when it encountered mapped pages. The ratio is further biased by * the ->seeks setting of the shrink function, which indicates the * cost to recreate an object relative to that of an LRU page. * * Returns the number of reclaimed slab objects. */ static unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg, unsigned long nr_scanned, unsigned long nr_eligible) { struct shrinker *shrinker; unsigned long freed = 0; if (memcg && (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))) return 0; if (nr_scanned == 0) nr_scanned = SWAP_CLUSTER_MAX; if (!down_read_trylock(&shrinker_rwsem)) { /* * If we would return 0, our callers would understand that we * have nothing else to shrink and give up trying. By returning * 1 we keep it going and assume we'll be able to shrink next * time. 
*/ freed = 1; goto out; } list_for_each_entry(shrinker, &shrinker_list, list) { struct shrink_control sc = { .gfp_mask = gfp_mask, .nid = nid, .memcg = memcg, }; /* * If kernel memory accounting is disabled, we ignore * SHRINKER_MEMCG_AWARE flag and call all shrinkers * passing NULL for memcg. */ if (memcg_kmem_enabled() && !!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE)) continue; if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) sc.nid = 0; freed += do_shrink_slab(&sc, shrinker, nr_scanned, nr_eligible); } up_read(&shrinker_rwsem); out: cond_resched(); return freed; } void drop_slab_node(int nid) { unsigned long freed; do { struct mem_cgroup *memcg = NULL; freed = 0; do { freed += shrink_slab(GFP_KERNEL, nid, memcg, 1000, 1000); } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); } while (freed > 10); } void drop_slab(void) { int nid; for_each_online_node(nid) drop_slab_node(nid); } static inline int is_page_cache_freeable(struct page *page) { /* * A freeable page cache page is referenced only by the caller * that isolated the page, the page cache radix tree and * optional buffer heads at page->private. */ return page_count(page) - page_has_private(page) == 2; } static int may_write_to_inode(struct inode *inode, struct scan_control *sc) { if (current->flags & PF_SWAPWRITE) return 1; if (!inode_write_congested(inode)) return 1; if (inode_to_bdi(inode) == current->backing_dev_info) return 1; return 0; } /* * We detected a synchronous write error writing a page out. Probably * -ENOSPC. We need to propagate that into the address_space for a subsequent * fsync(), msync() or close(). * * The tricky part is that after writepage we cannot touch the mapping: nothing * prevents it from being freed up. But we have a ref on the page and once * that page is locked, the mapping is pinned. * * We're allowed to run sleeping lock_page() here because we know the caller has * __GFP_FS. 
*/ static void handle_write_error(struct address_space *mapping, struct page *page, int error) { lock_page(page); if (page_mapping(page) == mapping) mapping_set_error(mapping, error); unlock_page(page); } /* possible outcome of pageout() */ typedef enum { /* failed to write page out, page is locked */ PAGE_KEEP, /* move page to the active list, page is locked */ PAGE_ACTIVATE, /* page has been sent to the disk successfully, page is unlocked */ PAGE_SUCCESS, /* page is clean and locked */ PAGE_CLEAN, } pageout_t; /* * pageout is called by shrink_page_list() for each dirty page. * Calls ->writepage(). */ static pageout_t pageout(struct page *page, struct address_space *mapping, struct scan_control *sc) { /* * If the page is dirty, only perform writeback if that write * will be non-blocking. To prevent this allocation from being * stalled by pagecache activity. But note that there may be * stalls if we need to run get_block(). We could test * PagePrivate for that. * * If this process is currently in __generic_file_write_iter() against * this page's queue, we can perform writeback even if that * will block. * * If the page is swapcache, write it back even if that would * block, for some throttling. This happens by accident, because * swap_backing_dev_info is bust: it doesn't reflect the * congestion state of the swapdevs. Easy to fix, if needed. */ if (!is_page_cache_freeable(page)) return PAGE_KEEP; if (!mapping) { /* * Some data journaling orphaned pages can have * page->mapping == NULL while being dirty with clean buffers. 
*/ if (page_has_private(page)) { if (try_to_free_buffers(page)) { ClearPageDirty(page); pr_info("%s: orphaned page\n", __func__); return PAGE_CLEAN; } } return PAGE_KEEP; } if (mapping->a_ops->writepage == NULL) return PAGE_ACTIVATE; if (!may_write_to_inode(mapping->host, sc)) return PAGE_KEEP; if (clear_page_dirty_for_io(page)) { int res; struct writeback_control wbc = { .sync_mode = WB_SYNC_NONE, .nr_to_write = SWAP_CLUSTER_MAX, .range_start = 0, .range_end = LLONG_MAX, .for_reclaim = 1, }; SetPageReclaim(page); res = mapping->a_ops->writepage(page, &wbc); if (res < 0) handle_write_error(mapping, page, res); if (res == AOP_WRITEPAGE_ACTIVATE) { ClearPageReclaim(page); return PAGE_ACTIVATE; } if (!PageWriteback(page)) { /* synchronous write or broken a_ops? */ ClearPageReclaim(page); } trace_mm_vmscan_writepage(page); inc_zone_page_state(page, NR_VMSCAN_WRITE); return PAGE_SUCCESS; } return PAGE_CLEAN; } /* * Same as remove_mapping, but if the page is removed from the mapping, it * gets returned with a refcount of 0. */ static int __remove_mapping(struct address_space *mapping, struct page *page, bool reclaimed) { unsigned long flags; BUG_ON(!PageLocked(page)); BUG_ON(mapping != page_mapping(page)); spin_lock_irqsave(&mapping->tree_lock, flags); /* * The non racy check for a busy page. * * Must be careful with the order of the tests. When someone has * a ref to the page, it may be possible that they dirty it then * drop the reference. So if PageDirty is tested before page_count * here, then the following race may occur: * * get_user_pages(&page); * [user mapping goes away] * write_to(page); * !PageDirty(page) [good] * SetPageDirty(page); * put_page(page); * !page_count(page) [good, discard it] * * [oops, our write_to data is lost] * * Reversing the order of the tests ensures such a situation cannot * escape unnoticed. The smp_rmb is needed to ensure the page->flags * load is not satisfied before that of page->_count. 
* * Note that if SetPageDirty is always performed via set_page_dirty, * and thus under tree_lock, then this ordering is not required. */ if (!page_ref_freeze(page, 2)) goto cannot_free; /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */ if (unlikely(PageDirty(page))) { page_ref_unfreeze(page, 2); goto cannot_free; } if (PageSwapCache(page)) { swp_entry_t swap = { .val = page_private(page) }; mem_cgroup_swapout(page, swap); __delete_from_swap_cache(page); spin_unlock_irqrestore(&mapping->tree_lock, flags); swapcache_free(swap); } else { void (*freepage)(struct page *); void *shadow = NULL; freepage = mapping->a_ops->freepage; /* * Remember a shadow entry for reclaimed file cache in * order to detect refaults, thus thrashing, later on. * * But don't store shadows in an address space that is * already exiting. This is not just an optizimation, * inode reclaim needs to empty out the radix tree or * the nodes are lost. Don't plant shadows behind its * back. * * We also don't store shadows for DAX mappings because the * only page cache pages found in these are zero pages * covering holes, and because we don't want to mix DAX * exceptional entries and shadow exceptional entries in the * same page_tree. */ if (reclaimed && page_is_file_cache(page) && !mapping_exiting(mapping) && !dax_mapping(mapping)) shadow = workingset_eviction(mapping, page); __delete_from_page_cache(page, shadow); spin_unlock_irqrestore(&mapping->tree_lock, flags); if (freepage != NULL) freepage(page); } return 1; cannot_free: spin_unlock_irqrestore(&mapping->tree_lock, flags); return 0; } /* * Attempt to detach a locked page from its ->mapping. If it is dirty or if * someone else has a ref on the page, abort and return 0. If it was * successfully detached, return 1. Assumes the caller has a single ref on * this page. 
*/ int remove_mapping(struct address_space *mapping, struct page *page) { if (__remove_mapping(mapping, page, false)) { /* * Unfreezing the refcount with 1 rather than 2 effectively * drops the pagecache ref for us without requiring another * atomic operation. */ page_ref_unfreeze(page, 1); return 1; } return 0; } /** * putback_lru_page - put previously isolated page onto appropriate LRU list * @page: page to be put back to appropriate lru list * * Add previously isolated @page to appropriate LRU list. * Page may still be unevictable for other reasons. * * lru_lock must not be held, interrupts must be enabled. */ void putback_lru_page(struct page *page) { bool is_unevictable; int was_unevictable = PageUnevictable(page); VM_BUG_ON_PAGE(PageLRU(page), page); redo: ClearPageUnevictable(page); if (page_evictable(page)) { /* * For evictable pages, we can use the cache. * In event of a race, worst case is we end up with an * unevictable page on [in]active list. * We know how to handle that. */ is_unevictable = false; lru_cache_add(page); } else { /* * Put unevictable pages directly on zone's unevictable * list. */ is_unevictable = true; add_page_to_unevictable_list(page); /* * When racing with an mlock or AS_UNEVICTABLE clearing * (page is unlocked) make sure that if the other thread * does not observe our setting of PG_lru and fails * isolation/check_move_unevictable_pages, * we see PG_mlocked/AS_UNEVICTABLE cleared below and move * the page back to the evictable list. * * The other side is TestClearPageMlocked() or shmem_lock(). */ smp_mb(); } /* * page's status can change while we move it among lru. If an evictable * page is on unevictable list, it never be freed. To avoid that, * check after we added it to the list, again. */ if (is_unevictable && page_evictable(page)) { if (!isolate_lru_page(page)) { put_page(page); goto redo; } /* This means someone else dropped this page from LRU * So, it will be freed or putback to LRU again. There is * nothing to do here. 
*/ } if (was_unevictable && !is_unevictable) count_vm_event(UNEVICTABLE_PGRESCUED); else if (!was_unevictable && is_unevictable) count_vm_event(UNEVICTABLE_PGCULLED); put_page(page); /* drop ref from isolate */ } enum page_references { PAGEREF_RECLAIM, PAGEREF_RECLAIM_CLEAN, PAGEREF_KEEP, PAGEREF_ACTIVATE, }; static enum page_references page_check_references(struct page *page, struct scan_control *sc) { int referenced_ptes, referenced_page; unsigned long vm_flags; referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup, &vm_flags); referenced_page = TestClearPageReferenced(page); /* * Mlock lost the isolation race with us. Let try_to_unmap() * move the page to the unevictable list. */ if (vm_flags & VM_LOCKED) return PAGEREF_RECLAIM; if (referenced_ptes) { if (PageSwapBacked(page)) return PAGEREF_ACTIVATE; /* * All mapped pages start out with page table * references from the instantiating fault, so we need * to look twice if a mapped file page is used more * than once. * * Mark it and spare it for another trip around the * inactive list. Another page table reference will * lead to its activation. * * Note: the mark is set for activated pages as well * so that recently deactivated but used pages are * quickly recovered. */ SetPageReferenced(page); if (referenced_page || referenced_ptes > 1) return PAGEREF_ACTIVATE; /* * Activate file-backed executable pages after first usage. */ if (vm_flags & VM_EXEC) return PAGEREF_ACTIVATE; return PAGEREF_KEEP; } /* Reclaim if clean, defer dirty pages to writeback */ if (referenced_page && !PageSwapBacked(page)) return PAGEREF_RECLAIM_CLEAN; return PAGEREF_RECLAIM; } /* Check if a page is dirty or under writeback */ static void page_check_dirty_writeback(struct page *page, bool *dirty, bool *writeback) { struct address_space *mapping; /* * Anonymous pages are not handled by flushers and must be written * from reclaim context. 
 Do not stall reclaim based on them */
	if (!page_is_file_cache(page)) {
		*dirty = false;
		*writeback = false;
		return;
	}

	/* By default assume that the page flags are accurate */
	*dirty = PageDirty(page);
	*writeback = PageWriteback(page);

	/* Verify dirty/writeback state if the filesystem supports it */
	if (!page_has_private(page))
		return;

	mapping = page_mapping(page);
	if (mapping && mapping->a_ops->is_dirty_writeback)
		mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
}

/*
 * shrink_page_list() returns the number of reclaimed pages
 *
 * Walks the (already isolated) pages on @page_list trying to free each one:
 * checks references, optionally adds anon pages to swap, unmaps, writes back
 * dirty pages, strips buffers and finally removes the page from its mapping.
 * Pages that cannot be freed are put back on @page_list on return.  The
 * ret_nr_* out-parameters are incremented (not reset) with per-category
 * counts used by the caller for congestion/writeback throttling decisions.
 */
static unsigned long shrink_page_list(struct list_head *page_list,
				      struct zone *zone,
				      struct scan_control *sc,
				      enum ttu_flags ttu_flags,
				      unsigned long *ret_nr_dirty,
				      unsigned long *ret_nr_unqueued_dirty,
				      unsigned long *ret_nr_congested,
				      unsigned long *ret_nr_writeback,
				      unsigned long *ret_nr_immediate,
				      bool force_reclaim)
{
	LIST_HEAD(ret_pages);
	LIST_HEAD(free_pages);
	int pgactivate = 0;
	unsigned long nr_unqueued_dirty = 0;
	unsigned long nr_dirty = 0;
	unsigned long nr_congested = 0;
	unsigned long nr_reclaimed = 0;
	unsigned long nr_writeback = 0;
	unsigned long nr_immediate = 0;

	cond_resched();

	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;
		enum page_references references = PAGEREF_RECLAIM_CLEAN;
		bool dirty, writeback;
		bool lazyfree = false;
		int ret = SWAP_SUCCESS;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		/* Can't touch a page someone else has locked. */
		if (!trylock_page(page))
			goto keep;

		VM_BUG_ON_PAGE(PageActive(page), page);
		VM_BUG_ON_PAGE(page_zone(page) != zone, page);

		sc->nr_scanned++;

		if (unlikely(!page_evictable(page)))
			goto cull_mlocked;

		if (!sc->may_unmap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		/*
		 * The number of dirty pages determines if a zone is marked
		 * reclaim_congested which affects wait_iff_congested. kswapd
		 * will stall and start writing pages if the tail of the LRU
		 * is all dirty unqueued pages.
		 */
		page_check_dirty_writeback(page, &dirty, &writeback);
		if (dirty || writeback)
			nr_dirty++;

		if (dirty && !writeback)
			nr_unqueued_dirty++;

		/*
		 * Treat this page as congested if the underlying BDI is or if
		 * pages are cycling through the LRU so quickly that the
		 * pages marked for immediate reclaim are making it to the
		 * end of the LRU a second time.
		 */
		mapping = page_mapping(page);
		if (((dirty || writeback) && mapping &&
		     inode_write_congested(mapping->host)) ||
		    (writeback && PageReclaim(page)))
			nr_congested++;

		/*
		 * If a page at the tail of the LRU is under writeback, there
		 * are three cases to consider.
		 *
		 * 1) If reclaim is encountering an excessive number of pages
		 *    under writeback and this page is both under writeback and
		 *    PageReclaim then it indicates that pages are being queued
		 *    for IO but are being recycled through the LRU before the
		 *    IO can complete. Waiting on the page itself risks an
		 *    indefinite stall if it is impossible to writeback the
		 *    page due to IO error or disconnected storage so instead
		 *    note that the LRU is being scanned too quickly and the
		 *    caller can stall after page list has been processed.
		 *
		 * 2) Global or new memcg reclaim encounters a page that is
		 *    not marked for immediate reclaim, or the caller does not
		 *    have __GFP_FS (or __GFP_IO if it's simply going to swap,
		 *    not to fs). In this case mark the page for immediate
		 *    reclaim and continue scanning.
		 *
		 *    Require may_enter_fs because we would wait on fs, which
		 *    may not have submitted IO yet. And the loop driver might
		 *    enter reclaim, and deadlock if it waits on a page for
		 *    which it is needed to do the write (loop masks off
		 *    __GFP_IO|__GFP_FS for this reason); but more thought
		 *    would probably show more reasons.
		 *
		 * 3) Legacy memcg encounters a page that is already marked
		 *    PageReclaim. memcg does not have any dirty pages
		 *    throttling so we could easily OOM just because too many
		 *    pages are in writeback and there is nothing else to
		 *    reclaim. Wait for the writeback to complete.
		 */
		if (PageWriteback(page)) {
			/* Case 1 above */
			if (current_is_kswapd() &&
			    PageReclaim(page) &&
			    test_bit(ZONE_WRITEBACK, &zone->flags)) {
				nr_immediate++;
				goto keep_locked;

			/* Case 2 above */
			} else if (sane_reclaim(sc) ||
			    !PageReclaim(page) || !may_enter_fs) {
				/*
				 * This is slightly racy - end_page_writeback()
				 * might have just cleared PageReclaim, then
				 * setting PageReclaim here end up interpreted
				 * as PageReadahead - but that does not matter
				 * enough to care.  What we do want is for this
				 * page to have PageReclaim set next time memcg
				 * reclaim reaches the tests above, so it will
				 * then wait_on_page_writeback() to avoid OOM;
				 * and it's also appropriate in global reclaim.
				 */
				SetPageReclaim(page);
				nr_writeback++;
				goto keep_locked;

			/* Case 3 above */
			} else {
				unlock_page(page);
				wait_on_page_writeback(page);
				/* then go back and try same page again */
				list_add_tail(&page->lru, page_list);
				continue;
			}
		}

		if (!force_reclaim)
			references = page_check_references(page, sc);

		switch (references) {
		case PAGEREF_ACTIVATE:
			goto activate_locked;
		case PAGEREF_KEEP:
			goto keep_locked;
		case PAGEREF_RECLAIM:
		case PAGEREF_RECLAIM_CLEAN:
			; /* try to reclaim the page below */
		}

		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!(sc->gfp_mask & __GFP_IO))
				goto keep_locked;
			if (!add_to_swap(page, page_list))
				goto activate_locked;
			lazyfree = true;
			may_enter_fs = 1;

			/* Adding to swap updated mapping */
			mapping = page_mapping(page);
		}

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			switch (ret = try_to_unmap(page, lazyfree ?
				(ttu_flags | TTU_BATCH_FLUSH | TTU_LZFREE) :
				(ttu_flags | TTU_BATCH_FLUSH))) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_MLOCK:
				goto cull_mlocked;
			case SWAP_LZFREE:
				goto lazyfree;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			/*
			 * Only kswapd can writeback filesystem pages to
			 * avoid risk of stack overflow but only writeback
			 * if many dirty pages have been encountered.
			 */
			if (page_is_file_cache(page) &&
					(!current_is_kswapd() ||
					 !test_bit(ZONE_DIRTY, &zone->flags))) {
				/*
				 * Immediately reclaim when written back.
				 * Similar in principal to deactivate_page()
				 * except we already have the page isolated
				 * and know it's dirty
				 */
				inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
				SetPageReclaim(page);

				goto keep_locked;
			}

			if (references == PAGEREF_RECLAIM_CLEAN)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/*
			 * Page is dirty. Flush the TLB if a writable entry
			 * potentially exists to avoid CPU writes after IO
			 * starts and then write it out here.
			 */
			try_to_unmap_flush_dirty();
			switch (pageout(page, mapping, sc)) {
			case PAGE_KEEP:
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page))
					goto keep;
				if (PageDirty(page))
					goto keep;

				/*
				 * A synchronous write - probably a ramdisk.  Go
				 * ahead and try to reclaim the page.
				 */
				if (!trylock_page(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
				/* NOTE(review): deliberate fall through to PAGE_CLEAN */
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (page_has_private(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1) {
				unlock_page(page);
				if (put_page_testzero(page))
					goto free_it;
				else {
					/*
					 * rare race with speculative reference.
					 * the speculative reference will free
					 * this page shortly, so we may
					 * increment nr_reclaimed here (and
					 * leave it off the LRU).
					 */
					nr_reclaimed++;
					continue;
				}
			}
		}

lazyfree:
		if (!mapping || !__remove_mapping(mapping, page, true))
			goto keep_locked;

		/*
		 * At this point, we have no other references and there is
		 * no way to pick any more up (removed from LRU, removed
		 * from pagecache). Can use non-atomic bitops now (and
		 * we obviously don't have to worry about waking up a process
		 * waiting on the page lock, because there are no references.
		 */
		__ClearPageLocked(page);
free_it:
		if (ret == SWAP_LZFREE)
			count_vm_event(PGLAZYFREED);

		nr_reclaimed++;

		/*
		 * Is there need to periodically free_page_list? It would
		 * appear not as the counts should be low
		 */
		list_add(&page->lru, &free_pages);
		continue;

cull_mlocked:
		if (PageSwapCache(page))
			try_to_free_swap(page);
		unlock_page(page);
		list_add(&page->lru, &ret_pages);
		continue;

activate_locked:
		/* Not a candidate for swapping, so reclaim swap space. */
		if (PageSwapCache(page) && mem_cgroup_swap_full(page))
			try_to_free_swap(page);
		VM_BUG_ON_PAGE(PageActive(page), page);
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
	}

	mem_cgroup_uncharge_list(&free_pages);
	try_to_unmap_flush();
	free_hot_cold_page_list(&free_pages, true);

	list_splice(&ret_pages, page_list);
	count_vm_events(PGACTIVATE, pgactivate);

	*ret_nr_dirty += nr_dirty;
	*ret_nr_congested += nr_congested;
	*ret_nr_unqueued_dirty += nr_unqueued_dirty;
	*ret_nr_writeback += nr_writeback;
	*ret_nr_immediate += nr_immediate;
	return nr_reclaimed;
}

/*
 * Reclaim only the clean, unmapped file pages from @page_list; dirty and
 * balloon pages are skipped. Returns the number reclaimed and adjusts the
 * zone's NR_ISOLATED_FILE counter accordingly.
 */
unsigned long reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list)
{
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.priority = DEF_PRIORITY,
		.may_unmap = 1,
	};
	unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
	struct page *page, *next;
	LIST_HEAD(clean_pages);

	list_for_each_entry_safe(page, next, page_list, lru) {
		if (page_is_file_cache(page) && !PageDirty(page) &&
		    !isolated_balloon_page(page)) {
			ClearPageActive(page);
			list_move(&page->lru, &clean_pages);
		}
	}

	ret = shrink_page_list(&clean_pages, zone, &sc,
			TTU_UNMAP|TTU_IGNORE_ACCESS,
			&dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
	list_splice(&clean_pages, page_list);
	mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
	return ret;
}

/*
 * Attempt to remove the specified page from its LRU.  Only take this page
 * if it is of the appropriate PageActive status.  Pages which are being
 * freed elsewhere are also ignored.
 *
 * page:	page to consider
 * mode:	one of the LRU isolation modes defined above
 *
 * returns 0 on success, -ve errno on failure.
 */
int __isolate_lru_page(struct page *page, isolate_mode_t mode)
{
	int ret = -EINVAL;

	/* Only take pages on the LRU.
 */
	if (!PageLRU(page))
		return ret;

	/* Compaction should not handle unevictable pages but CMA can do so */
	if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
		return ret;

	ret = -EBUSY;

	/*
	 * To minimise LRU disruption, the caller can indicate that it only
	 * wants to isolate pages it will be able to operate on without
	 * blocking - clean pages for the most part.
	 *
	 * ISOLATE_CLEAN means that only clean pages should be isolated. This
	 * is used by reclaim when it is cannot write to backing storage
	 *
	 * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to pages
	 * that it is possible to migrate without blocking
	 */
	if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
		/* All the caller can do on PageWriteback is block */
		if (PageWriteback(page))
			return ret;

		if (PageDirty(page)) {
			struct address_space *mapping;

			/* ISOLATE_CLEAN means only clean pages */
			if (mode & ISOLATE_CLEAN)
				return ret;

			/*
			 * Only pages without mappings or that have a
			 * ->migratepage callback are possible to migrate
			 * without blocking
			 */
			mapping = page_mapping(page);
			if (mapping && !mapping->a_ops->migratepage)
				return ret;
		}
	}

	if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
		return ret;

	if (likely(get_page_unless_zero(page))) {
		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		ClearPageLRU(page);
		ret = 0;
	}

	return ret;
}

/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @lruvec:	The LRU vector to pull pages from.
 * @dst:	The temp list to put pages on to.
 * @nr_scanned:	The number of pages that were scanned.
 * @sc:		The scan_control struct for this reclaim session
 * @mode:	One of the LRU isolation modes
 * @lru:	LRU list id for isolating
 *
 * returns how many pages were moved onto *@dst.
 */
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
		struct lruvec *lruvec, struct list_head *dst,
		unsigned long *nr_scanned, struct scan_control *sc,
		isolate_mode_t mode, enum lru_list lru)
{
	struct list_head *src = &lruvec->lists[lru];
	unsigned long nr_taken = 0;
	unsigned long scan;

	for (scan = 0; scan < nr_to_scan && nr_taken < nr_to_scan &&
					!list_empty(src); scan++) {
		struct page *page;
		int nr_pages;

		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		VM_BUG_ON_PAGE(!PageLRU(page), page);

		switch (__isolate_lru_page(page, mode)) {
		case 0:
			/* hpage_nr_pages: THP head pages count for more than one */
			nr_pages = hpage_nr_pages(page);
			mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
			list_move(&page->lru, dst);
			nr_taken += nr_pages;
			break;

		case -EBUSY:
			/* else it is being freed elsewhere */
			list_move(&page->lru, src);
			continue;

		default:
			BUG();
		}
	}

	*nr_scanned = scan;
	trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
				    nr_taken, mode, is_file_lru(lru));
	return nr_taken;
}

/**
 * isolate_lru_page - tries to isolate a page from its LRU list
 * @page: page to isolate from its LRU list
 *
 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
 * vmstat statistic corresponding to whatever LRU list the page was on.
 *
 * Returns 0 if the page was removed from an LRU list.
 * Returns -EBUSY if the page was not on an LRU list.
 *
 * The returned page will have PageLRU() cleared.  If it was found on
 * the active list, it will have PageActive set.  If it was found on
 * the unevictable list, it will have the PageUnevictable bit set. That flag
 * may need to be cleared by the caller before letting the page go.
 *
 * The vmstat statistic corresponding to the list on which the page was
 * found will be decremented.
 *
 * Restrictions:
 * (1) Must be called with an elevated refcount on the page. This is a
 *     fundamental difference from isolate_lru_pages (which is called
 *     without a stable reference).
 * (2) the lru_lock must not be held.
 * (3) interrupts must be enabled.
 */
int isolate_lru_page(struct page *page)
{
	int ret = -EBUSY;

	VM_BUG_ON_PAGE(!page_count(page), page);
	WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);
		struct lruvec *lruvec;

		spin_lock_irq(&zone->lru_lock);
		lruvec = mem_cgroup_page_lruvec(page, zone);
		/* Recheck under the lock: the page may have left the LRU. */
		if (PageLRU(page)) {
			int lru = page_lru(page);
			get_page(page);
			ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, lru);
			ret = 0;
		}
		spin_unlock_irq(&zone->lru_lock);
	}
	return ret;
}

/*
 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
 * then get resheduled. When there are massive number of tasks doing page
 * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
 * the LRU list will go small and be scanned faster than necessary, leading to
 * unnecessary swapping, thrashing and OOM.
 */
static int too_many_isolated(struct zone *zone, int file,
		struct scan_control *sc)
{
	unsigned long inactive, isolated;

	if (current_is_kswapd())
		return 0;

	if (!sane_reclaim(sc))
		return 0;

	if (file) {
		inactive = zone_page_state(zone, NR_INACTIVE_FILE);
		isolated = zone_page_state(zone, NR_ISOLATED_FILE);
	} else {
		inactive = zone_page_state(zone, NR_INACTIVE_ANON);
		isolated = zone_page_state(zone, NR_ISOLATED_ANON);
	}

	/*
	 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
	 * won't get blocked by normal direct-reclaimers, forming a circular
	 * deadlock.
	 */
	if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
		inactive >>= 3;

	return isolated > inactive;
}

/*
 * Return the pages on @page_list that could not be freed back to the
 * appropriate LRU lists, collecting freeable ones (refcount hit zero)
 * back onto @page_list for the caller to release.
 * Called with zone->lru_lock held; may drop and retake it.
 */
static noinline_for_stack void
putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
{
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
	struct zone *zone = lruvec_zone(lruvec);
	LIST_HEAD(pages_to_free);

	/*
	 * Put back any unfreeable pages.
	 */
	while (!list_empty(page_list)) {
		struct page *page = lru_to_page(page_list);
		int lru;

		VM_BUG_ON_PAGE(PageLRU(page), page);
		list_del(&page->lru);
		if (unlikely(!page_evictable(page))) {
			/* putback_lru_page() requires lru_lock not held. */
			spin_unlock_irq(&zone->lru_lock);
			putback_lru_page(page);
			spin_lock_irq(&zone->lru_lock);
			continue;
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);

		SetPageLRU(page);
		lru = page_lru(page);
		add_page_to_lru_list(page, lruvec, lru);

		if (is_active_lru(lru)) {
			int file = is_file_lru(lru);
			int numpages = hpage_nr_pages(page);
			reclaim_stat->recent_rotated[file] += numpages;
		}
		if (put_page_testzero(page)) {
			__ClearPageLRU(page);
			__ClearPageActive(page);
			del_page_from_lru_list(page, lruvec, lru);

			if (unlikely(PageCompound(page))) {
				spin_unlock_irq(&zone->lru_lock);
				mem_cgroup_uncharge(page);
				(*get_compound_page_dtor(page))(page);
				spin_lock_irq(&zone->lru_lock);
			} else
				list_add(&page->lru, &pages_to_free);
		}
	}

	/*
	 * To save our caller's stack, now use input list for pages to free.
	 */
	list_splice(&pages_to_free, page_list);
}

/*
 * If a kernel thread (such as nfsd for loop-back mounts) services
 * a backing device by writing to the page cache it sets PF_LESS_THROTTLE.
 * In that case we should only throttle if the backing device it is
 * writing to is congested.  In other cases it is safe to throttle.
 */
static int current_may_throttle(void)
{
	return !(current->flags & PF_LESS_THROTTLE) ||
		current->backing_dev_info == NULL ||
		bdi_write_congested(current->backing_dev_info);
}

/*
 * shrink_inactive_list() is a helper for shrink_zone().
 It returns the number
 * of reclaimed pages
 */
static noinline_for_stack unsigned long
shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
		     struct scan_control *sc, enum lru_list lru)
{
	LIST_HEAD(page_list);
	unsigned long nr_scanned;
	unsigned long nr_reclaimed = 0;
	unsigned long nr_taken;
	unsigned long nr_dirty = 0;
	unsigned long nr_congested = 0;
	unsigned long nr_unqueued_dirty = 0;
	unsigned long nr_writeback = 0;
	unsigned long nr_immediate = 0;
	isolate_mode_t isolate_mode = 0;
	int file = is_file_lru(lru);
	struct zone *zone = lruvec_zone(lruvec);
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

	/* Back off while too many pages are already isolated (see
	 * too_many_isolated() above for the rationale). */
	while (unlikely(too_many_isolated(zone, file, sc))) {
		congestion_wait(BLK_RW_ASYNC, HZ/10);

		/* We are about to die and free our memory. Return now. */
		if (fatal_signal_pending(current))
			return SWAP_CLUSTER_MAX;
	}

	lru_add_drain();

	if (!sc->may_unmap)
		isolate_mode |= ISOLATE_UNMAPPED;
	if (!sc->may_writepage)
		isolate_mode |= ISOLATE_CLEAN;

	spin_lock_irq(&zone->lru_lock);

	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
				     &nr_scanned, sc, isolate_mode, lru);

	__mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);

	if (global_reclaim(sc)) {
		__mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
		if (current_is_kswapd())
			__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
		else
			__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
	}
	spin_unlock_irq(&zone->lru_lock);

	if (nr_taken == 0)
		return 0;

	nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
				&nr_dirty, &nr_unqueued_dirty, &nr_congested,
				&nr_writeback, &nr_immediate,
				false);

	spin_lock_irq(&zone->lru_lock);

	reclaim_stat->recent_scanned[file] += nr_taken;

	if (global_reclaim(sc)) {
		if (current_is_kswapd())
			__count_zone_vm_events(PGSTEAL_KSWAPD, zone,
					       nr_reclaimed);
		else
			__count_zone_vm_events(PGSTEAL_DIRECT, zone,
					       nr_reclaimed);
	}

	putback_inactive_pages(lruvec, &page_list);

	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);

	spin_unlock_irq(&zone->lru_lock);

	mem_cgroup_uncharge_list(&page_list);
	free_hot_cold_page_list(&page_list, true);

	/*
	 * If reclaim is isolating dirty pages under writeback, it implies
	 * that the long-lived page allocation rate is exceeding the page
	 * laundering rate. Either the global limits are not being effective
	 * at throttling processes due to the page distribution throughout
	 * zones or there is heavy usage of a slow backing device. The
	 * only option is to throttle from reclaim context which is not ideal
	 * as there is no guarantee the dirtying process is throttled in the
	 * same way balance_dirty_pages() manages.
	 *
	 * Once a zone is flagged ZONE_WRITEBACK, kswapd will count the number
	 * of pages under pages flagged for immediate reclaim and stall if any
	 * are encountered in the nr_immediate check below.
	 */
	if (nr_writeback && nr_writeback == nr_taken)
		set_bit(ZONE_WRITEBACK, &zone->flags);

	/*
	 * Legacy memcg will stall in page writeback so avoid forcibly
	 * stalling here.
	 */
	if (sane_reclaim(sc)) {
		/*
		 * Tag a zone as congested if all the dirty pages scanned were
		 * backed by a congested BDI and wait_iff_congested will stall.
		 */
		if (nr_dirty && nr_dirty == nr_congested)
			set_bit(ZONE_CONGESTED, &zone->flags);

		/*
		 * If dirty pages are scanned that are not queued for IO, it
		 * implies that flushers are not keeping up. In this case, flag
		 * the zone ZONE_DIRTY and kswapd will start writing pages from
		 * reclaim context.
		 */
		if (nr_unqueued_dirty == nr_taken)
			set_bit(ZONE_DIRTY, &zone->flags);

		/*
		 * If kswapd scans pages marked marked for immediate
		 * reclaim and under writeback (nr_immediate), it implies
		 * that pages are cycling through the LRU faster than
		 * they are written so also forcibly stall.
		 */
		if (nr_immediate && current_may_throttle())
			congestion_wait(BLK_RW_ASYNC, HZ/10);
	}

	/*
	 * Stall direct reclaim for IO completions if underlying BDIs or zone
	 * is congested. Allow kswapd to continue until it starts encountering
	 * unqueued dirty pages or cycling through the LRU too quickly.
	 */
	if (!sc->hibernation_mode && !current_is_kswapd() &&
	    current_may_throttle())
		wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);

	trace_mm_vmscan_lru_shrink_inactive(zone, nr_scanned, nr_reclaimed,
			sc->priority, file);
	return nr_reclaimed;
}

/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->_count against each page.
 * But we had to alter page->flags anyway.
 */
static void move_active_pages_to_lru(struct lruvec *lruvec,
				     struct list_head *list,
				     struct list_head *pages_to_free,
				     enum lru_list lru)
{
	struct zone *zone = lruvec_zone(lruvec);
	unsigned long pgmoved = 0;
	struct page *page;
	int nr_pages;

	while (!list_empty(list)) {
		page = lru_to_page(list);
		lruvec = mem_cgroup_page_lruvec(page, zone);

		VM_BUG_ON_PAGE(PageLRU(page), page);
		SetPageLRU(page);

		nr_pages = hpage_nr_pages(page);
		mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
		list_move(&page->lru, &lruvec->lists[lru]);
		pgmoved += nr_pages;

		if (put_page_testzero(page)) {
			/* Our ref was the last one: take the page off the LRU
			 * and free it (compound pages via their destructor,
			 * with lru_lock dropped around the call). */
			__ClearPageLRU(page);
			__ClearPageActive(page);
			del_page_from_lru_list(page, lruvec, lru);

			if (unlikely(PageCompound(page))) {
				spin_unlock_irq(&zone->lru_lock);
				mem_cgroup_uncharge(page);
				(*get_compound_page_dtor(page))(page);
				spin_lock_irq(&zone->lru_lock);
			} else
				list_add(&page->lru, pages_to_free);
		}
	}

	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
	if (!is_active_lru(lru))
		__count_vm_events(PGDEACTIVATE, pgmoved);
}

/*
 * Scan up to @nr_to_scan pages off the active @lru, deactivating those not
 * recently referenced and rotating referenced executable file pages back to
 * the active list.
 */
static void shrink_active_list(unsigned long nr_to_scan,
			       struct lruvec *lruvec,
			       struct scan_control *sc,
			       enum lru_list lru)
{
	unsigned long nr_taken;
	unsigned long nr_scanned;
	unsigned long vm_flags;
	LIST_HEAD(l_hold);	/* The pages which were snipped off */
	LIST_HEAD(l_active);
	LIST_HEAD(l_inactive);
	struct page *page;
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
	unsigned long nr_rotated = 0;
	isolate_mode_t isolate_mode = 0;
	int file = is_file_lru(lru);
	struct zone *zone = lruvec_zone(lruvec);

	lru_add_drain();

	if (!sc->may_unmap)
		isolate_mode |= ISOLATE_UNMAPPED;
	if (!sc->may_writepage)
		isolate_mode |= ISOLATE_CLEAN;

	spin_lock_irq(&zone->lru_lock);

	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
				     &nr_scanned, sc, isolate_mode, lru);
	if (global_reclaim(sc))
		__mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);

	reclaim_stat->recent_scanned[file] += nr_taken;

	__count_zone_vm_events(PGREFILL, zone, nr_scanned);
	__mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
	spin_unlock_irq(&zone->lru_lock);

	while (!list_empty(&l_hold)) {
		cond_resched();
		page = lru_to_page(&l_hold);
		list_del(&page->lru);

		if (unlikely(!page_evictable(page))) {
			putback_lru_page(page);
			continue;
		}

		if (unlikely(buffer_heads_over_limit)) {
			/* Recheck page_has_private under the page lock. */
			if (page_has_private(page) && trylock_page(page)) {
				if (page_has_private(page))
					try_to_release_page(page, 0);
				unlock_page(page);
			}
		}

		if (page_referenced(page, 0, sc->target_mem_cgroup,
				    &vm_flags)) {
			nr_rotated += hpage_nr_pages(page);
			/*
			 * Identify referenced, file-backed active pages and
			 * give them one more trip around the active list. So
			 * that executable code get better chances to stay in
			 * memory under moderate memory pressure.  Anon pages
			 * are not likely to be evicted by use-once streaming
			 * IO, plus JVM can create lots of anon VM_EXEC pages,
			 * so we ignore them here.
			 */
			if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
				list_add(&page->lru, &l_active);
				continue;
			}
		}

		ClearPageActive(page);	/* we are de-activating */
		list_add(&page->lru, &l_inactive);
	}

	/*
	 * Move pages back to the lru list.
	 */
	spin_lock_irq(&zone->lru_lock);
	/*
	 * Count referenced pages from currently used mappings as rotated,
	 * even though only some of them are actually re-activated.  This
	 * helps balance scan pressure between file and anonymous pages in
	 * get_scan_count.
	 */
	reclaim_stat->recent_rotated[file] += nr_rotated;

	move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
	move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
	spin_unlock_irq(&zone->lru_lock);

	mem_cgroup_uncharge_list(&l_hold);
	free_hot_cold_page_list(&l_hold, true);
}

#ifdef CONFIG_SWAP
/* Global (non-memcg) variant: too few inactive anon pages in @zone? */
static bool inactive_anon_is_low_global(struct zone *zone)
{
	unsigned long active, inactive;

	active = zone_page_state(zone, NR_ACTIVE_ANON);
	inactive = zone_page_state(zone, NR_INACTIVE_ANON);

	return inactive * zone->inactive_ratio < active;
}

/**
 * inactive_anon_is_low - check if anonymous pages need to be deactivated
 * @lruvec: LRU vector to check
 *
 * Returns true if the zone does not have enough inactive anon pages,
 * meaning some active anon pages need to be deactivated.
 */
static bool inactive_anon_is_low(struct lruvec *lruvec)
{
	/*
	 * If we don't have swap space, anonymous page deactivation
	 * is pointless.
	 */
	if (!total_swap_pages)
		return false;

	if (!mem_cgroup_disabled())
		return mem_cgroup_inactive_anon_is_low(lruvec);

	return inactive_anon_is_low_global(lruvec_zone(lruvec));
}
#else
static inline bool inactive_anon_is_low(struct lruvec *lruvec)
{
	return false;
}
#endif

/**
 * inactive_file_is_low - check if file pages need to be deactivated
 * @lruvec: LRU vector to check
 *
 * When the system is doing streaming IO, memory pressure here
 * ensures that active file pages get deactivated, until more
 * than half of the file pages are on the inactive list.
 *
 * Once we get to that situation, protect the system's working
 * set from being evicted by disabling active file page aging.
 *
 * This uses a different ratio than the anonymous pages, because
 * the page cache uses a use-once replacement algorithm.
 */
static bool inactive_file_is_low(struct lruvec *lruvec)
{
	unsigned long inactive;
	unsigned long active;

	inactive = lruvec_lru_size(lruvec, LRU_INACTIVE_FILE);
	active = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE);

	return active > inactive;
}

/* Dispatch to the file or anon "inactive list is low" test for @lru. */
static bool inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru)
{
	if (is_file_lru(lru))
		return inactive_file_is_low(lruvec);
	else
		return inactive_anon_is_low(lruvec);
}

/*
 * Shrink one LRU list: active lists are only aged (deactivated) when their
 * inactive counterpart is low and never directly reclaimed (returns 0);
 * inactive lists are reclaimed via shrink_inactive_list().
 */
static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
				 struct lruvec *lruvec, struct scan_control *sc)
{
	if (is_active_lru(lru)) {
		if (inactive_list_is_low(lruvec, lru))
			shrink_active_list(nr_to_scan, lruvec, sc, lru);
		return 0;
	}

	return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
}

/* Which balance strategy get_scan_count() settled on. */
enum scan_balance {
	SCAN_EQUAL,
	SCAN_FRACT,
	SCAN_ANON,
	SCAN_FILE,
};

/*
 * Determine how aggressively the anon and file LRU lists should be
 * scanned.  The relative value of each set of LRU lists is determined
 * by looking at the fraction of the pages scanned we did rotate back
 * onto the active list instead of evict.
 *
 * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
 * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
 */
static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
			   struct scan_control *sc, unsigned long *nr,
			   unsigned long *lru_pages)
{
	int swappiness = mem_cgroup_swappiness(memcg);
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
	u64 fraction[2];
	u64 denominator = 0;	/* gcc */
	struct zone *zone = lruvec_zone(lruvec);
	unsigned long anon_prio, file_prio;
	enum scan_balance scan_balance;
	unsigned long anon, file;
	bool force_scan = false;
	unsigned long ap, fp;
	enum lru_list lru;
	bool some_scanned;
	int pass;

	/*
	 * If the zone or memcg is small, nr[l] can be 0.  This
	 * results in no scanning on this priority and a potential
	 * priority drop.  Global direct reclaim can go to the next
	 * zone and tends to have no problems. Global kswapd is for
	 * zone balancing and it needs to scan a minimum amount. When
	 * reclaiming for a memcg, a priority drop can cause high
	 * latencies, so it's better to scan a minimum amount there as
	 * well.
	 */
	if (current_is_kswapd()) {
		if (!zone_reclaimable(zone))
			force_scan = true;
		if (!mem_cgroup_online(memcg))
			force_scan = true;
	}
	if (!global_reclaim(sc))
		force_scan = true;

	/* If we have no swap space, do not bother scanning anon pages. */
	if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
		scan_balance = SCAN_FILE;
		goto out;
	}

	/*
	 * Global reclaim will swap to prevent OOM even with no
	 * swappiness, but memcg users want to use this knob to
	 * disable swapping for individual groups completely when
	 * using the memory controller's swap limit feature would be
	 * too expensive.
	 */
	if (!global_reclaim(sc) && !swappiness) {
		scan_balance = SCAN_FILE;
		goto out;
	}

	/*
	 * Do not apply any pressure balancing cleverness when the
	 * system is close to OOM, scan both anon and file equally
	 * (unless the swappiness setting disagrees with swapping).
	 */
	if (!sc->priority && swappiness) {
		scan_balance = SCAN_EQUAL;
		goto out;
	}

	/*
	 * Prevent the reclaimer from falling into the cache trap: as
	 * cache pages start out inactive, every cache fault will tip
	 * the scan balance towards the file LRU.  And as the file LRU
	 * shrinks, so does the window for rotation from references.
	 * This means we have a runaway feedback loop where a tiny
	 * thrashing file LRU becomes infinitely more attractive than
	 * anon pages.  Try to detect this based on file LRU size.
	 */
	if (global_reclaim(sc)) {
		unsigned long zonefile;
		unsigned long zonefree;

		zonefree = zone_page_state(zone, NR_FREE_PAGES);
		zonefile = zone_page_state(zone, NR_ACTIVE_FILE) +
			   zone_page_state(zone, NR_INACTIVE_FILE);

		if (unlikely(zonefile + zonefree <= high_wmark_pages(zone))) {
			scan_balance = SCAN_ANON;
			goto out;
		}
	}

	/*
	 * If there is enough inactive page cache, i.e. if the size of the
	 * inactive list is greater than that of the active list *and* the
	 * inactive list actually has some pages to scan on this priority, we
	 * do not reclaim anything from the anonymous working set right now.
	 * Without the second condition we could end up never scanning an
	 * lruvec even if it has plenty of old anonymous pages unless the
	 * system is under heavy pressure.
	 */
	if (!inactive_file_is_low(lruvec) &&
	    lruvec_lru_size(lruvec, LRU_INACTIVE_FILE) >> sc->priority) {
		scan_balance = SCAN_FILE;
		goto out;
	}

	scan_balance = SCAN_FRACT;

	/*
	 * With swappiness at 100, anonymous and file have the same priority.
	 * This scanning priority is essentially the inverse of IO cost.
	 */
	anon_prio = swappiness;
	file_prio = 200 - anon_prio;

	/*
	 * OK, so we have swap space and a fair amount of page cache
	 * pages.  We use the recently rotated / recently scanned
	 * ratios to determine how valuable each cache is.
	 *
	 * Because workloads change over time (and to avoid overflow)
	 * we keep these statistics as a floating average, which ends
	 * up weighing recent references more than old ones.
	 *
	 * anon in [0], file in [1]
	 */

	anon  = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON) +
		lruvec_lru_size(lruvec, LRU_INACTIVE_ANON);
	file  = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE) +
		lruvec_lru_size(lruvec, LRU_INACTIVE_FILE);

	spin_lock_irq(&zone->lru_lock);
	/* Decay the floating averages when they grow past size/4. */
	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
		reclaim_stat->recent_scanned[0] /= 2;
		reclaim_stat->recent_rotated[0] /= 2;
	}

	if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
		reclaim_stat->recent_scanned[1] /= 2;
		reclaim_stat->recent_rotated[1] /= 2;
	}

	/*
	 * The amount of pressure on anon vs file pages is inversely
	 * proportional to the fraction of recently scanned pages on
	 * each list that were recently referenced and in active use.
	 */
	ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
	ap /= reclaim_stat->recent_rotated[0] + 1;

	fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
	fp /= reclaim_stat->recent_rotated[1] + 1;
	spin_unlock_irq(&zone->lru_lock);

	fraction[0] = ap;
	fraction[1] = fp;
	denominator = ap + fp + 1;
out:
	some_scanned = false;
	/* Only use force_scan on second pass. */
	for (pass = 0; !some_scanned && pass < 2; pass++) {
		*lru_pages = 0;
		for_each_evictable_lru(lru) {
			int file = is_file_lru(lru);
			unsigned long size;
			unsigned long scan;

			size = lruvec_lru_size(lruvec, lru);
			scan = size >> sc->priority;

			if (!scan && pass && force_scan)
				scan = min(size, SWAP_CLUSTER_MAX);

			switch (scan_balance) {
			case SCAN_EQUAL:
				/* Scan lists relative to size */
				break;
			case SCAN_FRACT:
				/*
				 * Scan types proportional to swappiness and
				 * their relative recent reclaim efficiency.
				 */
				scan = div64_u64(scan * fraction[file],
							denominator);
				break;
			case SCAN_FILE:
			case SCAN_ANON:
				/* Scan one type exclusively */
				if ((scan_balance == SCAN_FILE) != file) {
					size = 0;
					scan = 0;
				}
				break;
			default:
				/* Look ma, no brain */
				BUG();
			}

			*lru_pages += size;
			nr[lru] = scan;

			/*
			 * Skip the second pass and don't force_scan,
			 * if we found something to scan.
			 */
			some_scanned |= !!scan;
		}
	}
}

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
static void init_tlb_ubc(void)
{
	/*
	 * This deliberately does not clear the cpumask as it's expensive
	 * and unnecessary. If there happens to be data in there then the
	 * first SWAP_CLUSTER_MAX pages will send an unnecessary IPI and
	 * then will be cleared.
	 */
	current->tlb_ubc.flush_required = false;
}
#else
static inline void init_tlb_ubc(void)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

/*
 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
 */
static void shrink_zone_memcg(struct zone *zone, struct mem_cgroup *memcg,
			      struct scan_control *sc, unsigned long *lru_pages)
{
	struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
	unsigned long nr[NR_LRU_LISTS];
	unsigned long targets[NR_LRU_LISTS];
	unsigned long nr_to_scan;
	enum lru_list lru;
	unsigned long nr_reclaimed = 0;
	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
	struct blk_plug plug;
	bool scan_adjusted;

	get_scan_count(lruvec, memcg, sc, nr, lru_pages);

	/* Record the original scan target for proportional adjustments later */
	memcpy(targets, nr, sizeof(nr));

	/*
	 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
	 * event that can occur when there is little memory pressure e.g.
	 * multiple streaming readers/writers. Hence, we do not abort scanning
	 * when the requested number of pages are reclaimed when scanning at
	 * DEF_PRIORITY on the assumption that the fact we are direct
	 * reclaiming implies that kswapd is not keeping up and it is best to
	 * do a batch of work at once. For memcg reclaim one check is made to
	 * abort proportional reclaim if either the file or anon lru has already
	 * dropped to zero at the first pass.
	 */
	scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
			 sc->priority == DEF_PRIORITY);

	init_tlb_ubc();

	blk_start_plug(&plug);
	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
					nr[LRU_INACTIVE_FILE]) {
		unsigned long nr_anon, nr_file, percentage;
		unsigned long nr_scanned;

		/* Work through the lists in SWAP_CLUSTER_MAX-sized batches. */
		for_each_evictable_lru(lru) {
			if (nr[lru]) {
				nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
				nr[lru] -= nr_to_scan;

				nr_reclaimed += shrink_list(lru, nr_to_scan,
							    lruvec, sc);
			}
		}

		if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
			continue;

		/*
		 * For kswapd and memcg, reclaim at least the number of pages
		 * requested. Ensure that the anon and file LRUs are scanned
		 * proportionally what was requested by get_scan_count(). We
		 * stop reclaiming one LRU and reduce the amount scanning
		 * proportional to the original scan target.
		 */
		nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
		nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];

		/*
		 * It's just vindictive to attack the larger once the smaller
		 * has gone to zero.  And given the way we stop scanning the
		 * smaller below, this makes sure that we only make one nudge
		 * towards proportionality once we've got nr_to_reclaim.
		 */
		if (!nr_file || !nr_anon)
			break;

		if (nr_file > nr_anon) {
			unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
						targets[LRU_ACTIVE_ANON] + 1;
			lru = LRU_BASE;
			percentage = nr_anon * 100 / scan_target;
		} else {
			unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
						targets[LRU_ACTIVE_FILE] + 1;
			lru = LRU_FILE;
			percentage = nr_file * 100 / scan_target;
		}

		/* Stop scanning the smaller of the LRU */
		nr[lru] = 0;
		nr[lru + LRU_ACTIVE] = 0;

		/*
		 * Recalculate the other LRU scan count based on its original
		 * scan target and the percentage scanning already complete
		 */
		lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
		nr_scanned = targets[lru] - nr[lru];
		nr[lru] = targets[lru] * (100 - percentage) / 100;
		nr[lru] -= min(nr[lru], nr_scanned);

		lru += LRU_ACTIVE;
		nr_scanned = targets[lru] - nr[lru];
		nr[lru] = targets[lru] * (100 - percentage) / 100;
		nr[lru] -= min(nr[lru], nr_scanned);

		scan_adjusted = true;
	}
	blk_finish_plug(&plug);
	sc->nr_reclaimed += nr_reclaimed;

	/*
	 * Even if we did not try to evict anon pages at all, we want to
	 * rebalance the anon lru active/inactive ratio.
	 */
	if (inactive_anon_is_low(lruvec))
		shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
				   sc, LRU_ACTIVE_ANON);

	throttle_vm_writeout(sc->gfp_mask);
}

/* Use reclaim/compaction for costly allocs or under memory pressure */
static bool in_reclaim_compaction(struct scan_control *sc)
{
	if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
			(sc->order > PAGE_ALLOC_COSTLY_ORDER ||
			 sc->priority < DEF_PRIORITY - 2))
		return true;

	return false;
}

/*
 * Reclaim/compaction is used for high-order allocation requests. It reclaims
 * order-0 pages before compacting the zone.
 * should_continue_reclaim() returns
 * true if more pages should be reclaimed such that when the page allocator
 * calls try_to_compact_zone() that it will have enough free pages to succeed.
 * It will give up earlier than that if there is difficulty reclaiming pages.
 */
static inline bool should_continue_reclaim(struct zone *zone,
					unsigned long nr_reclaimed,
					unsigned long nr_scanned,
					struct scan_control *sc)
{
	unsigned long pages_for_compaction;
	unsigned long inactive_lru_pages;

	/* If not in reclaim/compaction mode, stop */
	if (!in_reclaim_compaction(sc))
		return false;

	/* Consider stopping depending on scan and reclaim activity */
	if (sc->gfp_mask & __GFP_REPEAT) {
		/*
		 * For __GFP_REPEAT allocations, stop reclaiming if the
		 * full LRU list has been scanned and we are still failing
		 * to reclaim pages. This full LRU scan is potentially
		 * expensive but a __GFP_REPEAT caller really wants to succeed
		 */
		if (!nr_reclaimed && !nr_scanned)
			return false;
	} else {
		/*
		 * For non-__GFP_REPEAT allocations which can presumably
		 * fail without consequence, stop if we failed to reclaim
		 * any pages from the last SWAP_CLUSTER_MAX number of
		 * pages that were scanned. This will return to the
		 * caller faster at the risk reclaim/compaction and
		 * the resulting allocation attempt fails
		 */
		if (!nr_reclaimed)
			return false;
	}

	/*
	 * If we have not reclaimed enough pages for compaction and the
	 * inactive lists are large enough, continue reclaiming
	 */
	pages_for_compaction = (2UL << sc->order);
	inactive_lru_pages = zone_page_state(zone, NR_INACTIVE_FILE);
	if (get_nr_swap_pages() > 0)
		inactive_lru_pages += zone_page_state(zone, NR_INACTIVE_ANON);
	if (sc->nr_reclaimed < pages_for_compaction &&
			inactive_lru_pages > pages_for_compaction)
		return true;

	/* If compaction would go ahead or the allocation would succeed, stop */
	switch (compaction_suitable(zone, sc->order, 0, 0)) {
	case COMPACT_PARTIAL:
	case COMPACT_CONTINUE:
		return false;
	default:
		return true;
	}
}

/*
 * Reclaim pages from @zone, walking the memcg hierarchy rooted at the
 * scan_control's target.  Slab caches are shrunk in proportion to the
 * LRU pages scanned.  Returns true if any pages were reclaimed during
 * this invocation.
 */
static bool shrink_zone(struct zone *zone, struct scan_control *sc,
			bool is_classzone)
{
	struct reclaim_state *reclaim_state = current->reclaim_state;
	unsigned long nr_reclaimed, nr_scanned;
	bool reclaimable = false;

	do {
		struct mem_cgroup *root = sc->target_mem_cgroup;
		struct mem_cgroup_reclaim_cookie reclaim = {
			.zone = zone,
			.priority = sc->priority,
		};
		unsigned long zone_lru_pages = 0;
		struct mem_cgroup *memcg;

		nr_reclaimed = sc->nr_reclaimed;
		nr_scanned = sc->nr_scanned;

		memcg = mem_cgroup_iter(root, NULL, &reclaim);
		do {
			unsigned long lru_pages;
			unsigned long reclaimed;
			unsigned long scanned;

			/* Skip groups below their low limit unless thrashing. */
			if (mem_cgroup_low(root, memcg)) {
				if (!sc->may_thrash)
					continue;
				mem_cgroup_events(memcg, MEMCG_LOW, 1);
			}

			reclaimed = sc->nr_reclaimed;
			scanned = sc->nr_scanned;

			shrink_zone_memcg(zone, memcg, sc, &lru_pages);
			zone_lru_pages += lru_pages;

			if (memcg && is_classzone)
				shrink_slab(sc->gfp_mask, zone_to_nid(zone),
					    memcg, sc->nr_scanned - scanned,
					    lru_pages);

			/* Record the group's reclaim efficiency */
			vmpressure(sc->gfp_mask, memcg, false,
				   sc->nr_scanned - scanned,
				   sc->nr_reclaimed - reclaimed);

			/*
			 * Direct reclaim and kswapd have to scan all memory
			 * cgroups to fulfill the overall scan target for the
			 * zone.
			 *
			 * Limit reclaim, on the other hand, only cares about
			 * nr_to_reclaim pages to be reclaimed and it will
			 * retry with decreasing priority if one round over the
			 * whole hierarchy is not sufficient.
			 */
			if (!global_reclaim(sc) &&
					sc->nr_reclaimed >= sc->nr_to_reclaim) {
				mem_cgroup_iter_break(root, memcg);
				break;
			}
		} while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));

		/*
		 * Shrink the slab caches in the same proportion that
		 * the eligible LRU pages were scanned.
		 */
		if (global_reclaim(sc) && is_classzone)
			shrink_slab(sc->gfp_mask, zone_to_nid(zone), NULL,
				    sc->nr_scanned - nr_scanned,
				    zone_lru_pages);

		if (reclaim_state) {
			sc->nr_reclaimed += reclaim_state->reclaimed_slab;
			reclaim_state->reclaimed_slab = 0;
		}

		/* Record the subtree's reclaim efficiency */
		vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
			   sc->nr_scanned - nr_scanned,
			   sc->nr_reclaimed - nr_reclaimed);

		if (sc->nr_reclaimed - nr_reclaimed)
			reclaimable = true;

	} while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
					 sc->nr_scanned - nr_scanned, sc));

	return reclaimable;
}

/*
 * Returns true if compaction should go ahead for a high-order request, or
 * the high-order allocation would succeed without compaction.
 */
static inline bool compaction_ready(struct zone *zone, int order)
{
	unsigned long balance_gap, watermark;
	bool watermark_ok;

	/*
	 * Compaction takes time to run and there are potentially other
	 * callers using the pages just freed.
	 * Continue reclaiming until
	 * there is a buffer of free pages available to give compaction
	 * a reasonable chance of completing and allocating the page
	 */
	balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP(
			zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO));
	watermark = high_wmark_pages(zone) + balance_gap + (2UL << order);
	watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0);

	/*
	 * If compaction is deferred, reclaim up to a point where
	 * compaction will have a chance of success when re-enabled
	 */
	if (compaction_deferred(zone, order))
		return watermark_ok;

	/*
	 * If compaction is not ready to start and allocation is not likely
	 * to succeed without it, then keep reclaiming.
	 */
	if (compaction_suitable(zone, order, 0, 0) == COMPACT_SKIPPED)
		return false;

	return watermark_ok;
}

/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
 * Because:
 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
 *    allocation or
 * b) The target zone may be at high_wmark_pages(zone) but the lower zones
 *    must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
 *    zone defense algorithm.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan then give up on it.
 *
 * Returns true if a zone was reclaimable.
 */
static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
{
	struct zoneref *z;
	struct zone *zone;
	unsigned long nr_soft_reclaimed;
	unsigned long nr_soft_scanned;
	gfp_t orig_mask;
	enum zone_type requested_highidx = gfp_zone(sc->gfp_mask);
	bool reclaimable = false;

	/*
	 * If the number of buffer_heads in the machine exceeds the maximum
	 * allowed level, force direct reclaim to scan the highmem zone as
	 * highmem pages could be pinning lowmem pages storing buffer_heads
	 */
	orig_mask = sc->gfp_mask;
	if (buffer_heads_over_limit)
		sc->gfp_mask |= __GFP_HIGHMEM;

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					requested_highidx, sc->nodemask) {
		enum zone_type classzone_idx;

		if (!populated_zone(zone))
			continue;

		/* Find the highest populated zone at or below the request. */
		classzone_idx = requested_highidx;
		while (!populated_zone(zone->zone_pgdat->node_zones +
							classzone_idx))
			classzone_idx--;

		/*
		 * Take care memory controller reclaiming has small influence
		 * to global LRU.
		 */
		if (global_reclaim(sc)) {
			if (!cpuset_zone_allowed(zone,
						 GFP_KERNEL | __GFP_HARDWALL))
				continue;

			if (sc->priority != DEF_PRIORITY &&
			    !zone_reclaimable(zone))
				continue;	/* Let kswapd poll it */

			/*
			 * If we already have plenty of memory free for
			 * compaction in this zone, don't free any more.
			 * Even though compaction is invoked for any
			 * non-zero order, only frequent costly order
			 * reclamation is disruptive enough to become a
			 * noticeable problem, like transparent huge
			 * page allocations.
			 */
			if (IS_ENABLED(CONFIG_COMPACTION) &&
			    sc->order > PAGE_ALLOC_COSTLY_ORDER &&
			    zonelist_zone_idx(z) <= requested_highidx &&
			    compaction_ready(zone, sc->order)) {
				sc->compaction_ready = true;
				continue;
			}

			/*
			 * This steals pages from memory cgroups over softlimit
			 * and returns the number of reclaimed pages and
			 * scanned pages. This works for global memory pressure
			 * and balancing, not for a memcg's limit.
			 */
			nr_soft_scanned = 0;
			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
						sc->order, sc->gfp_mask,
						&nr_soft_scanned);
			sc->nr_reclaimed += nr_soft_reclaimed;
			sc->nr_scanned += nr_soft_scanned;
			if (nr_soft_reclaimed)
				reclaimable = true;
			/* need some check for avoid more shrink_zone() */
		}

		if (shrink_zone(zone, sc, zone_idx(zone) == classzone_idx))
			reclaimable = true;

		if (global_reclaim(sc) &&
		    !reclaimable && zone_reclaimable(zone))
			reclaimable = true;
	}

	/*
	 * Restore to original mask to avoid the impact on the caller if we
	 * promoted it to __GFP_HIGHMEM.
	 */
	sc->gfp_mask = orig_mask;

	return reclaimable;
}

/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about. We kick the writeback threads and take explicit
 * naps in the hope that some of these pages can be written. But if the
 * allocating task holds filesystem locks which prevent writeout this might not
 * work, and the allocation attempt will fail.
 *
 * returns:	0, if no pages reclaimed
 *		else, the number of pages reclaimed
 */
static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
					  struct scan_control *sc)
{
	int initial_priority = sc->priority;
	unsigned long total_scanned = 0;
	unsigned long writeback_threshold;
	bool zones_reclaimable;
retry:
	delayacct_freepages_start();

	if (global_reclaim(sc))
		count_vm_event(ALLOCSTALL);

	do {
		vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
				sc->priority);
		sc->nr_scanned = 0;
		zones_reclaimable = shrink_zones(zonelist, sc);

		total_scanned += sc->nr_scanned;
		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
			break;

		if (sc->compaction_ready)
			break;

		/*
		 * If we're getting trouble reclaiming, start doing
		 * writepage even in laptop mode.
		 */
		if (sc->priority < DEF_PRIORITY - 2)
			sc->may_writepage = 1;

		/*
		 * Try to write back as many pages as we just scanned. This
		 * tends to cause slow streaming writers to write data to the
		 * disk smoothly, at the dirtying rate, which is nice. But
		 * that's undesirable in laptop mode, where we *want* lumpy
		 * writeout. So in laptop mode, write out the whole world.
		 */
		writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
		if (total_scanned > writeback_threshold) {
			wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
						WB_REASON_TRY_TO_FREE_PAGES);
			sc->may_writepage = 1;
		}
	} while (--sc->priority >= 0);

	delayacct_freepages_end();

	if (sc->nr_reclaimed)
		return sc->nr_reclaimed;

	/* Aborted reclaim to try compaction? don't OOM, then */
	if (sc->compaction_ready)
		return 1;

	/* Untapped cgroup reserves?  Don't OOM, retry. */
	if (!sc->may_thrash) {
		sc->priority = initial_priority;
		sc->may_thrash = 1;
		goto retry;
	}

	/* Any of the zones still reclaimable?  Don't OOM. */
	if (zones_reclaimable)
		return 1;

	return 0;
}

/*
 * Check whether the node's lowmem reserves (up to ZONE_NORMAL) still hold
 * enough free pages for PFMEMALLOC allocations: ok when free pages exceed
 * half the summed min watermarks.  Wakes kswapd when the check fails.
 */
static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
{
	struct zone *zone;
	unsigned long pfmemalloc_reserve = 0;
	unsigned long free_pages = 0;
	int i;
	bool wmark_ok;

	for (i = 0; i <= ZONE_NORMAL; i++) {
		zone = &pgdat->node_zones[i];
		if (!populated_zone(zone) ||
		    zone_reclaimable_pages(zone) == 0)
			continue;

		pfmemalloc_reserve += min_wmark_pages(zone);
		free_pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	/* If there are no reserves (unexpected config) then do not throttle */
	if (!pfmemalloc_reserve)
		return true;

	wmark_ok = free_pages > pfmemalloc_reserve / 2;

	/* kswapd must be awake if processes are being throttled */
	if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
		pgdat->classzone_idx = min(pgdat->classzone_idx,
						(enum zone_type)ZONE_NORMAL);
		wake_up_interruptible(&pgdat->kswapd_wait);
	}

	return wmark_ok;
}

/*
 * Throttle direct reclaimers if backing storage is backed by the network
 * and the PFMEMALLOC reserve for the preferred node is getting dangerously
 * depleted. kswapd will continue to make progress and wake the processes
 * when the low watermark is reached.
 *
 * Returns true if a fatal signal was delivered during throttling. If this
 * happens, the page allocator should not consider triggering the OOM killer.
 */
static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
					nodemask_t *nodemask)
{
	struct zoneref *z;
	struct zone *zone;
	pg_data_t *pgdat = NULL;

	/*
	 * Kernel threads should not be throttled as they may be indirectly
	 * responsible for cleaning pages necessary for reclaim to make forward
	 * progress. kjournald for example may enter direct reclaim while
	 * committing a transaction where throttling it could forcing other
	 * processes to block on log_wait_commit().
	 */
	if (current->flags & PF_KTHREAD)
		goto out;

	/*
	 * If a fatal signal is pending, this process should not throttle.
	 * It should return quickly so it can exit and free its memory
	 */
	if (fatal_signal_pending(current))
		goto out;

	/*
	 * Check if the pfmemalloc reserves are ok by finding the first node
	 * with a usable ZONE_NORMAL or lower zone. The expectation is that
	 * GFP_KERNEL will be required for allocating network buffers when
	 * swapping over the network so ZONE_HIGHMEM is unusable.
	 *
	 * Throttling is based on the first usable node and throttled processes
	 * wait on a queue until kswapd makes progress and wakes them. There
	 * is an affinity then between processes waking up and where reclaim
	 * progress has been made assuming the process wakes on the same node.
	 * More importantly, processes running on remote nodes will not compete
	 * for remote pfmemalloc reserves and processes on different nodes
	 * should make reasonable progress.
	 */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					gfp_zone(gfp_mask), nodemask) {
		if (zone_idx(zone) > ZONE_NORMAL)
			continue;

		/* Throttle based on the first usable node */
		pgdat = zone->zone_pgdat;
		if (pfmemalloc_watermark_ok(pgdat))
			goto out;
		break;
	}

	/* If no zone was usable by the allocation flags then do not throttle */
	if (!pgdat)
		goto out;

	/* Account for the throttling */
	count_vm_event(PGSCAN_DIRECT_THROTTLE);

	/*
	 * If the caller cannot enter the filesystem, it's possible that it
	 * is due to the caller holding an FS lock or performing a journal
	 * transaction in the case of a filesystem like ext[3|4]. In this case,
	 * it is not safe to block on pfmemalloc_wait as kswapd could be
	 * blocked waiting on the same lock. Instead, throttle for up to a
	 * second before continuing.
	 */
	if (!(gfp_mask & __GFP_FS)) {
		wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
			pfmemalloc_watermark_ok(pgdat), HZ);

		goto check_pending;
	}

	/* Throttle until kswapd wakes the process */
	wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
		pfmemalloc_watermark_ok(pgdat));

check_pending:
	if (fatal_signal_pending(current))
		return true;

out:
	return false;
}

/*
 * Direct-reclaim entry point used by the page allocator.  Builds the
 * scan_control, throttles the caller if pfmemalloc reserves are low,
 * then runs do_try_to_free_pages().  Returns the number of reclaimed
 * pages (or 1 when aborted by a fatal signal while throttled).
 */
unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
				gfp_t gfp_mask, nodemask_t *nodemask)
{
	unsigned long nr_reclaimed;
	struct scan_control sc = {
		.nr_to_reclaim = SWAP_CLUSTER_MAX,
		.gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
		.order = order,
		.nodemask = nodemask,
		.priority = DEF_PRIORITY,
		.may_writepage = !laptop_mode,
		.may_unmap = 1,
		.may_swap = 1,
	};

	/*
	 * Do not enter reclaim if fatal signal was delivered while throttled.
	 * 1 is returned so that the page allocator does not OOM kill at this
	 * point.
	 */
	if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask))
		return 1;

	trace_mm_vmscan_direct_reclaim_begin(order,
				sc.may_writepage,
				gfp_mask);

	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);

	trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);

	return nr_reclaimed;
}

#ifdef CONFIG_MEMCG

/*
 * Reclaim from a single memcg/zone pair on behalf of soft limit reclaim.
 * Runs one shrink_zone_memcg() pass without priority-based throttling
 * (see the NOTE below) and reports scanned/reclaimed page counts.
 */
unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
						gfp_t gfp_mask, bool noswap,
						struct zone *zone,
						unsigned long *nr_scanned)
{
	struct scan_control sc = {
		.nr_to_reclaim = SWAP_CLUSTER_MAX,
		.target_mem_cgroup = memcg,
		.may_writepage = !laptop_mode,
		.may_unmap = 1,
		.may_swap = !noswap,
	};
	unsigned long lru_pages;

	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);

	trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
						      sc.may_writepage,
						      sc.gfp_mask);

	/*
	 * NOTE: Although we can get the priority field, using it
	 * here is not a good idea, since it limits the pages we can scan.
	 * if we don't reclaim here, the shrink_zone from balance_pgdat
	 * will pick up pages from other mem cgroup's as well. We hack
	 * the priority and make it zero.
	 */
	shrink_zone_memcg(zone, memcg, &sc, &lru_pages);

	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);

	*nr_scanned = sc.nr_scanned;
	return sc.nr_reclaimed;
}

/* Reclaim up to @nr_pages from @memcg's hierarchy; returns pages reclaimed. */
unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
					   unsigned long nr_pages,
					   gfp_t gfp_mask,
					   bool may_swap)
{
	struct zonelist *zonelist;
	unsigned long nr_reclaimed;
	int nid;
	struct scan_control sc = {
		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
		.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
				(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
		.target_mem_cgroup = memcg,
		.priority = DEF_PRIORITY,
		.may_writepage = !laptop_mode,
		.may_unmap = 1,
		.may_swap = may_swap,
	};

	/*
	 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
	 * take care of from where we get pages. So the node where we start the
	 * scan does not need to be the current node.
	 */
	nid = mem_cgroup_select_victim_node(memcg);

	zonelist = NODE_DATA(nid)->node_zonelists;

	trace_mm_vmscan_memcg_reclaim_begin(0,
					    sc.may_writepage,
					    sc.gfp_mask);

	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);

	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);

	return nr_reclaimed;
}
#endif

/*
 * Age the anon active lists of every memcg on @zone so pages get a chance
 * to be referenced before they are reclaimed.  No-op without swap.
 */
static void age_active_anon(struct zone *zone, struct scan_control *sc)
{
	struct mem_cgroup *memcg;

	if (!total_swap_pages)
		return;

	memcg = mem_cgroup_iter(NULL, NULL, NULL);
	do {
		struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);

		if (inactive_anon_is_low(lruvec))
			shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
					   sc, LRU_ACTIVE_ANON);

		memcg = mem_cgroup_iter(NULL, memcg, NULL);
	} while (memcg);
}

/* Check @zone against its high watermark plus @balance_gap. */
static bool zone_balanced(struct zone *zone, int order, bool highorder,
			unsigned long balance_gap, int classzone_idx)
{
	unsigned long mark = high_wmark_pages(zone) + balance_gap;

	/*
	 * When checking from pgdat_balanced(), kswapd should stop and sleep
	 * when it reaches the high order-0 watermark and let kcompactd take
	 * over. Other callers such as wakeup_kswapd() want to determine the
	 * true high-order watermark.
	 */
	if (IS_ENABLED(CONFIG_COMPACTION) && !highorder) {
		mark += (1UL << order);
		order = 0;
	}

	return zone_watermark_ok_safe(zone, order, mark, classzone_idx);
}

/*
 * pgdat_balanced() is used when checking if a node is balanced.
 *
 * For order-0, all zones must be balanced!
 *
 * For high-order allocations only zones that meet watermarks and are in a
 * zone allowed by the callers classzone_idx are added to balanced_pages. The
 * total of balanced pages must be at least 25% of the zones allowed by
 * classzone_idx for the node to be considered balanced. Forcing all zones to
 * be balanced for high orders can cause excessive reclaim when there are
 * imbalanced zones.
 * The choice of 25% is due to
 *   o a 16M DMA zone that is balanced will not balance a zone on any
 *     reasonable sized machine
 *   o On all other machines, the top zone must be at least a reasonable
 *     percentage of the middle zones. For example, on 32-bit x86, highmem
 *     would need to be at least 256M for it to be balance a whole node.
 *     Similarly, on x86-64 the Normal zone would need to be at least 1G
 *     to balance a node on its own. These seemed like reasonable ratios.
 */
static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
{
	unsigned long managed_pages = 0;
	unsigned long balanced_pages = 0;
	int i;

	/* Check the watermark levels */
	for (i = 0; i <= classzone_idx; i++) {
		struct zone *zone = pgdat->node_zones + i;

		if (!populated_zone(zone))
			continue;

		managed_pages += zone->managed_pages;

		/*
		 * A special case here:
		 *
		 * balance_pgdat() skips over all_unreclaimable after
		 * DEF_PRIORITY. Effectively, it considers them balanced so
		 * they must be considered balanced here as well!
		 */
		if (!zone_reclaimable(zone)) {
			balanced_pages += zone->managed_pages;
			continue;
		}

		if (zone_balanced(zone, order, false, 0, i))
			balanced_pages += zone->managed_pages;
		else if (!order)
			return false;
	}

	if (order)
		return balanced_pages >= (managed_pages >> 2);
	else
		return true;
}

/*
 * Prepare kswapd for sleeping. This verifies that there are no processes
 * waiting in throttle_direct_reclaim() and that watermarks have been met.
 *
 * Returns true if kswapd is ready to sleep
 */
static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
					int classzone_idx)
{
	/* If a direct reclaimer woke kswapd within HZ/10, it's premature */
	if (remaining)
		return false;

	/*
	 * The throttled processes are normally woken up in balance_pgdat() as
	 * soon as pfmemalloc_watermark_ok() is true. But there is a potential
	 * race between when kswapd checks the watermarks and a process gets
	 * throttled. There is also a potential race if processes get
	 * throttled, kswapd wakes, a large process exits thereby balancing the
	 * zones, which causes kswapd to exit balance_pgdat() before reaching
	 * the wake up checks. If kswapd is going to sleep, no process should
	 * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
	 * the wake up is premature, processes will wake kswapd and get
	 * throttled again. The difference from wake ups in balance_pgdat() is
	 * that here we are under prepare_to_wait().
	 */
	if (waitqueue_active(&pgdat->pfmemalloc_wait))
		wake_up_all(&pgdat->pfmemalloc_wait);

	return pgdat_balanced(pgdat, order, classzone_idx);
}

/*
 * kswapd shrinks the zone by the number of pages required to reach
 * the high watermark.
 *
 * Returns true if kswapd scanned at least the requested number of pages to
 * reclaim or if the lack of progress was due to pages under writeback.
 * This is used to determine if the scanning priority needs to be raised.
 */
static bool kswapd_shrink_zone(struct zone *zone,
			       int classzone_idx,
			       struct scan_control *sc)
{
	unsigned long balance_gap;
	bool lowmem_pressure;

	/* Reclaim above the high watermark. */
	sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone));

	/*
	 * We put equal pressure on every zone, unless one zone has way too
	 * many pages free already. The "too many pages" is defined as the
	 * high wmark plus a "gap" where the gap is either the low
	 * watermark or 1% of the zone, whichever is smaller.
	 */
	balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP(
			zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO));

	/*
	 * If there is no low memory pressure or the zone is balanced then no
	 * reclaim is necessary
	 */
	lowmem_pressure = (buffer_heads_over_limit && is_highmem(zone));
	if (!lowmem_pressure && zone_balanced(zone, sc->order, false,
						balance_gap, classzone_idx))
		return true;

	shrink_zone(zone, sc, zone_idx(zone) == classzone_idx);

	clear_bit(ZONE_WRITEBACK, &zone->flags);

	/*
	 * If a zone reaches its high watermark, consider it to be no longer
	 * congested. It's possible there are dirty pages backed by congested
	 * BDIs but as pressure is relieved, speculatively avoid congestion
	 * waits.
	 */
	if (zone_reclaimable(zone) &&
	    zone_balanced(zone, sc->order, false, 0, classzone_idx)) {
		clear_bit(ZONE_CONGESTED, &zone->flags);
		clear_bit(ZONE_DIRTY, &zone->flags);
	}

	return sc->nr_scanned >= sc->nr_to_reclaim;
}

/*
 * For kswapd, balance_pgdat() will work across all this node's zones until
 * they are all at high_wmark_pages(zone).
 *
 * Returns the highest zone idx kswapd was reclaiming at
 *
 * There is special handling here for zones which are full of pinned pages.
 * This can happen if the pages are all mlocked, or if they are all used by
 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
 * What we do is to detect the case where all pages in the zone have been
 * scanned twice and there has been zero successful reclaim. Mark the zone as
 * dead and from now on, only perform a short scan. Basically we're polling
 * the zone for when the problem goes away.
 *
 * kswapd scans the zones in the highmem->normal->dma direction. It skips
 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
 * lower zones regardless of the number of free pages in the lower zones.
 * This
 * interoperates with the page allocator fallback scheme to ensure that aging
 * of pages is balanced across the zones.
 */
static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
{
	int i;
	int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
	unsigned long nr_soft_reclaimed;
	unsigned long nr_soft_scanned;
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.order = order,
		.priority = DEF_PRIORITY,
		.may_writepage = !laptop_mode,
		.may_unmap = 1,
		.may_swap = 1,
	};
	count_vm_event(PAGEOUTRUN);

	do {
		bool raise_priority = true;

		sc.nr_reclaimed = 0;

		/*
		 * Scan in the highmem->dma direction for the highest
		 * zone which needs scanning
		 */
		for (i = pgdat->nr_zones - 1; i >= 0; i--) {
			struct zone *zone = pgdat->node_zones + i;

			if (!populated_zone(zone))
				continue;

			if (sc.priority != DEF_PRIORITY &&
			    !zone_reclaimable(zone))
				continue;

			/*
			 * Do some background aging of the anon list, to give
			 * pages a chance to be referenced before reclaiming.
			 */
			age_active_anon(zone, &sc);

			/*
			 * If the number of buffer_heads in the machine
			 * exceeds the maximum allowed level and this node
			 * has a highmem zone, force kswapd to reclaim from
			 * it to relieve lowmem pressure.
			 */
			if (buffer_heads_over_limit && is_highmem_idx(i)) {
				end_zone = i;
				break;
			}

			if (!zone_balanced(zone, order, false, 0, 0)) {
				end_zone = i;
				break;
			} else {
				/*
				 * If balanced, clear the dirty and congested
				 * flags
				 */
				clear_bit(ZONE_CONGESTED, &zone->flags);
				clear_bit(ZONE_DIRTY, &zone->flags);
			}
		}

		/* No zone needed scanning: the whole node is balanced. */
		if (i < 0)
			goto out;

		/*
		 * If we're getting trouble reclaiming, start doing writepage
		 * even in laptop mode.
		 */
		if (sc.priority < DEF_PRIORITY - 2)
			sc.may_writepage = 1;

		/*
		 * Now scan the zone in the dma->highmem direction, stopping
		 * at the last zone which needs scanning.
		 *
		 * We do this because the page allocator works in the opposite
		 * direction.  This prevents the page allocator from allocating
		 * pages behind kswapd's direction of progress, which would
		 * cause too much scanning of the lower zones.
		 */
		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;

			if (!populated_zone(zone))
				continue;

			if (sc.priority != DEF_PRIORITY &&
			    !zone_reclaimable(zone))
				continue;

			sc.nr_scanned = 0;

			nr_soft_scanned = 0;
			/*
			 * Call soft limit reclaim before calling shrink_zone.
			 */
			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
							order, sc.gfp_mask,
							&nr_soft_scanned);
			sc.nr_reclaimed += nr_soft_reclaimed;

			/*
			 * There should be no need to raise the scanning
			 * priority if enough pages are already being scanned
			 * that that high watermark would be met at 100%
			 * efficiency.
			 */
			if (kswapd_shrink_zone(zone, end_zone, &sc))
				raise_priority = false;
		}

		/*
		 * If the low watermark is met there is no need for processes
		 * to be throttled on pfmemalloc_wait as they should not be
		 * able to safely make forward progress. Wake them
		 */
		if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
				pfmemalloc_watermark_ok(pgdat))
			wake_up_all(&pgdat->pfmemalloc_wait);

		/* Check if kswapd should be suspending */
		if (try_to_freeze() || kthread_should_stop())
			break;

		/*
		 * Raise priority if scanning rate is too low or there was no
		 * progress in reclaiming pages
		 */
		if (raise_priority || !sc.nr_reclaimed)
			sc.priority--;
	} while (sc.priority >= 1 &&
			!pgdat_balanced(pgdat, order, classzone_idx));

out:
	/*
	 * Return the highest zone idx we were reclaiming at so
	 * prepare_kswapd_sleep() makes the same decisions as here.
	 */
	return end_zone;
}

static void kswapd_try_to_sleep(pg_data_t *pgdat, int order,
				int classzone_idx, int balanced_classzone_idx)
{
	long remaining = 0;
	DEFINE_WAIT(wait);

	if (freezing(current) || kthread_should_stop())
		return;

	prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);

	/* Try to sleep for a short interval */
	if (prepare_kswapd_sleep(pgdat, order, remaining,
						balanced_classzone_idx)) {
		remaining = schedule_timeout(HZ/10);
		finish_wait(&pgdat->kswapd_wait, &wait);
		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
	}

	/*
	 * After a short sleep, check if it was a premature sleep. If not, then
	 * go fully to sleep until explicitly woken up.
	 */
	if (prepare_kswapd_sleep(pgdat, order, remaining,
						balanced_classzone_idx)) {
		trace_mm_vmscan_kswapd_sleep(pgdat->node_id);

		/*
		 * vmstat counters are not perfectly accurate and the estimated
		 * value for counters such as NR_FREE_PAGES can deviate from the
		 * true value by nr_online_cpus * threshold. To avoid the zone
		 * watermarks being breached while under pressure, we reduce the
		 * per-cpu vmstat threshold while kswapd is awake and restore
		 * them before going back to sleep.
		 */
		set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);

		/*
		 * Compaction records what page blocks it recently failed to
		 * isolate pages from and skips them in the future scanning.
		 * When kswapd is going to sleep, it is reasonable to assume
		 * that pages and compaction may succeed so reset the cache.
		 */
		reset_isolation_suitable(pgdat);

		/*
		 * We have freed the memory, now we should compact it to make
		 * allocation of the requested order possible.
*/ wakeup_kcompactd(pgdat, order, classzone_idx); if (!kthread_should_stop()) schedule(); set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); } else { if (remaining) count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY); else count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY); } finish_wait(&pgdat->kswapd_wait, &wait); } /* * The background pageout daemon, started as a kernel thread * from the init process. * * This basically trickles out pages so that we have _some_ * free memory available even if there is no other activity * that frees anything up. This is needed for things like routing * etc, where we otherwise might have all activity going on in * asynchronous contexts that cannot page things out. * * If there are applications that are active memory-allocators * (most normal use), this basically shouldn't matter. */ static int kswapd(void *p) { unsigned long order, new_order; int classzone_idx, new_classzone_idx; int balanced_classzone_idx; pg_data_t *pgdat = (pg_data_t*)p; struct task_struct *tsk = current; struct reclaim_state reclaim_state = { .reclaimed_slab = 0, }; const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); lockdep_set_current_reclaim_state(GFP_KERNEL); if (!cpumask_empty(cpumask)) set_cpus_allowed_ptr(tsk, cpumask); current->reclaim_state = &reclaim_state; /* * Tell the memory management that we're a "memory allocator", * and that if we need more memory we should get access to it * regardless (see "__alloc_pages()"). "kswapd" should * never get caught in the normal page freeing logic. * * (Kswapd normally doesn't need memory anyway, but sometimes * you need a small amount of memory in order to be able to * page out something else, and this flag essentially protects * us from recursively trying to free more memory as we're * trying to free the first piece of memory in the first place). 
*/ tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; set_freezable(); order = new_order = 0; classzone_idx = new_classzone_idx = pgdat->nr_zones - 1; balanced_classzone_idx = classzone_idx; for ( ; ; ) { bool ret; /* * While we were reclaiming, there might have been another * wakeup, so check the values. */ new_order = pgdat->kswapd_max_order; new_classzone_idx = pgdat->classzone_idx; pgdat->kswapd_max_order = 0; pgdat->classzone_idx = pgdat->nr_zones - 1; if (order < new_order || classzone_idx > new_classzone_idx) { /* * Don't sleep if someone wants a larger 'order' * allocation or has tigher zone constraints */ order = new_order; classzone_idx = new_classzone_idx; } else { kswapd_try_to_sleep(pgdat, order, classzone_idx, balanced_classzone_idx); order = pgdat->kswapd_max_order; classzone_idx = pgdat->classzone_idx; new_order = order; new_classzone_idx = classzone_idx; pgdat->kswapd_max_order = 0; pgdat->classzone_idx = pgdat->nr_zones - 1; } ret = try_to_freeze(); if (kthread_should_stop()) break; /* * We can speed up thawing tasks if we don't call balance_pgdat * after returning from the refrigerator */ if (!ret) { trace_mm_vmscan_kswapd_wake(pgdat->node_id, order); balanced_classzone_idx = balance_pgdat(pgdat, order, classzone_idx); } } tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD); current->reclaim_state = NULL; lockdep_clear_current_reclaim_state(); return 0; } /* * A zone is low on free memory, so wake its kswapd task to service it. 
*/ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx) { pg_data_t *pgdat; if (!populated_zone(zone)) return; if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL)) return; pgdat = zone->zone_pgdat; if (pgdat->kswapd_max_order < order) { pgdat->kswapd_max_order = order; pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx); } if (!waitqueue_active(&pgdat->kswapd_wait)) return; if (zone_balanced(zone, order, true, 0, 0)) return; trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order); wake_up_interruptible(&pgdat->kswapd_wait); } #ifdef CONFIG_HIBERNATION /* * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of * freed pages. * * Rather than trying to age LRUs the aim is to preserve the overall * LRU order by reclaiming preferentially * inactive > active > active referenced > active mapped */ unsigned long shrink_all_memory(unsigned long nr_to_reclaim) { struct reclaim_state reclaim_state; struct scan_control sc = { .nr_to_reclaim = nr_to_reclaim, .gfp_mask = GFP_HIGHUSER_MOVABLE, .priority = DEF_PRIORITY, .may_writepage = 1, .may_unmap = 1, .may_swap = 1, .hibernation_mode = 1, }; struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); struct task_struct *p = current; unsigned long nr_reclaimed; p->flags |= PF_MEMALLOC; lockdep_set_current_reclaim_state(sc.gfp_mask); reclaim_state.reclaimed_slab = 0; p->reclaim_state = &reclaim_state; nr_reclaimed = do_try_to_free_pages(zonelist, &sc); p->reclaim_state = NULL; lockdep_clear_current_reclaim_state(); p->flags &= ~PF_MEMALLOC; return nr_reclaimed; } #endif /* CONFIG_HIBERNATION */ /* It's optimal to keep kswapds on the same CPUs as their memory, but not required for correctness. So if the last cpu in a node goes away, we get changed to run anywhere: as the first one comes back, restore their cpu bindings. 
*/ static int cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { int nid; if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) { for_each_node_state(nid, N_MEMORY) { pg_data_t *pgdat = NODE_DATA(nid); const struct cpumask *mask; mask = cpumask_of_node(pgdat->node_id); if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) /* One of our CPUs online: restore mask */ set_cpus_allowed_ptr(pgdat->kswapd, mask); } } return NOTIFY_OK; } /* * This kswapd start function will be called by init and node-hot-add. * On node-hot-add, kswapd will moved to proper cpus if cpus are hot-added. */ int kswapd_run(int nid) { pg_data_t *pgdat = NODE_DATA(nid); int ret = 0; if (pgdat->kswapd) return 0; pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); if (IS_ERR(pgdat->kswapd)) { /* failure at boot is fatal */ BUG_ON(system_state == SYSTEM_BOOTING); pr_err("Failed to start kswapd on node %d\n", nid); ret = PTR_ERR(pgdat->kswapd); pgdat->kswapd = NULL; } return ret; } /* * Called by memory hotplug when all memory in a node is offlined. Caller must * hold mem_hotplug_begin/end(). */ void kswapd_stop(int nid) { struct task_struct *kswapd = NODE_DATA(nid)->kswapd; if (kswapd) { kthread_stop(kswapd); NODE_DATA(nid)->kswapd = NULL; } } static int __init kswapd_init(void) { int nid; swap_setup(); for_each_node_state(nid, N_MEMORY) kswapd_run(nid); hotcpu_notifier(cpu_callback, 0); return 0; } module_init(kswapd_init) #ifdef CONFIG_NUMA /* * Zone reclaim mode * * If non-zero call zone_reclaim when the number of free pages falls below * the watermarks. */ int zone_reclaim_mode __read_mostly; #define RECLAIM_OFF 0 #define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */ #define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */ #define RECLAIM_UNMAP (1<<2) /* Unmap pages during reclaim */ /* * Priority for ZONE_RECLAIM. This determines the fraction of pages * of a node considered for each zone_reclaim. 4 scans 1/16th of * a zone. 
*/ #define ZONE_RECLAIM_PRIORITY 4 /* * Percentage of pages in a zone that must be unmapped for zone_reclaim to * occur. */ int sysctl_min_unmapped_ratio = 1; /* * If the number of slab pages in a zone grows beyond this percentage then * slab reclaim needs to occur. */ int sysctl_min_slab_ratio = 5; static inline unsigned long zone_unmapped_file_pages(struct zone *zone) { unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED); unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) + zone_page_state(zone, NR_ACTIVE_FILE); /* * It's possible for there to be more file mapped pages than * accounted for by the pages on the file LRU lists because * tmpfs pages accounted for as ANON can also be FILE_MAPPED */ return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0; } /* Work out how many page cache pages we can reclaim in this reclaim_mode */ static unsigned long zone_pagecache_reclaimable(struct zone *zone) { unsigned long nr_pagecache_reclaimable; unsigned long delta = 0; /* * If RECLAIM_UNMAP is set, then all file pages are considered * potentially reclaimable. Otherwise, we have to worry about * pages like swapcache and zone_unmapped_file_pages() provides * a better estimate */ if (zone_reclaim_mode & RECLAIM_UNMAP) nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES); else nr_pagecache_reclaimable = zone_unmapped_file_pages(zone); /* If we can't clean pages, remove dirty pages from consideration */ if (!(zone_reclaim_mode & RECLAIM_WRITE)) delta += zone_page_state(zone, NR_FILE_DIRTY); /* Watch for any possible underflows due to delta */ if (unlikely(delta > nr_pagecache_reclaimable)) delta = nr_pagecache_reclaimable; return nr_pagecache_reclaimable - delta; } /* * Try to free up some pages from this zone through reclaim. 
*/ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) { /* Minimum pages needed in order to stay on node */ const unsigned long nr_pages = 1 << order; struct task_struct *p = current; struct reclaim_state reclaim_state; struct scan_control sc = { .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)), .order = order, .priority = ZONE_RECLAIM_PRIORITY, .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), .may_unmap = !!(zone_reclaim_mode & RECLAIM_UNMAP), .may_swap = 1, }; cond_resched(); /* * We need to be able to allocate from the reserves for RECLAIM_UNMAP * and we also need to be able to write out pages for RECLAIM_WRITE * and RECLAIM_UNMAP. */ p->flags |= PF_MEMALLOC | PF_SWAPWRITE; lockdep_set_current_reclaim_state(gfp_mask); reclaim_state.reclaimed_slab = 0; p->reclaim_state = &reclaim_state; if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) { /* * Free memory by calling shrink zone with increasing * priorities until we have enough memory freed. */ do { shrink_zone(zone, &sc, true); } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0); } p->reclaim_state = NULL; current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); lockdep_clear_current_reclaim_state(); return sc.nr_reclaimed >= nr_pages; } int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) { int node_id; int ret; /* * Zone reclaim reclaims unmapped file backed pages and * slab pages if we are over the defined limits. * * A small portion of unmapped file backed pages is needed for * file I/O otherwise pages read by file I/O will be immediately * thrown out if the zone is overallocated. So we do not reclaim * if less than a specified percentage of the zone is used by * unmapped file backed pages. 
*/ if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages && zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages) return ZONE_RECLAIM_FULL; if (!zone_reclaimable(zone)) return ZONE_RECLAIM_FULL; /* * Do not scan if the allocation should not be delayed. */ if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC)) return ZONE_RECLAIM_NOSCAN; /* * Only run zone reclaim on the local zone or on zones that do not * have associated processors. This will favor the local processor * over remote processors and spread off node memory allocations * as wide as possible. */ node_id = zone_to_nid(zone); if (node_state(node_id, N_CPU) && node_id != numa_node_id()) return ZONE_RECLAIM_NOSCAN; if (test_and_set_bit(ZONE_RECLAIM_LOCKED, &zone->flags)) return ZONE_RECLAIM_NOSCAN; ret = __zone_reclaim(zone, gfp_mask, order); clear_bit(ZONE_RECLAIM_LOCKED, &zone->flags); if (!ret) count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED); return ret; } #endif /* * page_evictable - test whether a page is evictable * @page: the page to test * * Test whether page is evictable--i.e., should be placed on active/inactive * lists vs unevictable list. * * Reasons page might not be evictable: * (1) page's mapping marked unevictable * (2) page is part of an mlocked VMA * */ int page_evictable(struct page *page) { return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page); } #ifdef CONFIG_SHMEM /** * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list * @pages: array of pages to check * @nr_pages: number of pages to check * * Checks pages for evictability and moves them to the appropriate lru list. * * This function is only used for SysV IPC SHM_UNLOCK. 
*/ void check_move_unevictable_pages(struct page **pages, int nr_pages) { struct lruvec *lruvec; struct zone *zone = NULL; int pgscanned = 0; int pgrescued = 0; int i; for (i = 0; i < nr_pages; i++) { struct page *page = pages[i]; struct zone *pagezone; pgscanned++; pagezone = page_zone(page); if (pagezone != zone) { if (zone) spin_unlock_irq(&zone->lru_lock); zone = pagezone; spin_lock_irq(&zone->lru_lock); } lruvec = mem_cgroup_page_lruvec(page, zone); if (!PageLRU(page) || !PageUnevictable(page)) continue; if (page_evictable(page)) { enum lru_list lru = page_lru_base_type(page); VM_BUG_ON_PAGE(PageActive(page), page); ClearPageUnevictable(page); del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE); add_page_to_lru_list(page, lruvec, lru); pgrescued++; } } if (zone) { __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued); __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned); spin_unlock_irq(&zone->lru_lock); } } #endif /* CONFIG_SHMEM */
oe5hpm/linux
mm/vmscan.c
C
gpl-2.0
112,709
/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifndef __EMAC_DEFINES_H__ #define __EMAC_DEFINES_H__ /* EMAC_DMA_MAS_CTRL */ #define DEV_ID_NUM_BMSK 0x7f000000 #define DEV_ID_NUM_SHFT 24 #define DEV_REV_NUM_BMSK 0xff0000 #define DEV_REV_NUM_SHFT 16 #define INT_RD_CLR_EN 0x4000 #define IRQ_MODERATOR2_EN 0x800 #define IRQ_MODERATOR_EN 0x400 #define LPW_CLK_SEL 0x80 #define LPW_STATE 0x20 #define LPW_MODE 0x10 #define SOFT_RST 0x1 /* EMAC_IRQ_MOD_TIM_INIT */ #define IRQ_MODERATOR2_INIT_BMSK 0xffff0000 #define IRQ_MODERATOR2_INIT_SHFT 16 #define IRQ_MODERATOR_INIT_BMSK 0xffff #define IRQ_MODERATOR_INIT_SHFT 0 /* EMAC_MDIO_CTRL */ #define MDIO_MODE 0x40000000 #define MDIO_PR 0x20000000 #define MDIO_AP_EN 0x10000000 #define MDIO_BUSY 0x8000000 #define MDIO_CLK_SEL_BMSK 0x7000000 #define MDIO_CLK_SEL_SHFT 24 #define MDIO_START 0x800000 #define SUP_PREAMBLE 0x400000 #define MDIO_RD_NWR 0x200000 #define MDIO_REG_ADDR_BMSK 0x1f0000 #define MDIO_REG_ADDR_SHFT 16 #define MDIO_DATA_BMSK 0xffff #define MDIO_DATA_SHFT 0 /* EMAC_PHY_STS */ #define PHY_ADDR_BMSK 0x1f0000 #define PHY_ADDR_SHFT 16 /* EMAC_MDIO_EX_CTRL */ #define DEVAD_BMSK 0x1f0000 #define DEVAD_SHFT 16 #define EX_REG_ADDR_BMSK 0xffff #define EX_REG_ADDR_SHFT 0 /* EMAC_MAC_CTRL */ #define SINGLE_PAUSE_MODE 0x10000000 #define DEBUG_MODE 0x8000000 #define BROAD_EN 0x4000000 #define MULTI_ALL 0x2000000 #define RX_CHKSUM_EN 0x1000000 #define HUGE 0x800000 #define SPEED(x) (((x) & 0x3) << 20) #define SPEED_MASK SPEED(0x3) #define SIMR 0x80000 #define 
TPAUSE 0x10000 #define PROM_MODE 0x8000 #define VLAN_STRIP 0x4000 #define PRLEN_BMSK 0x3c00 #define PRLEN_SHFT 10 #define HUGEN 0x200 #define FLCHK 0x100 #define PCRCE 0x80 #define CRCE 0x40 #define FULLD 0x20 #define MAC_LP_EN 0x10 #define RXFC 0x8 #define TXFC 0x4 #define RXEN 0x2 #define TXEN 0x1 /* EMAC_WOL_CTRL0 */ #define LK_CHG_PME 0x20 #define LK_CHG_EN 0x10 #define MG_FRAME_PME 0x8 #define MG_FRAME_EN 0x4 #define WK_FRAME_EN 0x1 /* EMAC_DESC_CTRL_3 */ #define RFD_RING_SIZE_BMSK 0xfff /* EMAC_DESC_CTRL_4 */ #define RX_BUFFER_SIZE_BMSK 0xffff /* EMAC_DESC_CTRL_6 */ #define RRD_RING_SIZE_BMSK 0xfff /* EMAC_DESC_CTRL_9 */ #define TPD_RING_SIZE_BMSK 0xffff /* EMAC_TXQ_CTRL_0 */ #define NUM_TXF_BURST_PREF_BMSK 0xffff0000 #define NUM_TXF_BURST_PREF_SHFT 16 #define LS_8023_SP 0x80 #define TXQ_MODE 0x40 #define TXQ_EN 0x20 #define IP_OP_SP 0x10 #define NUM_TPD_BURST_PREF_BMSK 0xf #define NUM_TPD_BURST_PREF_SHFT 0 /* EMAC_TXQ_CTRL_1 */ #define JUMBO_TASK_OFFLOAD_THRESHOLD_BMSK 0x7ff /* EMAC_TXQ_CTRL_2 */ #define TXF_HWM_BMSK 0xfff0000 #define TXF_LWM_BMSK 0xfff /* EMAC_RXQ_CTRL_0 */ #define RXQ_EN 0x80000000 #define CUT_THRU_EN 0x40000000 #define RSS_HASH_EN 0x20000000 #define NUM_RFD_BURST_PREF_BMSK 0x3f00000 #define NUM_RFD_BURST_PREF_SHFT 20 #define IDT_TABLE_SIZE_BMSK 0x1ff00 #define IDT_TABLE_SIZE_SHFT 8 #define SP_IPV6 0x80 /* EMAC_RXQ_CTRL_1 */ #define JUMBO_1KAH_BMSK 0xf000 #define JUMBO_1KAH_SHFT 12 #define RFD_PREF_LOW_THRESHOLD_BMSK 0xfc0 #define RFD_PREF_LOW_THRESHOLD_SHFT 6 #define RFD_PREF_UP_THRESHOLD_BMSK 0x3f #define RFD_PREF_UP_THRESHOLD_SHFT 0 /* EMAC_RXQ_CTRL_2 */ #define RXF_DOF_THRESHOLD_BMSK 0xfff0000 #define RXF_DOF_THRESHOLD_SHFT 16 #define RXF_UOF_THRESHOLD_BMSK 0xfff #define RXF_UOF_THRESHOLD_SHFT 0 /* EMAC_RXQ_CTRL_3 */ #define RXD_TIMER_BMSK 0xffff0000 #define RXD_THRESHOLD_BMSK 0xfff #define RXD_THRESHOLD_SHFT 0 /* EMAC_DMA_CTRL */ #define DMAW_DLY_CNT_BMSK 0xf0000 #define DMAW_DLY_CNT_SHFT 16 #define DMAR_DLY_CNT_BMSK 0xf800 #define 
DMAR_DLY_CNT_SHFT 11 #define DMAR_REQ_PRI 0x400 #define REGWRBLEN_BMSK 0x380 #define REGWRBLEN_SHFT 7 #define REGRDBLEN_BMSK 0x70 #define REGRDBLEN_SHFT 4 #define OUT_ORDER_MODE 0x4 #define ENH_ORDER_MODE 0x2 #define IN_ORDER_MODE 0x1 /* EMAC_MAILBOX_13 */ #define RFD3_PROC_IDX_BMSK 0xfff0000 #define RFD3_PROC_IDX_SHFT 16 #define RFD3_PROD_IDX_BMSK 0xfff #define RFD3_PROD_IDX_SHFT 0 /* EMAC_MAILBOX_2 */ #define NTPD_CONS_IDX_BMSK 0xffff0000 #define NTPD_CONS_IDX_SHFT 16 /* EMAC_MAILBOX_3 */ #define RFD0_CONS_IDX_BMSK 0xfff #define RFD0_CONS_IDX_SHFT 0 /* EMAC_INT_STATUS */ #define DIS_INT BIT(31) #define PTP_INT BIT(30) #define RFD4_UR_INT BIT(29) #define TX_PKT_INT3 BIT(26) #define TX_PKT_INT2 BIT(25) #define TX_PKT_INT1 BIT(24) #define RX_PKT_INT3 BIT(19) #define RX_PKT_INT2 BIT(18) #define RX_PKT_INT1 BIT(17) #define RX_PKT_INT0 BIT(16) #define TX_PKT_INT BIT(15) #define TXQ_TO_INT BIT(14) #define GPHY_WAKEUP_INT BIT(13) #define GPHY_LINK_DOWN_INT BIT(12) #define GPHY_LINK_UP_INT BIT(11) #define DMAW_TO_INT BIT(10) #define DMAR_TO_INT BIT(9) #define TXF_UR_INT BIT(8) #define RFD3_UR_INT BIT(7) #define RFD2_UR_INT BIT(6) #define RFD1_UR_INT BIT(5) #define RFD0_UR_INT BIT(4) #define RXF_OF_INT BIT(3) #define SW_MAN_INT BIT(2) /* EMAC_INT_RETRIG_INIT */ #define INT_RETRIG_TIME_BMSK 0xffff /* EMAC_MAILBOX_11 */ #define H3TPD_PROD_IDX_BMSK 0xffff0000 #define H3TPD_PROD_IDX_SHFT 16 /* EMAC_AXI_MAST_CTRL */ #define DATA_BYTE_SWAP 0x8 #define MAX_BOUND 0x2 #define MAX_BTYPE 0x1 /* EMAC_MAILBOX_12 */ #define H3TPD_CONS_IDX_BMSK 0xffff0000 #define H3TPD_CONS_IDX_SHFT 16 /* EMAC_MAILBOX_9 */ #define H2TPD_PROD_IDX_BMSK 0xffff #define H2TPD_PROD_IDX_SHFT 0 /* EMAC_MAILBOX_10 */ #define H1TPD_CONS_IDX_BMSK 0xffff0000 #define H1TPD_CONS_IDX_SHFT 16 #define H2TPD_CONS_IDX_BMSK 0xffff #define H2TPD_CONS_IDX_SHFT 0 /* EMAC_ATHR_HEADER_CTRL */ #define HEADER_CNT_EN 0x2 #define HEADER_ENABLE 0x1 /* EMAC_MAILBOX_0 */ #define RFD0_PROC_IDX_BMSK 0xfff0000 #define RFD0_PROC_IDX_SHFT 
16 #define RFD0_PROD_IDX_BMSK 0xfff #define RFD0_PROD_IDX_SHFT 0 /* EMAC_MAILBOX_5 */ #define RFD1_PROC_IDX_BMSK 0xfff0000 #define RFD1_PROC_IDX_SHFT 16 #define RFD1_PROD_IDX_BMSK 0xfff #define RFD1_PROD_IDX_SHFT 0 /* EMAC_MAILBOX_6 */ #define RFD2_PROC_IDX_BMSK 0xfff0000 #define RFD2_PROC_IDX_SHFT 16 #define RFD2_PROD_IDX_BMSK 0xfff #define RFD2_PROD_IDX_SHFT 0 /* EMAC_CORE_HW_VERSION */ #define MAJOR_BMSK 0xf0000000 #define MAJOR_SHFT 28 #define MINOR_BMSK 0xfff0000 #define MINOR_SHFT 16 #define STEP_BMSK 0xffff #define STEP_SHFT 0 /* EMAC_MISC_CTRL */ #define RX_UNCPL_INT_EN 0x1 /* EMAC_MAILBOX_7 */ #define RFD2_CONS_IDX_BMSK 0xfff0000 #define RFD2_CONS_IDX_SHFT 16 #define RFD1_CONS_IDX_BMSK 0xfff #define RFD1_CONS_IDX_SHFT 0 /* EMAC_MAILBOX_8 */ #define RFD3_CONS_IDX_BMSK 0xfff #define RFD3_CONS_IDX_SHFT 0 /* EMAC_MAILBOX_15 */ #define NTPD_PROD_IDX_BMSK 0xffff #define NTPD_PROD_IDX_SHFT 0 /* EMAC_MAILBOX_16 */ #define H1TPD_PROD_IDX_BMSK 0xffff #define H1TPD_PROD_IDX_SHFT 0 /* EMAC_EMAC_WRAPPER_CSR1 */ #define TX_INDX_FIFO_SYNC_RST BIT(23) #define TX_TS_FIFO_SYNC_RST BIT(22) #define RX_TS_FIFO2_SYNC_RST BIT(21) #define RX_TS_FIFO1_SYNC_RST BIT(20) #define TX_TS_ENABLE BIT(16) #define DIS_1588_CLKS BIT(11) #define FREQ_MODE BIT(9) #define ENABLE_RRD_TIMESTAMP BIT(3) /* EMAC_EMAC_WRAPPER_CSR2 */ #define HDRIVE_BMSK 0x3000 #define HDRIVE_SHFT 12 #define SLB_EN 0x200 #define PLB_EN 0x100 #define WOL_EN 0x80 #define CKEDGE_SEL 0x40 #define TX_ID_EN_L 0x20 #define RX_ID_EN_L 0x10 #define RGMII_PHY_MODE_BMSK 0x6 #define RGMII_PHY_MODE_SHFT 1 #define PHY_RESET 0x1 /* EMAC_EMAC_WRAPPER_CSR3 */ #define PLL_RESET 0x1000000 #define PLL_L_VAL_5_0_BMSK 0xfc0000 #define PLL_L_VAL_5_0_SHFT 18 #define BYPASSNL 0x10000 /* EMAC_EMAC_WRAPPER_CSR5 */ #define RMII_125_CLK_EN 0x20 /* EMAC_EMAC_WRAPPER_CSR10 */ #define RD_CLR_1588 0x2 #define DIS_1588 0x1 /* EMAC_EMAC_WRAPPER_STATUS */ #define PLL_LOCK_DET 0x1 /* EMAC_EMAC_WRAPPER_TX_TS_INX */ #define EMAC_WRAPPER_TX_TS_EMPTY 
0x80000000 #define EMAC_WRAPPER_TX_TS_INX_BMSK 0xffff /* EMAC_P1588_CTRL_REG */ #define ATTACH_EN 0x10 #define BYPASS_O 0x8 #define CLOCK_MODE_BMSK 0x6 #define CLOCK_MODE_SHFT 1 #define ETH_MODE_SW 0x1 /* EMAC_P1588_TX_LATENCY */ #define TX_LATENCY_BMSK 0xffff #define TX_LATENCY_SHFT 0 /* EMAC_P1588_INC_VALUE_2 */ #define INC_VALUE_2_BMSK 0xffff /* EMAC_P1588_INC_VALUE_1 */ #define INC_VALUE_1_BMSK 0xffff /* EMAC_P1588_NANO_OFFSET_2 */ #define NANO_OFFSET_2_BMSK 0xffff /* EMAC_P1588_NANO_OFFSET_1 */ #define NANO_OFFSET_1_BMSK 0xffff /* EMAC_P1588_SEC_OFFSET_2 */ #define SEC_OFFSET_2_BMSK 0xffff /* EMAC_P1588_SEC_OFFSET_1 */ #define SEC_OFFSET_1_BMSK 0xffff /* EMAC_P1588_REAL_TIME_5 */ #define REAL_TIME_5_BMSK 0xffff #define REAL_TIME_5_SHFT 0 /* EMAC_P1588_REAL_TIME_4 */ #define REAL_TIME_4_BMSK 0xffff #define REAL_TIME_4_SHFT 0 /* EMAC_P1588_REAL_TIME_3 */ #define REAL_TIME_3_BMSK 0xffff #define REAL_TIME_3_SHFT 0 /* EMAC_P1588_REAL_TIME_2 */ #define REAL_TIME_2_BMSK 0xffff #define REAL_TIME_2_SHFT 0 /* EMAC_P1588_REAL_TIME_1 */ #define REAL_TIME_1_BMSK 0xffff #define REAL_TIME_1_SHFT 0 /* EMAC_P1588_EXPANDED_INT_STATUS */ #define PPS_IN 0x20 /* EMAC_P1588_RTC_EXPANDED_CONFIG */ #define RTC_READ_MODE 0x20 #define RTC_SNAPSHOT 0x10 #define LOAD_RTC 0x1 /* EMAC_P1588_RTC_PRELOADED_4 */ #define RTC_PRELOADED_4_BMSK 0xffff /* EMAC_P1588_RTC_PRELOADED_3 */ #define RTC_PRELOADED_3_BMSK 0xffff /* EMAC_P1588_RTC_PRELOADED_2 */ #define RTC_PRELOADED_2_BMSK 0xffff /* EMAC_P1588_RTC_PRELOADED_1 */ #define RTC_PRELOADED_1_BMSK 0xffff /* EMAC_P1588_GRAND_MASTER_CONFIG_0 */ #define GRANDMASTER_MODE 0x40 #define GM_PPS_SYNC 0x20 #endif /* __EMAC_DEFINES_H__ */
shminer/kernel-msm-3.18
drivers/net/ethernet/qualcomm/emac/emac_defines.h
C
gpl-2.0
20,285
/* ellpe.c * * Complete elliptic integral of the second kind * * * * SYNOPSIS: * * double m1, y, ellpe(); * * y = ellpe( m1 ); * * * * DESCRIPTION: * * Approximates the integral * * * pi/2 * - * | | 2 * E(m) = | sqrt( 1 - m sin t ) dt * | | * - * 0 * * Where m = 1 - m1, using the approximation * * P(x) - x log x Q(x). * * Though there are no singularities, the argument m1 is used * rather than m for compatibility with ellpk(). * * E(1) = 1; E(0) = pi/2. * * * ACCURACY: * * Relative error: * arithmetic domain # trials peak rms * DEC 0, 1 13000 3.1e-17 9.4e-18 * IEEE 0, 1 10000 2.1e-16 7.3e-17 * * * ERROR MESSAGES: * * message condition value returned * ellpe domain x<0, x>1 0.0 * */ /* ellpe.c */ /* Elliptic integral of second kind */ /* Cephes Math Library, Release 2.8: June, 2000 Copyright 1984, 1987, 1989, 2000 by Stephen L. Moshier */ #include "mconf.h" #ifdef UNK static double P[] = { 1.53552577301013293365E-4, 2.50888492163602060990E-3, 8.68786816565889628429E-3, 1.07350949056076193403E-2, 7.77395492516787092951E-3, 7.58395289413514708519E-3, 1.15688436810574127319E-2, 2.18317996015557253103E-2, 5.68051945617860553470E-2, 4.43147180560990850618E-1, 1.00000000000000000299E0 }; static double Q[] = { 3.27954898576485872656E-5, 1.00962792679356715133E-3, 6.50609489976927491433E-3, 1.68862163993311317300E-2, 2.61769742454493659583E-2, 3.34833904888224918614E-2, 4.27180926518931511717E-2, 5.85936634471101055642E-2, 9.37499997197644278445E-2, 2.49999999999888314361E-1 }; #endif #ifdef DEC static unsigned short P[] = { 0035041,0001364,0141572,0117555, 0036044,0066032,0130027,0033404, 0036416,0053617,0064456,0102632, 0036457,0161100,0061177,0122612, 0036376,0136251,0012403,0124162, 0036370,0101316,0151715,0131613, 0036475,0105477,0050317,0133272, 0036662,0154232,0024645,0171552, 0037150,0126220,0047054,0030064, 0037742,0162057,0167645,0165612, 0040200,0000000,0000000,0000000 }; static unsigned short Q[] = { 0034411,0106743,0115771,0055462, 
0035604,0052575,0155171,0045540, 0036325,0030424,0064332,0167756, 0036612,0052366,0063006,0115175, 0036726,0070430,0004533,0124654, 0037011,0022741,0030675,0030711, 0037056,0174452,0127062,0132122, 0037157,0177750,0142041,0072523, 0037277,0177777,0173137,0002627, 0037577,0177777,0177777,0101101 }; #endif #ifdef IBMPC static unsigned short P[] = { 0x53ee,0x986f,0x205e,0x3f24, 0xe6e0,0x5602,0x8d83,0x3f64, 0xd0b3,0xed25,0xcaf1,0x3f81, 0xf4b1,0x0c4f,0xfc48,0x3f85, 0x750e,0x22a0,0xd795,0x3f7f, 0xb671,0xda79,0x1059,0x3f7f, 0xf6d7,0xea19,0xb167,0x3f87, 0xbe6d,0x4534,0x5b13,0x3f96, 0x8607,0x09c5,0x1592,0x3fad, 0xbd71,0xfdf4,0x5c85,0x3fdc, 0x0000,0x0000,0x0000,0x3ff0 }; static unsigned short Q[] = { 0x2b66,0x737f,0x31bc,0x3f01, 0x296c,0xbb4f,0x8aaf,0x3f50, 0x5dfe,0x8d1b,0xa622,0x3f7a, 0xd350,0xccc0,0x4a9e,0x3f91, 0x7535,0x012b,0xce23,0x3f9a, 0xa639,0x2637,0x24bc,0x3fa1, 0x568a,0x55c6,0xdf25,0x3fa5, 0x2eaa,0x1884,0xfffd,0x3fad, 0xe0b3,0xfecb,0xffff,0x3fb7, 0xf048,0xffff,0xffff,0x3fcf }; #endif #ifdef MIEEE static unsigned short P[] = { 0x3f24,0x205e,0x986f,0x53ee, 0x3f64,0x8d83,0x5602,0xe6e0, 0x3f81,0xcaf1,0xed25,0xd0b3, 0x3f85,0xfc48,0x0c4f,0xf4b1, 0x3f7f,0xd795,0x22a0,0x750e, 0x3f7f,0x1059,0xda79,0xb671, 0x3f87,0xb167,0xea19,0xf6d7, 0x3f96,0x5b13,0x4534,0xbe6d, 0x3fad,0x1592,0x09c5,0x8607, 0x3fdc,0x5c85,0xfdf4,0xbd71, 0x3ff0,0x0000,0x0000,0x0000 }; static unsigned short Q[] = { 0x3f01,0x31bc,0x737f,0x2b66, 0x3f50,0x8aaf,0xbb4f,0x296c, 0x3f7a,0xa622,0x8d1b,0x5dfe, 0x3f91,0x4a9e,0xccc0,0xd350, 0x3f9a,0xce23,0x012b,0x7535, 0x3fa1,0x24bc,0x2637,0xa639, 0x3fa5,0xdf25,0x55c6,0x568a, 0x3fad,0xfffd,0x1884,0x2eaa, 0x3fb7,0xffff,0xfecb,0xe0b3, 0x3fcf,0xffff,0xffff,0xf048 }; #endif #ifdef ANSIPROT extern double polevl ( double, void *, int ); extern double log ( double ); #else double polevl(), log(); #endif double ellpe(x) double x; { if( (x <= 0.0) || (x > 1.0) ) { if( x == 0.0 ) return( 1.0 ); mtherr( "ellpe", DOMAIN ); return( 0.0 ); } return( polevl(x,P,10) - log(x) * (x * 
polevl(x,Q,9)) ); }
ysleu/RTL8685
uClinux-dist/lib/libm/ellpe.c
C
gpl-2.0
4,328
<?php /** * @file * Contains \Drupal\config\Tests\Storage\CachedStorageTest. */ namespace Drupal\config\Tests\Storage; use Drupal\Core\Config\FileStorage; use Drupal\Core\Config\CachedStorage; use Drupal\Core\Database\Database; use Drupal\Core\DependencyInjection\ContainerBuilder; use Symfony\Component\DependencyInjection\Reference; /** * Tests CachedStorage operations. * * @group config */ class CachedStorageTest extends ConfigStorageTestBase { /** * The cache backend the cached storage is using. * * @var \Drupal\Core\Cache\CacheBackendInterface */ protected $cache; /** * The file storage the cached storage is using. * * @var \Drupal\Core\Config\FileStorage */ protected $fileStorage; protected function setUp() { parent::setUp(); // Create a directory. $dir = $this->publicFilesDirectory . '/config'; mkdir($dir); $this->fileStorage = new FileStorage($dir); $this->storage = new CachedStorage($this->fileStorage, \Drupal::service('cache.config')); $this->cache = \Drupal::service('cache_factory')->get('config'); // ::listAll() verifications require other configuration data to exist. $this->storage->write('system.performance', array()); } /** * {@inheritdoc} */ public function testInvalidStorage() { // No-op as this test does not make sense. } /** * {@inheritdoc} */ protected function read($name) { $data = $this->cache->get($name); // Cache misses fall through to the underlying storage. return $data ? 
$data->data : $this->fileStorage->read($name); } /** * {@inheritdoc} */ protected function insert($name, $data) { $this->fileStorage->write($name, $data); $this->cache->set($name, $data); } /** * {@inheritdoc} */ protected function update($name, $data) { $this->fileStorage->write($name, $data); $this->cache->set($name, $data); } /** * {@inheritdoc} */ protected function delete($name) { $this->cache->delete($name); unlink($this->fileStorage->getFilePath($name)); } /** * {@inheritdoc} */ public function containerBuild(ContainerBuilder $container) { parent::containerBuild($container); // Use the regular database cache backend to aid testing. $container->register('cache_factory', 'Drupal\Core\Cache\DatabaseBackendFactory') ->addArgument(new Reference('database')) ->addArgument(new Reference('cache_tags.invalidator.checksum')); } }
komejo/article-test
web/core/modules/config/src/Tests/Storage/CachedStorageTest.php
PHP
gpl-2.0
2,487
//============================================================================= // MuseScore // Music Composition & Notation // // Copyright (C) 2010-2011 Werner Schweer // // This program is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License version 2 // as published by the Free Software Foundation and appearing in // the file LICENCE.GPL //============================================================================= #include "stem.h" #include "staff.h" #include "chord.h" #include "score.h" #include "stafftype.h" #include "hook.h" #include "tremolo.h" #include "note.h" #include "xml.h" // TEMPORARY HACK!! #include "sym.h" // END OF HACK namespace Ms { //--------------------------------------------------------- // Stem // Notenhals //--------------------------------------------------------- Stem::Stem(Score* s) : Element(s) { _len = 0.0; _userLen = 0.0; setFlags(ElementFlag::SELECTABLE); } //--------------------------------------------------------- // up //--------------------------------------------------------- bool Stem::up() const { return chord() ? chord()->up() : true; } //--------------------------------------------------------- // stemLen //--------------------------------------------------------- qreal Stem::stemLen() const { return up() ? -_len : _len; } //--------------------------------------------------------- // lineWidth //--------------------------------------------------------- qreal Stem::lineWidth() const { return point(score()->styleS(StyleIdx::stemWidth)); } //--------------------------------------------------------- // layout //--------------------------------------------------------- void Stem::layout() { qreal l = _len + _userLen; qreal _up = up() ? 
-1.0 : 1.0; l *= _up; qreal y1 = 0.0; // vertical displacement to match note attach point Staff* st = staff(); if (chord() && st ) { if (st->isTabStaff() ) { // TAB staves if (st->staffType()->stemThrough()) { // if stems through staves, gets Y pos. of stem-side note relative to chord other side qreal lineDist = st->lineDistance() * spatium(); y1 = (chord()->downString() - chord()->upString() ) * _up * lineDist; // if fret marks above lines, raise stem beginning by 1/2 line distance if (!st->staffType()->onLines()) y1 -= lineDist * 0.5; // shorten stem by 1/2 lineDist to clear the note and a little more to keep 'air' betwen stem and note lineDist *= 0.7 * mag(); y1 += _up * lineDist; } // in other TAB types, no correction } else { // non-TAB // move stem start to note attach point Note* n = up() ? chord()->downNote() : chord()->upNote(); y1 += (up() ? n->stemUpSE().y() : n->stemDownNW().y()); rypos() = n->rypos(); } } line.setLine(0.0, y1, 0.0, l); // compute bounding rectangle QRectF r(line.p1(), line.p2()); qreal lw5 = lineWidth() * .5; setbbox(r.normalized().adjusted(-lw5, -lw5, lw5, lw5)); adjustReadPos(); } //--------------------------------------------------------- // setLen //--------------------------------------------------------- void Stem::setLen(qreal v) { _len = (v < 0.0) ? 
-v : v; layout(); } //--------------------------------------------------------- // spatiumChanged //--------------------------------------------------------- void Stem::spatiumChanged(qreal oldValue, qreal newValue) { _userLen = (_userLen / oldValue) * newValue; layout(); } //--------------------------------------------------------- // draw //--------------------------------------------------------- void Stem::draw(QPainter* painter) const { // hide if second chord of a cross-measure pair if (chord() && chord()->crossMeasure() == CrossMeasure::SECOND) return; Staff* st = staff(); bool useTab = st && st->isTabStaff(); qreal lw = lineWidth(); painter->setPen(QPen(curColor(), lw, Qt::SolidLine, Qt::RoundCap)); painter->drawLine(line); if (!useTab || !chord()) return; // TODO: adjust bounding rectangle in layout() for dots and for slash StaffType* stt = st->staffType(); qreal sp = spatium(); bool _up = up(); // slashed half note stem if (chord()->durationType().type() == TDuration::DurationType::V_HALF && stt->minimStyle() == TablatureMinimStyle::SLASHED) { // position slashes onto stem qreal y = _up ? 
-(_len+_userLen) + STAFFTYPE_TAB_SLASH_2STARTY_UP*sp : (_len+_userLen) - STAFFTYPE_TAB_SLASH_2STARTY_DN*sp; // if stems through, try to align slashes within or across lines if (stt->stemThrough()) { qreal halfLineDist = stt->lineDistance().val() * sp * 0.5; qreal halfSlashHgt = STAFFTYPE_TAB_SLASH_2TOTHEIGHT * sp * 0.5; y = lrint( (y + halfSlashHgt) / halfLineDist) * halfLineDist - halfSlashHgt; } // draw slashes qreal hlfWdt= sp * STAFFTYPE_TAB_SLASH_WIDTH * 0.5; qreal sln = sp * STAFFTYPE_TAB_SLASH_SLANTY; qreal thk = sp * STAFFTYPE_TAB_SLASH_THICK; qreal displ = sp * STAFFTYPE_TAB_SLASH_DISPL; QPainterPath path; for (int i = 0; i < 2; ++i) { path.moveTo( hlfWdt, y); // top-right corner path.lineTo( hlfWdt, y+thk); // bottom-right corner path.lineTo(-hlfWdt, y+thk+sln); // bottom-left corner path.lineTo(-hlfWdt, y+sln); // top-left corner path.closeSubpath(); y += displ; } painter->setBrush(QBrush(curColor())); painter->setPen(Qt::NoPen); painter->drawPath(path); } // dots // NOT THE BEST PLACE FOR THIS? // with tablatures and stems beside staves, dots are not drawn near 'notes', but near stems int nDots = chord()->dots(); if (nDots > 0 && !stt->stemThrough()) { qreal x = chord()->dotPosX(); qreal y = ( (STAFFTYPE_TAB_DEFAULTSTEMLEN_DN * 0.2) * sp) * (_up ? 
-1.0 : 1.0); qreal step = score()->styleS(StyleIdx::dotDotDistance).val() * sp; for (int dot = 0; dot < nDots; dot++, x += step) drawSymbol(SymId::augmentationDot, painter, QPointF(x, y)); } } //--------------------------------------------------------- // write //--------------------------------------------------------- void Stem::write(Xml& xml) const { xml.stag("Stem"); Element::writeProperties(xml); if (_userLen != 0.0) xml.tag("userLen", _userLen / spatium()); xml.etag(); } //--------------------------------------------------------- // read //--------------------------------------------------------- void Stem::read(XmlReader& e) { while (e.readNextStartElement()) { const QStringRef& tag(e.name()); if (tag == "userLen") _userLen = e.readDouble() * spatium(); else if (tag == "subtype") // obsolete e.skipCurrentElement(); else if (!Element::readProperties(e)) e.unknown(); } } //--------------------------------------------------------- // updateGrips //--------------------------------------------------------- void Stem::updateGrips(Grip* defaultGrip, QVector<QRectF>& grip) const { *defaultGrip = Grip::START; grip[0].translate(pagePos() + line.p2()); } //--------------------------------------------------------- // startEdit //--------------------------------------------------------- void Stem::startEdit(MuseScoreView*, const QPointF&) { undoPushProperty(P_ID::USER_LEN); } //--------------------------------------------------------- // editDrag //--------------------------------------------------------- void Stem::editDrag(const EditData& ed) { qreal yDelta = ed.delta.y(); _userLen += up() ? 
-yDelta : yDelta; layout(); Chord* c = static_cast<Chord*>(parent()); if (c->hook()) c->hook()->move(QPointF(0.0, ed.delta.y())); } //--------------------------------------------------------- // reset //--------------------------------------------------------- void Stem::reset() { score()->undoChangeProperty(this, P_ID::USER_LEN, 0.0); Element::reset(); } //--------------------------------------------------------- // acceptDrop //--------------------------------------------------------- bool Stem::acceptDrop(const DropData& data) const { Element* e = data.element; if ((e->type() == Element::Type::TREMOLO) && (static_cast<Tremolo*>(e)->tremoloType() <= TremoloType::R64)) { return true; } return false; } //--------------------------------------------------------- // drop //--------------------------------------------------------- Element* Stem::drop(const DropData& data) { Element* e = data.element; Chord* ch = chord(); switch(e->type()) { case Element::Type::TREMOLO: e->setParent(ch); score()->setLayoutAll(true); score()->undoAddElement(e); return e; default: delete e; break; } return 0; } //--------------------------------------------------------- // getProperty //--------------------------------------------------------- QVariant Stem::getProperty(P_ID propertyId) const { switch(propertyId) { case P_ID::USER_LEN: return userLen(); default: return Element::getProperty(propertyId); } } //--------------------------------------------------------- // setProperty //--------------------------------------------------------- bool Stem::setProperty(P_ID propertyId, const QVariant& v) { score()->addRefresh(canvasBoundingRect()); switch(propertyId) { case P_ID::USER_LEN: setUserLen(v.toDouble()); break; default: return Element::setProperty(propertyId, v); } score()->addRefresh(canvasBoundingRect()); layout(); score()->addRefresh(canvasBoundingRect()); score()->setLayoutAll(false); //DEBUG return true; } //--------------------------------------------------------- // hookPos // 
in chord coordinates //--------------------------------------------------------- QPointF Stem::hookPos() const { QPointF p(pos() + line.p2()); qreal xoff = lineWidth() * .5; p.rx() += xoff; return p; } }
evinism/MuseScore
libmscore/stem.cpp
C++
gpl-2.0
11,515
/* * Copyright (C) 1994 Linus Torvalds * * 29 dec 2001 - Fixed oopses caused by unchecked access to the vm86 * stack - Manfred Spraul <manfred@colorfullife.com> * * 22 mar 2002 - Manfred detected the stackfaults, but didn't handle * them correctly. Now the emulation will be in a * consistent state after stackfaults - Kasper Dupont * <kasperd@daimi.au.dk> * * 22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont * <kasperd@daimi.au.dk> * * ?? ??? 2002 - Fixed premature returns from handle_vm86_fault * caused by Kasper Dupont's changes - Stas Sergeev * * 4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes. * Kasper Dupont <kasperd@daimi.au.dk> * * 9 apr 2002 - Changed syntax of macros in handle_vm86_fault. * Kasper Dupont <kasperd@daimi.au.dk> * * 9 apr 2002 - Changed stack access macros to jump to a label * instead of returning to userspace. This simplifies * do_int, and is needed by handle_vm6_fault. Kasper * Dupont <kasperd@daimi.au.dk> * */ #include <linux/capability.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/highmem.h> #include <linux/ptrace.h> #include <linux/audit.h> #include <linux/stddef.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/tlbflush.h> #include <asm/irq.h> /* * Known problems: * * Interrupt handling is not guaranteed: * - a real x86 will disable all interrupts for one instruction * after a "mov ss,xx" to make stack handling atomic even without * the 'lss' instruction. We can't guarantee this in v86 mode, * as the next instruction might result in a page fault or similar. * - a real x86 will have interrupts disabled for one instruction * past the 'sti' that enables them. We don't bother with all the * details yet. * * Let's hope these problems do not actually matter for anything. 
*/ #define KVM86 ((struct kernel_vm86_struct *)regs) #define VMPI KVM86->vm86plus /* * 8- and 16-bit register defines.. */ #define AL(regs) (((unsigned char *)&((regs)->pt.eax))[0]) #define AH(regs) (((unsigned char *)&((regs)->pt.eax))[1]) #define IP(regs) (*(unsigned short *)&((regs)->pt.eip)) #define SP(regs) (*(unsigned short *)&((regs)->pt.esp)) /* * virtual flags (16 and 32-bit versions) */ #define VFLAGS (*(unsigned short *)&(current->thread.v86flags)) #define VEFLAGS (current->thread.v86flags) #define set_flags(X,new,mask) \ ((X) = ((X) & ~(mask)) | ((new) & (mask))) #define SAFE_MASK (0xDD5) #define RETURN_MASK (0xDFF) /* convert kernel_vm86_regs to vm86_regs */ static int copy_vm86_regs_to_user(struct vm86_regs __user *user, const struct kernel_vm86_regs *regs) { int ret = 0; /* kernel_vm86_regs is missing xgs, so copy everything up to (but not including) orig_eax, and then rest including orig_eax. */ ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_eax)); ret += copy_to_user(&user->orig_eax, &regs->pt.orig_eax, sizeof(struct kernel_vm86_regs) - offsetof(struct kernel_vm86_regs, pt.orig_eax)); return ret; } /* convert vm86_regs to kernel_vm86_regs */ static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs, const struct vm86_regs __user *user, unsigned extra) { int ret = 0; /* copy eax-xfs inclusive */ ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_eax)); /* copy orig_eax-__gsh+extra */ ret += copy_from_user(&regs->pt.orig_eax, &user->orig_eax, sizeof(struct kernel_vm86_regs) - offsetof(struct kernel_vm86_regs, pt.orig_eax) + extra); return ret; } struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs)); struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs) { struct tss_struct *tss; struct pt_regs *ret; unsigned long tmp; /* * This gets called from entry.S with interrupts disabled, but * from process context. 
Enable interrupts here, before trying * to access user space. */ local_irq_enable(); if (!current->thread.vm86_info) { printk("no vm86_info: BAD\n"); do_exit(SIGSEGV); } set_flags(regs->pt.eflags, VEFLAGS, VIF_MASK | current->thread.v86mask); tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs,regs); tmp += put_user(current->thread.screen_bitmap,&current->thread.vm86_info->screen_bitmap); if (tmp) { printk("vm86: could not access userspace vm86_info\n"); do_exit(SIGSEGV); } tss = &per_cpu(init_tss, get_cpu()); current->thread.esp0 = current->thread.saved_esp0; current->thread.sysenter_cs = __KERNEL_CS; load_esp0(tss, &current->thread); current->thread.saved_esp0 = 0; put_cpu(); ret = KVM86->regs32; ret->xfs = current->thread.saved_fs; loadsegment(gs, current->thread.saved_gs); return ret; } static void mark_screen_rdonly(struct mm_struct *mm) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; spinlock_t *ptl; int i; pgd = pgd_offset(mm, 0xA0000); if (pgd_none_or_clear_bad(pgd)) goto out; pud = pud_offset(pgd, 0xA0000); if (pud_none_or_clear_bad(pud)) goto out; pmd = pmd_offset(pud, 0xA0000); if (pmd_none_or_clear_bad(pmd)) goto out; pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl); for (i = 0; i < 32; i++) { if (pte_present(*pte)) set_pte(pte, pte_wrprotect(*pte)); pte++; } pte_unmap_unlock(pte, ptl); out: flush_tlb(); } static int do_vm86_irq_handling(int subfunction, int irqnumber); static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk); asmlinkage int sys_vm86old(struct pt_regs regs) { struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.ebx; struct kernel_vm86_struct info; /* declare this _on top_, * this avoids wasting of stack space. * This remains on the stack until we * return to 32 bit user space. 
*/ struct task_struct *tsk; int tmp, ret = -EPERM; tsk = current; if (tsk->thread.saved_esp0) goto out; tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs, offsetof(struct kernel_vm86_struct, vm86plus) - sizeof(info.regs)); ret = -EFAULT; if (tmp) goto out; memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus); info.regs32 = &regs; tsk->thread.vm86_info = v86; do_sys_vm86(&info, tsk); ret = 0; /* we never return here */ out: return ret; } asmlinkage int sys_vm86(struct pt_regs regs) { struct kernel_vm86_struct info; /* declare this _on top_, * this avoids wasting of stack space. * This remains on the stack until we * return to 32 bit user space. */ struct task_struct *tsk; int tmp, ret; struct vm86plus_struct __user *v86; tsk = current; switch (regs.ebx) { case VM86_REQUEST_IRQ: case VM86_FREE_IRQ: case VM86_GET_IRQ_BITS: case VM86_GET_AND_RESET_IRQ: ret = do_vm86_irq_handling(regs.ebx, (int)regs.ecx); goto out; case VM86_PLUS_INSTALL_CHECK: /* NOTE: on old vm86 stuff this will return the error from access_ok(), because the subfunction is interpreted as (invalid) address to vm86_struct. So the installation check works. 
*/ ret = 0; goto out; } /* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */ ret = -EPERM; if (tsk->thread.saved_esp0) goto out; v86 = (struct vm86plus_struct __user *)regs.ecx; tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs, offsetof(struct kernel_vm86_struct, regs32) - sizeof(info.regs)); ret = -EFAULT; if (tmp) goto out; info.regs32 = &regs; info.vm86plus.is_vm86pus = 1; tsk->thread.vm86_info = (struct vm86_struct __user *)v86; do_sys_vm86(&info, tsk); ret = 0; /* we never return here */ out: return ret; } static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk) { struct tss_struct *tss; /* * make sure the vm86() system call doesn't try to do anything silly */ info->regs.pt.xds = 0; info->regs.pt.xes = 0; info->regs.pt.xfs = 0; /* we are clearing gs later just before "jmp resume_userspace", * because it is not saved/restored. */ /* * The eflags register is also special: we cannot trust that the user * has set it up safely, so this makes sure interrupt etc flags are * inherited from protected mode. 
*/ VEFLAGS = info->regs.pt.eflags; info->regs.pt.eflags &= SAFE_MASK; info->regs.pt.eflags |= info->regs32->eflags & ~SAFE_MASK; info->regs.pt.eflags |= VM_MASK; switch (info->cpu_type) { case CPU_286: tsk->thread.v86mask = 0; break; case CPU_386: tsk->thread.v86mask = NT_MASK | IOPL_MASK; break; case CPU_486: tsk->thread.v86mask = AC_MASK | NT_MASK | IOPL_MASK; break; default: tsk->thread.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK; break; } /* * Save old state, set default return value (%eax) to 0 */ info->regs32->eax = 0; tsk->thread.saved_esp0 = tsk->thread.esp0; tsk->thread.saved_fs = info->regs32->xfs; savesegment(gs, tsk->thread.saved_gs); tss = &per_cpu(init_tss, get_cpu()); tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0; if (cpu_has_sep) tsk->thread.sysenter_cs = 0; load_esp0(tss, &tsk->thread); put_cpu(); tsk->thread.screen_bitmap = info->screen_bitmap; if (info->flags & VM86_SCREEN_BITMAP) mark_screen_rdonly(tsk->mm); /*call audit_syscall_exit since we do not exit via the normal paths */ if (unlikely(current->audit_context)) audit_syscall_exit(AUDITSC_RESULT(0), 0); __asm__ __volatile__( "movl %0,%%esp\n\t" "movl %1,%%ebp\n\t" "mov %2, %%gs\n\t" "jmp resume_userspace" : /* no outputs */ :"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0)); /* we never return here */ } static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval) { struct pt_regs * regs32; regs32 = save_v86_state(regs16); regs32->eax = retval; __asm__ __volatile__("movl %0,%%esp\n\t" "movl %1,%%ebp\n\t" "jmp resume_userspace" : : "r" (regs32), "r" (current_thread_info())); } static inline void set_IF(struct kernel_vm86_regs * regs) { VEFLAGS |= VIF_MASK; if (VEFLAGS & VIP_MASK) return_to_32bit(regs, VM86_STI); } static inline void clear_IF(struct kernel_vm86_regs * regs) { VEFLAGS &= ~VIF_MASK; } static inline void clear_TF(struct kernel_vm86_regs * regs) { regs->pt.eflags &= ~TF_MASK; } static inline void clear_AC(struct kernel_vm86_regs * regs) { 
regs->pt.eflags &= ~AC_MASK; } /* It is correct to call set_IF(regs) from the set_vflags_* * functions. However someone forgot to call clear_IF(regs) * in the opposite case. * After the command sequence CLI PUSHF STI POPF you should * end up with interrups disabled, but you ended up with * interrupts enabled. * ( I was testing my own changes, but the only bug I * could find was in a function I had not changed. ) * [KD] */ static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs * regs) { set_flags(VEFLAGS, eflags, current->thread.v86mask); set_flags(regs->pt.eflags, eflags, SAFE_MASK); if (eflags & IF_MASK) set_IF(regs); else clear_IF(regs); } static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs) { set_flags(VFLAGS, flags, current->thread.v86mask); set_flags(regs->pt.eflags, flags, SAFE_MASK); if (flags & IF_MASK) set_IF(regs); else clear_IF(regs); } static inline unsigned long get_vflags(struct kernel_vm86_regs * regs) { unsigned long flags = regs->pt.eflags & RETURN_MASK; if (VEFLAGS & VIF_MASK) flags |= IF_MASK; flags |= IOPL_MASK; return flags | (VEFLAGS & current->thread.v86mask); } static inline int is_revectored(int nr, struct revectored_struct * bitmap) { __asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0" :"=r" (nr) :"m" (*bitmap),"r" (nr)); return nr; } #define val_byte(val, n) (((__u8 *)&val)[n]) #define pushb(base, ptr, val, err_label) \ do { \ __u8 __val = val; \ ptr--; \ if (put_user(__val, base + ptr) < 0) \ goto err_label; \ } while(0) #define pushw(base, ptr, val, err_label) \ do { \ __u16 __val = val; \ ptr--; \ if (put_user(val_byte(__val, 1), base + ptr) < 0) \ goto err_label; \ ptr--; \ if (put_user(val_byte(__val, 0), base + ptr) < 0) \ goto err_label; \ } while(0) #define pushl(base, ptr, val, err_label) \ do { \ __u32 __val = val; \ ptr--; \ if (put_user(val_byte(__val, 3), base + ptr) < 0) \ goto err_label; \ ptr--; \ if (put_user(val_byte(__val, 2), base + ptr) < 0) \ goto err_label; \ 
ptr--; \ if (put_user(val_byte(__val, 1), base + ptr) < 0) \ goto err_label; \ ptr--; \ if (put_user(val_byte(__val, 0), base + ptr) < 0) \ goto err_label; \ } while(0) #define popb(base, ptr, err_label) \ ({ \ __u8 __res; \ if (get_user(__res, base + ptr) < 0) \ goto err_label; \ ptr++; \ __res; \ }) #define popw(base, ptr, err_label) \ ({ \ __u16 __res; \ if (get_user(val_byte(__res, 0), base + ptr) < 0) \ goto err_label; \ ptr++; \ if (get_user(val_byte(__res, 1), base + ptr) < 0) \ goto err_label; \ ptr++; \ __res; \ }) #define popl(base, ptr, err_label) \ ({ \ __u32 __res; \ if (get_user(val_byte(__res, 0), base + ptr) < 0) \ goto err_label; \ ptr++; \ if (get_user(val_byte(__res, 1), base + ptr) < 0) \ goto err_label; \ ptr++; \ if (get_user(val_byte(__res, 2), base + ptr) < 0) \ goto err_label; \ ptr++; \ if (get_user(val_byte(__res, 3), base + ptr) < 0) \ goto err_label; \ ptr++; \ __res; \ }) /* There are so many possible reasons for this function to return * VM86_INTx, so adding another doesn't bother me. We can expect * userspace programs to be able to handle it. (Getting a problem * in userspace is always better than an Oops anyway.) 
[KD] */ static void do_int(struct kernel_vm86_regs *regs, int i, unsigned char __user * ssp, unsigned short sp) { unsigned long __user *intr_ptr; unsigned long segoffs; if (regs->pt.xcs == BIOSSEG) goto cannot_handle; if (is_revectored(i, &KVM86->int_revectored)) goto cannot_handle; if (i==0x21 && is_revectored(AH(regs),&KVM86->int21_revectored)) goto cannot_handle; intr_ptr = (unsigned long __user *) (i << 2); if (get_user(segoffs, intr_ptr)) goto cannot_handle; if ((segoffs >> 16) == BIOSSEG) goto cannot_handle; pushw(ssp, sp, get_vflags(regs), cannot_handle); pushw(ssp, sp, regs->pt.xcs, cannot_handle); pushw(ssp, sp, IP(regs), cannot_handle); regs->pt.xcs = segoffs >> 16; SP(regs) -= 6; IP(regs) = segoffs & 0xffff; clear_TF(regs); clear_IF(regs); clear_AC(regs); return; cannot_handle: return_to_32bit(regs, VM86_INTx + (i << 8)); } int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno) { if (VMPI.is_vm86pus) { if ( (trapno==3) || (trapno==1) ) return_to_32bit(regs, VM86_TRAP + (trapno << 8)); do_int(regs, trapno, (unsigned char __user *) (regs->pt.xss << 4), SP(regs)); return 0; } if (trapno !=1) return 1; /* we let this handle by the calling routine */ if (current->ptrace & PT_PTRACED) { unsigned long flags; spin_lock_irqsave(&current->sighand->siglock, flags); sigdelset(&current->blocked, SIGTRAP); recalc_sigpending(); spin_unlock_irqrestore(&current->sighand->siglock, flags); } send_sig(SIGTRAP, current, 1); current->thread.trap_no = trapno; current->thread.error_code = error_code; return 0; } void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code) { unsigned char opcode; unsigned char __user *csp; unsigned char __user *ssp; unsigned short ip, sp, orig_flags; int data32, pref_done; #define CHECK_IF_IN_TRAP \ if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \ newflags |= TF_MASK #define VM86_FAULT_RETURN do { \ if (VMPI.force_return_for_pic && (VEFLAGS & (IF_MASK | VIF_MASK))) \ return_to_32bit(regs, VM86_PICRETURN); 
\ if (orig_flags & TF_MASK) \ handle_vm86_trap(regs, 0, 1); \ return; } while (0) orig_flags = *(unsigned short *)&regs->pt.eflags; csp = (unsigned char __user *) (regs->pt.xcs << 4); ssp = (unsigned char __user *) (regs->pt.xss << 4); sp = SP(regs); ip = IP(regs); data32 = 0; pref_done = 0; do { switch (opcode = popb(csp, ip, simulate_sigsegv)) { case 0x66: /* 32-bit data */ data32=1; break; case 0x67: /* 32-bit address */ break; case 0x2e: /* CS */ break; case 0x3e: /* DS */ break; case 0x26: /* ES */ break; case 0x36: /* SS */ break; case 0x65: /* GS */ break; case 0x64: /* FS */ break; case 0xf2: /* repnz */ break; case 0xf3: /* rep */ break; default: pref_done = 1; } } while (!pref_done); switch (opcode) { /* pushf */ case 0x9c: if (data32) { pushl(ssp, sp, get_vflags(regs), simulate_sigsegv); SP(regs) -= 4; } else { pushw(ssp, sp, get_vflags(regs), simulate_sigsegv); SP(regs) -= 2; } IP(regs) = ip; VM86_FAULT_RETURN; /* popf */ case 0x9d: { unsigned long newflags; if (data32) { newflags=popl(ssp, sp, simulate_sigsegv); SP(regs) += 4; } else { newflags = popw(ssp, sp, simulate_sigsegv); SP(regs) += 2; } IP(regs) = ip; CHECK_IF_IN_TRAP; if (data32) { set_vflags_long(newflags, regs); } else { set_vflags_short(newflags, regs); } VM86_FAULT_RETURN; } /* int xx */ case 0xcd: { int intno=popb(csp, ip, simulate_sigsegv); IP(regs) = ip; if (VMPI.vm86dbg_active) { if ( (1 << (intno &7)) & VMPI.vm86dbg_intxxtab[intno >> 3] ) return_to_32bit(regs, VM86_INTx + (intno << 8)); } do_int(regs, intno, ssp, sp); return; } /* iret */ case 0xcf: { unsigned long newip; unsigned long newcs; unsigned long newflags; if (data32) { newip=popl(ssp, sp, simulate_sigsegv); newcs=popl(ssp, sp, simulate_sigsegv); newflags=popl(ssp, sp, simulate_sigsegv); SP(regs) += 12; } else { newip = popw(ssp, sp, simulate_sigsegv); newcs = popw(ssp, sp, simulate_sigsegv); newflags = popw(ssp, sp, simulate_sigsegv); SP(regs) += 6; } IP(regs) = newip; regs->pt.xcs = newcs; CHECK_IF_IN_TRAP; if (data32) { 
set_vflags_long(newflags, regs); } else { set_vflags_short(newflags, regs); } VM86_FAULT_RETURN; } /* cli */ case 0xfa: IP(regs) = ip; clear_IF(regs); VM86_FAULT_RETURN; /* sti */ /* * Damn. This is incorrect: the 'sti' instruction should actually * enable interrupts after the /next/ instruction. Not good. * * Probably needs some horsing around with the TF flag. Aiee.. */ case 0xfb: IP(regs) = ip; set_IF(regs); VM86_FAULT_RETURN; default: return_to_32bit(regs, VM86_UNKNOWN); } return; simulate_sigsegv: /* FIXME: After a long discussion with Stas we finally * agreed, that this is wrong. Here we should * really send a SIGSEGV to the user program. * But how do we create the correct context? We * are inside a general protection fault handler * and has just returned from a page fault handler. * The correct context for the signal handler * should be a mixture of the two, but how do we * get the information? [KD] */ return_to_32bit(regs, VM86_UNKNOWN); } /* ---------------- vm86 special IRQ passing stuff ----------------- */ #define VM86_IRQNAME "vm86irq" static struct vm86_irqs { struct task_struct *tsk; int sig; } vm86_irqs[16]; static DEFINE_SPINLOCK(irqbits_lock); static int irqbits; #define ALLOWED_SIGS ( 1 /* 0 = don't send a signal */ \ | (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO) | (1 << SIGURG) \ | (1 << SIGUNUSED) ) static irqreturn_t irq_handler(int intno, void *dev_id) { int irq_bit; unsigned long flags; spin_lock_irqsave(&irqbits_lock, flags); irq_bit = 1 << intno; if ((irqbits & irq_bit) || ! 
vm86_irqs[intno].tsk) goto out; irqbits |= irq_bit; if (vm86_irqs[intno].sig) send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1); /* * IRQ will be re-enabled when user asks for the irq (whether * polling or as a result of the signal) */ disable_irq_nosync(intno); spin_unlock_irqrestore(&irqbits_lock, flags); return IRQ_HANDLED; out: spin_unlock_irqrestore(&irqbits_lock, flags); return IRQ_NONE; } static inline void free_vm86_irq(int irqnumber) { unsigned long flags; free_irq(irqnumber, NULL); vm86_irqs[irqnumber].tsk = NULL; spin_lock_irqsave(&irqbits_lock, flags); irqbits &= ~(1 << irqnumber); spin_unlock_irqrestore(&irqbits_lock, flags); } void release_vm86_irqs(struct task_struct *task) { int i; for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++) if (vm86_irqs[i].tsk == task) free_vm86_irq(i); } static inline int get_and_reset_irq(int irqnumber) { int bit; unsigned long flags; int ret = 0; if (invalid_vm86_irq(irqnumber)) return 0; if (vm86_irqs[irqnumber].tsk != current) return 0; spin_lock_irqsave(&irqbits_lock, flags); bit = irqbits & (1 << irqnumber); irqbits &= ~bit; if (bit) { enable_irq(irqnumber); ret = 1; } spin_unlock_irqrestore(&irqbits_lock, flags); return ret; } static int do_vm86_irq_handling(int subfunction, int irqnumber) { int ret; switch (subfunction) { case VM86_GET_AND_RESET_IRQ: { return get_and_reset_irq(irqnumber); } case VM86_GET_IRQ_BITS: { return irqbits; } case VM86_REQUEST_IRQ: { int sig = irqnumber >> 8; int irq = irqnumber & 255; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM; if (invalid_vm86_irq(irq)) return -EPERM; if (vm86_irqs[irq].tsk) return -EPERM; ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL); if (ret) return ret; vm86_irqs[irq].sig = sig; vm86_irqs[irq].tsk = current; return irq; } case VM86_FREE_IRQ: { if (invalid_vm86_irq(irqnumber)) return -EPERM; if (!vm86_irqs[irqnumber].tsk) return 0; if (vm86_irqs[irqnumber].tsk != current) return -EPERM; 
free_vm86_irq(irqnumber); return 0; } } return -EINVAL; }
TechTux/linux-2.6.46.6
arch/x86/kernel/vm86_32.c
C
gpl-2.0
22,243
import sys, copy from itertools import * import benchbase from benchbase import (with_attributes, with_text, onlylib, serialized, children, nochange) ############################################################ # Benchmarks ############################################################ class BenchMark(benchbase.TreeBenchMark): repeat100 = range(100) repeat1000 = range(1000) repeat3000 = range(3000) def __init__(self, lib): from lxml import etree, objectify self.objectify = objectify parser = etree.XMLParser(remove_blank_text=True) lookup = objectify.ObjectifyElementClassLookup() parser.setElementClassLookup(lookup) super(BenchMark, self).__init__(etree, parser) @nochange def bench_attribute(self, root): "1 2 4" for i in self.repeat3000: root.zzzzz def bench_attribute_assign_int(self, root): "1 2 4" for i in self.repeat3000: root.XYZ = 5 def bench_attribute_assign_string(self, root): "1 2 4" for i in self.repeat3000: root.XYZ = "5" @nochange def bench_attribute_cached(self, root): "1 2 4" cache = root.zzzzz for i in self.repeat3000: root.zzzzz @nochange def bench_attributes_deep(self, root): "1 2 4" for i in self.repeat3000: root.zzzzz['{cdefg}a00001'] @nochange def bench_attributes_deep_cached(self, root): "1 2 4" cache1 = root.zzzzz cache2 = cache1['{cdefg}a00001'] for i in self.repeat3000: root.zzzzz['{cdefg}a00001'] @nochange def bench_objectpath(self, root): "1 2 4" path = self.objectify.ObjectPath(".zzzzz") for i in self.repeat3000: path(root) @nochange def bench_objectpath_deep(self, root): "1 2 4" path = self.objectify.ObjectPath(".zzzzz.{cdefg}a00001") for i in self.repeat3000: path(root) @nochange def bench_objectpath_deep_cached(self, root): "1 2 4" cache1 = root.zzzzz cache2 = cache1['{cdefg}a00001'] path = self.objectify.ObjectPath(".zzzzz.{cdefg}a00001") for i in self.repeat3000: path(root) @with_text(text=True, utext=True, no_text=True) def bench_annotate(self, root): self.objectify.annotate(root) @nochange def bench_descendantpaths(self, root): 
root.descendantpaths() @nochange @with_text(text=True) def bench_type_inference(self, root): "1 2 4" el = root.aaaaa for i in self.repeat1000: el.getchildren() @nochange @with_text(text=True) def bench_type_inference_annotated(self, root): "1 2 4" el = root.aaaaa self.objectify.annotate(el) for i in self.repeat1000: el.getchildren() @nochange @children def bench_elementmaker(self, children): E = self.objectify.E for child in children: root = E.this( "test", E.will( E.do("nothing"), E.special, ) ) if __name__ == '__main__': benchbase.main(BenchMark)
mhnatiuk/phd_sociology_of_religion
scrapper/build/lxml/benchmark/bench_objectify.py
Python
gpl-2.0
3,322
/* * arch/sh/kernel/process.c * * This file handles the architecture-dependent parts of process handling.. * * Copyright (C) 1995 Linus Torvalds * * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima * Copyright (C) 2006 Lineo Solutions Inc. support SH4A UBC * Copyright (C) 2002 - 2007 Paul Mundt */ #include <linux/module.h> #include <linux/mm.h> #include <linux/elfcore.h> #include <linux/pm.h> #include <linux/kallsyms.h> #include <linux/kexec.h> #include <linux/kdebug.h> #include <linux/tick.h> #include <linux/reboot.h> #include <linux/fs.h> #include <linux/preempt.h> #include <asm/uaccess.h> #include <asm/mmu_context.h> #include <asm/pgalloc.h> #include <asm/system.h> #include <asm/ubc.h> static int hlt_counter; int ubc_usercnt = 0; void (*pm_idle)(void); void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); void disable_hlt(void) { hlt_counter++; } EXPORT_SYMBOL(disable_hlt); void enable_hlt(void) { hlt_counter--; } EXPORT_SYMBOL(enable_hlt); static int __init nohlt_setup(char *__unused) { hlt_counter = 1; return 1; } __setup("nohlt", nohlt_setup); static int __init hlt_setup(char *__unused) { hlt_counter = 0; return 1; } __setup("hlt", hlt_setup); void default_idle(void) { if (!hlt_counter) { clear_thread_flag(TIF_POLLING_NRFLAG); smp_mb__after_clear_bit(); set_bl_bit(); while (!need_resched()) cpu_sleep(); clear_bl_bit(); set_thread_flag(TIF_POLLING_NRFLAG); } else while (!need_resched()) cpu_relax(); } void cpu_idle(void) { set_thread_flag(TIF_POLLING_NRFLAG); /* endless idle loop with no priority at all */ while (1) { void (*idle)(void) = pm_idle; if (!idle) idle = default_idle; tick_nohz_stop_sched_tick(); while (!need_resched()) idle(); tick_nohz_restart_sched_tick(); preempt_enable_no_resched(); schedule(); preempt_disable(); check_pgt_cache(); } } void machine_restart(char * __unused) { /* SR.BL=1 and invoke address error to let CPU reset (manual reset) */ asm volatile("ldc %0, sr\n\t" "mov.l @%1, %0" : : "r" (0x10000000), "r" 
(0x80000001)); } void machine_halt(void) { local_irq_disable(); while (1) cpu_sleep(); } void machine_power_off(void) { if (pm_power_off) pm_power_off(); } void show_regs(struct pt_regs * regs) { printk("\n"); printk("Pid : %d, Comm: %20s\n", task_pid_nr(current), current->comm); print_symbol("PC is at %s\n", instruction_pointer(regs)); printk("PC : %08lx SP : %08lx SR : %08lx ", regs->pc, regs->regs[15], regs->sr); #ifdef CONFIG_MMU printk("TEA : %08x ", ctrl_inl(MMU_TEA)); #else printk(" "); #endif printk("%s\n", print_tainted()); printk("R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n", regs->regs[0],regs->regs[1], regs->regs[2],regs->regs[3]); printk("R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n", regs->regs[4],regs->regs[5], regs->regs[6],regs->regs[7]); printk("R8 : %08lx R9 : %08lx R10 : %08lx R11 : %08lx\n", regs->regs[8],regs->regs[9], regs->regs[10],regs->regs[11]); printk("R12 : %08lx R13 : %08lx R14 : %08lx\n", regs->regs[12],regs->regs[13], regs->regs[14]); printk("MACH: %08lx MACL: %08lx GBR : %08lx PR : %08lx\n", regs->mach, regs->macl, regs->gbr, regs->pr); show_trace(NULL, (unsigned long *)regs->regs[15], regs); } /* * Create a kernel thread */ /* * This is the mechanism for creating a new kernel thread. * */ extern void kernel_thread_helper(void); __asm__(".align 5\n" "kernel_thread_helper:\n\t" "jsr @r5\n\t" " nop\n\t" "mov.l 1f, r1\n\t" "jsr @r1\n\t" " mov r0, r4\n\t" ".align 2\n\t" "1:.long do_exit"); /* Don't use this in BL=1(cli). Or else, CPU resets! */ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) { struct pt_regs regs; memset(&regs, 0, sizeof(regs)); regs.regs[4] = (unsigned long)arg; regs.regs[5] = (unsigned long)fn; regs.pc = (unsigned long)kernel_thread_helper; regs.sr = (1 << 30); /* Ok, create the new process.. */ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL); } /* * Free current thread data structures etc.. 
*/ void exit_thread(void) { if (current->thread.ubc_pc) { current->thread.ubc_pc = 0; ubc_usercnt -= 1; } } void flush_thread(void) { #if defined(CONFIG_SH_FPU) struct task_struct *tsk = current; /* Forget lazy FPU state */ clear_fpu(tsk, task_pt_regs(tsk)); clear_used_math(); #endif } void release_thread(struct task_struct *dead_task) { /* do nothing */ } /* Fill in the fpu structure for a core dump.. */ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) { int fpvalid = 0; #if defined(CONFIG_SH_FPU) struct task_struct *tsk = current; fpvalid = !!tsk_used_math(tsk); if (fpvalid) { unlazy_fpu(tsk, regs); memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu)); } #endif return fpvalid; } /* * Capture the user space registers if the task is not running (in user space) */ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) { struct pt_regs ptregs; ptregs = *task_pt_regs(tsk); elf_core_copy_regs(regs, &ptregs); return 1; } int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpu) { int fpvalid = 0; #if defined(CONFIG_SH_FPU) fpvalid = !!tsk_used_math(tsk); if (fpvalid) { unlazy_fpu(tsk, task_pt_regs(tsk)); memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu)); } #endif return fpvalid; } asmlinkage void ret_from_fork(void); int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, unsigned long unused, struct task_struct *p, struct pt_regs *regs) { struct thread_info *ti = task_thread_info(p); struct pt_regs *childregs; #if defined(CONFIG_SH_FPU) struct task_struct *tsk = current; unlazy_fpu(tsk, regs); p->thread.fpu = tsk->thread.fpu; copy_to_stopped_child_used_math(p); #endif childregs = task_pt_regs(p); *childregs = *regs; if (user_mode(regs)) { childregs->regs[15] = usp; ti->addr_limit = USER_DS; } else { childregs->regs[15] = (unsigned long)childregs; ti->addr_limit = KERNEL_DS; } if (clone_flags & CLONE_SETTLS) childregs->gbr = childregs->regs[0]; childregs->regs[0] = 0; /* Set return value for child */ p->thread.sp = (unsigned long) 
childregs; p->thread.pc = (unsigned long) ret_from_fork; p->thread.ubc_pc = 0; return 0; } /* Tracing by user break controller. */ static void ubc_set_tracing(int asid, unsigned long pc) { #if defined(CONFIG_CPU_SH4A) unsigned long val; val = (UBC_CBR_ID_INST | UBC_CBR_RW_READ | UBC_CBR_CE); val |= (UBC_CBR_AIE | UBC_CBR_AIV_SET(asid)); ctrl_outl(val, UBC_CBR0); ctrl_outl(pc, UBC_CAR0); ctrl_outl(0x0, UBC_CAMR0); ctrl_outl(0x0, UBC_CBCR); val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE); ctrl_outl(val, UBC_CRR0); /* Read UBC register that we wrote last, for checking update */ val = ctrl_inl(UBC_CRR0); #else /* CONFIG_CPU_SH4A */ ctrl_outl(pc, UBC_BARA); #ifdef CONFIG_MMU ctrl_outb(asid, UBC_BASRA); #endif ctrl_outl(0, UBC_BAMRA); if (current_cpu_data.type == CPU_SH7729 || current_cpu_data.type == CPU_SH7710 || current_cpu_data.type == CPU_SH7712) { ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA); ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR); } else { ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA); ctrl_outw(BRCR_PCBA, UBC_BRCR); } #endif /* CONFIG_CPU_SH4A */ } /* * switch_to(x,y) should switch tasks from x to y. 
* */ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next) { #if defined(CONFIG_SH_FPU) unlazy_fpu(prev, task_pt_regs(prev)); #endif #if defined(CONFIG_GUSA) && defined(CONFIG_PREEMPT) { struct pt_regs *regs; preempt_disable(); regs = task_pt_regs(prev); if (user_mode(regs) && regs->regs[15] >= 0xc0000000) { int offset = (int)regs->regs[15]; /* Reset stack pointer: clear critical region mark */ regs->regs[15] = regs->regs[1]; if (regs->pc < regs->regs[0]) /* Go to rewind point */ regs->pc = regs->regs[0] + offset; } preempt_enable_no_resched(); } #endif #ifdef CONFIG_MMU /* * Restore the kernel mode register * k7 (r7_bank1) */ asm volatile("ldc %0, r7_bank" : /* no output */ : "r" (task_thread_info(next))); #endif /* If no tasks are using the UBC, we're done */ if (ubc_usercnt == 0) /* If no tasks are using the UBC, we're done */; else if (next->thread.ubc_pc && next->mm) { int asid = 0; #ifdef CONFIG_MMU asid |= cpu_asid(smp_processor_id(), next->mm); #endif ubc_set_tracing(asid, next->thread.ubc_pc); } else { #if defined(CONFIG_CPU_SH4A) ctrl_outl(UBC_CBR_INIT, UBC_CBR0); ctrl_outl(UBC_CRR_INIT, UBC_CRR0); #else ctrl_outw(0, UBC_BBRA); ctrl_outw(0, UBC_BBRB); #endif } return prev; } asmlinkage int sys_fork(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, struct pt_regs __regs) { #ifdef CONFIG_MMU struct pt_regs *regs = RELOC_HIDE(&__regs, 0); return do_fork(SIGCHLD, regs->regs[15], regs, 0, NULL, NULL); #else /* fork almost works, enough to trick you into looking elsewhere :-( */ return -EINVAL; #endif } asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp, unsigned long parent_tidptr, unsigned long child_tidptr, struct pt_regs __regs) { struct pt_regs *regs = RELOC_HIDE(&__regs, 0); if (!newsp) newsp = regs->regs[15]; return do_fork(clone_flags, newsp, regs, 0, (int __user *)parent_tidptr, (int __user *)child_tidptr); } /* * This is trivial, and on the face of it looks like it * could 
equally well be done in user mode. * * Not so, for quite unobvious reasons - register pressure. * In user mode vfork() cannot have a stack frame, and if * done by calling the "clone()" system call directly, you * do not have enough call-clobbered registers to hold all * the information you need. */ asmlinkage int sys_vfork(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, struct pt_regs __regs) { struct pt_regs *regs = RELOC_HIDE(&__regs, 0); return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->regs[15], regs, 0, NULL, NULL); } /* * sys_execve() executes a new program. */ asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv, char __user * __user *uenvp, unsigned long r7, struct pt_regs __regs) { struct pt_regs *regs = RELOC_HIDE(&__regs, 0); int error; char *filename; filename = getname(ufilename); error = PTR_ERR(filename); if (IS_ERR(filename)) goto out; error = do_execve(filename, uargv, uenvp, regs); if (error == 0) { task_lock(current); current->ptrace &= ~PT_DTRACE; task_unlock(current); } putname(filename); out: return error; } unsigned long get_wchan(struct task_struct *p) { unsigned long pc; if (!p || p == current || p->state == TASK_RUNNING) return 0; /* * The same comment as on the Alpha applies here, too ... */ pc = thread_saved_pc(p); #ifdef CONFIG_FRAME_POINTER if (in_sched_functions(pc)) { unsigned long schedule_frame = (unsigned long)p->thread.sp; return ((unsigned long *)schedule_frame)[21]; } #endif return pc; } asmlinkage void break_point_trap(void) { /* Clear tracing. */ #if defined(CONFIG_CPU_SH4A) ctrl_outl(UBC_CBR_INIT, UBC_CBR0); ctrl_outl(UBC_CRR_INIT, UBC_CRR0); #else ctrl_outw(0, UBC_BBRA); ctrl_outw(0, UBC_BBRB); #endif current->thread.ubc_pc = 0; ubc_usercnt -= 1; force_sig(SIGTRAP, current); } /* * Generic trap handler. 
*/ asmlinkage void debug_trap_handler(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, struct pt_regs __regs) { struct pt_regs *regs = RELOC_HIDE(&__regs, 0); /* Rewind */ regs->pc -= instruction_size(ctrl_inw(regs->pc - 4)); if (notify_die(DIE_TRAP, "debug trap", regs, 0, regs->tra & 0xff, SIGTRAP) == NOTIFY_STOP) return; force_sig(SIGTRAP, current); } /* * Special handler for BUG() traps. */ asmlinkage void bug_trap_handler(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, struct pt_regs __regs) { struct pt_regs *regs = RELOC_HIDE(&__regs, 0); /* Rewind */ regs->pc -= instruction_size(ctrl_inw(regs->pc - 4)); if (notify_die(DIE_TRAP, "bug trap", regs, 0, TRAPA_BUG_OPCODE & 0xff, SIGTRAP) == NOTIFY_STOP) return; #ifdef CONFIG_BUG if (__kernel_text_address(instruction_pointer(regs))) { u16 insn = *(u16 *)instruction_pointer(regs); if (insn == TRAPA_BUG_OPCODE) handle_BUG(regs); } #endif force_sig(SIGTRAP, current); }
bqwd36/android-athena
arch/sh/kernel/process.c
C
gpl-2.0
12,552
/* * Hashable interface. */ #ifndef INTERFACES__HASHABLE_HH #define INTERFACES__HASHABLE_HH namespace interfaces { /* * Template interface for hashable objects. * * If a class X extends hashable<T,H> then: * (1) X must be a T, and * (2) X must hash to an object of type H via its hash() method. * * Note that the interface is the same regardless of T's const qualifier. */ template <typename T, typename H> class hashable { public: /* * Destructor. */ virtual ~hashable(); /* * Function for hashing a hashable object. */ static H& hash(const hashable<T,H>&); /* * Hash method. */ virtual H& hash() const = 0; }; template <typename T, typename H> class hashable<const T,H> : public hashable<T,H> { }; /* * Destructor. */ template <typename T, typename H> hashable<T,H>::~hashable() { } /* * Function for hashing a hashable object. */ template <typename T, typename H> H& hashable<T,H>::hash(const hashable<T,H>& t) { return (t.hash()); } } /* namespace interfaces */ #endif
luizcavalcanti/ForestClassifier
segmentation/gpb/src/source/gpb_src/include/interfaces/hashable.hh
C++
gpl-2.0
1,050
/*- * BSD LICENSE * * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTE_MEMORY_H_ #define _RTE_MEMORY_H_ /** * @file * * Memory-related RTE API. 
*/ #include <stdint.h> #include <stddef.h> #include <stdio.h> #ifdef RTE_EXEC_ENV_LINUXAPP #include <exec-env/rte_dom0_common.h> #endif #ifdef __cplusplus extern "C" { #endif enum rte_page_sizes { RTE_PGSIZE_4K = 1ULL << 12, RTE_PGSIZE_64K = 1ULL << 16, RTE_PGSIZE_256K = 1ULL << 18, RTE_PGSIZE_2M = 1ULL << 21, RTE_PGSIZE_16M = 1ULL << 24, RTE_PGSIZE_256M = 1ULL << 28, RTE_PGSIZE_512M = 1ULL << 29, RTE_PGSIZE_1G = 1ULL << 30, RTE_PGSIZE_4G = 1ULL << 32, RTE_PGSIZE_16G = 1ULL << 34, }; #define SOCKET_ID_ANY -1 /**< Any NUMA socket. */ #ifndef RTE_CACHE_LINE_SIZE #define RTE_CACHE_LINE_SIZE 64 /**< Cache line size. */ #endif #define RTE_CACHE_LINE_MASK (RTE_CACHE_LINE_SIZE-1) /**< Cache line mask. */ #define RTE_CACHE_LINE_ROUNDUP(size) \ (RTE_CACHE_LINE_SIZE * ((size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE)) /**< Return the first cache-aligned value greater or equal to size. */ /** * Force alignment to cache line. */ #define __rte_cache_aligned __attribute__((__aligned__(RTE_CACHE_LINE_SIZE))) typedef uint64_t phys_addr_t; /**< Physical address definition. */ #define RTE_BAD_PHYS_ADDR ((phys_addr_t)-1) /** * Physical memory segment descriptor. */ struct rte_memseg { phys_addr_t phys_addr; /**< Start physical address. */ union { void *addr; /**< Start virtual address. */ uint64_t addr_64; /**< Makes sure addr is always 64 bits */ }; #ifdef RTE_LIBRTE_IVSHMEM phys_addr_t ioremap_addr; /**< Real physical address inside the VM */ #endif size_t len; /**< Length of the segment. */ uint64_t hugepage_sz; /**< The pagesize of underlying memory */ int32_t socket_id; /**< NUMA socket ID. */ uint32_t nchannel; /**< Number of channels. */ uint32_t nrank; /**< Number of ranks. */ #ifdef RTE_LIBRTE_XEN_DOM0 /**< store segment MFNs */ uint64_t mfn[DOM0_NUM_MEMBLOCK]; #endif } __attribute__((__packed__)); /** * Lock page in physical memory and prevent from swapping. * * @param virt * The virtual address. * @return * 0 on success, negative on error. 
*/ int rte_mem_lock_page(const void *virt); /** * Get physical address of any mapped virtual address in the current process. * It is found by browsing the /proc/self/pagemap special file. * The page must be locked. * * @param virt * The virtual address. * @return * The physical address or RTE_BAD_PHYS_ADDR on error. */ phys_addr_t rte_mem_virt2phy(const void *virt); /** * Get the layout of the available physical memory. * * It can be useful for an application to have the full physical * memory layout to decide the size of a memory zone to reserve. This * table is stored in rte_config (see rte_eal_get_configuration()). * * @return * - On success, return a pointer to a read-only table of struct * rte_physmem_desc elements, containing the layout of all * addressable physical memory. The last element of the table * contains a NULL address. * - On error, return NULL. This should not happen since it is a fatal * error that will probably cause the entire system to panic. */ const struct rte_memseg *rte_eal_get_physmem_layout(void); /** * Dump the physical memory layout to the console. * * @param f * A pointer to a file for output */ void rte_dump_physmem_layout(FILE *f); /** * Get the total amount of available physical memory. * * @return * The total amount of available physical memory in bytes. */ uint64_t rte_eal_get_physmem_size(void); /** * Get the number of memory channels. * * @return * The number of memory channels on the system. The value is 0 if unknown * or not the same on all devices. */ unsigned rte_memory_get_nchannel(void); /** * Get the number of memory ranks. * * @return * The number of memory ranks on the system. The value is 0 if unknown or * not the same on all devices. */ unsigned rte_memory_get_nrank(void); #ifdef RTE_LIBRTE_XEN_DOM0 /** * Return the physical address of elt, which is an element of the pool mp. * * @param memseg_id * The mempool is from which memory segment. * @param phy_addr * physical address of elt. 
* * @return * The physical address or error. */ phys_addr_t rte_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr); /** * Memory init for supporting application running on Xen domain0. * * @param void * * @return * 0: successfully * negative: error */ int rte_xen_dom0_memory_init(void); /** * Attach to memory setments of primary process on Xen domain0. * * @param void * * @return * 0: successfully * negative: error */ int rte_xen_dom0_memory_attach(void); #endif #ifdef __cplusplus } #endif #endif /* _RTE_MEMORY_H_ */
fatedier/studies
mtcp/dpdk-2.1.0/lib/librte_eal/common/include/rte_memory.h
C
gpl-3.0
6,567
/*---------------------------------------------------------------------------*\ ========= | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox \\ / O peration | \\ / A nd | Copyright (C) 2011-2014 OpenFOAM Foundation \\/ M anipulation | ------------------------------------------------------------------------------- License This file is part of OpenFOAM. OpenFOAM is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. OpenFOAM is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>. \*---------------------------------------------------------------------------*/ #include "LocalInteraction.H" // * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * * // template<class CloudType> Foam::LocalInteraction<CloudType>::LocalInteraction ( const dictionary& dict, CloudType& cloud ) : PatchInteractionModel<CloudType>(dict, cloud, typeName), patchData_(cloud.mesh(), this->coeffDict()), nEscape_(patchData_.size(), 0), massEscape_(patchData_.size(), 0.0), nStick_(patchData_.size(), 0), massStick_(patchData_.size(), 0.0), writeFields_(this->coeffDict().lookupOrDefault("writeFields", false)), massEscapePtr_(NULL), massStickPtr_(NULL) { if (writeFields_) { word massEscapeName(this->owner().name() + ":massEscape"); word massStickName(this->owner().name() + ":massStick"); Info<< " Interaction fields will be written to " << massEscapeName << " and " << massStickName << endl; (void)massEscape(); (void)massStick(); } else { Info<< " Interaction fields will not be written" << endl; } // check that interactions are 
valid/specified forAll(patchData_, patchI) { const word& interactionTypeName = patchData_[patchI].interactionTypeName(); const typename PatchInteractionModel<CloudType>::interactionType& it = this->wordToInteractionType(interactionTypeName); if (it == PatchInteractionModel<CloudType>::itOther) { const word& patchName = patchData_[patchI].patchName(); FatalErrorIn("LocalInteraction(const dictionary&, CloudType&)") << "Unknown patch interaction type " << interactionTypeName << " for patch " << patchName << ". Valid selections are:" << this->PatchInteractionModel<CloudType>::interactionTypeNames_ << nl << exit(FatalError); } } } template<class CloudType> Foam::LocalInteraction<CloudType>::LocalInteraction ( const LocalInteraction<CloudType>& pim ) : PatchInteractionModel<CloudType>(pim), patchData_(pim.patchData_), nEscape_(pim.nEscape_), massEscape_(pim.massEscape_), nStick_(pim.nStick_), massStick_(pim.massStick_), writeFields_(pim.writeFields_), massEscapePtr_(NULL), massStickPtr_(NULL) {} // * * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * * // template<class CloudType> Foam::LocalInteraction<CloudType>::~LocalInteraction() {} // * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * * // template<class CloudType> Foam::volScalarField& Foam::LocalInteraction<CloudType>::massEscape() { if (!massEscapePtr_.valid()) { const fvMesh& mesh = this->owner().mesh(); massEscapePtr_.reset ( new volScalarField ( IOobject ( this->owner().name() + ":massEscape", mesh.time().timeName(), mesh, IOobject::READ_IF_PRESENT, IOobject::AUTO_WRITE ), mesh, dimensionedScalar("zero", dimMass, 0.0) ) ); } return massEscapePtr_(); } template<class CloudType> Foam::volScalarField& Foam::LocalInteraction<CloudType>::massStick() { if (!massStickPtr_.valid()) { const fvMesh& mesh = this->owner().mesh(); massStickPtr_.reset ( new volScalarField ( IOobject ( this->owner().name() + ":massStick", mesh.time().timeName(), mesh, IOobject::READ_IF_PRESENT, 
IOobject::AUTO_WRITE ), mesh, dimensionedScalar("zero", dimMass, 0.0) ) ); } return massStickPtr_(); } template<class CloudType> bool Foam::LocalInteraction<CloudType>::correct ( typename CloudType::parcelType& p, const polyPatch& pp, bool& keepParticle, const scalar trackFraction, const tetIndices& tetIs ) { label patchI = patchData_.applyToPatch(pp.index()); if (patchI >= 0) { vector& U = p.U(); bool& active = p.active(); typename PatchInteractionModel<CloudType>::interactionType it = this->wordToInteractionType ( patchData_[patchI].interactionTypeName() ); switch (it) { case PatchInteractionModel<CloudType>::itEscape: { scalar dm = p.mass()*p.nParticle(); keepParticle = false; active = false; U = vector::zero; nEscape_[patchI]++; massEscape_[patchI] += dm; if (writeFields_) { label pI = pp.index(); label fI = pp.whichFace(p.face()); massEscape().boundaryField()[pI][fI] += dm; } break; } case PatchInteractionModel<CloudType>::itStick: { scalar dm = p.mass()*p.nParticle(); keepParticle = true; active = false; U = vector::zero; nStick_[patchI]++; massStick_[patchI] += dm; if (writeFields_) { label pI = pp.index(); label fI = pp.whichFace(p.face()); massStick().boundaryField()[pI][fI] += dm; } break; } case PatchInteractionModel<CloudType>::itRebound: { keepParticle = true; active = true; vector nw; vector Up; this->owner().patchData(p, pp, trackFraction, tetIs, nw, Up); // Calculate motion relative to patch velocity U -= Up; scalar Un = U & nw; vector Ut = U - Un*nw; if (Un > 0) { U -= (1.0 + patchData_[patchI].e())*Un*nw; } U -= patchData_[patchI].mu()*Ut; // Return velocity to global space U += Up; break; } default: { FatalErrorIn ( "bool LocalInteraction<CloudType>::correct" "(" "typename CloudType::parcelType&, " "const polyPatch&, " "bool&, " "const scalar, " "const tetIndices&" ") const" ) << "Unknown interaction type " << patchData_[patchI].interactionTypeName() << "(" << it << ") for patch " << patchData_[patchI].patchName() << ". 
Valid selections are:" << this->interactionTypeNames_ << endl << abort(FatalError); } } return true; } return false; } template<class CloudType> void Foam::LocalInteraction<CloudType>::info(Ostream& os) { // retrieve any stored data labelList npe0(patchData_.size(), 0); this->getModelProperty("nEscape", npe0); scalarList mpe0(patchData_.size(), 0.0); this->getModelProperty("massEscape", mpe0); labelList nps0(patchData_.size(), 0); this->getModelProperty("nStick", nps0); scalarList mps0(patchData_.size(), 0.0); this->getModelProperty("massStick", mps0); // accumulate current data labelList npe(nEscape_); Pstream::listCombineGather(npe, plusEqOp<label>()); npe = npe + npe0; scalarList mpe(massEscape_); Pstream::listCombineGather(mpe, plusEqOp<scalar>()); mpe = mpe + mpe0; labelList nps(nStick_); Pstream::listCombineGather(nps, plusEqOp<label>()); nps = nps + nps0; scalarList mps(massStick_); Pstream::listCombineGather(mps, plusEqOp<scalar>()); mps = mps + mps0; forAll(patchData_, i) { os << " Parcel fate (number, mass) : patch " << patchData_[i].patchName() << nl << " - escape = " << npe[i] << ", " << mpe[i] << nl << " - stick = " << nps[i] << ", " << mps[i] << nl; } if (this->outputTime()) { this->setModelProperty("nEscape", npe); nEscape_ = 0; this->setModelProperty("massEscape", mpe); massEscape_ = 0.0; this->setModelProperty("nStick", nps); nStick_ = 0; this->setModelProperty("massStick", mps); massStick_ = 0.0; } } // ************************************************************************* //
adrcad/OpenFOAM-2.3.x
src/lagrangian/intermediate/submodels/Kinematic/PatchInteractionModel/LocalInteraction/LocalInteraction.C
C++
gpl-3.0
10,197
#pragma once // MESSAGE MESSAGE_INTERVAL PACKING #define MAVLINK_MSG_ID_MESSAGE_INTERVAL 244 MAVPACKED( typedef struct __mavlink_message_interval_t { int32_t interval_us; /*< The interval between two messages, in microseconds. A value of -1 indicates this stream is disabled, 0 indicates it is not available, > 0 indicates the interval at which it is sent.*/ uint16_t message_id; /*< The ID of the requested MAVLink message. v1.0 is limited to 254 messages.*/ }) mavlink_message_interval_t; #define MAVLINK_MSG_ID_MESSAGE_INTERVAL_LEN 6 #define MAVLINK_MSG_ID_MESSAGE_INTERVAL_MIN_LEN 6 #define MAVLINK_MSG_ID_244_LEN 6 #define MAVLINK_MSG_ID_244_MIN_LEN 6 #define MAVLINK_MSG_ID_MESSAGE_INTERVAL_CRC 95 #define MAVLINK_MSG_ID_244_CRC 95 #if MAVLINK_COMMAND_24BIT #define MAVLINK_MESSAGE_INFO_MESSAGE_INTERVAL { \ 244, \ "MESSAGE_INTERVAL", \ 2, \ { { "interval_us", NULL, MAVLINK_TYPE_INT32_T, 0, 0, offsetof(mavlink_message_interval_t, interval_us) }, \ { "message_id", NULL, MAVLINK_TYPE_UINT16_T, 0, 4, offsetof(mavlink_message_interval_t, message_id) }, \ } \ } #else #define MAVLINK_MESSAGE_INFO_MESSAGE_INTERVAL { \ "MESSAGE_INTERVAL", \ 2, \ { { "interval_us", NULL, MAVLINK_TYPE_INT32_T, 0, 0, offsetof(mavlink_message_interval_t, interval_us) }, \ { "message_id", NULL, MAVLINK_TYPE_UINT16_T, 0, 4, offsetof(mavlink_message_interval_t, message_id) }, \ } \ } #endif /** * @brief Pack a message_interval message * @param system_id ID of this system * @param component_id ID of this component (e.g. 200 for IMU) * @param msg The MAVLink message to compress the data into * * @param message_id The ID of the requested MAVLink message. v1.0 is limited to 254 messages. * @param interval_us The interval between two messages, in microseconds. A value of -1 indicates this stream is disabled, 0 indicates it is not available, > 0 indicates the interval at which it is sent. 
* @return length of the message in bytes (excluding serial stream start sign) */ static inline uint16_t mavlink_msg_message_interval_pack(uint8_t system_id, uint8_t component_id, mavlink_message_t* msg, uint16_t message_id, int32_t interval_us) { #if MAVLINK_NEED_BYTE_SWAP || !MAVLINK_ALIGNED_FIELDS char buf[MAVLINK_MSG_ID_MESSAGE_INTERVAL_LEN]; _mav_put_int32_t(buf, 0, interval_us); _mav_put_uint16_t(buf, 4, message_id); memcpy(_MAV_PAYLOAD_NON_CONST(msg), buf, MAVLINK_MSG_ID_MESSAGE_INTERVAL_LEN); #else mavlink_message_interval_t packet; packet.interval_us = interval_us; packet.message_id = message_id; memcpy(_MAV_PAYLOAD_NON_CONST(msg), &packet, MAVLINK_MSG_ID_MESSAGE_INTERVAL_LEN); #endif msg->msgid = MAVLINK_MSG_ID_MESSAGE_INTERVAL; return mavlink_finalize_message(msg, system_id, component_id, MAVLINK_MSG_ID_MESSAGE_INTERVAL_MIN_LEN, MAVLINK_MSG_ID_MESSAGE_INTERVAL_LEN, MAVLINK_MSG_ID_MESSAGE_INTERVAL_CRC); } /** * @brief Pack a message_interval message on a channel * @param system_id ID of this system * @param component_id ID of this component (e.g. 200 for IMU) * @param chan The MAVLink channel this message will be sent over * @param msg The MAVLink message to compress the data into * @param message_id The ID of the requested MAVLink message. v1.0 is limited to 254 messages. * @param interval_us The interval between two messages, in microseconds. A value of -1 indicates this stream is disabled, 0 indicates it is not available, > 0 indicates the interval at which it is sent. 
* @return length of the message in bytes (excluding serial stream start sign) */ static inline uint16_t mavlink_msg_message_interval_pack_chan(uint8_t system_id, uint8_t component_id, uint8_t chan, mavlink_message_t* msg, uint16_t message_id,int32_t interval_us) { #if MAVLINK_NEED_BYTE_SWAP || !MAVLINK_ALIGNED_FIELDS char buf[MAVLINK_MSG_ID_MESSAGE_INTERVAL_LEN]; _mav_put_int32_t(buf, 0, interval_us); _mav_put_uint16_t(buf, 4, message_id); memcpy(_MAV_PAYLOAD_NON_CONST(msg), buf, MAVLINK_MSG_ID_MESSAGE_INTERVAL_LEN); #else mavlink_message_interval_t packet; packet.interval_us = interval_us; packet.message_id = message_id; memcpy(_MAV_PAYLOAD_NON_CONST(msg), &packet, MAVLINK_MSG_ID_MESSAGE_INTERVAL_LEN); #endif msg->msgid = MAVLINK_MSG_ID_MESSAGE_INTERVAL; return mavlink_finalize_message_chan(msg, system_id, component_id, chan, MAVLINK_MSG_ID_MESSAGE_INTERVAL_MIN_LEN, MAVLINK_MSG_ID_MESSAGE_INTERVAL_LEN, MAVLINK_MSG_ID_MESSAGE_INTERVAL_CRC); } /** * @brief Encode a message_interval struct * * @param system_id ID of this system * @param component_id ID of this component (e.g. 200 for IMU) * @param msg The MAVLink message to compress the data into * @param message_interval C-struct to read the message contents from */ static inline uint16_t mavlink_msg_message_interval_encode(uint8_t system_id, uint8_t component_id, mavlink_message_t* msg, const mavlink_message_interval_t* message_interval) { return mavlink_msg_message_interval_pack(system_id, component_id, msg, message_interval->message_id, message_interval->interval_us); } /** * @brief Encode a message_interval struct on a channel * * @param system_id ID of this system * @param component_id ID of this component (e.g. 
200 for IMU) * @param chan The MAVLink channel this message will be sent over * @param msg The MAVLink message to compress the data into * @param message_interval C-struct to read the message contents from */ static inline uint16_t mavlink_msg_message_interval_encode_chan(uint8_t system_id, uint8_t component_id, uint8_t chan, mavlink_message_t* msg, const mavlink_message_interval_t* message_interval) { return mavlink_msg_message_interval_pack_chan(system_id, component_id, chan, msg, message_interval->message_id, message_interval->interval_us); } /** * @brief Send a message_interval message * @param chan MAVLink channel to send the message * * @param message_id The ID of the requested MAVLink message. v1.0 is limited to 254 messages. * @param interval_us The interval between two messages, in microseconds. A value of -1 indicates this stream is disabled, 0 indicates it is not available, > 0 indicates the interval at which it is sent. */ #ifdef MAVLINK_USE_CONVENIENCE_FUNCTIONS static inline void mavlink_msg_message_interval_send(mavlink_channel_t chan, uint16_t message_id, int32_t interval_us) { #if MAVLINK_NEED_BYTE_SWAP || !MAVLINK_ALIGNED_FIELDS char buf[MAVLINK_MSG_ID_MESSAGE_INTERVAL_LEN]; _mav_put_int32_t(buf, 0, interval_us); _mav_put_uint16_t(buf, 4, message_id); _mav_finalize_message_chan_send(chan, MAVLINK_MSG_ID_MESSAGE_INTERVAL, buf, MAVLINK_MSG_ID_MESSAGE_INTERVAL_MIN_LEN, MAVLINK_MSG_ID_MESSAGE_INTERVAL_LEN, MAVLINK_MSG_ID_MESSAGE_INTERVAL_CRC); #else mavlink_message_interval_t packet; packet.interval_us = interval_us; packet.message_id = message_id; _mav_finalize_message_chan_send(chan, MAVLINK_MSG_ID_MESSAGE_INTERVAL, (const char *)&packet, MAVLINK_MSG_ID_MESSAGE_INTERVAL_MIN_LEN, MAVLINK_MSG_ID_MESSAGE_INTERVAL_LEN, MAVLINK_MSG_ID_MESSAGE_INTERVAL_CRC); #endif } /** * @brief Send a message_interval message * @param chan MAVLink channel to send the message * @param struct The MAVLink struct to serialize */ static inline void 
mavlink_msg_message_interval_send_struct(mavlink_channel_t chan, const mavlink_message_interval_t* message_interval) { #if MAVLINK_NEED_BYTE_SWAP || !MAVLINK_ALIGNED_FIELDS mavlink_msg_message_interval_send(chan, message_interval->message_id, message_interval->interval_us); #else _mav_finalize_message_chan_send(chan, MAVLINK_MSG_ID_MESSAGE_INTERVAL, (const char *)message_interval, MAVLINK_MSG_ID_MESSAGE_INTERVAL_MIN_LEN, MAVLINK_MSG_ID_MESSAGE_INTERVAL_LEN, MAVLINK_MSG_ID_MESSAGE_INTERVAL_CRC); #endif } #if MAVLINK_MSG_ID_MESSAGE_INTERVAL_LEN <= MAVLINK_MAX_PAYLOAD_LEN /* This varient of _send() can be used to save stack space by re-using memory from the receive buffer. The caller provides a mavlink_message_t which is the size of a full mavlink message. This is usually the receive buffer for the channel, and allows a reply to an incoming message with minimum stack space usage. */ static inline void mavlink_msg_message_interval_send_buf(mavlink_message_t *msgbuf, mavlink_channel_t chan, uint16_t message_id, int32_t interval_us) { #if MAVLINK_NEED_BYTE_SWAP || !MAVLINK_ALIGNED_FIELDS char *buf = (char *)msgbuf; _mav_put_int32_t(buf, 0, interval_us); _mav_put_uint16_t(buf, 4, message_id); _mav_finalize_message_chan_send(chan, MAVLINK_MSG_ID_MESSAGE_INTERVAL, buf, MAVLINK_MSG_ID_MESSAGE_INTERVAL_MIN_LEN, MAVLINK_MSG_ID_MESSAGE_INTERVAL_LEN, MAVLINK_MSG_ID_MESSAGE_INTERVAL_CRC); #else mavlink_message_interval_t *packet = (mavlink_message_interval_t *)msgbuf; packet->interval_us = interval_us; packet->message_id = message_id; _mav_finalize_message_chan_send(chan, MAVLINK_MSG_ID_MESSAGE_INTERVAL, (const char *)packet, MAVLINK_MSG_ID_MESSAGE_INTERVAL_MIN_LEN, MAVLINK_MSG_ID_MESSAGE_INTERVAL_LEN, MAVLINK_MSG_ID_MESSAGE_INTERVAL_CRC); #endif } #endif #endif // MESSAGE MESSAGE_INTERVAL UNPACKING /** * @brief Get field message_id from message_interval message * * @return The ID of the requested MAVLink message. v1.0 is limited to 254 messages. 
*/ static inline uint16_t mavlink_msg_message_interval_get_message_id(const mavlink_message_t* msg) { return _MAV_RETURN_uint16_t(msg, 4); } /** * @brief Get field interval_us from message_interval message * * @return The interval between two messages, in microseconds. A value of -1 indicates this stream is disabled, 0 indicates it is not available, > 0 indicates the interval at which it is sent. */ static inline int32_t mavlink_msg_message_interval_get_interval_us(const mavlink_message_t* msg) { return _MAV_RETURN_int32_t(msg, 0); } /** * @brief Decode a message_interval message into a struct * * @param msg The message to decode * @param message_interval C-struct to decode the message contents into */ static inline void mavlink_msg_message_interval_decode(const mavlink_message_t* msg, mavlink_message_interval_t* message_interval) { #if MAVLINK_NEED_BYTE_SWAP || !MAVLINK_ALIGNED_FIELDS message_interval->interval_us = mavlink_msg_message_interval_get_interval_us(msg); message_interval->message_id = mavlink_msg_message_interval_get_message_id(msg); #else uint8_t len = msg->len < MAVLINK_MSG_ID_MESSAGE_INTERVAL_LEN? msg->len : MAVLINK_MSG_ID_MESSAGE_INTERVAL_LEN; memset(message_interval, 0, MAVLINK_MSG_ID_MESSAGE_INTERVAL_LEN); memcpy(message_interval, _MAV_PAYLOAD(msg), len); #endif }
kgreenek/vrep_px4
vendor/mavlink/include/mavlink/v1.0/common/mavlink_msg_message_interval.h
C
gpl-3.0
10,801
/** ****************************************************************************** * @file stm32f3xx_ll_fmc.h * @author MCD Application Team * @version V1.3.0 * @date 01-July-2016 * @brief Header file of FMC HAL module. ****************************************************************************** * @attention * * <h2><center>&copy; COPYRIGHT(c) 2016 STMicroelectronics</center></h2> * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of STMicroelectronics nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* ****************************************************************************** */ /* Define to prevent recursive inclusion -------------------------------------*/ #ifndef __STM32F3xx_LL_FMC_H #define __STM32F3xx_LL_FMC_H #ifdef __cplusplus extern "C" { #endif /* Includes ------------------------------------------------------------------*/ #include "stm32f3xx_hal_def.h" /** @addtogroup STM32F3xx_HAL_Driver * @{ */ #if defined(FMC_BANK1) /** @addtogroup FMC_LL * @{ */ /** @addtogroup FMC_LL_Private_Macros * @{ */ #define IS_FMC_NORSRAM_BANK(__BANK__) (((__BANK__) == FMC_NORSRAM_BANK1) || \ ((__BANK__) == FMC_NORSRAM_BANK2) || \ ((__BANK__) == FMC_NORSRAM_BANK3) || \ ((__BANK__) == FMC_NORSRAM_BANK4)) #define IS_FMC_MUX(__MUX__) (((__MUX__) == FMC_DATA_ADDRESS_MUX_DISABLE) || \ ((__MUX__) == FMC_DATA_ADDRESS_MUX_ENABLE)) #define IS_FMC_MEMORY(__MEMORY__) (((__MEMORY__) == FMC_MEMORY_TYPE_SRAM) || \ ((__MEMORY__) == FMC_MEMORY_TYPE_PSRAM)|| \ ((__MEMORY__) == FMC_MEMORY_TYPE_NOR)) #define IS_FMC_NORSRAM_MEMORY_WIDTH(__WIDTH__) (((__WIDTH__) == FMC_NORSRAM_MEM_BUS_WIDTH_8) || \ ((__WIDTH__) == FMC_NORSRAM_MEM_BUS_WIDTH_16) || \ ((__WIDTH__) == FMC_NORSRAM_MEM_BUS_WIDTH_32)) #define IS_FMC_WRITE_BURST(__BURST__) (((__BURST__) == FMC_WRITE_BURST_DISABLE) || \ ((__BURST__) == FMC_WRITE_BURST_ENABLE)) #define IS_FMC_CONTINOUS_CLOCK(__CCLOCK__) (((__CCLOCK__) == FMC_CONTINUOUS_CLOCK_SYNC_ONLY) || \ ((__CCLOCK__) == FMC_CONTINUOUS_CLOCK_SYNC_ASYNC)) #define IS_FMC_ACCESS_MODE(__MODE__) (((__MODE__) == FMC_ACCESS_MODE_A) || \ ((__MODE__) == FMC_ACCESS_MODE_B) || \ ((__MODE__) == FMC_ACCESS_MODE_C) || \ ((__MODE__) == FMC_ACCESS_MODE_D)) #define IS_FMC_NAND_BANK(__BANK__) (((__BANK__) == FMC_NAND_BANK2) || \ ((__BANK__) == FMC_NAND_BANK3)) #define IS_FMC_WAIT_FEATURE(__FEATURE__) (((__FEATURE__) == FMC_NAND_PCC_WAIT_FEATURE_DISABLE) || \ ((__FEATURE__) == FMC_NAND_PCC_WAIT_FEATURE_ENABLE)) #define IS_FMC_NAND_MEMORY_WIDTH(__WIDTH__) (((__WIDTH__) == 
FMC_NAND_PCC_MEM_BUS_WIDTH_8) || \ ((__WIDTH__) == FMC_NAND_PCC_MEM_BUS_WIDTH_16)) #define IS_FMC_ECC_STATE(__STATE__) (((__STATE__) == FMC_NAND_ECC_DISABLE) || \ ((__STATE__) == FMC_NAND_ECC_ENABLE)) #define IS_FMC_ECCPAGE_SIZE(__SIZE__) (((__SIZE__) == FMC_NAND_ECC_PAGE_SIZE_256BYTE) || \ ((__SIZE__) == FMC_NAND_ECC_PAGE_SIZE_512BYTE) || \ ((__SIZE__) == FMC_NAND_ECC_PAGE_SIZE_1024BYTE) || \ ((__SIZE__) == FMC_NAND_ECC_PAGE_SIZE_2048BYTE) || \ ((__SIZE__) == FMC_NAND_ECC_PAGE_SIZE_4096BYTE) || \ ((__SIZE__) == FMC_NAND_ECC_PAGE_SIZE_8192BYTE)) /** @defgroup FMC_TCLR_Setup_Time FMC_TCLR_Setup_Time * @{ */ #define IS_FMC_TCLR_TIME(__TIME__) ((__TIME__) <= 255) /** * @} */ /** @defgroup FMC_TAR_Setup_Time FMC_TAR_Setup_Time * @{ */ #define IS_FMC_TAR_TIME(__TIME__) ((__TIME__) <= 255) /** * @} */ /** @defgroup FMC_Setup_Time FMC_Setup_Time * @{ */ #define IS_FMC_SETUP_TIME(__TIME__) ((__TIME__) <= 255) /** * @} */ /** @defgroup FMC_Wait_Setup_Time FMC_Wait_Setup_Time * @{ */ #define IS_FMC_WAIT_TIME(__TIME__) ((__TIME__) <= 255) /** * @} */ /** @defgroup FMC_Hold_Setup_Time FMC_Hold_Setup_Time * @{ */ #define IS_FMC_HOLD_TIME(__TIME__) ((__TIME__) <= 255) /** * @} */ /** @defgroup FMC_HiZ_Setup_Time FMC_HiZ_Setup_Time * @{ */ #define IS_FMC_HIZ_TIME(__TIME__) ((__TIME__) <= 255) /** * @} */ /** @defgroup FMC_NORSRAM_Device_Instance FMC NOR/SRAM Device Instance * @{ */ #define IS_FMC_NORSRAM_DEVICE(__INSTANCE__) ((__INSTANCE__) == FMC_NORSRAM_DEVICE) /** * @} */ /** @defgroup FMC_NORSRAM_EXTENDED_Device_Instance FMC NOR/SRAM EXTENDED Device Instance * @{ */ #define IS_FMC_NORSRAM_EXTENDED_DEVICE(__INSTANCE__) ((__INSTANCE__) == FMC_NORSRAM_EXTENDED_DEVICE) /** * @} */ /** @defgroup FMC_NAND_Device_Instance FMC NAND Device Instance * @{ */ #define IS_FMC_NAND_DEVICE(__INSTANCE__) ((__INSTANCE__) == FMC_NAND_DEVICE) /** * @} */ /** @defgroup FMC_PCCARD_Device_Instance FMC PCCARD Device Instance * @{ */ #define IS_FMC_PCCARD_DEVICE(__INSTANCE__) ((__INSTANCE__) == 
FMC_PCCARD_DEVICE) /** * @} */ #define IS_FMC_BURSTMODE(__STATE__) (((__STATE__) == FMC_BURST_ACCESS_MODE_DISABLE) || \ ((__STATE__) == FMC_BURST_ACCESS_MODE_ENABLE)) #define IS_FMC_WAIT_POLARITY(__POLARITY__) (((__POLARITY__) == FMC_WAIT_SIGNAL_POLARITY_LOW) || \ ((__POLARITY__) == FMC_WAIT_SIGNAL_POLARITY_HIGH)) #define IS_FMC_WRAP_MODE(__MODE__) (((__MODE__) == FMC_WRAP_MODE_DISABLE) || \ ((__MODE__) == FMC_WRAP_MODE_ENABLE)) #define IS_FMC_WAIT_SIGNAL_ACTIVE(__ACTIVE__) (((__ACTIVE__) == FMC_WAIT_TIMING_BEFORE_WS) || \ ((__ACTIVE__) == FMC_WAIT_TIMING_DURING_WS)) #define IS_FMC_WRITE_OPERATION(__OPERATION__) (((__OPERATION__) == FMC_WRITE_OPERATION_DISABLE) || \ ((__OPERATION__) == FMC_WRITE_OPERATION_ENABLE)) #define IS_FMC_WAITE_SIGNAL(__SIGNAL__) (((__SIGNAL__) == FMC_WAIT_SIGNAL_DISABLE) || \ ((__SIGNAL__) == FMC_WAIT_SIGNAL_ENABLE)) #define IS_FMC_EXTENDED_MODE(__MODE__) (((__MODE__) == FMC_EXTENDED_MODE_DISABLE) || \ ((__MODE__) == FMC_EXTENDED_MODE_ENABLE)) #define IS_FMC_ASYNWAIT(__STATE__) (((__STATE__) == FMC_ASYNCHRONOUS_WAIT_DISABLE) || \ ((__STATE__) == FMC_ASYNCHRONOUS_WAIT_ENABLE)) #define IS_FMC_CLK_DIV(__DIV__) (((__DIV__) > 1) && ((__DIV__) <= 16)) /** @defgroup FMC_Data_Latency FMC Data Latency * @{ */ #define IS_FMC_DATA_LATENCY(__LATENCY__) (((__LATENCY__) > 1) && ((__LATENCY__) <= 17)) /** * @} */ /** @defgroup FMC_Address_Setup_Time FMC Address Setup Time * @{ */ #define IS_FMC_ADDRESS_SETUP_TIME(__TIME__) ((__TIME__) <= 15) /** * @} */ /** @defgroup FMC_Address_Hold_Time FMC Address Hold Time * @{ */ #define IS_FMC_ADDRESS_HOLD_TIME(__TIME__) (((__TIME__) > 0) && ((__TIME__) <= 15)) /** * @} */ /** @defgroup FMC_Data_Setup_Time FMC Data Setup Time * @{ */ #define IS_FMC_DATASETUP_TIME(__TIME__) (((__TIME__) > 0) && ((__TIME__) <= 255)) /** * @} */ /** @defgroup FMC_Bus_Turn_around_Duration FMC Bus Turn around Duration * @{ */ #define IS_FMC_TURNAROUND_TIME(__TIME__) ((__TIME__) <= 15) /** * @} */ /** * @} */ /* Exported typedef 
----------------------------------------------------------*/ /** @defgroup FMC_NORSRAM_Exported_typedef FMC Low Layer Exported Types * @{ */ #define FMC_NORSRAM_TypeDef FMC_Bank1_TypeDef #define FMC_NORSRAM_EXTENDED_TypeDef FMC_Bank1E_TypeDef #define FMC_NAND_TypeDef FMC_Bank2_3_TypeDef #define FMC_PCCARD_TypeDef FMC_Bank4_TypeDef #define FMC_NORSRAM_DEVICE FMC_Bank1 #define FMC_NORSRAM_EXTENDED_DEVICE FMC_Bank1E #define FMC_NAND_DEVICE FMC_Bank2_3 #define FMC_PCCARD_DEVICE FMC_Bank4 /** * @brief FMC_NORSRAM Configuration Structure definition */ typedef struct { uint32_t NSBank; /*!< Specifies the NORSRAM memory device that will be used. This parameter can be a value of @ref FMC_NORSRAM_Bank */ uint32_t DataAddressMux; /*!< Specifies whether the address and data values are multiplexed on the data bus or not. This parameter can be a value of @ref FMC_Data_Address_Bus_Multiplexing */ uint32_t MemoryType; /*!< Specifies the type of external memory attached to the corresponding memory device. This parameter can be a value of @ref FMC_Memory_Type */ uint32_t MemoryDataWidth; /*!< Specifies the external memory device width. This parameter can be a value of @ref FMC_NORSRAM_Data_Width */ uint32_t BurstAccessMode; /*!< Enables or disables the burst access mode for Flash memory, valid only with synchronous burst Flash memories. This parameter can be a value of @ref FMC_Burst_Access_Mode */ uint32_t WaitSignalPolarity; /*!< Specifies the wait signal polarity, valid only when accessing the Flash memory in burst mode. This parameter can be a value of @ref FMC_Wait_Signal_Polarity */ uint32_t WrapMode; /*!< Enables or disables the Wrapped burst access mode for Flash memory, valid only when accessing Flash memories in burst mode. 
This parameter can be a value of @ref FMC_Wrap_Mode */ uint32_t WaitSignalActive; /*!< Specifies if the wait signal is asserted by the memory one clock cycle before the wait state or during the wait state, valid only when accessing memories in burst mode. This parameter can be a value of @ref FMC_Wait_Timing */ uint32_t WriteOperation; /*!< Enables or disables the write operation in the selected device by the FMC. This parameter can be a value of @ref FMC_Write_Operation */ uint32_t WaitSignal; /*!< Enables or disables the wait state insertion via wait signal, valid for Flash memory access in burst mode. This parameter can be a value of @ref FMC_Wait_Signal */ uint32_t ExtendedMode; /*!< Enables or disables the extended mode. This parameter can be a value of @ref FMC_Extended_Mode */ uint32_t AsynchronousWait; /*!< Enables or disables wait signal during asynchronous transfers, valid only with asynchronous Flash memories. This parameter can be a value of @ref FMC_AsynchronousWait */ uint32_t WriteBurst; /*!< Enables or disables the write burst operation. This parameter can be a value of @ref FMC_Write_Burst */ uint32_t ContinuousClock; /*!< Enables or disables the FMC clock output to external memory devices. This parameter is only enabled through the FMC_BCR1 register, and don't care through FMC_BCR2..4 registers. This parameter can be a value of @ref FMC_Continous_Clock */ }FMC_NORSRAM_InitTypeDef; /** * @brief FMC_NORSRAM Timing parameters structure definition */ typedef struct { uint32_t AddressSetupTime; /*!< Defines the number of HCLK cycles to configure the duration of the address setup time. This parameter can be a value between Min_Data = 0 and Max_Data = 15. @note This parameter is not used with synchronous NOR Flash memories. */ uint32_t AddressHoldTime; /*!< Defines the number of HCLK cycles to configure the duration of the address hold time. This parameter can be a value between Min_Data = 1 and Max_Data = 15. 
@note This parameter is not used with synchronous NOR Flash memories. */ uint32_t DataSetupTime; /*!< Defines the number of HCLK cycles to configure the duration of the data setup time. This parameter can be a value between Min_Data = 1 and Max_Data = 255. @note This parameter is used for SRAMs, ROMs and asynchronous multiplexed NOR Flash memories. */ uint32_t BusTurnAroundDuration; /*!< Defines the number of HCLK cycles to configure the duration of the bus turnaround. This parameter can be a value between Min_Data = 0 and Max_Data = 15. @note This parameter is only used for multiplexed NOR Flash memories. */ uint32_t CLKDivision; /*!< Defines the period of CLK clock output signal, expressed in number of HCLK cycles. This parameter can be a value between Min_Data = 2 and Max_Data = 16. @note This parameter is not used for asynchronous NOR Flash, SRAM or ROM accesses. */ uint32_t DataLatency; /*!< Defines the number of memory clock cycles to issue to the memory before getting the first data. The parameter value depends on the memory type as shown below: - It must be set to 0 in case of a CRAM - It is don't care in asynchronous NOR, SRAM or ROM accesses - It may assume a value between Min_Data = 2 and Max_Data = 17 in NOR Flash memories with synchronous burst mode enable */ uint32_t AccessMode; /*!< Specifies the asynchronous access mode. This parameter can be a value of @ref FMC_Access_Mode */ }FMC_NORSRAM_TimingTypeDef; /** * @brief FMC_NAND Configuration Structure definition */ typedef struct { uint32_t NandBank; /*!< Specifies the NAND memory device that will be used. This parameter can be a value of @ref FMC_NAND_Bank */ uint32_t Waitfeature; /*!< Enables or disables the Wait feature for the NAND Memory device. This parameter can be any value of @ref FMC_Wait_feature */ uint32_t MemoryDataWidth; /*!< Specifies the external memory device width. 
This parameter can be any value of @ref FMC_NAND_Data_Width */ uint32_t EccComputation; /*!< Enables or disables the ECC computation. This parameter can be any value of @ref FMC_ECC */ uint32_t ECCPageSize; /*!< Defines the page size for the extended ECC. This parameter can be any value of @ref FMC_ECC_Page_Size */ uint32_t TCLRSetupTime; /*!< Defines the number of HCLK cycles to configure the delay between CLE low and RE low. This parameter can be a value between Min_Data = 0 and Max_Data = 255 */ uint32_t TARSetupTime; /*!< Defines the number of HCLK cycles to configure the delay between ALE low and RE low. This parameter can be a number between Min_Data = 0 and Max_Data = 255 */ }FMC_NAND_InitTypeDef; /** * @brief FMC_NAND_PCC Timing parameters structure definition */ typedef struct { uint32_t SetupTime; /*!< Defines the number of HCLK cycles to setup address before the command assertion for NAND-Flash read or write access to common/Attribute or I/O memory space (depending on the memory space timing to be configured). This parameter can be a value between Min_Data = 0 and Max_Data = 255 */ uint32_t WaitSetupTime; /*!< Defines the minimum number of HCLK cycles to assert the command for NAND-Flash read or write access to common/Attribute or I/O memory space (depending on the memory space timing to be configured). This parameter can be a number between Min_Data = 0 and Max_Data = 255 */ uint32_t HoldSetupTime; /*!< Defines the number of HCLK clock cycles to hold address (and data for write access) after the command de-assertion for NAND-Flash read or write access to common/Attribute or I/O memory space (depending on the memory space timing to be configured). 
This parameter can be a number between Min_Data = 0 and Max_Data = 255 */ uint32_t HiZSetupTime; /*!< Defines the number of HCLK clock cycles during which the data bus is kept in HiZ after the start of a NAND-Flash write access to common/Attribute or I/O memory space (depending on the memory space timing to be configured). This parameter can be a number between Min_Data = 0 and Max_Data = 255 */ }FMC_NAND_PCC_TimingTypeDef; /** * @brief FMC_NAND Configuration Structure definition */ typedef struct { uint32_t Waitfeature; /*!< Enables or disables the Wait feature for the PCCARD Memory device. This parameter can be any value of @ref FMC_Wait_feature */ uint32_t TCLRSetupTime; /*!< Defines the number of HCLK cycles to configure the delay between CLE low and RE low. This parameter can be a value between Min_Data = 0 and Max_Data = 255 */ uint32_t TARSetupTime; /*!< Defines the number of HCLK cycles to configure the delay between ALE low and RE low. This parameter can be a number between Min_Data = 0 and Max_Data = 255 */ }FMC_PCCARD_InitTypeDef; /** * @} */ /* Exported constants --------------------------------------------------------*/ /** @defgroup FMC_Exported_Constants FMC Low Layer Exported Constants * @{ */ /** @defgroup FMC_NORSRAM_Exported_constants FMC NOR/SRAM Exported constants * @{ */ /** @defgroup FMC_NORSRAM_Bank FMC NOR/SRAM Bank * @{ */ #define FMC_NORSRAM_BANK1 ((uint32_t)0x00000000) #define FMC_NORSRAM_BANK2 ((uint32_t)0x00000002) #define FMC_NORSRAM_BANK3 ((uint32_t)0x00000004) #define FMC_NORSRAM_BANK4 ((uint32_t)0x00000006) /** * @} */ /** @defgroup FMC_Data_Address_Bus_Multiplexing FMC Data Address Bus Multiplexing * @{ */ #define FMC_DATA_ADDRESS_MUX_DISABLE ((uint32_t)0x00000000) #define FMC_DATA_ADDRESS_MUX_ENABLE ((uint32_t)FMC_BCRx_MUXEN) /** * @} */ /** @defgroup FMC_Memory_Type FMC Memory Type * @{ */ #define FMC_MEMORY_TYPE_SRAM ((uint32_t)0x00000000) #define FMC_MEMORY_TYPE_PSRAM ((uint32_t)FMC_BCRx_MTYP_0) #define FMC_MEMORY_TYPE_NOR 
((uint32_t)FMC_BCRx_MTYP_1) /** * @} */ /** @defgroup FMC_NORSRAM_Data_Width FMC NOR/SRAM Data Width * @{ */ #define FMC_NORSRAM_MEM_BUS_WIDTH_8 ((uint32_t)0x00000000) #define FMC_NORSRAM_MEM_BUS_WIDTH_16 ((uint32_t)FMC_BCRx_MWID_0) #define FMC_NORSRAM_MEM_BUS_WIDTH_32 ((uint32_t)FMC_BCRx_MWID_1) /** * @} */ /** @defgroup FMC_NORSRAM_Flash_Access FMC NOR/SRAM Flash Access * @{ */ #define FMC_NORSRAM_FLASH_ACCESS_ENABLE ((uint32_t)FMC_BCRx_FACCEN) #define FMC_NORSRAM_FLASH_ACCESS_DISABLE ((uint32_t)0x00000000) /** * @} */ /** @defgroup FMC_Burst_Access_Mode FMC Burst Access Mode * @{ */ #define FMC_BURST_ACCESS_MODE_DISABLE ((uint32_t)0x00000000) #define FMC_BURST_ACCESS_MODE_ENABLE ((uint32_t)FMC_BCRx_BURSTEN) /** * @} */ /** @defgroup FMC_Wait_Signal_Polarity FMC Wait Signal Polarity * @{ */ #define FMC_WAIT_SIGNAL_POLARITY_LOW ((uint32_t)0x00000000) #define FMC_WAIT_SIGNAL_POLARITY_HIGH ((uint32_t)FMC_BCRx_WAITPOL) /** * @} */ /** @defgroup FMC_Wrap_Mode FMC Wrap Mode * @{ */ #define FMC_WRAP_MODE_DISABLE ((uint32_t)0x00000000) #define FMC_WRAP_MODE_ENABLE ((uint32_t)FMC_BCRx_WRAPMOD) /** * @} */ /** @defgroup FMC_Wait_Timing FMC Wait Timing * @{ */ #define FMC_WAIT_TIMING_BEFORE_WS ((uint32_t)0x00000000) #define FMC_WAIT_TIMING_DURING_WS ((uint32_t)FMC_BCRx_WAITCFG) /** * @} */ /** @defgroup FMC_Write_Operation FMC Write Operation * @{ */ #define FMC_WRITE_OPERATION_DISABLE ((uint32_t)0x00000000) #define FMC_WRITE_OPERATION_ENABLE ((uint32_t)FMC_BCRx_WREN) /** * @} */ /** @defgroup FMC_Wait_Signal FMC Wait Signal * @{ */ #define FMC_WAIT_SIGNAL_DISABLE ((uint32_t)0x00000000) #define FMC_WAIT_SIGNAL_ENABLE ((uint32_t)FMC_BCRx_WAITEN) /** * @} */ /** @defgroup FMC_Extended_Mode FMC Extended Mode * @{ */ #define FMC_EXTENDED_MODE_DISABLE ((uint32_t)0x00000000) #define FMC_EXTENDED_MODE_ENABLE ((uint32_t)FMC_BCRx_EXTMOD) /** * @} */ /** @defgroup FMC_AsynchronousWait FMC Asynchronous Wait * @{ */ #define FMC_ASYNCHRONOUS_WAIT_DISABLE ((uint32_t)0x00000000) #define 
FMC_ASYNCHRONOUS_WAIT_ENABLE ((uint32_t)FMC_BCRx_ASYNCWAIT) /** * @} */ /** @defgroup FMC_Write_Burst FMC Write Burst * @{ */ #define FMC_WRITE_BURST_DISABLE ((uint32_t)0x00000000) #define FMC_WRITE_BURST_ENABLE ((uint32_t)FMC_BCRx_CBURSTRW) /** * @} */ /** @defgroup FMC_Continous_Clock FMC Continous Clock * @{ */ #define FMC_CONTINUOUS_CLOCK_SYNC_ONLY ((uint32_t)0x00000000) #define FMC_CONTINUOUS_CLOCK_SYNC_ASYNC ((uint32_t)FMC_BCR1_CCLKEN) /** * @} */ /** @defgroup FMC_Access_Mode FMC Access Mode * @{ */ #define FMC_ACCESS_MODE_A ((uint32_t)0x00000000) #define FMC_ACCESS_MODE_B ((uint32_t)FMC_BTRx_ACCMOD_0) #define FMC_ACCESS_MODE_C ((uint32_t)FMC_BTRx_ACCMOD_1) #define FMC_ACCESS_MODE_D ((uint32_t)(FMC_BTRx_ACCMOD_0 | FMC_BTRx_ACCMOD_1)) /** * @} */ /** * @} */ /** @defgroup FMC_NAND_Controller FMC NAND and PCCARD Controller * @{ */ /** @defgroup FMC_NAND_Bank FMC NAND Bank * @{ */ #define FMC_NAND_BANK2 ((uint32_t)0x00000010) #define FMC_NAND_BANK3 ((uint32_t)0x00000100) /** * @} */ /** @defgroup FMC_Wait_feature FMC Wait feature * @{ */ #define FMC_NAND_PCC_WAIT_FEATURE_DISABLE ((uint32_t)0x00000000) #define FMC_NAND_PCC_WAIT_FEATURE_ENABLE ((uint32_t)FMC_PCRx_PWAITEN) /** * @} */ /** @defgroup FMC_PCR_Memory_Type FMC PCR Memory Type * @{ */ #define FMC_PCR_MEMORY_TYPE_PCCARD ((uint32_t)0x00000000) #define FMC_PCR_MEMORY_TYPE_NAND ((uint32_t)FMC_PCRx_PTYP) /** * @} */ /** @defgroup FMC_NAND_Data_Width FMC NAND Data Width * @{ */ #define FMC_NAND_PCC_MEM_BUS_WIDTH_8 ((uint32_t)0x00000000) #define FMC_NAND_PCC_MEM_BUS_WIDTH_16 ((uint32_t)FMC_PCRx_PWID_0) /** * @} */ /** @defgroup FMC_ECC FMC NAND ECC * @{ */ #define FMC_NAND_ECC_DISABLE ((uint32_t)0x00000000) #define FMC_NAND_ECC_ENABLE ((uint32_t)FMC_PCRx_ECCEN) /** * @} */ /** @defgroup FMC_ECC_Page_Size FMC ECC Page Size * @{ */ #define FMC_NAND_ECC_PAGE_SIZE_256BYTE ((uint32_t)0x00000000) #define FMC_NAND_ECC_PAGE_SIZE_512BYTE ((uint32_t)FMC_PCRx_ECCPS_0) #define FMC_NAND_ECC_PAGE_SIZE_1024BYTE 
((uint32_t)FMC_PCRx_ECCPS_1) #define FMC_NAND_ECC_PAGE_SIZE_2048BYTE ((uint32_t)FMC_PCRx_ECCPS_0|FMC_PCRx_ECCPS_1) #define FMC_NAND_ECC_PAGE_SIZE_4096BYTE ((uint32_t)FMC_PCRx_ECCPS_2) #define FMC_NAND_ECC_PAGE_SIZE_8192BYTE ((uint32_t)FMC_PCRx_ECCPS_0|FMC_PCRx_ECCPS_2) /** * @} */ /** @defgroup FMC_Interrupt_definition FMC Interrupt definition * @brief FMC Interrupt definition * @{ */ #define FMC_IT_RISING_EDGE ((uint32_t)FMC_SRx_IREN) #define FMC_IT_LEVEL ((uint32_t)FMC_SRx_ILEN) #define FMC_IT_FALLING_EDGE ((uint32_t)FMC_SRx_IFEN) /** * @} */ /** @defgroup FMC_Flag_definition FMC Flag definition * @brief FMC Flag definition * @{ */ #define FMC_FLAG_RISING_EDGE ((uint32_t)FMC_SRx_IRS) #define FMC_FLAG_LEVEL ((uint32_t)FMC_SRx_ILS) #define FMC_FLAG_FALLING_EDGE ((uint32_t)FMC_SRx_IFS) #define FMC_FLAG_FEMPT ((uint32_t)FMC_SRx_FEMPT) /** * @} */ /** * @} */ /** * @} */ /* Exported macro ------------------------------------------------------------*/ /** @defgroup FMC_Exported_Macros FMC Low Layer Exported Macros * @{ */ /** @defgroup FMC_NOR_Macros FMC NOR/SRAM Exported Macros * @brief macros to handle NOR device enable/disable and read/write operations * @{ */ /** * @brief Enable the NORSRAM device access. * @param __INSTANCE__ FMC_NORSRAM Instance * @param __BANK__ FMC_NORSRAM Bank * @retval none */ #define __FMC_NORSRAM_ENABLE(__INSTANCE__, __BANK__) SET_BIT((__INSTANCE__)->BTCR[(__BANK__)], FMC_BCRx_MBKEN) /** * @brief Disable the NORSRAM device access. * @param __INSTANCE__ FMC_NORSRAM Instance * @param __BANK__ FMC_NORSRAM Bank * @retval none */ #define __FMC_NORSRAM_DISABLE(__INSTANCE__, __BANK__) CLEAR_BIT((__INSTANCE__)->BTCR[(__BANK__)], FMC_BCRx_MBKEN) /** * @} */ /** @defgroup FMC_NAND_Macros FMC NAND Macros * @brief macros to handle NAND device enable/disable * @{ */ /** * @brief Enable the NAND device access. 
* @param __INSTANCE__ FMC_NAND Instance * @param __BANK__ FMC_NAND Bank * @retval None */ #define __FMC_NAND_ENABLE(__INSTANCE__, __BANK__) (((__BANK__) == FMC_NAND_BANK2)? SET_BIT((__INSTANCE__)->PCR2, FMC_PCRx_PBKEN): \ SET_BIT((__INSTANCE__)->PCR3, FMC_PCRx_PBKEN)) /** * @brief Disable the NAND device access. * @param __INSTANCE__ FMC_NAND Instance * @param __BANK__ FMC_NAND Bank * @retval None */ #define __FMC_NAND_DISABLE(__INSTANCE__, __BANK__) (((__BANK__) == FMC_NAND_BANK2)? CLEAR_BIT((__INSTANCE__)->PCR2, FMC_PCRx_PBKEN): \ CLEAR_BIT((__INSTANCE__)->PCR3, FMC_PCRx_PBKEN)) /** * @} */ /** @defgroup FMC_PCCARD_Macros FMC PCCARD Macros * @brief macros to handle PCCARD read/write operations * @{ */ /** * @brief Enable the PCCARD device access. * @param __INSTANCE__ FMC_PCCARD Instance * @retval None */ #define __FMC_PCCARD_ENABLE(__INSTANCE__) SET_BIT((__INSTANCE__)->PCR4, FMC_PCRx_PBKEN) /** * @brief Disable the PCCARD device access. * @param __INSTANCE__ FMC_PCCARD Instance * @retval None */ #define __FMC_PCCARD_DISABLE(__INSTANCE__) CLEAR_BIT((__INSTANCE__)->PCR4, FMC_PCRx_PBKEN) /** * @} */ /** @defgroup FMC_Interrupt FMC Interrupt * @brief macros to handle FMC interrupts * @{ */ /** * @brief Enable the NAND device interrupt. * @param __INSTANCE__ FMC_NAND Instance * @param __BANK__ FMC_NAND Bank * @param __INTERRUPT__ FMC_NAND interrupt * This parameter can be any combination of the following values: * @arg FMC_IT_RISING_EDGE Interrupt rising edge. * @arg FMC_IT_LEVEL Interrupt level. * @arg FMC_IT_FALLING_EDGE Interrupt falling edge. * @retval None */ #define __FMC_NAND_ENABLE_IT(__INSTANCE__, __BANK__, __INTERRUPT__) (((__BANK__) == FMC_NAND_BANK2)? SET_BIT((__INSTANCE__)->SR2, (__INTERRUPT__)): \ SET_BIT((__INSTANCE__)->SR3, (__INTERRUPT__))) /** * @brief Disable the NAND device interrupt. 
* @param __INSTANCE__ FMC_NAND Instance * @param __BANK__ FMC_NAND Bank * @param __INTERRUPT__ FMC_NAND interrupt * This parameter can be any combination of the following values: * @arg FMC_IT_RISING_EDGE Interrupt rising edge. * @arg FMC_IT_LEVEL Interrupt level. * @arg FMC_IT_FALLING_EDGE Interrupt falling edge. * @retval None */ #define __FMC_NAND_DISABLE_IT(__INSTANCE__, __BANK__, __INTERRUPT__) (((__BANK__) == FMC_NAND_BANK2)? CLEAR_BIT((__INSTANCE__)->SR2, (__INTERRUPT__)): \ CLEAR_BIT((__INSTANCE__)->SR3, (__INTERRUPT__))) /** * @brief Get flag status of the NAND device. * @param __INSTANCE__ FMC_NAND Instance * @param __BANK__ FMC_NAND Bank * @param __FLAG__ FMC_NAND flag * This parameter can be any combination of the following values: * @arg FMC_FLAG_RISING_EDGE Interrupt rising edge flag. * @arg FMC_FLAG_LEVEL Interrupt level edge flag. * @arg FMC_FLAG_FALLING_EDGE Interrupt falling edge flag. * @arg FMC_FLAG_FEMPT FIFO empty flag. * @retval The state of FLAG (SET or RESET). */ #define __FMC_NAND_GET_FLAG(__INSTANCE__, __BANK__, __FLAG__) (((__BANK__) == FMC_NAND_BANK2)? (((__INSTANCE__)->SR2 &(__FLAG__)) == (__FLAG__)): \ (((__INSTANCE__)->SR3 &(__FLAG__)) == (__FLAG__))) /** * @brief Clear flag status of the NAND device. * @param __INSTANCE__ FMC_NAND Instance * @param __BANK__ FMC_NAND Bank * @param __FLAG__ FMC_NAND flag * This parameter can be any combination of the following values: * @arg FMC_FLAG_RISING_EDGE Interrupt rising edge flag. * @arg FMC_FLAG_LEVEL Interrupt level edge flag. * @arg FMC_FLAG_FALLING_EDGE Interrupt falling edge flag. * @arg FMC_FLAG_FEMPT FIFO empty flag. * @retval None */ #define __FMC_NAND_CLEAR_FLAG(__INSTANCE__, __BANK__, __FLAG__) (((__BANK__) == FMC_NAND_BANK2)? CLEAR_BIT((__INSTANCE__)->SR2, (__FLAG__)): \ CLEAR_BIT((__INSTANCE__)->SR3, (__FLAG__))) /** * @brief Enable the PCCARD device interrupt. 
* @param __INSTANCE__ FMC_PCCARD Instance * @param __INTERRUPT__ FMC_PCCARD interrupt * This parameter can be any combination of the following values: * @arg FMC_IT_RISING_EDGE Interrupt rising edge. * @arg FMC_IT_LEVEL Interrupt level. * @arg FMC_IT_FALLING_EDGE Interrupt falling edge. * @retval None */ #define __FMC_PCCARD_ENABLE_IT(__INSTANCE__, __INTERRUPT__) SET_BIT((__INSTANCE__)->SR4, (__INTERRUPT__)) /** * @brief Disable the PCCARD device interrupt. * @param __INSTANCE__ FMC_PCCARD Instance * @param __INTERRUPT__ FMC_PCCARD interrupt * This parameter can be any combination of the following values: * @arg FMC_IT_RISING_EDGE Interrupt rising edge. * @arg FMC_IT_LEVEL Interrupt level. * @arg FMC_IT_FALLING_EDGE Interrupt falling edge. * @retval None */ #define __FMC_PCCARD_DISABLE_IT(__INSTANCE__, __INTERRUPT__) CLEAR_BIT((__INSTANCE__)->SR4, (__INTERRUPT__)) /** * @brief Get flag status of the PCCARD device. * @param __INSTANCE__ FMC_PCCARD Instance * @param __FLAG__ FMC_PCCARD flag * This parameter can be any combination of the following values: * @arg FMC_FLAG_RISING_EDGE Interrupt rising edge flag. * @arg FMC_FLAG_LEVEL Interrupt level edge flag. * @arg FMC_FLAG_FALLING_EDGE Interrupt falling edge flag. * @arg FMC_FLAG_FEMPT FIFO empty flag. * @retval The state of FLAG (SET or RESET). */ #define __FMC_PCCARD_GET_FLAG(__INSTANCE__, __FLAG__) (((__INSTANCE__)->SR4 &(__FLAG__)) == (__FLAG__)) /** * @brief Clear flag status of the PCCARD device. * @param __INSTANCE__ FMC_PCCARD Instance * @param __FLAG__ FMC_PCCARD flag * This parameter can be any combination of the following values: * @arg FMC_FLAG_RISING_EDGE Interrupt rising edge flag. * @arg FMC_FLAG_LEVEL Interrupt level edge flag. * @arg FMC_FLAG_FALLING_EDGE Interrupt falling edge flag. * @arg FMC_FLAG_FEMPT FIFO empty flag. 
* @retval None */ #define __FMC_PCCARD_CLEAR_FLAG(__INSTANCE__, __FLAG__) CLEAR_BIT((__INSTANCE__)->SR4, (__FLAG__)) /** * @} */ /** * @} */ /* Exported functions --------------------------------------------------------*/ /** @addtogroup FMC_LL_Exported_Functions * @{ */ /** @addtogroup FMC_NORSRAM * @{ */ /** @addtogroup FMC_NORSRAM_Group1 * @{ */ /* FMC_NORSRAM Controller functions ******************************************/ /* Initialization/de-initialization functions */ HAL_StatusTypeDef FMC_NORSRAM_Init(FMC_NORSRAM_TypeDef *Device, FMC_NORSRAM_InitTypeDef *Init); HAL_StatusTypeDef FMC_NORSRAM_Timing_Init(FMC_NORSRAM_TypeDef *Device, FMC_NORSRAM_TimingTypeDef *Timing, uint32_t Bank); HAL_StatusTypeDef FMC_NORSRAM_Extended_Timing_Init(FMC_NORSRAM_EXTENDED_TypeDef *Device, FMC_NORSRAM_TimingTypeDef *Timing, uint32_t Bank, uint32_t ExtendedMode); HAL_StatusTypeDef FMC_NORSRAM_DeInit(FMC_NORSRAM_TypeDef *Device, FMC_NORSRAM_EXTENDED_TypeDef *ExDevice, uint32_t Bank); /** * @} */ /** @addtogroup FMC_NORSRAM_Group2 * @{ */ /* FMC_NORSRAM Control functions */ HAL_StatusTypeDef FMC_NORSRAM_WriteOperation_Enable(FMC_NORSRAM_TypeDef *Device, uint32_t Bank); HAL_StatusTypeDef FMC_NORSRAM_WriteOperation_Disable(FMC_NORSRAM_TypeDef *Device, uint32_t Bank); /** * @} */ /** * @} */ /** @addtogroup FMC_NAND * @{ */ /* FMC_NAND Controller functions **********************************************/ /* Initialization/de-initialization functions */ /** @addtogroup FMC_NAND_Exported_Functions_Group1 * @{ */ HAL_StatusTypeDef FMC_NAND_Init(FMC_NAND_TypeDef *Device, FMC_NAND_InitTypeDef *Init); HAL_StatusTypeDef FMC_NAND_CommonSpace_Timing_Init(FMC_NAND_TypeDef *Device, FMC_NAND_PCC_TimingTypeDef *Timing, uint32_t Bank); HAL_StatusTypeDef FMC_NAND_AttributeSpace_Timing_Init(FMC_NAND_TypeDef *Device, FMC_NAND_PCC_TimingTypeDef *Timing, uint32_t Bank); HAL_StatusTypeDef FMC_NAND_DeInit(FMC_NAND_TypeDef *Device, uint32_t Bank); /** * @} */ /* FMC_NAND Control functions */ /** @addtogroup 
FMC_NAND_Exported_Functions_Group2 * @{ */ HAL_StatusTypeDef FMC_NAND_ECC_Enable(FMC_NAND_TypeDef *Device, uint32_t Bank); HAL_StatusTypeDef FMC_NAND_ECC_Disable(FMC_NAND_TypeDef *Device, uint32_t Bank); HAL_StatusTypeDef FMC_NAND_GetECC(FMC_NAND_TypeDef *Device, uint32_t *ECCval, uint32_t Bank, uint32_t Timeout); /** * @} */ /** * @} */ /** @addtogroup FMC_PCCARD * @{ */ /* FMC_PCCARD Controller functions ********************************************/ /* Initialization/de-initialization functions */ /** @addtogroup FMC_PCCARD_Exported_Functions_Group1 * @{ */ HAL_StatusTypeDef FMC_PCCARD_Init(FMC_PCCARD_TypeDef *Device, FMC_PCCARD_InitTypeDef *Init); HAL_StatusTypeDef FMC_PCCARD_CommonSpace_Timing_Init(FMC_PCCARD_TypeDef *Device, FMC_NAND_PCC_TimingTypeDef *Timing); HAL_StatusTypeDef FMC_PCCARD_AttributeSpace_Timing_Init(FMC_PCCARD_TypeDef *Device, FMC_NAND_PCC_TimingTypeDef *Timing); HAL_StatusTypeDef FMC_PCCARD_IOSpace_Timing_Init(FMC_PCCARD_TypeDef *Device, FMC_NAND_PCC_TimingTypeDef *Timing); HAL_StatusTypeDef FMC_PCCARD_DeInit(FMC_PCCARD_TypeDef *Device); /** * @} */ /** * @} */ /** * @} */ /** * @} */ #endif /* FMC_BANK1 */ /** * @} */ #ifdef __cplusplus } #endif #endif /* __STM32F3xx_LL_FMC_H */ /************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
yGO77/betaflight
lib/main/STM32F3xx_HAL_Driver/Inc/stm32f3xx_ll_fmc.h
C
gpl-3.0
40,444
<html> <head> <meta http-equiv="Content-Type" content="text/html; charset=US-ASCII"> <title>stream_socket_service::native_non_blocking (1 of 2 overloads)</title> <link rel="stylesheet" href="../../../../../../doc/src/boostbook.css" type="text/css"> <meta name="generator" content="DocBook XSL Stylesheets V1.78.1"> <link rel="home" href="../../../../boost_asio.html" title="Boost.Asio"> <link rel="up" href="../native_non_blocking.html" title="stream_socket_service::native_non_blocking"> <link rel="prev" href="../native_non_blocking.html" title="stream_socket_service::native_non_blocking"> <link rel="next" href="overload2.html" title="stream_socket_service::native_non_blocking (2 of 2 overloads)"> </head> <body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"> <table cellpadding="2" width="100%"><tr> <td valign="top"><img alt="Boost C++ Libraries" width="277" height="86" src="../../../../../../boost.png"></td> <td align="center"><a href="../../../../../../index.html">Home</a></td> <td align="center"><a href="../../../../../../libs/libraries.htm">Libraries</a></td> <td align="center"><a href="http://www.boost.org/users/people.html">People</a></td> <td align="center"><a href="http://www.boost.org/users/faq.html">FAQ</a></td> <td align="center"><a href="../../../../../../more/index.htm">More</a></td> </tr></table> <hr> <div class="spirit-nav"> <a accesskey="p" href="../native_non_blocking.html"><img src="../../../../../../doc/src/images/prev.png" alt="Prev"></a><a accesskey="u" href="../native_non_blocking.html"><img src="../../../../../../doc/src/images/up.png" alt="Up"></a><a accesskey="h" href="../../../../boost_asio.html"><img src="../../../../../../doc/src/images/home.png" alt="Home"></a><a accesskey="n" href="overload2.html"><img src="../../../../../../doc/src/images/next.png" alt="Next"></a> </div> <div class="section"> <div class="titlepage"><div><div><h5 class="title"> <a 
name="boost_asio.reference.stream_socket_service.native_non_blocking.overload1"></a><a class="link" href="overload1.html" title="stream_socket_service::native_non_blocking (1 of 2 overloads)">stream_socket_service::native_non_blocking (1 of 2 overloads)</a> </h5></div></div></div> <p> Gets the non-blocking mode of the native socket implementation. </p> <pre class="programlisting"><span class="keyword">bool</span> <span class="identifier">native_non_blocking</span><span class="special">(</span> <span class="keyword">const</span> <span class="identifier">implementation_type</span> <span class="special">&amp;</span> <span class="identifier">impl</span><span class="special">)</span> <span class="keyword">const</span><span class="special">;</span> </pre> </div> <table xmlns:rev="http://www.cs.rpi.edu/~gregod/boost/tools/doc/revision" width="100%"><tr> <td align="left"></td> <td align="right"><div class="copyright-footer">Copyright &#169; 2003-2015 Christopher M. Kohlhoff<p> Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at <a href="http://www.boost.org/LICENSE_1_0.txt" target="_top">http://www.boost.org/LICENSE_1_0.txt</a>) </p> </div></td> </tr></table> <hr> <div class="spirit-nav"> <a accesskey="p" href="../native_non_blocking.html"><img src="../../../../../../doc/src/images/prev.png" alt="Prev"></a><a accesskey="u" href="../native_non_blocking.html"><img src="../../../../../../doc/src/images/up.png" alt="Up"></a><a accesskey="h" href="../../../../boost_asio.html"><img src="../../../../../../doc/src/images/home.png" alt="Home"></a><a accesskey="n" href="overload2.html"><img src="../../../../../../doc/src/images/next.png" alt="Next"></a> </div> </body> </html>
gwq5210/litlib
thirdparty/sources/boost_1_60_0/doc/html/boost_asio/reference/stream_socket_service/native_non_blocking/overload1.html
HTML
gpl-3.0
3,742
--- layout: "enterprise" page_title: "Provider - Artifacts - Terraform Enterprise" sidebar_current: "docs-enterprise-artifacts-provider" description: |- Terraform has a provider for managing artifacts called `atlas_artifact`. --- # Artifact Provider Terraform has a [provider](https://terraform.io/docs/providers/index.html) for managing Terraform Enterprise artifacts called `atlas_artifact`. This is used to make data stored in Artifacts available to Terraform for interpolation. In the following example, an artifact is defined and references an AMI ID stored in Terraform Enterprise. ~> **Why is this called "atlas"?** Atlas was previously a commercial offering from HashiCorp that included a full suite of enterprise products. The products have since been broken apart into their individual products, like **Terraform Enterprise**. While this transition is in progress, you may see references to "atlas" in the documentation. We apologize for the inconvenience. ```hcl provider "atlas" { # You can also set the atlas token by exporting ATLAS_TOKEN into your env token = "${var.atlas_token}" } data "atlas_artifact" "web-worker" { name = "my-username/web-worker" type = "amazon.image" version = "latest" } resource "aws_instance" "worker-machine" { ami = "${atlas_artifact.web-worker.metadata_full.region-us-east-1}" instance_type = "m1.small" } ``` This automatically pulls the "latest" artifact version. Following a new artifact version being created via a Packer build, the following diff would be generated when running `terraform plan`. ``` -/+ aws_instance.worker-machine ami: "ami-168f9d7e" => "ami-2f3a9df2" (forces new resource) instance_type: "m1.small" => "m1.small" ``` This allows you to reference changing artifacts and trigger new deployments upon pushing subsequent Packer builds. Read more about artifacts in the [Terraform documentation](https://terraform.io/docs/providers/terraform-enterprise/r/artifact.html).
elblivion/terraform
website/source/docs/enterprise/artifacts/artifact-provider.html.md
Markdown
mpl-2.0
2,001
# Copyright 2013, Big Switch Networks, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import logging from django.core.urlresolvers import reverse from django.utils.translation import ugettext_lazy as _ from horizon import exceptions from horizon import forms from horizon import messages from horizon.utils import validators from openstack_dashboard import api port_validator = validators.validate_port_or_colon_separated_port_range LOG = logging.getLogger(__name__) class UpdateRule(forms.SelfHandlingForm): name = forms.CharField(max_length=80, label=_("Name"), required=False) description = forms.CharField( required=False, max_length=80, label=_("Description")) protocol = forms.ChoiceField( label=_("Protocol"), required=False, choices=[('TCP', _('TCP')), ('UDP', _('UDP')), ('ICMP', _('ICMP')), ('ANY', _('ANY'))], help_text=_('Protocol for the firewall rule')) action = forms.ChoiceField( label=_("Action"), required=False, choices=[('ALLOW', _('ALLOW')), ('DENY', _('DENY'))], help_text=_('Action for the firewall rule')) source_ip_address = forms.IPField( label=_("Source IP Address/Subnet"), version=forms.IPv4 | forms.IPv6, required=False, mask=True, help_text=_('Source IP address or subnet')) destination_ip_address = forms.IPField( label=_('Destination IP Address/Subnet'), version=forms.IPv4 | forms.IPv6, required=False, mask=True, help_text=_('Destination IP address or subnet')) source_port = forms.CharField( max_length=80, label=_("Source Port/Port Range"), 
required=False, validators=[port_validator], help_text=_('Source port (integer in [1, 65535] or range in a:b)')) destination_port = forms.CharField( max_length=80, label=_("Destination Port/Port Range"), required=False, validators=[port_validator], help_text=_('Destination port (integer in [1, 65535] or range' ' in a:b)')) shared = forms.BooleanField(label=_("Shared"), required=False) enabled = forms.BooleanField(label=_("Enabled"), required=False) failure_url = 'horizon:project:firewalls:index' def handle(self, request, context): rule_id = self.initial['rule_id'] name_or_id = context.get('name') or rule_id if context['protocol'] == 'ANY': context['protocol'] = None for f in ['source_ip_address', 'destination_ip_address', 'source_port', 'destination_port']: if not context[f]: context[f] = None try: rule = api.fwaas.rule_update(request, rule_id, **context) msg = _('Rule %s was successfully updated.') % name_or_id LOG.debug(msg) messages.success(request, msg) return rule except Exception as e: msg = (_('Failed to update rule %(name)s: %(reason)s') % {'name': name_or_id, 'reason': e}) LOG.error(msg) redirect = reverse(self.failure_url) exceptions.handle(request, msg, redirect=redirect) class UpdatePolicy(forms.SelfHandlingForm): name = forms.CharField(max_length=80, label=_("Name"), required=False) description = forms.CharField(required=False, max_length=80, label=_("Description")) shared = forms.BooleanField(label=_("Shared"), required=False) audited = forms.BooleanField(label=_("Audited"), required=False) failure_url = 'horizon:project:firewalls:index' def handle(self, request, context): policy_id = self.initial['policy_id'] name_or_id = context.get('name') or policy_id try: policy = api.fwaas.policy_update(request, policy_id, **context) msg = _('Policy %s was successfully updated.') % name_or_id LOG.debug(msg) messages.success(request, msg) return policy except Exception as e: msg = _('Failed to update policy %(name)s: %(reason)s') % { 'name': name_or_id, 'reason': 
e} LOG.error(msg) redirect = reverse(self.failure_url) exceptions.handle(request, msg, redirect=redirect) class UpdateFirewall(forms.SelfHandlingForm): name = forms.CharField(max_length=80, label=_("Name"), required=False) description = forms.CharField(max_length=80, label=_("Description"), required=False) firewall_policy_id = forms.ChoiceField(label=_("Policy")) admin_state_up = forms.ChoiceField(choices=[(True, _('UP')), (False, _('DOWN'))], label=_("Admin State")) failure_url = 'horizon:project:firewalls:index' def __init__(self, request, *args, **kwargs): super(UpdateFirewall, self).__init__(request, *args, **kwargs) try: tenant_id = self.request.user.tenant_id policies = api.fwaas.policy_list_for_tenant(request, tenant_id) policies = sorted(policies, key=lambda policy: policy.name) except Exception: exceptions.handle(request, _('Unable to retrieve policy list.')) policies = [] policy_id = kwargs['initial']['firewall_policy_id'] policy_name = [p.name for p in policies if p.id == policy_id][0] firewall_policy_id_choices = [(policy_id, policy_name)] for p in policies: if p.id != policy_id: firewall_policy_id_choices.append((p.id, p.name_or_id)) self.fields['firewall_policy_id'].choices = firewall_policy_id_choices def handle(self, request, context): firewall_id = self.initial['firewall_id'] name_or_id = context.get('name') or firewall_id context['admin_state_up'] = (context['admin_state_up'] == 'True') try: firewall = api.fwaas.firewall_update(request, firewall_id, **context) msg = _('Firewall %s was successfully updated.') % name_or_id LOG.debug(msg) messages.success(request, msg) return firewall except Exception as e: msg = _('Failed to update firewall %(name)s: %(reason)s') % { 'name': name_or_id, 'reason': e} LOG.error(msg) redirect = reverse(self.failure_url) exceptions.handle(request, msg, redirect=redirect) class InsertRuleToPolicy(forms.SelfHandlingForm): firewall_rule_id = forms.ChoiceField(label=_("Insert Rule")) insert_before = 
forms.ChoiceField(label=_("Before"), required=False) insert_after = forms.ChoiceField(label=_("After"), required=False) failure_url = 'horizon:project:firewalls:index' def __init__(self, request, *args, **kwargs): super(InsertRuleToPolicy, self).__init__(request, *args, **kwargs) try: tenant_id = self.request.user.tenant_id all_rules = api.fwaas.rule_list_for_tenant(request, tenant_id) all_rules = sorted(all_rules, key=lambda rule: rule.name_or_id) available_rules = [r for r in all_rules if not r.firewall_policy_id] current_rules = [] for r in kwargs['initial']['firewall_rules']: r_obj = [rule for rule in all_rules if r == rule.id][0] current_rules.append(r_obj) available_choices = [(r.id, r.name_or_id) for r in available_rules] current_choices = [(r.id, r.name_or_id) for r in current_rules] except Exception as e: msg = _('Failed to retrieve available rules: %s') % e LOG.error(msg) redirect = reverse(self.failure_url) exceptions.handle(request, msg, redirect=redirect) self.fields['firewall_rule_id'].choices = available_choices self.fields['insert_before'].choices = [('', '')] + current_choices self.fields['insert_after'].choices = [('', '')] + current_choices def handle(self, request, context): policy_id = self.initial['policy_id'] policy_name_or_id = self.initial['name'] or policy_id try: insert_rule_id = context['firewall_rule_id'] insert_rule = api.fwaas.rule_get(request, insert_rule_id) body = {'firewall_rule_id': insert_rule_id, 'insert_before': context['insert_before'], 'insert_after': context['insert_after']} policy = api.fwaas.policy_insert_rule(request, policy_id, **body) msg = _('Rule %(rule)s was successfully inserted to policy ' '%(policy)s.') % { 'rule': insert_rule.name or insert_rule.id, 'policy': policy_name_or_id} LOG.debug(msg) messages.success(request, msg) return policy except Exception as e: msg = _('Failed to insert rule to policy %(name)s: %(reason)s') % { 'name': policy_id, 'reason': e} LOG.error(msg) redirect = reverse(self.failure_url) 
exceptions.handle(request, msg, redirect=redirect) class RemoveRuleFromPolicy(forms.SelfHandlingForm): firewall_rule_id = forms.ChoiceField(label=_("Remove Rule")) failure_url = 'horizon:project:firewalls:index' def __init__(self, request, *args, **kwargs): super(RemoveRuleFromPolicy, self).__init__(request, *args, **kwargs) try: tenant_id = request.user.tenant_id all_rules = api.fwaas.rule_list_for_tenant(request, tenant_id) current_rules = [] for r in kwargs['initial']['firewall_rules']: r_obj = [rule for rule in all_rules if r == rule.id][0] current_rules.append(r_obj) current_choices = [(r.id, r.name_or_id) for r in current_rules] except Exception as e: msg = _('Failed to retrieve current rules in policy %(name)s: ' '%(reason)s') % {'name': self.initial['name'], 'reason': e} LOG.error(msg) redirect = reverse(self.failure_url) exceptions.handle(request, msg, redirect=redirect) self.fields['firewall_rule_id'].choices = current_choices def handle(self, request, context): policy_id = self.initial['policy_id'] policy_name_or_id = self.initial['name'] or policy_id try: remove_rule_id = context['firewall_rule_id'] remove_rule = api.fwaas.rule_get(request, remove_rule_id) body = {'firewall_rule_id': remove_rule_id} policy = api.fwaas.policy_remove_rule(request, policy_id, **body) msg = _('Rule %(rule)s was successfully removed from policy ' '%(policy)s.') % { 'rule': remove_rule.name or remove_rule.id, 'policy': policy_name_or_id} LOG.debug(msg) messages.success(request, msg) return policy except Exception as e: msg = _('Failed to remove rule from policy %(name)s: ' '%(reason)s') % {'name': self.initial['name'], 'reason': e} LOG.error(msg) redirect = reverse(self.failure_url) exceptions.handle(request, msg, redirect=redirect) class RouterInsertionFormBase(forms.SelfHandlingForm): def __init__(self, request, *args, **kwargs): super(RouterInsertionFormBase, self).__init__(request, *args, **kwargs) try: router_choices = self.get_router_choices(request, kwargs) 
self.fields['router_ids'].choices = router_choices except Exception as e: msg = self.init_failure_msg % {'name': self.initial['name'], 'reason': e} LOG.error(msg) redirect = reverse(self.failure_url) exceptions.handle(request, msg, redirect=redirect) @abc.abstractmethod def get_router_choices(self, request, kwargs): """Return a list of selectable routers.""" @abc.abstractmethod def get_new_router_ids(self, context): """Return a new list of router IDs associated with the firewall.""" def handle(self, request, context): firewall_id = self.initial['firewall_id'] firewall_name_or_id = self.initial['name'] or firewall_id try: body = {'router_ids': self.get_new_router_ids(context)} firewall = api.fwaas.firewall_update(request, firewall_id, **body) msg = self.success_msg % {'firewall': firewall_name_or_id} LOG.debug(msg) messages.success(request, msg) return firewall except Exception as e: msg = self.failure_msg % {'name': firewall_name_or_id, 'reason': e} LOG.error(msg) redirect = reverse(self.failure_url) exceptions.handle(request, msg, redirect=redirect) class AddRouterToFirewall(RouterInsertionFormBase): router_ids = forms.MultipleChoiceField( label=_("Add Routers"), required=False, widget=forms.CheckboxSelectMultiple(), help_text=_("Add selected router(s) to the firewall.")) failure_url = 'horizon:project:firewalls:index' success_msg = _('Router(s) was/were successfully added to firewall ' '%(firewall)s.') failure_msg = _('Failed to add router(s) to firewall %(name)s: %(reason)s') init_failure_msg = _('Failed to retrieve available routers: %(reason)s') def get_router_choices(self, request, kwargs): tenant_id = self.request.user.tenant_id routers_list = api.fwaas.firewall_unassociated_routers_list( request, tenant_id) return [(r.id, r.name_or_id) for r in routers_list] def get_new_router_ids(self, context): existing_router_ids = self.initial['router_ids'] add_router_ids = context['router_ids'] return add_router_ids + existing_router_ids class 
RemoveRouterFromFirewall(RouterInsertionFormBase): router_ids = forms.MultipleChoiceField( label=_("Remove Routers"), required=False, widget=forms.CheckboxSelectMultiple(), help_text=_("Unselect the router(s) to be removed from firewall.")) failure_url = 'horizon:project:firewalls:index' success_msg = _('Router(s) was successfully removed from firewall ' '%(firewall)s.') failure_msg = _('Failed to remove router(s) from firewall %(name)s: ' '%(reason)s') init_failure_msg = _('Failed to retrieve current routers in firewall ' '%(name)s: %(reason)s') def get_router_choices(self, request, kwargs): tenant_id = self.request.user.tenant_id all_routers = api.neutron.router_list(request, tenant_id=tenant_id) current_routers = [r for r in all_routers if r['id'] in kwargs['initial']['router_ids']] return [(r.id, r.name_or_id) for r in current_routers] def get_new_router_ids(self, context): # context[router_ids] is router IDs to be kept. return context['router_ids']
wangxiangyu/horizon
openstack_dashboard/dashboards/project/firewalls/forms.py
Python
apache-2.0
16,187
/* **************************************************************************** * * Copyright (c) Microsoft Corporation. * * This source code is subject to terms and conditions of the Apache License, Version 2.0. A * copy of the license can be found in the License.html file at the root of this distribution. If * you cannot locate the Apache License, Version 2.0, please send an email to * vspython@microsoft.com. By using this source code in any fashion, you are agreeing to be bound * by the terms of the Apache License, Version 2.0. * * You must not remove this notice, or any other, from this software. * * ***************************************************************************/ using System; using System.Collections.Generic; using System.Linq; namespace Microsoft.PythonTools.Interpreter.Default { class CPythonSequenceType : IPythonSequenceType { private readonly IPythonType _type; private readonly List<IPythonType> _indexTypes; public CPythonSequenceType(IPythonType baseType, ITypeDatabaseReader typeDb, List<object> indexTypes) { _type = baseType; if (indexTypes != null) { _indexTypes = new List<IPythonType>(); foreach (var indexType in indexTypes) { typeDb.LookupType(indexType, type => _indexTypes.Add(type)); } } } public IEnumerable<IPythonType> IndexTypes { get { return _indexTypes; } } public IPythonFunction GetConstructors() { return _type.GetConstructors(); } public string Name { get { return _type.Name; } } public string Documentation { get { return _type.Documentation; } } public BuiltinTypeId TypeId { get { return _type.TypeId; } } public IList<IPythonType> Mro { get { return _type.Mro; } } public IPythonModule DeclaringModule { get { return _type.DeclaringModule; } } public bool IsBuiltin { get { return _type.IsBuiltin; } } public IMember GetMember(IModuleContext context, string name) { return _type.GetMember(context, name); } public IEnumerable<string> GetMemberNames(IModuleContext moduleContext) { return _type.GetMemberNames(moduleContext); } public 
PythonMemberType MemberType { get { return _type.MemberType; } } public override string ToString() { return String.Format("CPythonSequenceType('{0}', '{1}')", Name, string.Join("', '", _indexTypes.Select((t => t.Name)))); } } }
msunardi/PTVS
Python/Product/Analysis/Interpreter/Default/CPythonSequenceType.cs
C#
apache-2.0
2,859
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.camel.impl; import java.lang.reflect.Field; import java.util.SortedMap; import java.util.TreeMap; import org.apache.camel.CamelContext; import org.apache.camel.ComponentConfiguration; import org.apache.camel.Endpoint; import org.apache.camel.spi.UriParam; import org.apache.camel.spi.UriParams; import org.apache.camel.util.ObjectHelper; import org.apache.camel.util.ReflectionHelper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * A component implementation for endpoints which are annotated with UriEndpoint to describe * their configurable parameters via annotations * * @deprecated use {@link DefaultComponent} */ @Deprecated public abstract class UriEndpointComponent extends DefaultComponent { private static final Logger LOG = LoggerFactory.getLogger(UriEndpointComponent.class); private Class<? extends Endpoint> endpointClass; private SortedMap<String, ParameterConfiguration> parameterConfigurationMap; public UriEndpointComponent(Class<? extends Endpoint> endpointClass) { this.endpointClass = endpointClass; } public UriEndpointComponent(CamelContext context, Class<? 
extends Endpoint> endpointClass) { super(context); this.endpointClass = endpointClass; } /** * To use a specific endpoint class, instead of what has been provided by the constructors. * * @param endpointClass the endpoint class to use */ public void setEndpointClass(Class<? extends Endpoint> endpointClass) { this.endpointClass = endpointClass; } @Override public ComponentConfiguration createComponentConfiguration() { return new UriComponentConfiguration(this); } /** * Returns a newly created sorted map, indexed by name of all the parameter configurations * of the given endpoint class using introspection for the various annotations like * {@link org.apache.camel.spi.UriEndpoint}, {@link org.apache.camel.spi.UriParam}, {@link org.apache.camel.spi.UriParams} */ public static SortedMap<String, ParameterConfiguration> createParameterConfigurationMap( Class<? extends Endpoint> endpointClass) { SortedMap<String, ParameterConfiguration> answer = new TreeMap<String, ParameterConfiguration>(); populateParameterConfigurationMap(answer, endpointClass, ""); return answer; } protected static void populateParameterConfigurationMap( final SortedMap<String, ParameterConfiguration> parameterMap, Class<?> aClass, final String prefix) { ReflectionHelper.doWithFields(aClass, new ReflectionHelper.FieldCallback() { @Override public void doWith(Field field) throws IllegalArgumentException, IllegalAccessException { UriParam uriParam = field.getAnnotation(UriParam.class); if (uriParam != null) { String name = uriParam.name(); if (ObjectHelper.isEmpty(name)) { name = field.getName(); } String propertyName = prefix + name; // is the parameter a nested configuration object Class<?> fieldType = field.getType(); UriParams uriParams = fieldType.getAnnotation(UriParams.class); if (uriParams != null) { String nestedPrefix = uriParams.prefix(); if (nestedPrefix == null) { nestedPrefix = ""; } nestedPrefix = (prefix + nestedPrefix).trim(); populateParameterConfigurationMap(parameterMap, fieldType, 
nestedPrefix); } else { if (parameterMap.containsKey(propertyName)) { LOG.warn("Duplicate property name " + propertyName + " defined on field " + field); } else { parameterMap.put(propertyName, ParameterConfiguration.newInstance(propertyName, field, uriParam)); } } } } }); } public Class<? extends Endpoint> getEndpointClass() { return endpointClass; } /** * Returns the sorted map of all the URI query parameter names to their {@link ParameterConfiguration} objects */ public SortedMap<String, ParameterConfiguration> getParameterConfigurationMap() { if (parameterConfigurationMap == null) { parameterConfigurationMap = createParameterConfigurationMap(getEndpointClass()); } return new TreeMap<String, ParameterConfiguration>(parameterConfigurationMap); } }
lburgazzoli/apache-camel
camel-core/src/main/java/org/apache/camel/impl/UriEndpointComponent.java
Java
apache-2.0
5,617
/* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractStreamableXContentTestCase; import org.elasticsearch.xpack.core.ml.action.GetBucketsAction.Request; import org.elasticsearch.xpack.core.ml.action.util.PageParams; public class GetBucketActionRequestTests extends AbstractStreamableXContentTestCase<Request> { @Override protected Request createTestInstance() { GetBucketsAction.Request request = new GetBucketsAction.Request(randomAlphaOfLengthBetween(1, 20)); if (randomBoolean()) { request.setTimestamp(String.valueOf(randomLong())); } else { if (randomBoolean()) { request.setStart(String.valueOf(randomLong())); } if (randomBoolean()) { request.setEnd(String.valueOf(randomLong())); } if (randomBoolean()) { request.setExcludeInterim(randomBoolean()); } if (randomBoolean()) { request.setAnomalyScore(randomDouble()); } if (randomBoolean()) { int from = randomInt(10000); int size = randomInt(10000); request.setPageParams(new PageParams(from, size)); } if (randomBoolean()) { request.setSort("anomaly_score"); } request.setDescending(randomBoolean()); } if (randomBoolean()) { request.setExpand(randomBoolean()); } if (randomBoolean()) { request.setExcludeInterim(randomBoolean()); } return request; } @Override protected boolean supportsUnknownFields() { return false; } @Override protected Request createBlankInstance() { return new GetBucketsAction.Request(); } @Override protected Request doParseInstance(XContentParser parser) { return GetBucketsAction.Request.parseRequest(null, parser); } }
gfyoung/elasticsearch
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetBucketActionRequestTests.java
Java
apache-2.0
2,308
/* * Copyright 2016 The Closure Compiler Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @fileoverview * Tests transpilation of arrow functions. * * @author moz@google.com (Michael Zhou) */ goog.require('goog.testing.jsunit'); function testBasic() { var a = [1, 2, 3]; function map(arr, f) { var result = []; for (var i = 0; i < arr.length; i++) { result.push(f(arr[i])); } return result; } assertArrayEquals([2, 3, 4], map(a, (x) => x + 1)); } function testTemplate() { var f = x => `x is ${x}`; assertEquals('x is foo', f('foo')); } function testArguments() { function getArgs(x) { return ((y) => arguments)(7); } assertEquals(5, getArgs(5)[0]); } function forEach(arr, f) { for (var i = 0; i < arr.length; i++) { f(arr[i]); } } function testTwoArrowFunctionsInSameFunctionScope() { var count = 0; forEach([1, 2], x => count += x); forEach([3, 4], x => count += x); assertEquals(10, count); } function testTwoArrowFunctionsInSameBlockScope() { while (true) { var count = 0; forEach([1, 2], x => count += x); forEach([3, 4], x => count += x); assertEquals(10, count); break; } } // https://github.com/google/closure-compiler/issues/932 function testBug932_this() { var count = 0; class C { log(x) { count++; } f(xs) { forEach(xs, x => this.log(x)); if (xs.length > 1) { forEach(xs, x => this.log(x)); } } } var c = new C(); c.f([1, 2]); assertEquals(4, count); } // https://github.com/google/closure-compiler/issues/932 function testBug932_arguments() { var log = []; class C { log(x) { 
log.push(x); } f(var_args) { forEach(arguments, x => this.log(arguments[0])); if (arguments.length > 1) { forEach(arguments, x => this.log(arguments[1])); } } } var c = new C(); c.f(3, 4); assertArrayEquals([3, 3, 4, 4], log); } function testBug1907() { class Base { f() { assertTrue(this instanceof Sub); } } class Sub extends Base { f() { (() => super.f())(); } } (new Sub()).f(); }
GoogleChromeLabs/chromeos_smart_card_connector
third_party/closure-compiler/src/test/com/google/javascript/jscomp/runtime_tests/arrow_test.js
JavaScript
apache-2.0
2,668
RSpec.describe MiqWidget, "::ReportContent" do let(:vm_count) { 2 } let(:widget) do MiqWidget.sync_from_hash(YAML.load(" description: report_vendor_and_guest_os title: Vendor and Guest OS content_type: report options: :col_order: - name - vendor_display :row_count: #{vm_count} visibility: :roles: - _ALL_ resource_name: Vendor and Guest OS resource_type: MiqReport enabled: true read_only: true ")) end before do MiqReport.seed_report("Vendor and Guest OS") EvmSpecHelper.create_guid_miq_server_zone @admin = FactoryBot.create(:user_admin) @admin_group = @admin.current_group FactoryBot.create_list(:vm_vmware, vm_count) end it "#generate_one_content_for_user" do content = widget.generate_one_content_for_user(@admin_group, @admin) expect(content).to be_kind_of MiqWidgetContent expect(content.updated_at).to be_within(2.seconds).of(Time.now.utc) expect(content.contents.scan("</tr>").length).to eq(widget.options[:row_count] + 1) expect(content.contents.scan("</td>").length).to eq(widget.options[:row_count] * widget.options[:col_order].length) expect(content.contents.scan("</th>").length).to eq(widget.options[:col_order].length) expect(content.miq_report_result.html_rows(:offset => 0, :limit => 1).first.scan("</td>").length).to eq(widget.resource.col_order.length) expect(content.miq_report_result.html_rows.count { |c| c.match("<td>VMware</td>") }).to eq(vm_count) expect(content.contents).to match "<tr><th>Name</th><th>Container</th></tr>" expect(widget.contents_for_user(@admin)).to eq(content) end it "#generate_one_content_for_group" do content = widget.generate_one_content_for_group(@admin.current_group, @admin.get_timezone) expect(content).to be_kind_of MiqWidgetContent expect(content.updated_at).to be_within(2.seconds).of(Time.now.utc) expect(content.contents.scan("</tr>").length).to eq(widget.options[:row_count] + 1) expect(content.contents.scan("</td>").length).to eq(widget.options[:row_count] * widget.options[:col_order].length) 
expect(content.contents.scan("</th>").length).to eq(widget.options[:col_order].length) expect(content.miq_report_result.html_rows(:offset => 0, :limit => 1).first.scan("</td>").length).to eq(widget.resource.col_order.length) expect(content.miq_report_result.html_rows.count { |c| c.match("<td>VMware</td>") }).to eq(vm_count) expect(content.contents).to match "<tr><th>Name</th><th>Container</th></tr>" expect(widget.contents_for_user(@admin)).to eq(content) end it "#generate with self service user" do self_service_role = FactoryBot.create( :miq_user_role, :name => "ss_role", :settings => {:restrictions => {:vms => :user_or_group}} ) self_service_group = FactoryBot.create( :miq_group, :description => "EvmGroup-self_service", :miq_user_role => self_service_role ) user2 = FactoryBot.create(:user, :miq_groups => [self_service_group]) report = widget.generate_report(self_service_group, user2) content = MiqWidget::ReportContent.new(:report => report, :resource => widget.resource, :timezone => "UTC", :widget_options => widget.options) expect { content.generate(user2) }.not_to raise_error end end
aufi/manageiq
spec/models/miq_widget/report_content_spec.rb
Ruby
apache-2.0
3,355
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.internal.processors.cache.datastructures.partitioned; import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.IgniteQueue; import org.apache.ignite.cache.CacheAtomicityMode; import org.apache.ignite.cache.CacheMemoryMode; import org.apache.ignite.cache.CacheMode; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.processors.cache.datastructures.IgniteCollectionAbstractTest; import org.apache.ignite.testframework.GridTestUtils; import org.apache.ignite.transactions.Transaction; import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; import static org.apache.ignite.cache.CacheMemoryMode.ONHEAP_TIERED; import static org.apache.ignite.cache.CacheMode.PARTITIONED; import static 
org.apache.ignite.cache.CacheRebalanceMode.SYNC; import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC; import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC; import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ; /** * */ public class GridCachePartitionedQueueCreateMultiNodeSelfTest extends IgniteCollectionAbstractTest { /** {@inheritDoc} */ @Override protected int gridCount() { return 1; } /** {@inheritDoc} */ @Override protected CacheMode collectionCacheMode() { return PARTITIONED; } /** {@inheritDoc} */ @Override protected CacheMemoryMode collectionMemoryMode() { return ONHEAP_TIERED; } /** {@inheritDoc} */ @Override protected CacheAtomicityMode collectionCacheAtomicityMode() { return TRANSACTIONAL; } /** {@inheritDoc} */ @Override protected void beforeTestsStarted() throws Exception { // No-op. } /** {@inheritDoc} */ @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { IgniteConfiguration c = super.getConfiguration(gridName); c.setIncludeEventTypes(); c.setPeerClassLoadingEnabled(false); CacheConfiguration[] ccfg = c.getCacheConfiguration(); if (ccfg != null) { assert ccfg.length == 1 : ccfg.length; c.setCacheConfiguration(ccfg[0], cacheConfiguration()); } else c.setCacheConfiguration(cacheConfiguration()); return c; } /** {@inheritDoc} */ protected CacheConfiguration cacheConfiguration() { CacheConfiguration cc = defaultCacheConfiguration(); cc.setCacheMode(PARTITIONED); cc.setWriteSynchronizationMode(FULL_SYNC); cc.setRebalanceMode(SYNC); cc.setBackups(0); return cc; } /** {@inheritDoc} */ @Override protected void afterTest() throws Exception { stopAllGrids(true); } /** * @throws Exception If failed. 
*/ public void testQueueCreation() throws Exception { final AtomicInteger idx = new AtomicInteger(); IgniteInternalFuture<?> fut = multithreadedAsync( new Callable<Object>() { @Override public Object call() throws Exception { int idx0 = idx.getAndIncrement(); Thread.currentThread().setName("createQueue-" + idx0); final Ignite ignite = startGrid(idx0); UUID locNodeId = ignite.cluster().localNode().id(); info("Started grid: " + locNodeId); info("Creating queue: " + locNodeId); GridTestUtils.runMultiThreaded(new Callable<Void>() { @Override public Void call() throws Exception { ignite.queue("queue", 1, config(true)); return null; } }, 10, "create-queue-" + ignite.name()); IgniteQueue<String> q = ignite.queue("queue", 1, config(true)); assert q != null; info("Putting first value: " + locNodeId); q.offer("val", 1000, MILLISECONDS); info("Putting second value: " + locNodeId); boolean res2 = q.offer("val1", 1000, MILLISECONDS); assert !res2; info("Thread finished: " + locNodeId); return null; } }, 10 ); fut.get(); } /** * @throws Exception If failed. */ public void testTx() throws Exception { fail("https://issues.apache.org/jira/browse/IGNITE-1804"); if (cacheConfiguration().getAtomicityMode() != TRANSACTIONAL) return; int threadCnt = 10; final AtomicInteger idx = new AtomicInteger(); final AtomicBoolean flag = new AtomicBoolean(); final CountDownLatch latch = new CountDownLatch(threadCnt); IgniteInternalFuture<?> fut = multithreadedAsync( new Callable<Object>() { @Override public Object call() throws Exception { Ignite ignite = startGrid(idx.getAndIncrement()); boolean wait = false; if (wait) { latch.countDown(); latch.await(); } // If output presents, test passes with greater probability. 
// info("Start puts."); IgniteCache<Integer, String> cache = ignite.cache(null); info("Partition: " + ignite.affinity(null).partition(1)); try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) { // info("Getting value for key 1"); String s = cache.get(1); // info("Got value: " + s); if (s == null) { assert flag.compareAndSet(false, true); // info("Putting value."); cache.put(1, "val"); // info("Done putting value"); tx.commit(); } else assert "val".equals(s) : "String: " + s; } info("Thread finished for grid: " + ignite.name()); return null; } }, threadCnt ); fut.get(); } }
vldpyatkov/ignite
modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/partitioned/GridCachePartitionedQueueCreateMultiNodeSelfTest.java
Java
apache-2.0
7,759
cask :v1 => 'wesnoth' do version '1.12' sha256 '603877e2acc7b867907a2f06a71d62fb7cbd0c0e39c5b29de207b78b2121e7e2' # sourceforge.net is the official download host per the vendor homepage url "http://downloads.sourceforge.net/sourceforge/wesnoth/Wesnoth_#{version}.dmg" homepage 'http://wesnoth.org' license :gpl app 'Wesnoth.app' end
mAAdhaTTah/homebrew-cask
Casks/wesnoth.rb
Ruby
bsd-2-clause
349
package com.mxgraph.shape; import com.mxgraph.canvas.mxGraphics2DCanvas; import com.mxgraph.util.mxPoint; import com.mxgraph.view.mxCellState; public interface mxIMarker { /** * */ mxPoint paintMarker(mxGraphics2DCanvas canvas, mxCellState state, String type, mxPoint pe, double nx, double ny, double size, boolean source); }
md-k-sarker/OWLAx
src/main/java/com/mxgraph/shape/mxIMarker.java
Java
bsd-2-clause
339
// RUN: %clang -### -Wlarge-by-value-copy %s 2>&1 | FileCheck -check-prefix=LARGE_VALUE_COPY_DEFAULT %s // LARGE_VALUE_COPY_DEFAULT: -Wlarge-by-value-copy=64 // RUN: %clang -### -Wlarge-by-value-copy=128 %s 2>&1 | FileCheck -check-prefix=LARGE_VALUE_COPY_JOINED %s // LARGE_VALUE_COPY_JOINED: -Wlarge-by-value-copy=128 // FIXME: Remove this together with -Warc-abi once an Xcode is released that doesn't pass this flag. // RUN: %clang -### -Warc-abi -Wno-arc-abi %s 2>&1 | FileCheck -check-prefix=ARCABI %s // ARCABI-NOT: unknown warning option '-Warc-abi' // ARCABI-NOT: unknown warning option '-Wno-arc-abi' // Check that -isysroot warns on nonexistent paths. // RUN: %clang -### -c -target i386-apple-darwin10 -isysroot /FOO %s 2>&1 | FileCheck --check-prefix=CHECK-ISYSROOT %s // CHECK-ISYSROOT: warning: no such sysroot directory: '{{([A-Za-z]:.*)?}}/FOO'
santoshn/softboundcets-34
softboundcets-llvm-clang34/tools/clang/test/Driver/warning-options.cpp
C++
bsd-3-clause
863
# -*- coding: utf-8 -*- # Tests for the contrib/localflavor/ CZ Form Fields tests = r""" # CZPostalCodeField ######################################################### >>> from django.contrib.localflavor.cz.forms import CZPostalCodeField >>> f = CZPostalCodeField() >>> f.clean('84545x') Traceback (most recent call last): ... ValidationError: [u'Enter a postal code in the format XXXXX or XXX XX.'] >>> f.clean('91909') u'91909' >>> f.clean('917 01') u'91701' >>> f.clean('12345') u'12345' >>> f.clean('123456') Traceback (most recent call last): ... ValidationError: [u'Enter a postal code in the format XXXXX or XXX XX.'] >>> f.clean('1234') Traceback (most recent call last): ... ValidationError: [u'Enter a postal code in the format XXXXX or XXX XX.'] >>> f.clean('123 4') Traceback (most recent call last): ... ValidationError: [u'Enter a postal code in the format XXXXX or XXX XX.'] # CZRegionSelect ############################################################ >>> from django.contrib.localflavor.cz.forms import CZRegionSelect >>> w = CZRegionSelect() >>> w.render('regions', 'TT') u'<select name="regions">\n<option value="PR">Prague</option>\n<option value="CE">Central Bohemian Region</option>\n<option value="SO">South Bohemian Region</option>\n<option value="PI">Pilsen Region</option>\n<option value="CA">Carlsbad Region</option>\n<option value="US">Usti Region</option>\n<option value="LB">Liberec Region</option>\n<option value="HK">Hradec Region</option>\n<option value="PA">Pardubice Region</option>\n<option value="VY">Vysocina Region</option>\n<option value="SM">South Moravian Region</option>\n<option value="OL">Olomouc Region</option>\n<option value="ZL">Zlin Region</option>\n<option value="MS">Moravian-Silesian Region</option>\n</select>' # CZBirthNumberField ######################################################## >>> from django.contrib.localflavor.cz.forms import CZBirthNumberField >>> f = CZBirthNumberField() >>> f.clean('880523/1237') u'880523/1237' >>> 
f.clean('8805231237') u'8805231237' >>> f.clean('880523/000') u'880523/000' >>> f.clean('880523000') u'880523000' >>> f.clean('882101/0011') u'882101/0011' >>> f.clean('880523/1237', 'm') u'880523/1237' >>> f.clean('885523/1231', 'f') u'885523/1231' >>> f.clean('123456/12') Traceback (most recent call last): ... ValidationError: [u'Enter a birth number in the format XXXXXX/XXXX or XXXXXXXXXX.'] >>> f.clean('123456/12345') Traceback (most recent call last): ... ValidationError: [u'Enter a birth number in the format XXXXXX/XXXX or XXXXXXXXXX.'] >>> f.clean('12345612') Traceback (most recent call last): ... ValidationError: [u'Enter a birth number in the format XXXXXX/XXXX or XXXXXXXXXX.'] >>> f.clean('12345612345') Traceback (most recent call last): ... ValidationError: [u'Enter a birth number in the format XXXXXX/XXXX or XXXXXXXXXX.'] >>> f.clean('881523/0000', 'm') Traceback (most recent call last): ... ValidationError: [u'Enter a valid birth number.'] >>> f.clean('885223/0000', 'm') Traceback (most recent call last): ... ValidationError: [u'Enter a valid birth number.'] >>> f.clean('881223/0000', 'f') Traceback (most recent call last): ... ValidationError: [u'Enter a valid birth number.'] >>> f.clean('886523/0000', 'f') Traceback (most recent call last): ... ValidationError: [u'Enter a valid birth number.'] >>> f.clean('880523/1239') Traceback (most recent call last): ... ValidationError: [u'Enter a valid birth number.'] >>> f.clean('8805231239') Traceback (most recent call last): ... ValidationError: [u'Enter a valid birth number.'] >>> f.clean('990101/0011') Traceback (most recent call last): ... 
ValidationError: [u'Enter a valid birth number.'] # CZICNumberField ######################################################## >>> from django.contrib.localflavor.cz.forms import CZICNumberField >>> f = CZICNumberField() >>> f.clean('12345679') u'12345679' >>> f.clean('12345601') u'12345601' >>> f.clean('12345661') u'12345661' >>> f.clean('12345610') u'12345610' >>> f.clean('1234567') Traceback (most recent call last): ... ValidationError: [u'Enter a valid IC number.'] >>> f.clean('12345660') Traceback (most recent call last): ... ValidationError: [u'Enter a valid IC number.'] >>> f.clean('12345600') Traceback (most recent call last): ... ValidationError: [u'Enter a valid IC number.'] """
Smarsh/django
tests/regressiontests/forms/localflavor/cz.py
Python
bsd-3-clause
4,319
class ContactSerializer < ActiveModel::Serializer attributes :department, :email, :id, :name, :title has_many :phones end
codeforamerica/ohana-api
app/serializers/contact_serializer.rb
Ruby
bsd-3-clause
127
/* Copyright (c) 2015 Intel Corporation. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of works must retain the original copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the original copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this work without specific prior written permission. THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Authors: Cui,Jieqiong <jieqiongx.cui@intel.com> */ var testTarget; $(document).ready(function(){ pause(); }); function play() { testTarget=document.getElementById("MediaPlayback"); testTarget.play(); } function pause() { testTarget=document.getElementById("MediaPlayback"); testTarget.pause(); }
BruceDai/crosswalk-test-suite
misc/webmanu-system-android-tests/testapp/video/resources/video.js
JavaScript
bsd-3-clause
1,758
// Copyright 2014 The oauth2 Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package oauth2 import ( "encoding/json" "errors" "fmt" "io/ioutil" "net/http" "net/http/httptest" "reflect" "strconv" "testing" "time" "github.com/flynn/flynn/Godeps/_workspace/src/golang.org/x/net/context" ) type mockTransport struct { rt func(req *http.Request) (resp *http.Response, err error) } func (t *mockTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { return t.rt(req) } type mockCache struct { token *Token readErr error } func (c *mockCache) ReadToken() (*Token, error) { return c.token, c.readErr } func (c *mockCache) WriteToken(*Token) { // do nothing } func newConf(url string) *Config { return &Config{ ClientID: "CLIENT_ID", ClientSecret: "CLIENT_SECRET", RedirectURL: "REDIRECT_URL", Scopes: []string{"scope1", "scope2"}, Endpoint: Endpoint{ AuthURL: url + "/auth", TokenURL: url + "/token", }, } } func TestAuthCodeURL(t *testing.T) { conf := newConf("server") url := conf.AuthCodeURL("foo", AccessTypeOffline, ApprovalForce) if url != "server/auth?access_type=offline&approval_prompt=force&client_id=CLIENT_ID&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=foo" { t.Errorf("Auth code URL doesn't match the expected, found: %v", url) } } func TestAuthCodeURL_CustomParam(t *testing.T) { conf := newConf("server") param := SetAuthURLParam("foo", "bar") url := conf.AuthCodeURL("baz", param) if url != "server/auth?client_id=CLIENT_ID&foo=bar&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=baz" { t.Errorf("Auth code URL doesn't match the expected, found: %v", url) } } func TestAuthCodeURL_Optional(t *testing.T) { conf := &Config{ ClientID: "CLIENT_ID", Endpoint: Endpoint{ AuthURL: "/auth-url", TokenURL: "/token-url", }, } url := conf.AuthCodeURL("") if url != "/auth-url?client_id=CLIENT_ID&response_type=code" { t.Fatalf("Auth code URL 
doesn't match the expected, found: %v", url) } } func TestExchangeRequest(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.String() != "/token" { t.Errorf("Unexpected exchange request URL, %v is found.", r.URL) } headerAuth := r.Header.Get("Authorization") if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" { t.Errorf("Unexpected authorization header, %v is found.", headerAuth) } headerContentType := r.Header.Get("Content-Type") if headerContentType != "application/x-www-form-urlencoded" { t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) } body, err := ioutil.ReadAll(r.Body) if err != nil { t.Errorf("Failed reading request body: %s.", err) } if string(body) != "client_id=CLIENT_ID&code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL&scope=scope1+scope2" { t.Errorf("Unexpected exchange payload, %v is found.", string(body)) } w.Header().Set("Content-Type", "application/x-www-form-urlencoded") w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer")) })) defer ts.Close() conf := newConf(ts.URL) tok, err := conf.Exchange(NoContext, "exchange-code") if err != nil { t.Error(err) } if !tok.Valid() { t.Fatalf("Token invalid. 
Got: %#v", tok) } if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" { t.Errorf("Unexpected access token, %#v.", tok.AccessToken) } if tok.TokenType != "bearer" { t.Errorf("Unexpected token type, %#v.", tok.TokenType) } scope := tok.Extra("scope") if scope != "user" { t.Errorf("Unexpected value for scope: %v", scope) } } func TestExchangeRequest_JSONResponse(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.String() != "/token" { t.Errorf("Unexpected exchange request URL, %v is found.", r.URL) } headerAuth := r.Header.Get("Authorization") if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" { t.Errorf("Unexpected authorization header, %v is found.", headerAuth) } headerContentType := r.Header.Get("Content-Type") if headerContentType != "application/x-www-form-urlencoded" { t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) } body, err := ioutil.ReadAll(r.Body) if err != nil { t.Errorf("Failed reading request body: %s.", err) } if string(body) != "client_id=CLIENT_ID&code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL&scope=scope1+scope2" { t.Errorf("Unexpected exchange payload, %v is found.", string(body)) } w.Header().Set("Content-Type", "application/json") w.Write([]byte(`{"access_token": "90d64460d14870c08c81352a05dedd3465940a7c", "scope": "user", "token_type": "bearer", "expires_in": 86400}`)) })) defer ts.Close() conf := newConf(ts.URL) tok, err := conf.Exchange(NoContext, "exchange-code") if err != nil { t.Error(err) } if !tok.Valid() { t.Fatalf("Token invalid. 
Got: %#v", tok) } if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" { t.Errorf("Unexpected access token, %#v.", tok.AccessToken) } if tok.TokenType != "bearer" { t.Errorf("Unexpected token type, %#v.", tok.TokenType) } scope := tok.Extra("scope") if scope != "user" { t.Errorf("Unexpected value for scope: %v", scope) } } const day = 24 * time.Hour func TestExchangeRequest_JSONResponse_Expiry(t *testing.T) { seconds := int32(day.Seconds()) jsonNumberType := reflect.TypeOf(json.Number("0")) for _, c := range []struct { expires string expect error }{ {fmt.Sprintf(`"expires_in": %d`, seconds), nil}, {fmt.Sprintf(`"expires_in": "%d"`, seconds), nil}, // PayPal case {fmt.Sprintf(`"expires": %d`, seconds), nil}, // Facebook case {`"expires": false`, &json.UnmarshalTypeError{Value: "bool", Type: jsonNumberType}}, // wrong type {`"expires": {}`, &json.UnmarshalTypeError{Value: "object", Type: jsonNumberType}}, // wrong type {`"expires": "zzz"`, &strconv.NumError{Func: "ParseInt", Num: "zzz", Err: strconv.ErrSyntax}}, // wrong value } { testExchangeRequest_JSONResponse_expiry(t, c.expires, c.expect) } } func testExchangeRequest_JSONResponse_expiry(t *testing.T, exp string, expect error) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") w.Write([]byte(fmt.Sprintf(`{"access_token": "90d", "scope": "user", "token_type": "bearer", %s}`, exp))) })) defer ts.Close() conf := newConf(ts.URL) t1 := time.Now().Add(day) tok, err := conf.Exchange(NoContext, "exchange-code") t2 := time.Now().Add(day) // Do a fmt.Sprint comparison so either side can be // nil. fmt.Sprint just stringifies them to "<nil>", and no // non-nil expected error ever stringifies as "<nil>", so this // isn't terribly disgusting. We do this because Go 1.4 and // Go 1.5 return a different deep value for // json.UnmarshalTypeError. 
In Go 1.5, the // json.UnmarshalTypeError contains a new field with a new // non-zero value. Rather than ignore it here with reflect or // add new files and +build tags, just look at the strings. if fmt.Sprint(err) != fmt.Sprint(expect) { t.Errorf("Error = %v; want %v", err, expect) } if err != nil { return } if !tok.Valid() { t.Fatalf("Token invalid. Got: %#v", tok) } expiry := tok.Expiry if expiry.Before(t1) || expiry.After(t2) { t.Errorf("Unexpected value for Expiry: %v (shold be between %v and %v)", expiry, t1, t2) } } func TestExchangeRequest_BadResponse(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") w.Write([]byte(`{"scope": "user", "token_type": "bearer"}`)) })) defer ts.Close() conf := newConf(ts.URL) tok, err := conf.Exchange(NoContext, "code") if err != nil { t.Fatal(err) } if tok.AccessToken != "" { t.Errorf("Unexpected access token, %#v.", tok.AccessToken) } } func TestExchangeRequest_BadResponseType(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") w.Write([]byte(`{"access_token":123, "scope": "user", "token_type": "bearer"}`)) })) defer ts.Close() conf := newConf(ts.URL) _, err := conf.Exchange(NoContext, "exchange-code") if err == nil { t.Error("expected error from invalid access_token type") } } func TestExchangeRequest_NonBasicAuth(t *testing.T) { tr := &mockTransport{ rt: func(r *http.Request) (w *http.Response, err error) { headerAuth := r.Header.Get("Authorization") if headerAuth != "" { t.Errorf("Unexpected authorization header, %v is found.", headerAuth) } return nil, errors.New("no response") }, } c := &http.Client{Transport: tr} conf := &Config{ ClientID: "CLIENT_ID", Endpoint: Endpoint{ AuthURL: "https://accounts.google.com/auth", TokenURL: "https://accounts.google.com/token", }, } ctx := context.WithValue(context.Background(), 
HTTPClient, c) conf.Exchange(ctx, "code") } func TestPasswordCredentialsTokenRequest(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() expected := "/token" if r.URL.String() != expected { t.Errorf("URL = %q; want %q", r.URL, expected) } headerAuth := r.Header.Get("Authorization") expected = "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" if headerAuth != expected { t.Errorf("Authorization header = %q; want %q", headerAuth, expected) } headerContentType := r.Header.Get("Content-Type") expected = "application/x-www-form-urlencoded" if headerContentType != expected { t.Errorf("Content-Type header = %q; want %q", headerContentType, expected) } body, err := ioutil.ReadAll(r.Body) if err != nil { t.Errorf("Failed reading request body: %s.", err) } expected = "client_id=CLIENT_ID&grant_type=password&password=password1&scope=scope1+scope2&username=user1" if string(body) != expected { t.Errorf("res.Body = %q; want %q", string(body), expected) } w.Header().Set("Content-Type", "application/x-www-form-urlencoded") w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer")) })) defer ts.Close() conf := newConf(ts.URL) tok, err := conf.PasswordCredentialsToken(NoContext, "user1", "password1") if err != nil { t.Error(err) } if !tok.Valid() { t.Fatalf("Token invalid. 
Got: %#v", tok) } expected := "90d64460d14870c08c81352a05dedd3465940a7c" if tok.AccessToken != expected { t.Errorf("AccessToken = %q; want %q", tok.AccessToken, expected) } expected = "bearer" if tok.TokenType != expected { t.Errorf("TokenType = %q; want %q", tok.TokenType, expected) } } func TestTokenRefreshRequest(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.String() == "/somethingelse" { return } if r.URL.String() != "/token" { t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL) } headerContentType := r.Header.Get("Content-Type") if headerContentType != "application/x-www-form-urlencoded" { t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) } body, _ := ioutil.ReadAll(r.Body) if string(body) != "client_id=CLIENT_ID&grant_type=refresh_token&refresh_token=REFRESH_TOKEN" { t.Errorf("Unexpected refresh token payload, %v is found.", string(body)) } })) defer ts.Close() conf := newConf(ts.URL) c := conf.Client(NoContext, &Token{RefreshToken: "REFRESH_TOKEN"}) c.Get(ts.URL + "/somethingelse") } func TestFetchWithNoRefreshToken(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.String() == "/somethingelse" { return } if r.URL.String() != "/token" { t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL) } headerContentType := r.Header.Get("Content-Type") if headerContentType != "application/x-www-form-urlencoded" { t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) } body, _ := ioutil.ReadAll(r.Body) if string(body) != "client_id=CLIENT_ID&grant_type=refresh_token&refresh_token=REFRESH_TOKEN" { t.Errorf("Unexpected refresh token payload, %v is found.", string(body)) } })) defer ts.Close() conf := newConf(ts.URL) c := conf.Client(NoContext, nil) _, err := c.Get(ts.URL + "/somethingelse") if err == nil { t.Errorf("Fetch should return an error if no refresh token 
is set") } } func TestRefreshToken_RefreshTokenReplacement(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") w.Write([]byte(`{"access_token":"ACCESS TOKEN", "scope": "user", "token_type": "bearer", "refresh_token": "NEW REFRESH TOKEN"}`)) return })) defer ts.Close() conf := newConf(ts.URL) tkr := tokenRefresher{ conf: conf, ctx: NoContext, refreshToken: "OLD REFRESH TOKEN", } tk, err := tkr.Token() if err != nil { t.Errorf("Unexpected refreshToken error returned: %v", err) return } if tk.RefreshToken != tkr.refreshToken { t.Errorf("tokenRefresher.refresh_token = %s; want %s", tkr.refreshToken, tk.RefreshToken) } } func TestConfigClientWithToken(t *testing.T) { tok := &Token{ AccessToken: "abc123", } ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if got, want := r.Header.Get("Authorization"), fmt.Sprintf("Bearer %s", tok.AccessToken); got != want { t.Errorf("Authorization header = %q; want %q", got, want) } return })) defer ts.Close() conf := newConf(ts.URL) c := conf.Client(NoContext, tok) req, err := http.NewRequest("GET", ts.URL, nil) if err != nil { t.Error(err) } _, err = c.Do(req) if err != nil { t.Error(err) } }
ozum/flynn
Godeps/_workspace/src/github.com/flynn/oauth2/oauth2_test.go
GO
bsd-3-clause
14,235
"""passlib.ext.django.models -- monkeypatch django hashing framework""" #============================================================================= # imports #============================================================================= # core import logging; log = logging.getLogger(__name__) from warnings import warn # site from django import VERSION from django.conf import settings # pkg from passlib.context import CryptContext from passlib.exc import ExpectedTypeError from passlib.ext.django.utils import _PatchManager, hasher_to_passlib_name, \ get_passlib_hasher, get_preset_config from passlib.utils.compat import callable, unicode, bytes # local __all__ = ["password_context"] #============================================================================= # global attrs #============================================================================= # the context object which this patches contrib.auth to use for password hashing. # configuration controlled by ``settings.PASSLIB_CONFIG``. password_context = CryptContext() # function mapping User objects -> passlib user category. # may be overridden via ``settings.PASSLIB_GET_CATEGORY``. def _get_category(user): """default get_category() implementation""" if user.is_superuser: return "superuser" elif user.is_staff: return "staff" else: return None # object used to track state of patches applied to django. _manager = _PatchManager(log=logging.getLogger(__name__ + "._manager")) # patch status _patched = False #============================================================================= # applying & removing the patches #============================================================================= def _apply_patch(): """monkeypatch django's password handling to use ``passlib_context``, assumes the caller will configure the object. 
""" # # setup constants # log.debug("preparing to monkeypatch 'django.contrib.auth' ...") global _patched assert not _patched, "monkeypatching already applied" HASHERS_PATH = "django.contrib.auth.hashers" MODELS_PATH = "django.contrib.auth.models" USER_PATH = MODELS_PATH + ":User" FORMS_PATH = "django.contrib.auth.forms" # # import UNUSUABLE_PASSWORD and is_password_usuable() helpers # (providing stubs for older django versions) # if VERSION < (1,4): has_hashers = False if VERSION < (1,0): UNUSABLE_PASSWORD = "!" else: from django.contrib.auth.models import UNUSABLE_PASSWORD def is_password_usable(encoded): return encoded is not None and encoded != UNUSABLE_PASSWORD def is_valid_secret(secret): return secret is not None elif VERSION < (1,6): has_hashers = True from django.contrib.auth.hashers import UNUSABLE_PASSWORD, \ is_password_usable # NOTE: 1.4 - 1.5 - empty passwords no longer valid. def is_valid_secret(secret): return bool(secret) else: has_hashers = True from django.contrib.auth.hashers import is_password_usable # 1.6 - empty passwords valid again def is_valid_secret(secret): return secret is not None if VERSION < (1,6): def make_unusable_password(): return UNUSABLE_PASSWORD else: from django.contrib.auth.hashers import make_password as _make_password def make_unusable_password(): return _make_password(None) # django 1.4.6+ uses a separate hasher for "sha1$$digest" hashes has_unsalted_sha1 = (VERSION >= (1,4,6)) # # backport ``User.set_unusable_password()`` for Django 0.9 # (simplifies rest of the code) # if not hasattr(_manager.getorig(USER_PATH), "set_unusable_password"): assert VERSION < (1,0) @_manager.monkeypatch(USER_PATH) def set_unusable_password(user): user.password = make_unusable_password() @_manager.monkeypatch(USER_PATH) def has_usable_password(user): return is_password_usable(user.password) # # patch ``User.set_password() & ``User.check_password()`` to use # context & get_category (would just leave these as wrappers for hashers # module under 
django 1.4, but then we couldn't pass User object into # get_category very easily) # @_manager.monkeypatch(USER_PATH) def set_password(user, password): "passlib replacement for User.set_password()" if is_valid_secret(password): # NOTE: pulls _get_category from module globals cat = _get_category(user) user.password = password_context.encrypt(password, category=cat) else: user.set_unusable_password() @_manager.monkeypatch(USER_PATH) def check_password(user, password): "passlib replacement for User.check_password()" hash = user.password if not is_valid_secret(password) or not is_password_usable(hash): return False if not hash and VERSION < (1,4): return False # NOTE: pulls _get_category from module globals cat = _get_category(user) ok, new_hash = password_context.verify_and_update(password, hash, category=cat) if ok and new_hash is not None: # migrate to new hash if needed. user.password = new_hash user.save() return ok # # override check_password() with our own implementation # @_manager.monkeypatch(HASHERS_PATH, enable=has_hashers) @_manager.monkeypatch(MODELS_PATH) def check_password(password, encoded, setter=None, preferred="default"): "passlib replacement for check_password()" # XXX: this currently ignores "preferred" keyword, since it's purpose # was for hash migration, and that's handled by the context. 
if not is_valid_secret(password) or not is_password_usable(encoded): return False ok = password_context.verify(password, encoded) if ok and setter and password_context.needs_update(encoded): setter(password) return ok # # patch the other functions defined in the ``hashers`` module, as well # as any other known locations where they're imported within ``contrib.auth`` # if has_hashers: @_manager.monkeypatch(HASHERS_PATH) @_manager.monkeypatch(MODELS_PATH) def make_password(password, salt=None, hasher="default"): "passlib replacement for make_password()" if not is_valid_secret(password): return make_unusable_password() if hasher == "default": scheme = None else: scheme = hasher_to_passlib_name(hasher) kwds = dict(scheme=scheme) handler = password_context.handler(scheme) # NOTE: django make specify an empty string for the salt, # even if scheme doesn't accept a salt. we omit keyword # in that case. if salt is not None and (salt or 'salt' in handler.setting_kwds): kwds['salt'] = salt return password_context.encrypt(password, **kwds) @_manager.monkeypatch(HASHERS_PATH) @_manager.monkeypatch(FORMS_PATH) def get_hasher(algorithm="default"): "passlib replacement for get_hasher()" if algorithm == "default": scheme = None else: scheme = hasher_to_passlib_name(algorithm) # NOTE: resolving scheme -> handler instead of # passing scheme into get_passlib_hasher(), # in case context contains custom handler # shadowing name of a builtin handler. handler = password_context.handler(scheme) return get_passlib_hasher(handler, algorithm=algorithm) # identify_hasher() was added in django 1.5, # patching it anyways for 1.4, so passlib's version is always available. 
@_manager.monkeypatch(HASHERS_PATH) @_manager.monkeypatch(FORMS_PATH) def identify_hasher(encoded): "passlib helper to identify hasher from encoded password" handler = password_context.identify(encoded, resolve=True, required=True) algorithm = None if (has_unsalted_sha1 and handler.name == "django_salted_sha1" and encoded.startswith("sha1$$")): # django 1.4.6+ uses a separate hasher for "sha1$$digest" hashes, # but passlib just reuses the "sha1$salt$digest" handler. # we want to resolve to correct django hasher. algorithm = "unsalted_sha1" return get_passlib_hasher(handler, algorithm=algorithm) _patched = True log.debug("... finished monkeypatching django") def _remove_patch(): """undo the django monkeypatching done by this module. offered as a last resort if it's ever needed. .. warning:: This may cause problems if any other Django modules have imported their own copies of the patched functions, though the patched code has been designed to throw an error as soon as possible in this case. """ global _patched if _patched: log.debug("removing django monkeypatching...") _manager.unpatch_all(unpatch_conflicts=True) password_context.load({}) _patched = False log.debug("...finished removing django monkeypatching") return True if _manager: # pragma: no cover -- sanity check log.warning("reverting partial monkeypatching of django...") _manager.unpatch_all() password_context.load({}) log.debug("...finished removing django monkeypatching") return True log.debug("django not monkeypatched") return False #============================================================================= # main code #============================================================================= def _load(): global _get_category # TODO: would like to add support for inheriting config from a preset # (or from existing hasher state) and letting PASSLIB_CONFIG # be an update, not a replacement. # TODO: wrap and import any custom hashers as passlib handlers, # so they could be used in the passlib config. 
# load config from settings _UNSET = object() config = getattr(settings, "PASSLIB_CONFIG", _UNSET) if config is _UNSET: # XXX: should probably deprecate this alias config = getattr(settings, "PASSLIB_CONTEXT", _UNSET) if config is _UNSET: config = "passlib-default" if config is None: warn("setting PASSLIB_CONFIG=None is deprecated, " "and support will be removed in Passlib 1.8, " "use PASSLIB_CONFIG='disabled' instead.", DeprecationWarning) config = "disabled" elif not isinstance(config, (unicode, bytes, dict)): raise ExpectedTypeError(config, "str or dict", "PASSLIB_CONFIG") # load custom category func (if any) get_category = getattr(settings, "PASSLIB_GET_CATEGORY", None) if get_category and not callable(get_category): raise ExpectedTypeError(get_category, "callable", "PASSLIB_GET_CATEGORY") # check if we've been disabled if config == "disabled": if _patched: # pragma: no cover -- sanity check log.error("didn't expect monkeypatching would be applied!") _remove_patch() return # resolve any preset aliases if isinstance(config, str) and '\n' not in config: config = get_preset_config(config) # setup context _apply_patch() password_context.load(config) if get_category: # NOTE: _get_category is module global which is read by # monkeypatched functions constructed by _apply_patch() _get_category = get_category log.debug("passlib.ext.django loaded") # wrap load function so we can undo any patching if something goes wrong try: _load() except: _remove_patch() raise #============================================================================= # eof #=============================================================================
charukiewicz/beer-manager
venv/lib/python3.4/site-packages/passlib/ext/django/models.py
Python
mit
12,558
// Boost.Geometry (aka GGL, Generic Geometry Library) // Copyright (c) 2011-2012 Barend Gehrels, Amsterdam, the Netherlands. // Copyright (c) 2017 Adam Wulkiewicz, Lodz, Poland. // This file was modified by Oracle on 2017. // Modifications copyright (c) 2017 Oracle and/or its affiliates. // Contributed and/or modified by Adam Wulkiewicz, on behalf of Oracle // Use, modification and distribution is subject to the Boost Software License, // Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_GEOMETRY_ALGORITHMS_DETAIL_HAS_SELF_INTERSECTIONS_HPP #define BOOST_GEOMETRY_ALGORITHMS_DETAIL_HAS_SELF_INTERSECTIONS_HPP #include <deque> #include <boost/range.hpp> #include <boost/throw_exception.hpp> #include <boost/geometry/core/point_type.hpp> #include <boost/geometry/algorithms/detail/overlay/turn_info.hpp> #include <boost/geometry/algorithms/detail/overlay/get_turns.hpp> #include <boost/geometry/algorithms/detail/overlay/self_turn_points.hpp> #include <boost/geometry/policies/disjoint_interrupt_policy.hpp> #include <boost/geometry/policies/robustness/robust_point_type.hpp> #include <boost/geometry/policies/robustness/segment_ratio_type.hpp> #include <boost/geometry/policies/robustness/get_rescale_policy.hpp> #ifdef BOOST_GEOMETRY_DEBUG_HAS_SELF_INTERSECTIONS # include <boost/geometry/algorithms/detail/overlay/debug_turn_info.hpp> # include <boost/geometry/io/dsv/write.hpp> #endif namespace boost { namespace geometry { #if ! defined(BOOST_GEOMETRY_OVERLAY_NO_THROW) /*! 
\brief Overlay Invalid Input Exception \ingroup overlay \details The overlay_invalid_input_exception is thrown at invalid input */ class overlay_invalid_input_exception : public geometry::exception { public: inline overlay_invalid_input_exception() {} virtual char const* what() const throw() { return "Boost.Geometry Overlay invalid input exception"; } }; #endif #ifndef DOXYGEN_NO_DETAIL namespace detail { namespace overlay { template <typename Geometry, typename Strategy, typename RobustPolicy> inline bool has_self_intersections(Geometry const& geometry, Strategy const& strategy, RobustPolicy const& robust_policy, bool throw_on_self_intersection = true) { typedef typename point_type<Geometry>::type point_type; typedef turn_info < point_type, typename segment_ratio_type<point_type, RobustPolicy>::type > turn_info; std::deque<turn_info> turns; detail::disjoint::disjoint_interrupt_policy policy; geometry::self_turns<detail::overlay::assign_null_policy>(geometry, strategy, robust_policy, turns, policy); #ifdef BOOST_GEOMETRY_DEBUG_HAS_SELF_INTERSECTIONS bool first = true; #endif for(typename std::deque<turn_info>::const_iterator it = boost::begin(turns); it != boost::end(turns); ++it) { turn_info const& info = *it; bool const both_union_turn = info.operations[0].operation == detail::overlay::operation_union && info.operations[1].operation == detail::overlay::operation_union; bool const both_intersection_turn = info.operations[0].operation == detail::overlay::operation_intersection && info.operations[1].operation == detail::overlay::operation_intersection; bool const valid = (both_union_turn || both_intersection_turn) && (info.method == detail::overlay::method_touch || info.method == detail::overlay::method_touch_interior); if (! 
valid) { #ifdef BOOST_GEOMETRY_DEBUG_HAS_SELF_INTERSECTIONS if (first) { std::cout << "turn points: " << std::endl; first = false; } std::cout << method_char(info.method); for (int i = 0; i < 2; i++) { std::cout << " " << operation_char(info.operations[i].operation); std::cout << " " << info.operations[i].seg_id; } std::cout << " " << geometry::dsv(info.point) << std::endl; #endif #if ! defined(BOOST_GEOMETRY_OVERLAY_NO_THROW) if (throw_on_self_intersection) { BOOST_THROW_EXCEPTION(overlay_invalid_input_exception()); } #endif return true; } } return false; } // For backward compatibility template <typename Geometry> inline bool has_self_intersections(Geometry const& geometry, bool throw_on_self_intersection = true) { typedef typename geometry::point_type<Geometry>::type point_type; typedef typename geometry::rescale_policy_type<point_type>::type rescale_policy_type; typename strategy::intersection::services::default_strategy < typename cs_tag<Geometry>::type >::type strategy; rescale_policy_type robust_policy = geometry::get_rescale_policy<rescale_policy_type>(geometry); return has_self_intersections(geometry, strategy, robust_policy, throw_on_self_intersection); } }} // namespace detail::overlay #endif // DOXYGEN_NO_DETAIL }} // namespace boost::geometry #endif // BOOST_GEOMETRY_ALGORITHMS_DETAIL_HAS_SELF_INTERSECTIONS_HPP
rkq/cplusplus
src/third-party/include/boost_1_64_0/geometry/algorithms/detail/has_self_intersections.hpp
C++
mit
5,291
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. using System.Collections.Generic; namespace System.Collections.Immutable { internal static class AllocFreeConcurrentStack<T> { private const int MaxSize = 35; private static readonly Type s_typeOfT = typeof(T); public static void TryAdd(T item) { // Just in case we're in a scenario where an object is continually requested on one thread // and returned on another, avoid unbounded growth of the stack. Stack<RefAsValueType<T>> localStack = ThreadLocalStack; if (localStack.Count < MaxSize) { localStack.Push(new RefAsValueType<T>(item)); } } public static bool TryTake(out T item) { Stack<RefAsValueType<T>> localStack = ThreadLocalStack; if (localStack != null && localStack.Count > 0) { item = localStack.Pop().Value; return true; } item = default(T); return false; } private static Stack<RefAsValueType<T>> ThreadLocalStack { get { // Ensure the [ThreadStatic] is initialized to a dictionary Dictionary<Type, object> typesToStacks = AllocFreeConcurrentStack.t_stacks; if (typesToStacks == null) { AllocFreeConcurrentStack.t_stacks = typesToStacks = new Dictionary<Type, object>(); } // Get the stack that corresponds to the T object stackObj; if (!typesToStacks.TryGetValue(s_typeOfT, out stackObj)) { stackObj = new Stack<RefAsValueType<T>>(MaxSize); typesToStacks.Add(s_typeOfT, stackObj); } // Return it as the correct type. return (Stack<RefAsValueType<T>>)stackObj; } } } internal static class AllocFreeConcurrentStack { // Workaround for https://github.com/dotnet/coreclr/issues/2191. // When that's fixed, a [ThreadStatic] Stack should be added back to AllocFreeConcurrentStack<T>. [ThreadStatic] internal static Dictionary<Type, object> t_stacks; } }
shahid-pk/corefx
src/System.Collections.Immutable/src/System/Collections/Immutable/AllocFreeConcurrentStack.cs
C#
mit
2,500
# coding=utf-8 """ The NetworkCollector class collects metrics on network interface usage using /proc/net/dev. #### Dependencies * /proc/net/dev """ import diamond.collector from diamond.collector import str_to_bool import diamond.convertor import os import re try: import psutil except ImportError: psutil = None class NetworkCollector(diamond.collector.Collector): PROC = '/proc/net/dev' def get_default_config_help(self): config_help = super(NetworkCollector, self).get_default_config_help() config_help.update({ 'interfaces': 'List of interface types to collect', 'greedy': 'Greedy match interfaces', }) return config_help def get_default_config(self): """ Returns the default collector settings """ config = super(NetworkCollector, self).get_default_config() config.update({ 'path': 'network', 'interfaces': ['eth', 'bond', 'em', 'p1p', 'eno', 'enp', 'ens', 'enx'], 'byte_unit': ['bit', 'byte'], 'greedy': 'true', }) return config def collect(self): """ Collect network interface stats. """ # Initialize results results = {} if os.access(self.PROC, os.R_OK): # Open File file = open(self.PROC) # Build Regular Expression greed = '' if str_to_bool(self.config['greedy']): greed = '\S*' exp = (('^(?:\s*)((?:%s)%s):(?:\s*)' + '(?P<rx_bytes>\d+)(?:\s*)' + '(?P<rx_packets>\w+)(?:\s*)' + '(?P<rx_errors>\d+)(?:\s*)' + '(?P<rx_drop>\d+)(?:\s*)' + '(?P<rx_fifo>\d+)(?:\s*)' + '(?P<rx_frame>\d+)(?:\s*)' + '(?P<rx_compressed>\d+)(?:\s*)' + '(?P<rx_multicast>\d+)(?:\s*)' + '(?P<tx_bytes>\d+)(?:\s*)' + '(?P<tx_packets>\w+)(?:\s*)' + '(?P<tx_errors>\d+)(?:\s*)' + '(?P<tx_drop>\d+)(?:\s*)' + '(?P<tx_fifo>\d+)(?:\s*)' + '(?P<tx_colls>\d+)(?:\s*)' + '(?P<tx_carrier>\d+)(?:\s*)' + '(?P<tx_compressed>\d+)(?:.*)$') % (('|'.join(self.config['interfaces'])), greed)) reg = re.compile(exp) # Match Interfaces for line in file: match = reg.match(line) if match: device = match.group(1) results[device] = match.groupdict() # Close File file.close() else: if not psutil: self.log.error('Unable to import 
psutil') self.log.error('No network metrics retrieved') return None network_stats = psutil.network_io_counters(True) for device in network_stats.keys(): network_stat = network_stats[device] results[device] = {} results[device]['rx_bytes'] = network_stat.bytes_recv results[device]['tx_bytes'] = network_stat.bytes_sent results[device]['rx_packets'] = network_stat.packets_recv results[device]['tx_packets'] = network_stat.packets_sent for device in results: stats = results[device] for s, v in stats.items(): # Get Metric Name metric_name = '.'.join([device, s]) # Get Metric Value metric_value = self.derivative(metric_name, long(v), diamond.collector.MAX_COUNTER) # Convert rx_bytes and tx_bytes if s == 'rx_bytes' or s == 'tx_bytes': convertor = diamond.convertor.binary(value=metric_value, unit='byte') for u in self.config['byte_unit']: # Public Converted Metric self.publish(metric_name.replace('bytes', u), convertor.get(unit=u), 2) else: # Publish Metric Derivative self.publish(metric_name, metric_value) return None
dcsquared13/Diamond
src/collectors/network/network.py
Python
mit
4,536
var should = require('should'), rewire = require('rewire'), configUtils = require('../../../../test/utils/configUtils'), // Stuff we are testing ampContentHelper = rewire('../lib/helpers/amp_content'); // TODO: Amperize really needs to get stubbed, so we can test returning errors // properly and make this test faster! describe('{{amp_content}} helper', function () { afterEach(function () { ampContentHelper.__set__('amperizeCache', {}); }); it('can render content', function (done) { var testData = { html: 'Hello World', updated_at: 'Wed Jul 27 2016 18:17:22 GMT+0200 (CEST)', id: 1 }, ampResult = ampContentHelper.call(testData); ampResult.then(function (rendered) { should.exist(rendered); rendered.string.should.equal(testData.html); done(); }).catch(done); }); it('returns if no html is provided', function (done) { var testData = { updated_at: 'Wed Jul 27 2016 18:17:22 GMT+0200 (CEST)', id: 1 }, ampResult = ampContentHelper.call(testData); ampResult.then(function (rendered) { should.exist(rendered); rendered.string.should.be.equal(''); done(); }).catch(done); }); describe('Cache', function () { it('can render content from cache', function (done) { var testData = { html: 'Hello World', updated_at: 'Wed Jul 27 2016 18:17:22 GMT+0200 (CEST)', id: 1 }, ampCachedResult, ampResult = ampContentHelper.call(testData), amperizeCache = ampContentHelper.__get__('amperizeCache'); ampResult.then(function (rendered) { should.exist(rendered); should.exist(amperizeCache); rendered.string.should.equal(testData.html); amperizeCache[1].should.have.property('updated_at', 'Wed Jul 27 2016 18:17:22 GMT+0200 (CEST)'); amperizeCache[1].should.have.property('amp', testData.html); // call it again, to make it fetch from cache ampCachedResult = ampContentHelper.call(testData); ampCachedResult.then(function (rendered) { should.exist(rendered); should.exist(amperizeCache); amperizeCache[1].should.have.property('updated_at', 'Wed Jul 27 2016 18:17:22 GMT+0200 (CEST)'); 
amperizeCache[1].should.have.property('amp', testData.html); done(); }); }).catch(done); }); it('fetches new AMP HTML if post was changed', function (done) { var testData1 = { html: 'Hello World', updated_at: 'Wed Jul 27 2016 18:17:22 GMT+0200 (CEST)', id: 1 }, testData2 = { html: 'Hello Ghost', updated_at: 'Wed Jul 30 2016 18:17:22 GMT+0200 (CEST)', id: 1 }, ampResult = ampContentHelper.call(testData1), amperizeCache = ampContentHelper.__get__('amperizeCache'); ampResult.then(function (rendered) { should.exist(rendered); should.exist(amperizeCache); rendered.string.should.equal(testData1.html); amperizeCache[1].should.have.property('updated_at', 'Wed Jul 27 2016 18:17:22 GMT+0200 (CEST)'); amperizeCache[1].should.have.property('amp', testData1.html); // call it again with different values to fetch from Amperize and not from cache ampResult = ampContentHelper.call(testData2); ampResult.then(function (rendered) { should.exist(rendered); should.exist(amperizeCache); // it should not have the old value, amperizeCache[1].should.not.have.property('Wed Jul 30 2016 18:17:22 GMT+0200 (CEST)'); // only the new one rendered.string.should.equal(testData2.html); amperizeCache[1].should.have.property('updated_at', 'Wed Jul 30 2016 18:17:22 GMT+0200 (CEST)'); amperizeCache[1].should.have.property('amp', testData2.html); done(); }); }).catch(done); }); }); describe('Transforms and sanitizes HTML', function () { beforeEach(function () { configUtils.set({url: 'https://blog.ghost.org/'}); }); afterEach(function () { ampContentHelper.__set__('amperizeCache', {}); configUtils.restore(); }); it('can transform img tags to amp-img', function (done) { var testData = { html: '<img src="/content/images/2016/08/scheduled2-1.jpg" alt="The Ghost Logo" />', updated_at: 'Wed Jul 27 2016 18:17:22 GMT+0200 (CEST)', id: 1 }, expectedResult = '<amp-img src="https://blog.ghost.org/content/images/2016/08/scheduled2-1.jpg" alt="The Ghost Logo" width="1000" height="281" layout="responsive"></amp-img>', 
ampResult = ampContentHelper.call(testData); ampResult.then(function (rendered) { should.exist(rendered); rendered.string.should.equal(expectedResult); done(); }).catch(done); }); it('can transform audio tags to amp-audio', function (done) { var testData = { html: '<audio controls="controls" width="auto" height="50" autoplay="mobile">Your browser does not support the <code>audio</code> element.<source src="https://audio.com/foo.wav" type="audio/wav"></audio>' + '<audio src="http://audio.com/foo.ogg"><track kind="captions" src="http://audio.com/foo.en.vtt" srclang="en" label="English"><source kind="captions" src="http://audio.com/foo.sv.vtt" srclang="sv" label="Svenska"></audio>', updated_at: 'Wed Jul 27 2016 18:17:22 GMT+0200 (CEST)', id: 1 }, expectedResult = '<amp-audio controls="controls" width="auto" height="50" autoplay="mobile">Your browser does not support the <code>audio</code> element.<source src="https://audio.com/foo.wav" type="audio/wav" /></amp-audio>' + '<amp-audio src="https://audio.com/foo.ogg"><track kind="captions" src="https://audio.com/foo.en.vtt" srclang="en" label="English" /><source kind="captions" src="https://audio.com/foo.sv.vtt" srclang="sv" label="Svenska" /></amp-audio>', ampResult = ampContentHelper.call(testData); ampResult.then(function (rendered) { should.exist(rendered); rendered.string.should.equal(expectedResult); done(); }).catch(done); }); it('removes video tags including source children', function (done) { var testData = { html: '<video width="480" controls poster="https://archive.org/download/WebmVp8Vorbis/webmvp8.gif" >' + '<track kind="captions" src="https://archive.org/download/WebmVp8Vorbis/webmvp8.webm" srclang="en">' + '<source src="https://archive.org/download/WebmVp8Vorbis/webmvp8.webm" type="video/webm">' + '<source src="https://archive.org/download/WebmVp8Vorbis/webmvp8_512kb.mp4" type="video/mp4">' + 'Your browser doesn\'t support HTML5 video tag.' 
+ '</video>', updated_at: 'Wed Jul 27 2016 18:17:22 GMT+0200 (CEST)', id: 1 }, expectedResult = 'Your browser doesn\'t support HTML5 video tag.', ampResult = ampContentHelper.call(testData); ampResult.then(function (rendered) { should.exist(rendered); rendered.string.should.equal(expectedResult); done(); }).catch(done); }); it('removes inline style', function (done) { var testData = { html: '<amp-img src="/content/images/2016/08/aileen_small.jpg" style="border-radius: 50%"; !important' + 'border="0" align="center" font="Arial" width="50" height="50" layout="responsive"></amp-img>' + '<p align="right" style="color: red; !important" bgcolor="white">Hello</p>' + '<table style="width:100%"><tr bgcolor="tomato" colspan="2"><th font="Arial">Name:</th> ' + '<td color="white" colspan="2">Bill Gates</td></tr><tr><th rowspan="2" valign="center">Telephone:</th> ' + '<td>55577854</td></tr></table>', updated_at: 'Wed Jul 27 2016 18:17:22 GMT+0200 (CEST)', id: 1 }, expectedResult = '<amp-img src="https://blog.ghost.org/content/images/2016/08/aileen_small.jpg" width="50" ' + 'height="50" layout="responsive"></amp-img><p align="right">Hello</p>' + '<table><tr bgcolor="tomato"><th>Name:</th> ' + '<td colspan="2">Bill Gates</td></tr><tr><th rowspan="2" valign="center">Telephone:</th> ' + '<td>55577854</td></tr></table>', ampResult = ampContentHelper.call(testData); ampResult.then(function (rendered) { should.exist(rendered); rendered.string.should.equal(expectedResult); done(); }).catch(done); }); it('removes prohibited iframe attributes', function (done) { var testData = { html: '<iframe src="https://player.vimeo.com/video/180069681?color=ffffff" width="640" height="267" frameborder="0" ' + 'webkitallowfullscreen mozallowfullscreen allowfullscreen></iframe>', updated_at: 'Wed Jul 27 2016 18:17:22 GMT+0200 (CEST)', id: 1 }, expectedResult = '<amp-iframe src="https://player.vimeo.com/video/180069681?color=ffffff" width="640" height="267" ' + 'frameborder="0" allowfullscreen 
sandbox="allow-scripts allow-same-origin" layout="responsive"></amp-iframe>', ampResult = ampContentHelper.call(testData); ampResult.then(function (rendered) { should.exist(rendered); rendered.string.should.equal(expectedResult); done(); }).catch(done); }); it('can handle incomplete HTML tags by returning not Amperized HTML', function (done) { var testData = { html: '<img><///img>', updated_at: 'Wed Jul 27 2016 18:17:22 GMT+0200 (CEST)', id: 1 }, ampResult = ampContentHelper.call(testData), sanitizedHTML, ampedHTML; ampResult.then(function (rendered) { sanitizedHTML = ampContentHelper.__get__('cleanHTML'); ampedHTML = ampContentHelper.__get__('ampHTML'); should.exist(rendered); rendered.string.should.equal(''); should.exist(ampedHTML); ampedHTML.should.be.equal('<img>'); should.exist(sanitizedHTML); sanitizedHTML.should.be.equal(''); done(); }).catch(done); }); it('can handle not existing img src by returning not Amperized HTML', function (done) { var testData = { html: '<img src="/content/images/does-not-exist.jpg" alt="The Ghost Logo" />', updated_at: 'Wed Jul 27 2016 18:17:22 GMT+0200 (CEST)', id: 1 }, ampResult = ampContentHelper.call(testData), sanitizedHTML, ampedHTML; ampResult.then(function (rendered) { sanitizedHTML = ampContentHelper.__get__('cleanHTML'); ampedHTML = ampContentHelper.__get__('ampHTML'); should.exist(rendered); rendered.string.should.equal(''); should.exist(ampedHTML); ampedHTML.should.be.equal('<img src="https://blog.ghost.org/content/images/does-not-exist.jpg" alt="The Ghost Logo">'); should.exist(sanitizedHTML); sanitizedHTML.should.be.equal(''); done(); }).catch(done); }); it('sanitizes remaining and not valid tags', function (done) { var testData = { html: '<form<input type="text" placeholder="Hi AMP tester"></form>' + '<script>some script here</script>' + '<style> h1 {color:red;} p {color:blue;}</style>', updated_at: 'Wed Jul 27 2016 18:17:22 GMT+0200 (CEST)', id: 1 }, ampResult = ampContentHelper.call(testData); 
ampResult.then(function (rendered) { should.exist(rendered); rendered.string.should.be.equal(''); done(); }).catch(done); }); }); });
SkynetInc/steam
core/server/apps/amp/tests/amp_content_spec.js
JavaScript
mit
13,880
<?php namespace mageekguy\atoum\report\fields\runner; use mageekguy\atoum\locale, mageekguy\atoum\runner, mageekguy\atoum\report, mageekguy\atoum\observable ; abstract class failures extends report\field { protected $runner = null; public function __construct() { parent::__construct(array(runner::runStop)); } public function getRunner() { return $this->runner; } public function handleEvent($event, observable $observable) { if (parent::handleEvent($event, $observable) === false) { return false; } else { $this->runner = $observable; return true; } } }
kalebheitzman/grav-plugin-events
vendor/atoum/atoum/classes/report/fields/runner/failures.php
PHP
mit
602
-- -- Copyright (c) 2008--2012 Red Hat, Inc. -- -- This software is licensed to you under the GNU General Public License, -- version 2 (GPLv2). There is NO WARRANTY for this software, express or -- implied, including the implied warranties of MERCHANTABILITY or FITNESS -- FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 -- along with this software; if not, see -- http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. -- -- Red Hat trademarks are not licensed under GPLv2. No permission is -- granted to use or replicate Red Hat trademarks that are incorporated -- in this software or its documentation. -- CREATE TABLE rhnPackageFile ( package_id NUMBER NOT NULL CONSTRAINT rhn_package_file_pid_fk REFERENCES rhnPackage (id) ON DELETE CASCADE, capability_id NUMBER NOT NULL CONSTRAINT rhn_package_file_cid_fk REFERENCES rhnPackageCapability (id), device NUMBER NOT NULL, inode NUMBER NOT NULL, file_mode NUMBER NOT NULL, username VARCHAR2(32) NOT NULL, groupname VARCHAR2(32) NOT NULL, rdev NUMBER NOT NULL, file_size NUMBER NOT NULL, mtime timestamp with local time zone NOT NULL, checksum_id NUMBER CONSTRAINT rhn_package_file_chsum_fk REFERENCES rhnChecksum (id), linkto VARCHAR2(256), flags NUMBER NOT NULL, verifyflags NUMBER NOT NULL, lang VARCHAR2(32), created timestamp with local time zone DEFAULT (current_timestamp) NOT NULL, modified timestamp with local time zone DEFAULT (current_timestamp) NOT NULL ) ENABLE ROW MOVEMENT ; CREATE UNIQUE INDEX rhn_package_file_pid_cid_uq ON rhnPackageFile (package_id, capability_id) TABLESPACE [[32m_tbs]]; CREATE INDEX rhn_package_file_cid_idx ON rhnPackageFile (capability_id) TABLESPACE [[32m_tbs]] NOLOGGING;
ogajduse/spacewalk
schema/spacewalk/common/tables/rhnPackageFile.sql
SQL
gpl-2.0
2,105
/* * Copyright (c) 2003, 2007-11 Matteo Frigo * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * */ #include "bench.h" typedef bench_real R; typedef bench_complex C; typedef struct dofft_closure_s { void (*apply)(struct dofft_closure_s *k, bench_complex *in, bench_complex *out); int recopy_input; } dofft_closure; double dmax(double x, double y); typedef void (*aconstrain)(C *a, int n); void arand(C *a, int n); void mkreal(C *A, int n); void mkhermitian(C *A, int rank, const bench_iodim *dim, int stride); void mkhermitian1(C *a, int n); void aadd(C *c, C *a, C *b, int n); void asub(C *c, C *a, C *b, int n); void arol(C *b, C *a, int n, int nb, int na); void aphase_shift(C *b, C *a, int n, int nb, int na, double sign); void ascale(C *a, C alpha, int n); double acmp(C *a, C *b, int n, const char *test, double tol); double mydrand(void); double impulse(dofft_closure *k, int n, int vecn, C *inA, C *inB, C *inC, C *outA, C *outB, C *outC, C *tmp, int rounds, double tol); double linear(dofft_closure *k, int realp, int n, C *inA, C *inB, C *inC, C *outA, C *outB, C *outC, C *tmp, int rounds, double tol); void preserves_input(dofft_closure *k, aconstrain constrain, int n, C *inA, C *inB, C *outB, int rounds); enum { TIME_SHIFT, FREQ_SHIFT }; double 
tf_shift(dofft_closure *k, int realp, const bench_tensor *sz, int n, int vecn, double sign, C *inA, C *inB, C *outA, C *outB, C *tmp, int rounds, double tol, int which_shift); typedef struct dotens2_closure_s { void (*apply)(struct dotens2_closure_s *k, int indx0, int ondx0, int indx1, int ondx1); } dotens2_closure; void bench_dotens2(const bench_tensor *sz0, const bench_tensor *sz1, dotens2_closure *k); void accuracy_test(dofft_closure *k, aconstrain constrain, int sign, int n, C *a, C *b, int rounds, int impulse_rounds, double t[6]); void accuracy_dft(bench_problem *p, int rounds, int impulse_rounds, double t[6]); void accuracy_rdft2(bench_problem *p, int rounds, int impulse_rounds, double t[6]); void accuracy_r2r(bench_problem *p, int rounds, int impulse_rounds, double t[6]); #if defined(BENCHFFT_LDOUBLE) && HAVE_COSL typedef long double trigreal; # define COS cosl # define SIN sinl # define TAN tanl # define KTRIG(x) (x##L) #elif defined(BENCHFFT_QUAD) && HAVE_LIBQUADMATH typedef __float128 trigreal; # define COS cosq # define SIN sinq # define TAN tanq # define KTRIG(x) (x##Q) extern trigreal cosq(trigreal); extern trigreal sinq(trigreal); extern trigreal tanq(trigreal); #else typedef double trigreal; # define COS cos # define SIN sin # define TAN tan # define KTRIG(x) (x) #endif #define K2PI KTRIG(6.2831853071795864769252867665590057683943388)
mesjetiu/grandorgue-es
src/fftw/src/libbench2/verify.h
C
gpl-2.0
3,539
/* * org.openmicroscopy.shoola.agents.imviewer.actions.UnitBarSizeAction * *------------------------------------------------------------------------------ * Copyright (C) 2006 University of Dundee. All rights reserved. * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * *------------------------------------------------------------------------------ */ package org.openmicroscopy.shoola.agents.imviewer.actions; //Java imports import java.awt.event.ActionEvent; import javax.swing.Action; //Third-party libraries //Application-internal dependencies import org.openmicroscopy.shoola.agents.imviewer.view.ImViewer; import org.openmicroscopy.shoola.util.ui.UIUtilities; /** * Increases or decreases the size of the unit bar. * * @author Jean-Marie Burel &nbsp;&nbsp;&nbsp;&nbsp; * <a href="mailto:j.burel@dundee.ac.uk">j.burel@dundee.ac.uk</a> * @author Donald MacDonald &nbsp;&nbsp;&nbsp;&nbsp; * <a href="mailto:donald@lifesci.dundee.ac.uk">donald@lifesci.dundee.ac.uk</a> * @version 3.0 * <small> * (<b>Internal version:</b> $Revision: $ $Date: $) * </small> * @since OME2.2 */ public class UnitBarSizeAction extends ViewerAction { /** Identifies the scale bar of size <code>1</code>. */ public static final int ONE = 0; /** Identifies the scale bar of size <code>2</code>. 
*/ public static final int TWO = 1; /** Identifies the scale bar of size <code>5</code>. */ public static final int FIVE = 2; /** Identifies the scale bar of size <code>10</code>. */ public static final int TEN = 3; /** Identifies the scale bar of size <code>20</code>. */ public static final int TWENTY = 4; /** Identifies the scale bar of size <code>50</code>. */ public static final int FIFTY = 5; /** Identifies the scale bar of size <code>100</code>. */ public static final int HUNDRED = 6; /** Identifies the scale bar of size customized. */ public static final int CUSTOMIZED = 7; /** The default index. */ public static final int DEFAULT_UNIT_INDEX = FIVE; /** The number of supported identifiers. */ private static final int MAX = 7; /** The description of the action. */ private static final String DESCRIPTION = "Select the size of " + "the Scale bar displayed on top of the image."; /** * Array of action names associated to the identifiers defined by this * class. */ private static String[] names; /** * Array of values associated to the identifiers defined by this * class. */ private static int[] values; static { values = new int[MAX+1]; values[ONE] = 1; values[TWO] = 2; values[FIVE] = 5; values[TEN] = 10; values[TWENTY] = 20; values[FIFTY] = 50; values[HUNDRED] = 100; names = new String[MAX+1]; names[ONE] = ""+values[ONE]; names[TWO] = ""+values[TWO]; names[FIVE] = ""+values[FIVE]; names[TEN] = ""+values[TEN]; names[TWENTY] = ""+values[TWENTY]; names[FIFTY] = ""+values[FIFTY]; names[HUNDRED] = ""+values[HUNDRED]; names[CUSTOMIZED] = "Custom"; } /** * Returns the value associated to the default index. * * @return See above. */ public static int getDefaultValue() { return values[DEFAULT_UNIT_INDEX]; } /** * Returns the value associated to the default index. * * @param size The size of reference. * @return See above. 
*/ public static int getDefaultIndex(double size) { if (size < 1) return FIVE; if (size >=1 && size < 2) return TEN; if (size >=2 && size < 3) return TWENTY; if (size >=3 && size < 4) return FIFTY; return HUNDRED; } /** * Returns the value corresponding to the passed index or <code>-1</code>. * * @param index The index to handle. * @return See above. */ public static int getValue(int index) { if (index < ONE || index > MAX) return -1; return values[index]; } /** One of the constant defined by this class. */ private int index; /** * Controls if the specified index is valid. * * @param i The index to check. */ private void checkIndex(int i) { switch (i) { case ONE: case TWO: case FIVE: case TEN: case TWENTY: case FIFTY: case HUNDRED: case CUSTOMIZED: return; default: throw new IllegalArgumentException("Index not supported."); } } /** * Creates a new instance. * * @param model Reference to the model. Mustn't be <code>null</code>. * @param index One of the constant defined by this class. */ public UnitBarSizeAction(ImViewer model, int index) { super(model); checkIndex(index); this.index = index; putValue(Action.SHORT_DESCRIPTION, UIUtilities.formatToolTipText(DESCRIPTION)); putValue(Action.NAME, names[index]); } /** * Returns the index of the action * * @return See above. */ public int getIndex() { return index; } /** * Sets the size of the unit bar. * @see java.awt.event.ActionListener#actionPerformed(ActionEvent) */ public void actionPerformed(ActionEvent e) { if (index != CUSTOMIZED) model.setUnitBarSize(values[index]); else model.showUnitBarSelection(); } }
jballanc/openmicroscopy
components/insight/SRC/org/openmicroscopy/shoola/agents/imviewer/actions/UnitBarSizeAction.java
Java
gpl-2.0
6,387