text
stringlengths
2
99k
meta
dict
/* * Copyright (c) 2017, Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ //! //! \file mhw_state_heap_hwcmd_g11_X.cpp //! \brief Auto-generated definitions for MHW commands and states. //! 
#include "mhw_state_heap_hwcmd_g11_X.h" #include "mos_utilities.h" mhw_state_heap_g11_X::INTERFACE_DESCRIPTOR_DATA_CMD::INTERFACE_DESCRIPTOR_DATA_CMD() { DW0.Value = 0; DW1.Value = 0; DW2.Value = 0; DW2.FloatingPointMode = FLOATING_POINT_MODE_IEEE_754; DW2.ThreadPriority = THREAD_PRIORITY_NORMALPRIORITY; DW2.SingleProgramFlow = SINGLE_PROGRAM_FLOW_MULTIPLE; DW2.DenormMode = DENORM_MODE_FTZ; DW2.ThreadPreemptionDisable = THREAD_PREEMPTION_DISABLE_DISABLE; DW3.Value = 0; DW3.SamplerCount = SAMPLER_COUNT_NOSAMPLERSUSED; DW4.Value = 0; DW5.Value = 0; DW6.Value = 0; DW6.SharedLocalMemorySize = SHARED_LOCAL_MEMORY_SIZE_ENCODES0K; DW6.RoundingMode = ROUNDING_MODE_RTNE; DW7.Value = 0; } mhw_state_heap_g11_X::BINDING_TABLE_STATE_CMD::BINDING_TABLE_STATE_CMD() { DW0.Value = 0; } mhw_state_heap_g11_X::RENDER_SURFACE_STATE_CMD::RENDER_SURFACE_STATE_CMD() { DW0.Value = 0; DW0.MediaBoundaryPixelMode = MEDIA_BOUNDARY_PIXEL_MODE_NORMALMODE; DW0.RenderCacheReadWriteMode = RENDER_CACHE_READ_WRITE_MODE_WRITE_ONLYCACHE; DW0.TileMode = TILE_MODE_LINEAR; DW0.SurfaceHorizontalAlignment = 0; DW0.SurfaceVerticalAlignment = 0; DW0.SurfaceFormat = SURFACE_FORMAT_R32G32B32A32FLOAT; DW0.SurfaceType = SURFACE_TYPE_SURFTYPE1D; DW1.Value = 0; DW1.SampleTapDiscardDisable = SAMPLE_TAP_DISCARD_DISABLE_DISABLE; DW1.CornerTexelMode = CORNER_TEXEL_MODE_DISABLE; DW1.EnableUnormPathInColorPipe = ENABLE_UNORM_PATH_IN_COLOR_PIPE_UNNAMED1; DW2.Value = 0; DW3.Value = 0; DW3.StandardTilingModeExtensions = STANDARD_TILING_MODE_EXTENSIONS_DISABLE; DW3.TileAddressMappingMode = TILE_ADDRESS_MAPPING_MODE_GEN9; DW4.Value = 0; DW4.NumberOfMultisamples = NUMBER_OF_MULTISAMPLES_MULTISAMPLECOUNT1; DW4.MultisampledSurfaceStorageFormat = MULTISAMPLED_SURFACE_STORAGE_FORMAT_MSS; DW4.RenderTargetAndSampleUnormRotation = RENDER_TARGET_AND_SAMPLE_UNORM_ROTATION_0DEG; DW5.Value = 0; DW5.CoherencyType = COHERENCY_TYPE_GPUCOHERENT; DW5.TiledResourceMode = TILED_RESOURCE_MODE_NONE; DW5.EwaDisableForCube = 
EWA_DISABLE_FOR_CUBE_ENABLE; DW6.Value = 0; DW6.Obj0.HalfPitchForChroma = HALF_PITCH_FOR_CHROMA_DISABLE; DW6.Obj1.AuxiliarySurfaceMode = AUXILIARY_SURFACE_MODE_AUXNONE; DW6.Obj2.YuvInterpolationEnable = YUV_INTERPOLATION_ENABLE_DISABLE; DW7.Value = 0; DW7.ShaderChannelSelectAlpha = SHADER_CHANNEL_SELECT_ALPHA_ZERO; DW7.ShaderChannelSelectBlue = SHADER_CHANNEL_SELECT_BLUE_ZERO; DW7.ShaderChannelSelectGreen = SHADER_CHANNEL_SELECT_GREEN_ZERO; DW7.ShaderChannelSelectRed = SHADER_CHANNEL_SELECT_RED_ZERO; DW7.MemoryCompressionMode = MEMORY_COMPRESSION_MODE_HORIZONTAL; DW8_9.Value[0] = DW8_9.Value[1] = 0; DW10_11.Value[0] = DW10_11.Value[1] = 0; DW10_11.Obj0.ClearValueAddressEnable = CLEAR_VALUE_ADDRESS_ENABLE_DISABLE; DW12.Value = 0; DW13.Value = 0; DW14.Value = 0; DW15.Value = 0; } mhw_state_heap_g11_X::MEDIA_SURFACE_STATE_CMD::MEDIA_SURFACE_STATE_CMD() { DW0.Value = 0; DW0.Rotation = ROTATION_NOROTATIONOR0DEGREE; DW1.Value = 0; DW1.CrVCbUPixelOffsetVDirection = CRVCBU_PIXEL_OFFSET_V_DIRECTION_UNNAMED0; DW1.PictureStructure = PICTURE_STRUCTURE_FRAMEPICTURE; DW2.Value = 0; DW2.TileMode = TILE_MODE_TILEMODELINEAR; DW2.AddressControl = ADDRESS_CONTROL_CLAMP; DW2.MemoryCompressionMode = MEMORY_COMPRESSION_MODE_HORIZONTALCOMPRESSIONMODE; DW2.CrVCbUPixelOffsetVDirectionMsb = CRVCBU_PIXEL_OFFSET_V_DIRECTION_MSB_UNNAMED0; DW2.CrVCbUPixelOffsetUDirection = CRVCBU_PIXEL_OFFSET_U_DIRECTION_UNNAMED0; DW2.SurfaceFormat = SURFACE_FORMAT_YCRCBNORMAL; DW3.Value = 0; DW4.Value = 0; DW5.Value = 0; DW5.SurfaceMemoryObjectControlState = SURFACE_MEMORY_OBJECT_CONTROL_STATE_DEFAULTVAUEDESC; DW5.TiledResourceMode = TILED_RESOURCE_MODE_TRMODENONE; DW6.Value = 0; DW7.Value = 0; } mhw_state_heap_g11_X::SAMPLER_STATE_CMD::SAMPLER_STATE_CMD() { DW0.Value = 0; DW0.LodAlgorithm = LOD_ALGORITHM_LEGACY; DW0.MinModeFilter = MIN_MODE_FILTER_NEAREST; DW0.MagModeFilter = MAG_MODE_FILTER_NEAREST; DW0.MipModeFilter = MIP_MODE_FILTER_NONE; DW0.CoarseLodQualityMode = COARSE_LOD_QUALITY_MODE_DISABLED; 
DW0.LodPreclampMode = LOD_PRECLAMP_MODE_NONE; DW0.TextureBorderColorMode = TEXTURE_BORDER_COLOR_MODE_OGL; DW1.Value = 0; DW1.CubeSurfaceControlMode = CUBE_SURFACE_CONTROL_MODE_PROGRAMMED; DW1.ShadowFunction = SHADOW_FUNCTION_PREFILTEROPALWAYS; DW1.ChromakeyMode = CHROMAKEY_MODE_KEYFILTERKILLONANYMATCH; DW2.Value = 0; DW2.LodClampMagnificationMode = LOD_CLAMP_MAGNIFICATION_MODE_MIPNONE; DW2.SrgbDecode = SRGB_DECODE_DECODEEXT; DW2.ReturnFilterWeightForNullTexels = RETURN_FILTER_WEIGHT_FOR_NULL_TEXELS_DISABLE; DW2.ReturnFilterWeightForBorderTexels = RETURN_FILTER_WEIGHT_FOR_BORDER_TEXELS_DISABLE; DW3.Value = 0; DW3.TczAddressControlMode = TCZ_ADDRESS_CONTROL_MODE_WRAP; DW3.TcyAddressControlMode = TCY_ADDRESS_CONTROL_MODE_WRAP; DW3.TcxAddressControlMode = TCX_ADDRESS_CONTROL_MODE_WRAP; DW3.TrilinearFilterQuality = TRILINEAR_FILTER_QUALITY_FULL; DW3.MaximumAnisotropy = MAXIMUM_ANISOTROPY_RATIO21; DW3.ReductionType = REDUCTION_TYPE_STDFILTER; } mhw_state_heap_g11_X::SAMPLER_STATE_8x8_AVS_COEFFICIENTS_CMD::SAMPLER_STATE_8x8_AVS_COEFFICIENTS_CMD() { DW0.Value = 0; DW1.Value = 0; DW2.Value = 0; DW3.Value = 0; DW4.Value = 0; DW5.Value = 0; DW6.Value = 0; DW7.Value = 0; } mhw_state_heap_g11_X::SAMPLER_STATE_8x8_AVS_CMD::SAMPLER_STATE_8x8_AVS_CMD() { DW0.Value = 0; DW0.GainFactor = GAIN_FACTOR_UNNAMED44; DW0.WeakEdgeThreshold = WEAK_EDGE_THRESHOLD_UNNAMED1; DW0.StrongEdgeThreshold = STRONG_EDGE_THRESHOLD_UNNAMED8; DW0.R3XCoefficient = R3X_COEFFICIENT_UNNAMED5; DW0.R3CCoefficient = R3C_COEFFICIENT_UNNAMED5; DW1.Value = 0; DW2.Value = 0; DW2.GlobalNoiseEstimation = GLOBAL_NOISE_ESTIMATION_UNNAMED255; DW2.NonEdgeWeight = NON_EDGE_WEIGHT_UNNAMED1; DW2.RegularWeight = REGULAR_WEIGHT_UNNAMED2; DW2.StrongEdgeWeight = STRONG_EDGE_WEIGHT_UNNAMED7; DW2.R5XCoefficient = R5X_COEFFICIENT_UNNAMED7; DW2.R5CxCoefficient = R5CX_COEFFICIENT_UNNAMED7; DW2.R5CCoefficient = R5C_COEFFICIENT_UNNAMED7; DW3.Value = 0; DW3.SatMax = SAT_MAX_UNNAMED31; DW3.HueMax = HUE_MAX_UNNAMED14; DW3.Enable8TapFilter 
= ENABLE_8_TAP_FILTER_UNNAMED0; DW3.Ief4SmoothEnable = IEF4SMOOTH_ENABLE_UNNAMED0; DW3.SkinToneTunedIefEnable = SKIN_TONE_TUNED_IEF_ENABLE_UNNAMED1; DW4.Value = 0; DW4.ShuffleOutputwritebackForSample8X8 = SHUFFLE_OUTPUTWRITEBACK_FOR_SAMPLE_8X8_UNNAMED0; DW4.DiamondMargin = DIAMOND_MARGIN_UNNAMED4; DW4.UMid = U_MID_UNNAMED110; DW4.VMid = V_MID_UNNAMED154; DW5.Value = 0; DW5.DiamondDv = DIAMOND_DV_UNNAMED0; DW5.DiamondTh = DIAMOND_TH_UNNAMED35; DW5.HsMargin = HS_MARGIN_UNNAMED3; DW5.DiamondDu = DIAMOND_DU_UNNAMED2; DW5.Skindetailfactor = SKINDETAILFACTOR_UNNAMED0; DW6.Value = 0; DW6.YPoint1 = Y_POINT_1_UNNAMED46; DW6.YPoint2 = Y_POINT_2_UNNAMED47; DW6.YPoint3 = Y_POINT_3_UNNAMED254; DW6.YPoint4 = Y_POINT_4_UNNAMED255; DW7.Value = 0; DW8.Value = 0; DW8.P0L = P0L_UNNAMED46; DW8.P1L = P1L_UNNAMED216; DW9.Value = 0; DW9.P2L = P2L_UNNAMED236; DW9.P3L = P3L_UNNAMED236; DW9.B0L = B0L_UNNAMED133; DW9.B1L = B1L_UNNAMED130; DW10.Value = 0; DW10.B2L = B2L_UNNAMED130; DW10.B3L = B3L_UNNAMED130; DW11.Value = 0; DW12.Value = 0; DW12.P0U = P0U_UNNAMED46; DW12.P1U = P1U_UNNAMED66; DW13.Value = 0; DW13.P2U = P2U_UNNAMED150; DW13.P3U = P3U_UNNAMED236; DW13.B0U = B0U_UNNAMED143; DW13.B1U = B1U_UNNAMED163; DW14.Value = 0; DW14.B2U = B2U_UNNAMED200; DW14.B3U = B3U_UNNAMED140; DW15.Value = 0; DW152.Value = 0; DW152.DefaultSharpnessLevel = DEFAULT_SHARPNESS_LEVEL_UNNAMED0; DW153.Value = 0; DW153.RgbAdaptive = RGB_ADAPTIVE_DISBLE; DW153.AdaptiveFilterForAllChannels = ADAPTIVE_FILTER_FOR_ALL_CHANNELS_DISBLE; DW153.BypassYAdaptiveFiltering = BYPASS_Y_ADAPTIVE_FILTERING_ENABLE; DW153.BypassXAdaptiveFiltering = BYPASS_X_ADAPTIVE_FILTERING_ENABLE; MOS_ZeroMemory(&Reserved4928, sizeof(Reserved4928)); } mhw_state_heap_g11_X::SAMPLER_STATE_8x8_CONVOLVE_COEFFICIENTS_CMD::SAMPLER_STATE_8x8_CONVOLVE_COEFFICIENTS_CMD() { DW0.Value = 0; DW1.Value = 0; DW2.Value = 0; DW3.Value = 0; DW4.Value = 0; DW5.Value = 0; DW6.Value = 0; DW7.Value = 0; } 
mhw_state_heap_g11_X::SAMPLER_STATE_8x8_CONVOLVE_CMD::SAMPLER_STATE_8x8_CONVOLVE_CMD() { DW0.Value = 0; DW0.SizeOfTheCoefficient = SIZE_OF_THE_COEFFICIENT_8BIT; DW0.MsbHeight = MSB_HEIGHT_NOCHANGE; DW0.MsbWidth = MSB_WIDTH_NOCHANGE; MOS_ZeroMemory(&Reserved32, sizeof(Reserved32)); } mhw_state_heap_g11_X::SAMPLER_STATE_8x8_ERODE_DILATE_MINMAXFILTER_CMD::SAMPLER_STATE_8x8_ERODE_DILATE_MINMAXFILTER_CMD() { DW0.Value = 0; DW1.Value = 0; DW2.Value = 0; DW3.Value = 0; DW4.Value = 0; DW5.Value = 0; DW6.Value = 0; DW7.Value = 0; } mhw_state_heap_g11_X::SAMPLER_INDIRECT_STATE_CMD::SAMPLER_INDIRECT_STATE_CMD() { DW0.Value = 0; DW1.Value = 0; DW2.Value = 0; DW3.Value = 0; MOS_ZeroMemory(&Reserved128, sizeof(Reserved128)); }
{ "pile_set_name": "Github" }
--- external help file: Microsoft.PowerShell.Commands.Management.dll-Help.xml keywords: powershell,cmdlet Locale: en-US Module Name: Microsoft.PowerShell.Management ms.date: 5/14/2019 online version: https://docs.microsoft.com/powershell/module/microsoft.powershell.management/remove-itemproperty?view=powershell-7.1&WT.mc_id=ps-gethelp schema: 2.0.0 title: Remove-ItemProperty --- # Remove-ItemProperty ## SYNOPSIS Deletes the property and its value from an item. ## SYNTAX ### Path (Default) ``` Remove-ItemProperty [-Path] <String[]> [-Name] <String[]> [-Force] [-Filter <String>] [-Include <String[]>] [-Exclude <String[]>] [-Credential <PSCredential>] [-InformationAction <ActionPreference>] [-InformationVariable <String>] [-WhatIf] [-Confirm] [<CommonParameters>] ``` ### LiteralPath ``` Remove-ItemProperty -LiteralPath <String[]> [-Name] <String[]> [-Force] [-Filter <String>] [-Include <String[]>] [-Exclude <String[]>] [-Credential <PSCredential>] [-WhatIf] [-Confirm] [<CommonParameters>] ``` ## DESCRIPTION The `Remove-ItemProperty` cmdlet deletes a property and its value from an item. You can use it to delete registry values and the data that they store. ## EXAMPLES ### Example 1: Delete a registry value This command deletes the "SmpProperty" registry value, and its data, from the "SmpApplication" subkey of the `HKEY_LOCAL_MACHINE\Software` registry key. ```powershell Remove-ItemProperty -Path "HKLM:\Software\SmpApplication" -Name "SmpProperty" ``` Because the command is issued from a file system drive (`PS C:\>`), it includes the fully qualified path of the "SmpApplication" subkey, including the drive, `HKLM:`, and the "Software" key. ### Example 2: Delete a registry value from the HKCU location These commands delete the "Options" registry value, and its data, from the "MyApp" subkey of "HKEY_CURRENT_USER\Software\MyCompany". ``` PS C:\> Set-Location HKCU:\Software\MyCompany\MyApp PS HKCU:\Software\MyCompany\MyApp> Remove-ItemProperty -Path . 
-Name "Options" -Confirm ``` The first command uses the `Set-Location` cmdlet to change the current location to the **HKEY_CURRENT_USER** drive (`HKCU:`) and the `Software\MyCompany\MyApp` subkey. The second command uses `Remove-ItemProperty` to remove the "Options" registry value, and its data, from the "MyApp" subkey. Because **Path** is required, the command uses a dot (`.`) to indicate the current location. The **Confirm** parameter requests a user prompt before deleting the value. ### Example 3: Remove a registry value by using the pipeline This command deletes the "NoOfEmployees" registry value, and its data, from the `HKLM\Software\MyCompany` registry key. ```powershell Get-Item -Path HKLM:\Software\MyCompany | Remove-ItemProperty -Name NoOfEmployees ``` The command uses the `Get-Item` cmdlet to get an item that represents the registry key. It uses a pipeline operator (`|`) to send the object to `Remove-ItemProperty`. Then, it uses the **Name** parameter of `Remove-ItemProperty` to specify the name of the registry value. ## PARAMETERS ### -Credential > [!NOTE] > This parameter is not supported by any providers installed with PowerShell. > To impersonate another user, or elevate your credentials when running this cmdlet, > use [Invoke-Command](../Microsoft.PowerShell.Core/Invoke-Command.md). ```yaml Type: System.Management.Automation.PSCredential Parameter Sets: (All) Aliases: Required: False Position: Named Default value: Current user Accept pipeline input: True (ByPropertyName) Accept wildcard characters: False ``` ### -Exclude Specifies, as a string array, an item or items that this cmdlet excludes in the operation. The value of this parameter qualifies the **Path** parameter. Enter a path element or pattern, such as `*.txt`. Wildcard characters are permitted. 
The **Exclude** parameter is effective only when the command includes the contents of an item, such as `C:\Windows\*`, where the wildcard character specifies the contents of the `C:\Windows` directory. ```yaml Type: System.String[] Parameter Sets: (All) Aliases: Required: False Position: Named Default value: None Accept pipeline input: False Accept wildcard characters: True ``` ### -Filter Specifies a filter to qualify the **Path** parameter. The [FileSystem](../Microsoft.PowerShell.Core/About/about_FileSystem_Provider.md) provider is the only installed PowerShell provider that supports the use of filters. You can find the syntax for the **FileSystem** filter language in [about_Wildcards](../Microsoft.PowerShell.Core/About/about_Wildcards.md). Filters are more efficient than other parameters, because the provider applies them when the cmdlet gets the objects rather than having PowerShell filter the objects after they are retrieved. ```yaml Type: System.String Parameter Sets: (All) Aliases: Required: False Position: Named Default value: None Accept pipeline input: False Accept wildcard characters: True ``` ### -Force Forces the cmdlet to remove a property of an object that cannot otherwise be accessed by the user. Implementation varies from provider to provider. For more information, see [about_Providers](../Microsoft.PowerShell.Core/About/about_Providers.md). ```yaml Type: System.Management.Automation.SwitchParameter Parameter Sets: (All) Aliases: Required: False Position: Named Default value: False Accept pipeline input: False Accept wildcard characters: False ``` ### -Include Specifies, as a string array, an item or items that this cmdlet includes in the operation. The value of this parameter qualifies the **Path** parameter. Enter a path element or pattern, such as `"*.txt"`. Wildcard characters are permitted. 
The **Include** parameter is effective only when the command includes the contents of an item, such as `C:\Windows\*`, where the wildcard character specifies the contents of the `C:\Windows` directory. ```yaml Type: System.String[] Parameter Sets: (All) Aliases: Required: False Position: Named Default value: None Accept pipeline input: False Accept wildcard characters: True ``` ### -LiteralPath Specifies a path to one or more locations. The value of **LiteralPath** is used exactly as it is typed. No characters are interpreted as wildcards. If the path includes escape characters, enclose it in single quotation marks. Single quotation marks tell PowerShell not to interpret any characters as escape sequences. For more information, see [about_Quoting_Rules](../Microsoft.Powershell.Core/About/about_Quoting_Rules.md). ```yaml Type: System.String[] Parameter Sets: LiteralPath Aliases: PSPath, LP Required: True Position: Named Default value: None Accept pipeline input: True (ByPropertyName) Accept wildcard characters: False ``` ### -Name Specifies the names of the properties to remove. Wildcard characters are permitted. ```yaml Type: System.String[] Parameter Sets: (All) Aliases: PSProperty Required: True Position: 1 Default value: None Accept pipeline input: True (ByPropertyName) Accept wildcard characters: True ``` ### -Path Specifies the path of the item whose properties are being removed. Wildcard characters are permitted. ```yaml Type: System.String[] Parameter Sets: Path Aliases: Required: True Position: 0 Default value: None Accept pipeline input: True (ByPropertyName, ByValue) Accept wildcard characters: True ``` ### -Confirm Prompts you for confirmation before running the cmdlet. ```yaml Type: System.Management.Automation.SwitchParameter Parameter Sets: (All) Aliases: cf Required: False Position: Named Default value: False Accept pipeline input: False Accept wildcard characters: False ``` ### -WhatIf Shows what would happen if the cmdlet runs. 
The cmdlet is not run. ```yaml Type: System.Management.Automation.SwitchParameter Parameter Sets: (All) Aliases: wi Required: False Position: Named Default value: False Accept pipeline input: False Accept wildcard characters: False ``` ### CommonParameters This cmdlet supports the common parameters: `-Debug`, `-ErrorAction`, `-ErrorVariable`, `-InformationAction`, `-InformationVariable`, `-OutVariable`, `-OutBuffer`, `-PipelineVariable`, `-Verbose`, `-WarningAction`, and `-WarningVariable`. For more information, see [about_CommonParameters](../Microsoft.PowerShell.Core/About/about_CommonParameters.md). ## INPUTS ### System.String You can pipe a string that contains a path, but not a literal path, to this cmdlet. ## OUTPUTS ### None This cmdlet does not return any output. ## NOTES - In the PowerShell Registry provider, registry values are considered to be properties of a registry key or subkey. You can use the **ItemProperty** cmdlets to manage these values. - `Remove-ItemProperty` is designed to work with the data exposed by any provider. To list the providers available in your session, type `Get-PSProvider`. For more information, see [about_Providers](../Microsoft.PowerShell.Core/About/about_Providers.md). ## RELATED LINKS [Get-Item](Get-Item.md) [Clear-ItemProperty](Clear-ItemProperty.md) [Copy-ItemProperty](Copy-ItemProperty.md) [Get-ItemProperty](Get-ItemProperty.md) [Move-ItemProperty](Move-ItemProperty.md) [New-ItemProperty](New-ItemProperty.md) [Remove-Item](Remove-Item.md) [Rename-ItemProperty](Rename-ItemProperty.md) [Set-ItemProperty](Set-ItemProperty.md) [Set-Location](Set-Location.md) [about_Providers](../Microsoft.PowerShell.Core/About/about_Providers.md)
{ "pile_set_name": "Github" }
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package conversion import ( "fmt" "reflect" ) // EnforcePtr ensures that obj is a pointer of some sort. Returns a reflect.Value // of the dereferenced pointer, ensuring that it is settable/addressable. // Returns an error if this is not possible. func EnforcePtr(obj interface{}) (reflect.Value, error) { v := reflect.ValueOf(obj) if v.Kind() != reflect.Ptr { if v.Kind() == reflect.Invalid { return reflect.Value{}, fmt.Errorf("expected pointer, but got invalid kind") } return reflect.Value{}, fmt.Errorf("expected pointer, but got %v type", v.Type()) } if v.IsNil() { return reflect.Value{}, fmt.Errorf("expected pointer, but got nil") } return v.Elem(), nil }
{ "pile_set_name": "Github" }
update=Fri 10 Nov 2017 05:16:12 PM PST version=1 last_client=kicad [cvpcb] version=1 NetIExt=net [general] version=1 [pcbnew] version=1 PageLayoutDescrFile= LastNetListRead= PadDrill=0.7999999999999999 PadDrillOvalY=0.7999999999999999 PadSizeH=1.35 PadSizeV=1.35 PcbTextSizeV=1.5 PcbTextSizeH=1.5 PcbTextThickness=0.3 ModuleTextSizeV=0.6 ModuleTextSizeH=0.6 ModuleTextSizeThickness=0.127 SolderMaskClearance=0.2 SolderMaskMinWidth=0 DrawSegmentWidth=0.2 BoardOutlineThickness=0.15 ModuleOutlineThickness=0.127 [eeschema] version=1 LibDir=
{ "pile_set_name": "Github" }
# Copyright © 2016 Red Hat. # Copyright © 2016 Mauro Rossi <issor.oruam@gmail.com> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice (including the next # paragraph) shall be included in all copies or substantial portions of the # Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
# --------------------------------------- # Build libmesa_amd_common # --------------------------------------- include $(CLEAR_VARS) LOCAL_MODULE := libmesa_amd_common LOCAL_SRC_FILES := \ $(AMD_COMMON_FILES) \ $(AMD_COMPILER_FILES) \ $(AMD_DEBUG_FILES) \ $(AMD_NIR_FILES) LOCAL_CFLAGS += -DFORCE_BUILD_AMDGPU # instructs LLVM to declare LLVMInitializeAMDGPU* functions # generate sources LOCAL_MODULE_CLASS := STATIC_LIBRARIES intermediates := $(call local-generated-sources-dir) LOCAL_GENERATED_SOURCES := $(addprefix $(intermediates)/, $(AMD_GENERATED_FILES)) $(LOCAL_GENERATED_SOURCES): PRIVATE_PYTHON := $(MESA_PYTHON2) $(LOCAL_GENERATED_SOURCES): PRIVATE_CUSTOM_TOOL = $(PRIVATE_PYTHON) $^ > $@ $(intermediates)/common/sid_tables.h: $(LOCAL_PATH)/common/sid_tables.py $(LOCAL_PATH)/common/sid.h $(LOCAL_PATH)/common/gfx9d.h $(transform-generated-source) LOCAL_C_INCLUDES := \ $(MESA_TOP)/include \ $(MESA_TOP)/src \ $(MESA_TOP)/src/amd/common \ $(MESA_TOP)/src/compiler \ $(call generated-sources-dir-for,STATIC_LIBRARIES,libmesa_nir,,)/nir \ $(MESA_TOP)/src/gallium/include \ $(MESA_TOP)/src/gallium/auxiliary \ $(intermediates)/common LOCAL_EXPORT_C_INCLUDE_DIRS := \ $(LOCAL_PATH)/common LOCAL_SHARED_LIBRARIES := \ libdrm_amdgpu LOCAL_STATIC_LIBRARIES := \ libmesa_nir LOCAL_WHOLE_STATIC_LIBRARIES := \ libelf $(call mesa-build-with-llvm) include $(MESA_COMMON_MK) include $(BUILD_STATIC_LIBRARY)
{ "pile_set_name": "Github" }
{{#sortable-table tableClassNames="bordered mt-30" paging=true pagingLabel="pagination.project" headers=headers descending=descending body=model sortBy=sortBy as |sortable kind p dt| }} {{#if (eq kind "row")}} {{project-row model=p dt=dt}} {{else if (eq kind "nomatch")}} <tr><td colspan="{{sortable.fullColspan}}" class="text-center text-muted lacsso pt-20 pb-20">{{t 'projectsPage.noMatch'}}</td></tr> {{else if (eq kind "norows")}} <tr><td colspan="{{sortable.fullColspan}}" class="text-center text-muted lacsso pt-20 pb-20">{{t 'projectsPage.noData'}}</td></tr> {{/if}} {{/sortable-table}}
{ "pile_set_name": "Github" }
package pflag import ( "time" ) // -- time.Duration Value type durationValue time.Duration func newDurationValue(val time.Duration, p *time.Duration) *durationValue { *p = val return (*durationValue)(p) } func (d *durationValue) Set(s string) error { v, err := time.ParseDuration(s) *d = durationValue(v) return err } func (d *durationValue) Type() string { return "duration" } func (d *durationValue) String() string { return (*time.Duration)(d).String() } func durationConv(sval string) (interface{}, error) { return time.ParseDuration(sval) } // GetDuration return the duration value of a flag with the given name func (f *FlagSet) GetDuration(name string) (time.Duration, error) { val, err := f.getFlagType(name, "duration", durationConv) if err != nil { return 0, err } return val.(time.Duration), nil } // DurationVar defines a time.Duration flag with specified name, default value, and usage string. // The argument p points to a time.Duration variable in which to store the value of the flag. func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) { f.VarP(newDurationValue(value, p), name, "", usage) } // DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { f.VarP(newDurationValue(value, p), name, shorthand, usage) } // DurationVar defines a time.Duration flag with specified name, default value, and usage string. // The argument p points to a time.Duration variable in which to store the value of the flag. func DurationVar(p *time.Duration, name string, value time.Duration, usage string) { CommandLine.VarP(newDurationValue(value, p), name, "", usage) } // DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash. 
func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage) } // Duration defines a time.Duration flag with specified name, default value, and usage string. // The return value is the address of a time.Duration variable that stores the value of the flag. func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration { p := new(time.Duration) f.DurationVarP(p, name, "", value, usage) return p } // DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { p := new(time.Duration) f.DurationVarP(p, name, shorthand, value, usage) return p } // Duration defines a time.Duration flag with specified name, default value, and usage string. // The return value is the address of a time.Duration variable that stores the value of the flag. func Duration(name string, value time.Duration, usage string) *time.Duration { return CommandLine.DurationP(name, "", value, usage) } // DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { return CommandLine.DurationP(name, shorthand, value, usage) }
{ "pile_set_name": "Github" }
# Copyright 2016 Pinterest, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
{ "pile_set_name": "Github" }
package LeetCode.tree.ClosestBinarySearchTreeValue_270;

import java.util.List;

public class Main {
    /**
     * Smoke test for Solution.closestValue: builds a small BST and prints the
     * value closest to the target 11.5.
     *
     *         7
     *       /   \
     *      3     12
     *     / \      \
     *    1   5      15
     */
    public static void main(String[] args) {
        TreeNode root = new TreeNode(7);
        TreeNode left = new TreeNode(3);
        TreeNode right = new TreeNode(12);
        root.left = left;
        root.right = right;
        left.left = new TreeNode(1);
        left.right = new TreeNode(5);
        right.right = new TreeNode(15);

        Solution solution = new Solution();
        int closest = solution.closestValue(root, 11.5);
        System.out.println(closest);
    }
}
{ "pile_set_name": "Github" }
{ "IdPrefix": "parks-", "Containers":[ { "Name":"lb", "Count":1, "Image":"openshift/centos-haproxy-simple-balancer", "PublicPorts":[ {"Internal":80,"External":14000} ], "Links":[ { "To":"backend", "NonLocal":true, "AliasPorts":[ {"Internal":3000,"External":8080} ] } ] }, { "Name":"backend", "Count":3, "Image":"parks-map-app", "PublicPorts":[ {"Internal":3000,"External":0} ], "Links":[ {"To":"db"} ] }, { "Name":"db", "Count":1, "Image":"openshift/centos-mongodb", "PublicPorts":[ {"Internal":27017,"External":0} ] } ] }
{ "pile_set_name": "Github" }
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to you under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.calcite.sql; import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.sql.type.OperandTypes; import org.apache.calcite.sql.type.ReturnTypes; import org.apache.calcite.sql.type.SqlTypeUtil; import org.apache.calcite.sql.validate.SqlValidator; import org.apache.calcite.sql.validate.SqlValidatorImpl; import org.apache.calcite.sql.validate.SqlValidatorScope; import static org.apache.calcite.util.Static.RESOURCE; /** * An operator that applies a filter before rows are included in an aggregate * function. 
 *
 * <p>Operands are as follows:</p>
 *
 * <ul>
 * <li>0: a call to an aggregate function ({@link SqlCall})
 * <li>1: predicate
 * </ul>
 */
public class SqlFilterOperator extends SqlBinaryOperator {
  //~ Constructors -----------------------------------------------------------

  public SqlFilterOperator() {
    // ARG0_FORCE_NULLABLE: the filter may reject every row of a group, in
    // which case the aggregate yields NULL even if its argument is NOT NULL.
    super("FILTER", SqlKind.FILTER, 2, true, ReturnTypes.ARG0_FORCE_NULLABLE,
        null, OperandTypes.ANY_ANY);
  }

  //~ Methods ----------------------------------------------------------------

  /**
   * Writes the call in SQL syntax, i.e. {@code agg(args) FILTER (WHERE pred)}.
   */
  @Override public void unparse(SqlWriter writer, SqlCall call, int leftPrec,
      int rightPrec) {
    assert call.operandCount() == 2;
    final SqlWriter.Frame frame =
        writer.startList(SqlWriter.FrameTypeEnum.SIMPLE);
    call.operand(0).unparse(writer, leftPrec, getLeftPrec());
    writer.sep(getName());
    writer.sep("(");
    writer.sep("WHERE");
    call.operand(1).unparse(writer, getRightPrec(), rightPrec);
    writer.sep(")");
    writer.endList(frame);
  }

  /**
   * Validates a FILTER call: operand 0 must be an aggregate call (possibly
   * wrapped in WITHIN GROUP) and operand 1 must be a boolean condition.
   *
   * @throws org.apache.calcite.runtime.CalciteException if operand 0 is not
   *   an aggregate or the condition is not boolean (via newValidationError)
   */
  public void validateCall(
      SqlCall call,
      SqlValidator validator,
      SqlValidatorScope scope,
      SqlValidatorScope operandScope) {
    assert call.getOperator() == this;
    assert call.operandCount() == 2;
    SqlCall aggCall = getAggCall(call);
    if (!aggCall.getOperator().isAggregator()) {
      throw validator.newValidationError(aggCall,
          RESOURCE.filterNonAggregate());
    }
    final SqlNode condition = call.operand(1);
    SqlNodeList orderList = null;
    if (hasWithinGroupCall(call)) {
      // WITHIN GROUP's operand 1 is its ORDER BY list; validated together
      // with the aggregate's own parameters below.
      SqlCall withinGroupCall = getWithinGroupCall(call);
      orderList = withinGroupCall.operand(1);
    }
    validator.validateAggregateParams(aggCall, condition, orderList, scope);

    final RelDataType type = validator.deriveType(scope, condition);
    if (!SqlTypeUtil.inBooleanFamily(type)) {
      throw validator.newValidationError(condition,
          RESOURCE.condMustBeBoolean("FILTER"));
    }
  }

  /**
   * Derives the type of the FILTER call from the inner aggregate call and
   * records it on the call, the aggregate, and (if present) the WITHIN GROUP
   * wrapper.
   */
  public RelDataType deriveType(
      SqlValidator validator,
      SqlValidatorScope scope,
      SqlCall call) {
    // Validate type of the inner aggregate call
    validateOperands(validator, scope, call);

    // Assume the first operand is an aggregate call and derive its type.
    final SqlCall aggCall = getAggCall(call);

    // Pretend that group-count is 0. This tells the aggregate function that it
    // might be invoked with 0 rows in a group. Most aggregate functions will
    // return NULL in this case.
    SqlCallBinding opBinding = new SqlCallBinding(validator, scope, aggCall) {
      @Override public int getGroupCount() {
        return 0;
      }
    };

    RelDataType ret = aggCall.getOperator().inferReturnType(opBinding);

    // Copied from validateOperands
    ((SqlValidatorImpl) validator).setValidatedNodeType(call, ret);
    ((SqlValidatorImpl) validator).setValidatedNodeType(aggCall, ret);
    if (hasWithinGroupCall(call)) {
      ((SqlValidatorImpl) validator).setValidatedNodeType(getWithinGroupCall(call), ret);
    }
    return ret;
  }

  /** Returns the aggregate call, unwrapping a WITHIN GROUP wrapper if any. */
  private static SqlCall getAggCall(SqlCall call) {
    assert call.getOperator().getKind() == SqlKind.FILTER;
    call = call.operand(0);
    if (call.getOperator().getKind() == SqlKind.WITHIN_GROUP) {
      call = call.operand(0);
    }
    return call;
  }

  /**
   * Returns the WITHIN GROUP call wrapped by this FILTER; caller must have
   * checked {@link #hasWithinGroupCall} first, otherwise this asserts.
   */
  private static SqlCall getWithinGroupCall(SqlCall call) {
    assert call.getOperator().getKind() == SqlKind.FILTER;
    call = call.operand(0);
    if (call.getOperator().getKind() == SqlKind.WITHIN_GROUP) {
      return call;
    }
    throw new AssertionError();
  }

  /** Returns whether operand 0 of this FILTER is a WITHIN GROUP call. */
  private static boolean hasWithinGroupCall(SqlCall call) {
    assert call.getOperator().getKind() == SqlKind.FILTER;
    call = call.operand(0);
    return call.getOperator().getKind() == SqlKind.WITHIN_GROUP;
  }
}

// End SqlFilterOperator.java
{ "pile_set_name": "Github" }
<?php /** * Zend Framework * * LICENSE * * This source file is subject to the new BSD license that is bundled * with this package in the file LICENSE.txt. * It is also available through the world-wide-web at this URL: * http://framework.zend.com/license/new-bsd * If you did not receive a copy of the license and are unable to * obtain it through the world-wide-web, please send an email * to license@zend.com so we can send you a copy immediately. * * @category Zend * @package Zend_Service_Amazon * @subpackage Ec2 * @copyright Copyright (c) 2005-2011 Zend Technologies USA Inc. (http://www.zend.com) * @license http://framework.zend.com/license/new-bsd New BSD License * @version $Id: Abstract.php 23775 2011-03-01 17:25:24Z ralph $ */ /** * @see Zend_Service_Amazon_Abstract */ require_once 'Zend/Service/Amazon/Abstract.php'; /** * @see Zend_Service_Amazon_Ec2_Response */ require_once 'Zend/Service/Amazon/Ec2/Response.php'; /** * @see Zend_Service_Amazon_Ec2_Exception */ require_once 'Zend/Service/Amazon/Ec2/Exception.php'; /** * Provides the basic functionality to send a request to the Amazon Ec2 Query API * * @category Zend * @package Zend_Service_Amazon * @subpackage Ec2 * @copyright Copyright (c) 2005-2011 Zend Technologies USA Inc. 
 (http://www.zend.com)
 * @license    http://framework.zend.com/license/new-bsd     New BSD License
 */
abstract class Zend_Service_Amazon_Ec2_Abstract extends Zend_Service_Amazon_Abstract
{
    /**
     * The HTTP query server
     */
    protected $_ec2Endpoint = 'ec2.amazonaws.com';

    /**
     * The API version to use
     */
    protected $_ec2ApiVersion = '2009-04-04';

    /**
     * Signature Version
     */
    protected $_ec2SignatureVersion = '2';

    /**
     * Signature Encoding Method
     */
    protected $_ec2SignatureMethod = 'HmacSHA256';

    /**
     * Period after which HTTP request will timeout in seconds
     */
    protected $_httpTimeout = 10;

    /**
     * Default region used when the constructor receives none.
     *
     * @var string Amazon Region
     */
    protected static $_defaultRegion = null;

    /**
     * @var string Amazon Region
     */
    protected $_region;

    /**
     * An array that contains all the valid Amazon Ec2 Regions.
     *
     * @var array
     */
    protected static $_validEc2Regions = array('eu-west-1', 'us-east-1');

    /**
     * Create Amazon client.
     *
     * @param  string $access_key       Override the default Access Key
     * @param  string $secret_key       Override the default Secret Key
     * @param  string $region           Sets the AWS Region
     * @return void
     */
    public function __construct($accessKey=null, $secretKey=null, $region=null)
    {
        if(!$region) {
            // No explicit region: fall back to the statically configured one.
            $region = self::$_defaultRegion;
        } else {
            // make sure the region is valid
            if(!empty($region) && !in_array(strtolower($region), self::$_validEc2Regions, true)) {
                require_once 'Zend/Service/Amazon/Exception.php';
                throw new Zend_Service_Amazon_Exception('Invalid Amazon Ec2 Region');
            }
        }

        $this->_region = $region;

        parent::__construct($accessKey, $secretKey);
    }

    /**
     * Set which region you are working in. It will append the
     * end point automatically
     *
     * @param string $region
     */
    public static function setRegion($region)
    {
        if(in_array(strtolower($region), self::$_validEc2Regions, true)) {
            self::$_defaultRegion = $region;
        } else {
            require_once 'Zend/Service/Amazon/Exception.php';
            throw new Zend_Service_Amazon_Exception('Invalid Amazon Ec2 Region');
        }
    }

    /**
     * Method to fetch the AWS Region
     *
     * @return string region prefix (e.g. "eu-west-1.") or '' when unset
     */
    protected function _getRegion()
    {
        return (!empty($this->_region)) ? $this->_region . '.' : '';
    }

    /**
     * Sends a HTTP request to the queue service using Zend_Http_Client
     *
     * @param array $params List of parameters to send with the request
     * @return Zend_Service_Amazon_Ec2_Response
     * @throws Zend_Service_Amazon_Ec2_Exception
     */
    protected function sendRequest(array $params = array())
    {
        $url = 'https://' . $this->_getRegion() . $this->_ec2Endpoint . '/';

        $params = $this->addRequiredParameters($params);

        try {
            /* @var $request Zend_Http_Client */
            $request = self::getHttpClient();
            $request->resetParameters();

            $request->setConfig(array(
                'timeout' => $this->_httpTimeout
            ));

            $request->setUri($url);
            $request->setMethod(Zend_Http_Client::POST);
            $request->setParameterPost($params);

            $httpResponse = $request->request();

        } catch (Zend_Http_Client_Exception $zhce) {
            // Wrap transport-level failures in the service exception type.
            $message = 'Error in request to AWS service: ' . $zhce->getMessage();
            throw new Zend_Service_Amazon_Ec2_Exception($message, $zhce->getCode(), $zhce);
        }

        $response = new Zend_Service_Amazon_Ec2_Response($httpResponse);
        $this->checkForErrors($response);

        return $response;
    }

    /**
     * Adds required authentication and version parameters to an array of
     * parameters
     *
     * The required parameters are:
     * - AWSAccessKey
     * - SignatureVersion
     * - Timestamp
     * - Version and
     * - Signature
     *
     * If a required parameter is already set in the <tt>$parameters</tt> array,
     * it is overwritten.
     *
     * @param array $parameters the array to which to add the required
     *                          parameters.
     * @return array
     */
    protected function addRequiredParameters(array $parameters)
    {
        $parameters['AWSAccessKeyId']   = $this->_getAccessKey();
        $parameters['SignatureVersion'] = $this->_ec2SignatureVersion;
        $parameters['Timestamp']        = gmdate('Y-m-d\TH:i:s\Z');
        $parameters['Version']          = $this->_ec2ApiVersion;
        $parameters['SignatureMethod']  = $this->_ec2SignatureMethod;
        // Signature must be computed last, over all other parameters.
        $parameters['Signature']        = $this->signParameters($parameters);

        return $parameters;
    }

    /**
     * Computes the RFC 2104-compliant HMAC signature for request parameters
     *
     * This implements the Amazon Web Services signature, as per the following
     * specification:
     *
     * 1. Sort all request parameters (including <tt>SignatureVersion</tt> and
     *    excluding <tt>Signature</tt>, the value of which is being created),
     *    ignoring case.
     *
     * 2. Iterate over the sorted list and append the parameter name (in its
     *    original case) and then its value. Do not URL-encode the parameter
     *    values before constructing this string. Do not use any separator
     *    characters when appending strings.
     *
     * NOTE(review): the parameter name "$paramaters" is misspelled throughout
     * this method; it is used consistently, so behavior is unaffected.
     *
     * @param array  $parameters the parameters for which to get the signature.
     * @param string $secretKey  the secret key to use to sign the parameters.
     *
     * @return string the signed data.
     */
    protected function signParameters(array $paramaters)
    {
        // Canonical string per AWS Signature Version 2: verb, host, path,
        // then the sorted, RFC 3986-encoded query string.
        $data = "POST\n";
        $data .= $this->_getRegion() . $this->_ec2Endpoint . "\n";
        $data .= "/\n";

        uksort($paramaters, 'strcmp');
        unset($paramaters['Signature']);

        $arrData = array();
        foreach($paramaters as $key => $value) {
            // rawurlencode() percent-encodes '~'; AWS requires it literal.
            $arrData[] = $key . '=' . str_replace("%7E", "~", rawurlencode($value));
        }

        $data .= implode('&', $arrData);

        require_once 'Zend/Crypt/Hmac.php';
        $hmac = Zend_Crypt_Hmac::compute($this->_getSecretKey(), 'SHA256', $data, Zend_Crypt_Hmac::BINARY);

        return base64_encode($hmac);
    }

    /**
     * Checks for errors responses from Amazon
     *
     * @param Zend_Service_Amazon_Ec2_Response $response the response object to
     *                                                   check.
     *
     * @return void
     *
     * @throws Zend_Service_Amazon_Ec2_Exception if one or more errors are
     *         returned from Amazon.
     */
    private function checkForErrors(Zend_Service_Amazon_Ec2_Response $response)
    {
        $xpath = new DOMXPath($response->getDocument());
        $list  = $xpath->query('//Error');
        if ($list->length > 0) {
            // Only the first <Error> element is reported.
            $node    = $list->item(0);
            $code    = $xpath->evaluate('string(Code/text())', $node);
            $message = $xpath->evaluate('string(Message/text())', $node);
            throw new Zend_Service_Amazon_Ec2_Exception($message, 0, $code);
        }
    }
}
{ "pile_set_name": "Github" }
decentralize.js =============== September 9, 2014
{ "pile_set_name": "Github" }
# -*- coding: utf-8; mode: tcl; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- vim:fenc=utf-8:ft=tcl:et:sw=4:ts=4:sts=4

# MacPorts port for makeicns, a command-line image-to-.icns converter.
PortSystem          1.0

name                makeicns
version             1.4.10a
revision            2
categories          graphics
maintainers         nomaintainer
platforms           darwin
license             MIT

description         This program lets you convert all kinds of images to Apple icns format on the command line.

long_description    ${description}

homepage            https://bitbucket.org/mkae/makeicns
master_sites        ${homepage}/downloads

use_bzip2           yes
worksrcdir          ${name}

checksums           md5     29613f4774d4dc2871a0052826abe1ae \
                    sha1    2a3b1229781516c8cc36089bf09729d5c17ac17c \
                    rmd160  8bf438b375dbc3d84a7dc47cef39c5c290a4c2ed

# Upstream ships a plain Makefile; there is no configure step.
use_configure       no

variant universal { }

platform darwin {
    # This patch is only needed from Mavericks on
    if {${os.major} >= 13} {
        patchfiles patch-IconFamily.m.diff
    }
}

if {[string match *clang* ${configure.cxx}]} {
    configure.cxxflags-append -stdlib=${configure.cxx_stdlib}
    configure.ldflags-append  -stdlib=${configure.cxx_stdlib}
}

# The Makefile reads toolchain settings from the environment.
build.env-append    CC=${configure.cc} \
                    CXX=${configure.cxx} \
                    "CFLAGS=${configure.cflags} [get_canonical_archflags cc]" \
                    "CXXFLAGS=${configure.cxxflags} [get_canonical_archflags cxx]" \
                    "LDFLAGS=${configure.ldflags} [get_canonical_archflags ld]"

# https://trac.macports.org/ticket/50678
# if {${configure.sdkroot} != ""} {
    build.env-append SDK=${configure.sdkroot}
# }

destroot {
    xinstall ${worksrcpath}/${name} ${destroot}${prefix}/bin/${name}
}

livecheck.type      regex
livecheck.url       [lindex ${master_sites} 0]
# Match release tarball names while skipping unrelated download links.
livecheck.regex     "(?!${name}).*${name}-((?!${extract.suffix}).*)${extract.suffix}"
{ "pile_set_name": "Github" }
/*
 *     Generated by class-dump 3.3.4 (64 bit).
 *
 *     class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2011 by Steve Nygard.
 */

#import <DevToolsInterface/XCElementCellDisplay.h>

// Auto-generated declaration extracted from a binary by class-dump; method
// semantics beyond the selector names are not recoverable here. Do not edit.
@interface XCElementSegmentedCellDisplay : XCElementCellDisplay
{
}

- (void)displayer:(struct NSObject *)arg1 performDelegateAction:(id)arg2;
- (void)displayer:(struct NSObject *)arg1 drawForeground:(struct CGRect)arg2 forFrame:(struct CGRect)arg3;
- (void)readDataFromDisplayer:(struct NSObject *)arg1 intoCell:(id)arg2;
- (void)installLocalDataDelegateForDisplayer:(struct NSObject *)arg1;
- (void)configureRoundForControlSize:(unsigned long long)arg1 font:(id)arg2;
- (void)configureForControlSize:(unsigned long long)arg1 font:(id)arg2;

@end
{ "pile_set_name": "Github" }
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows

// Fallback build for platforms this package does not support: every syscall
// wrapper is stubbed out and reports "not implemented".
package socket

import (
	"errors"
	"net"
	"runtime"
	"unsafe"
)

const (
	sysAF_UNSPEC = 0x0
	sysAF_INET   = 0x2
	sysAF_INET6  = 0xa

	sysSOCK_RAW = 0x3
)

// probeProtocolStack returns 4 on the 32-bit-pointer ABIs amd64p32 and
// mips64p32, and the native pointer size (via unsafe.Sizeof) everywhere else.
func probeProtocolStack() int {
	switch runtime.GOARCH {
	case "amd64p32", "mips64p32":
		return 4
	default:
		var p uintptr
		return int(unsafe.Sizeof(p))
	}
}

// marshalInetAddr is a stub; it always returns nil on this build.
func marshalInetAddr(ip net.IP, port int, zone string) []byte {
	return nil
}

// parseInetAddr is a stub; it always fails on this build.
func parseInetAddr(b []byte, network string) (net.Addr, error) {
	return nil, errors.New("not implemented")
}

// getsockopt is a stub; it always fails on this build.
func getsockopt(s uintptr, level, name int, b []byte) (int, error) {
	return 0, errors.New("not implemented")
}

// setsockopt is a stub; it always fails on this build.
func setsockopt(s uintptr, level, name int, b []byte) error {
	return errors.New("not implemented")
}

// recvmsg is a stub; it always fails on this build.
func recvmsg(s uintptr, h *msghdr, flags int) (int, error) {
	return 0, errors.New("not implemented")
}

// sendmsg is a stub; it always fails on this build.
func sendmsg(s uintptr, h *msghdr, flags int) (int, error) {
	return 0, errors.New("not implemented")
}

// recvmmsg is a stub; it always fails on this build.
func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) {
	return 0, errors.New("not implemented")
}

// sendmmsg is a stub; it always fails on this build.
func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) {
	return 0, errors.New("not implemented")
}
{ "pile_set_name": "Github" }
namespace AngleSharp.Network
{
    using System;
    using System.Collections.Generic;
    using System.IO;

    /// <summary>
    /// Specifies what is used for requesting data.
    /// </summary>
    public interface IRequest
    {
        /// <summary>
        /// Gets the used request method.
        /// </summary>
        HttpMethod Method { get; }

        /// <summary>
        /// Gets the specified request url.
        /// </summary>
        Url Address { get; }

        /// <summary>
        /// Gets the headers to send with the request.
        /// </summary>
        IDictionary<String, String> Headers { get; }

        /// <summary>
        /// Gets content to send with the request.
        /// </summary>
        /// <remarks>
        /// NOTE(review): nothing here forbids a body-less request (e.g. GET);
        /// confirm with implementations whether null is a legal value.
        /// </remarks>
        Stream Content { get; }
    }
}
{ "pile_set_name": "Github" }
//  (C) Copyright John Maddock 2001 - 2002.
//  Use, modification and distribution are subject to the
//  Boost Software License, Version 1.0. (See accompanying file
//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

//  See http://www.boost.org for most recent version.

// Boost standard-library configuration for the IBM VisualAge / XL C++
// default standard library: records which std facilities are missing.

#if __IBMCPP__ <= 501
// Older VACPP releases lack a conforming std::allocator.
#  define BOOST_NO_STD_ALLOCATOR
#endif

#define BOOST_HAS_MACRO_USE_FACET
#define BOOST_NO_STD_MESSAGES

// Apple doesn't seem to reliably define a *unix* macro
#if !defined(CYGWIN) && (  defined(__unix__)  \
                        || defined(__unix)    \
                        || defined(unix)      \
                        || defined(__APPLE__) \
                        || defined(__APPLE)   \
                        || defined(APPLE))
#  include <unistd.h>
#endif

//  C++0x headers not yet implemented
//
#  define BOOST_NO_CXX11_HDR_ARRAY
#  define BOOST_NO_CXX11_HDR_CHRONO
#  define BOOST_NO_CXX11_HDR_CODECVT
#  define BOOST_NO_CXX11_HDR_CONDITION_VARIABLE
#  define BOOST_NO_CXX11_HDR_FORWARD_LIST
#  define BOOST_NO_CXX11_HDR_FUTURE
#  define BOOST_NO_CXX11_HDR_INITIALIZER_LIST
#  define BOOST_NO_CXX11_HDR_MUTEX
#  define BOOST_NO_CXX11_HDR_RANDOM
#  define BOOST_NO_CXX11_HDR_RATIO
#  define BOOST_NO_CXX11_HDR_REGEX
#  define BOOST_NO_CXX11_HDR_SYSTEM_ERROR
#  define BOOST_NO_CXX11_HDR_THREAD
#  define BOOST_NO_CXX11_HDR_TUPLE
#  define BOOST_NO_CXX11_HDR_TYPE_TRAITS
#  define BOOST_NO_CXX11_HDR_TYPEINDEX
#  define BOOST_NO_CXX11_HDR_UNORDERED_MAP
#  define BOOST_NO_CXX11_HDR_UNORDERED_SET
#  define BOOST_NO_CXX11_NUMERIC_LIMITS
#  define BOOST_NO_CXX11_ALLOCATOR
#  define BOOST_NO_CXX11_POINTER_TRAITS
#  define BOOST_NO_CXX11_ATOMIC_SMART_PTR
#  define BOOST_NO_CXX11_SMART_PTR
#  define BOOST_NO_CXX11_HDR_FUNCTIONAL
#  define BOOST_NO_CXX11_HDR_ATOMIC
#  define BOOST_NO_CXX11_STD_ALIGN
#  define BOOST_NO_CXX11_ADDRESSOF

// <shared_mutex> is only usable when the header exists AND the compiler is in
// C++14 (or later) mode; probe with __has_include where available.
#if defined(__has_include)
#if !__has_include(<shared_mutex>)
#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX
#elif __cplusplus < 201402
#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX
#endif
#else
#  define BOOST_NO_CXX14_HDR_SHARED_MUTEX
#endif

// C++14 features
#  define BOOST_NO_CXX14_STD_EXCHANGE

// C++17 features
#  define BOOST_NO_CXX17_STD_APPLY
#  define BOOST_NO_CXX17_STD_INVOKE
#  define BOOST_NO_CXX17_ITERATOR_TRAITS

#define BOOST_STDLIB "Visual Age default standard library"
{ "pile_set_name": "Github" }
var searchData= [ ['layout',['Layout',['../d6/d11/classpu_1_1ui_1_1_layout.html',1,'pu::ui']]], ['layout',['Layout',['../d6/d11/classpu_1_1ui_1_1_layout.html#ad49ec6b80ed4f53fe77a5d5cc480f460',1,'pu::ui::Layout']]], ['left',['Left',['../d2/d5e/namespacepu_1_1ui_1_1elm.html#ac2066021dbc53818c251c446b6d7f864a945d5e233cf7d6240f6b783b36a374ff',1,'pu::ui::elm']]], ['length',['length',['../d2/de5/classpu_1_1_string.html#a9949d4a4a31ada90de582cdb0c2067b9',1,'pu::String']]], ['load',['Load',['../d8/d8f/namespacepu_1_1audio.html#af7edd2660a20e032006e1a1aa70bf46e',1,'pu::audio']]], ['loaded',['loaded',['../da/dc0/classpu_1_1ui_1_1_application.html#aab06d178dbe6d404fe274b18824cfc3c',1,'pu::ui::Application']]], ['loadfromfile',['LoadFromFile',['../dd/d76/classpu_1_1ttf_1_1_font.html#a5b0611737b851d34df573e3bb3b846c5',1,'pu::ttf::Font']]], ['loadfrommemory',['LoadFromMemory',['../dd/d76/classpu_1_1ttf_1_1_font.html#a6a2263ba4a27b17ef74487abae08dea6',1,'pu::ttf::Font']]], ['loadimage',['LoadImage',['../da/d54/namespacepu_1_1ui_1_1render.html#aebc1158472b468e3833702f8c8e00c8d',1,'pu::ui::render']]], ['loadlayout',['LoadLayout',['../da/dc0/classpu_1_1ui_1_1_application.html#a50c5006e6be62cf602197fbede960180',1,'pu::ui::Application']]], ['lyt',['lyt',['../da/dc0/classpu_1_1ui_1_1_application.html#a43fefd09f752dc74936c2c079c45a533',1,'pu::ui::Application']]] ];
{ "pile_set_name": "Github" }
<?php
/**
 * 2010-2020 Webkul.
 *
 * NOTICE OF LICENSE
 *
 * All right is reserved,
 * Please go through this link for complete license : https://store.webkul.com/license.html
 *
 * DISCLAIMER
 *
 * Do not edit or add to this file if you wish to upgrade this module to newer
 * versions in the future. If you wish to customize this module for your
 * needs please refer to https://store.webkul.com/customisation-guidelines/ for more information.
 *
 * @author Webkul IN <support@webkul.com>
 * @copyright 2010-2020 Webkul IN
 * @license https://store.webkul.com/license.html
 */

// Guard script: prevents directory listing / direct browsing of this module
// directory by disabling caching and redirecting to the shop root.

// Expire immediately and defeat every cache layer (HTTP/1.0 and 1.1).
header('Expires: Mon, 26 Jul 1997 05:00:00 GMT');
header('Last-Modified: '.gmdate('D, d M Y H:i:s').' GMT');

header('Cache-Control: no-store, no-cache, must-revalidate');
header('Cache-Control: post-check=0, pre-check=0', false);
header('Pragma: no-cache');

// Redirect three levels up (module dir -> modules -> shop root) and stop.
header('Location: ../../../');
exit;
{ "pile_set_name": "Github" }
// +build linux darwin

/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package sysx

import (
	"bytes"

	"golang.org/x/sys/unix"
)

// Listxattr calls syscall listxattr and reads all content
// and returns a string array
func Listxattr(path string) ([]string, error) {
	return listxattrAll(path, unix.Listxattr)
}

// Removexattr calls syscall removexattr
func Removexattr(path string, attr string) (err error) {
	return unix.Removexattr(path, attr)
}

// Setxattr calls syscall setxattr
func Setxattr(path string, attr string, data []byte, flags int) (err error) {
	return unix.Setxattr(path, attr, data, flags)
}

// Getxattr calls syscall getxattr
func Getxattr(path, attr string) ([]byte, error) {
	return getxattrAll(path, attr, unix.Getxattr)
}

// LListxattr lists xattrs, not following symlinks
func LListxattr(path string) ([]string, error) {
	return listxattrAll(path, unix.Llistxattr)
}

// LRemovexattr removes an xattr, not following symlinks
func LRemovexattr(path string, attr string) (err error) {
	return unix.Lremovexattr(path, attr)
}

// LSetxattr sets an xattr, not following symlinks
func LSetxattr(path string, attr string, data []byte, flags int) (err error) {
	return unix.Lsetxattr(path, attr, data, flags)
}

// LGetxattr gets an xattr, not following symlinks
func LGetxattr(path, attr string) ([]byte, error) {
	return getxattrAll(path, attr, unix.Lgetxattr)
}

// defaultXattrBufferSize is the initial guess for list/get buffers; the
// helpers below grow it on ERANGE, so the exact value only affects how often
// a retry is needed.
const defaultXattrBufferSize = 128

type listxattrFunc func(path string, dest []byte) (int, error)

// listxattrAll invokes listFunc, growing the destination buffer and retrying
// whenever the kernel reports ERANGE (attribute list changed or initial guess
// too small), then splits the NUL-separated result into attribute names.
func listxattrAll(path string, listFunc listxattrFunc) ([]string, error) {
	buf := make([]byte, defaultXattrBufferSize)
	n, err := listFunc(path, buf)
	for err == unix.ERANGE {
		// Buffer too small, use zero-sized buffer to get the actual size
		n, err = listFunc(path, []byte{})
		if err != nil {
			return nil, err
		}
		buf = make([]byte, n)
		n, err = listFunc(path, buf)
	}
	if err != nil {
		return nil, err
	}

	ps := bytes.Split(bytes.TrimSuffix(buf[:n], []byte{0}), []byte{0})
	var entries []string
	for _, p := range ps {
		if len(p) > 0 {
			entries = append(entries, string(p))
		}
	}

	return entries, nil
}

type getxattrFunc func(string, string, []byte) (int, error)

// getxattrAll invokes getFunc with the same grow-and-retry-on-ERANGE strategy
// as listxattrAll and returns the raw attribute value.
func getxattrAll(path, attr string, getFunc getxattrFunc) ([]byte, error) {
	buf := make([]byte, defaultXattrBufferSize)
	n, err := getFunc(path, attr, buf)
	for err == unix.ERANGE {
		// Buffer too small, use zero-sized buffer to get the actual size
		n, err = getFunc(path, attr, []byte{})
		if err != nil {
			return nil, err
		}
		buf = make([]byte, n)
		n, err = getFunc(path, attr, buf)
	}
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}
{ "pile_set_name": "Github" }
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows

package ipv4

import "golang.org/x/net/internal/socket"

// setControlMessage is the fallback for platforms without socket control
// message support: it ignores its arguments and always reports
// errOpNoSupport.
func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error {
	return errOpNoSupport
}
{ "pile_set_name": "Github" }
import _ from "lodash";
import React from "react";
import PropTypes from "prop-types";
import { COLORS } from "../../../shared/util";
import PieChart from "../../../stats/year/components/pie_chart";

// Pie chart of a project's identifications broken down by identification
// category (improving / supporting / leading / maverick).
// NOTE(review): the component name misspells "Category"; it is the exported
// identifier, so renaming it would break importers.
const IdCategroryPieChart = ( { project } ) => {
  // Category counts are fetched elsewhere; show a spinner until loaded.
  if ( !project.identification_categories_loaded ) {
    return ( <div className="loading_spinner huge" /> );
  }
  // No identifications at all: render nothing.
  if ( _.isEmpty( project.identification_categories.results ) ) {
    return ( <div /> );
  }
  // Map category name -> count, e.g. { improving: 3, supporting: 10, ... };
  // categories absent from the results end up undefined below.
  const data = _.fromPairs(
    _.map( project.identification_categories.results, r => [r.category, r.count] ) );
  const total = _.sum( _.map( project.identification_categories.results, "count" ) );
  return (
    <div className="IconicTaxaPieChart">
      <div className="count-label">
        { I18n.t( "x_identifications_", {
          count: I18n.toNumber( total, { precision: 0 } )
        } ) }
      </div>
      <PieChart
        data={[
          {
            label: I18n.t( "improving" ),
            value: data.improving,
            color: COLORS.inatGreen,
            category: "improving"
          },
          {
            label: I18n.t( "supporting" ),
            value: data.supporting,
            color: COLORS.inatGreenLight,
            category: "supporting"
          },
          {
            label: I18n.t( "leading" ),
            value: data.leading,
            color: COLORS.needsIdYellow,
            category: "leading"
          },
          {
            label: I18n.t( "maverick" ),
            value: data.maverick,
            color: COLORS.failRed,
            category: "maverick"
          }
        ]}
        legendColumns={ 1 }
        legendColumnWidth={ 120 }
        margin={ { top: 0, bottom: 120, left: 0, right: 0 } }
        donutWidth={ 20 }
      />
    </div>
  );
};

IdCategroryPieChart.propTypes = {
  config: PropTypes.object,
  project: PropTypes.object,
  leaders: PropTypes.array,
  type: PropTypes.string
};

export default IdCategroryPieChart;
{ "pile_set_name": "Github" }
#!/usr/bin/env bash
#
# logstash
#
# chkconfig: - 57 47
# description: logstash
# processname: logstash

# SysV init script for logstash. This file is an ERB template: all <%= ... %>
# tags are substituted by Puppet before installation.

PIDDIR="/var/run/logstash"
export PIDFILE="$PIDDIR/logstash-<%= @name %>.pid"
export LS_HOME="<%= @home %>"
export LS_HEAP_SIZE="<%= @max_heap %>"
export LOGSTASH_OPTS="<%= @args.join(' ') %>"
LS_USER="<%= @user %>"
LS_GROUP="<%= @group %>"
LS_LOG="<%= @log_file %>"
LOGDIR="<%= ::File.dirname @log_file %>"
export LS_JAVA_OPTS="-server -Xms<%= @min_heap %> -Xmx<%= @max_heap %> -Djava.io.tmpdir=$LS_HOME/tmp/ <%= @java_opts %> <%= '-Djava.net.preferIPv4Stack=true' if @ipv4_only %>"
# Command run (under su) to launch logstash in the background and record its
# pid; note the \$! is escaped so it expands in the child shell, not here.
BIN_SCRIPT="/usr/bin/env $LS_HOME/bin/logstash $LOGSTASH_OPTS > $LS_LOG 2>&1 & echo \$! > $PIDFILE"

# Pull in success/failure helpers on Red Hat-style systems when available.
if [ -f /etc/init.d/functions ] ; then
  . /etc/init.d/functions
fi

start() {
  if [ ! -d "$PIDDIR" ] ; then
    mkdir "$PIDDIR"
    chown $LS_USER:$LS_GROUP $PIDDIR
  fi
  # Refuse to start when a pid file points at a live java process.
  if [ -f $PIDFILE ]; then
    echo -e "\033[31;1mPID file found in $PIDFILE, already running?\033[0m"
    ls_pid="$(cat $PIDFILE)"
    pid_running="$( ps ax | grep 'java' | grep $ls_pid )"
    if [ ! -z "$pid_running" ] ; then
      echo -e "\033[31;1mPID $ls_pid still alive, logstash is already running. Doing nothing\033[0m"
      return 0
    fi
  fi
  echo -e "\033[1mStarting logstash...\033[0m"
  pushd $LS_HOME > /dev/null 2>&1
  su $LS_USER -c "$BIN_SCRIPT" > /dev/null 2>&1
  # NOTE(review): $! here refers to no job started by THIS shell (the daemon
  # is backgrounded inside su's shell), and $? below captures the status of
  # the ls_pid assignment, not of su — so result is effectively always 0 and
  # the failure branch below can never trigger on a su failure.
  ls_pid=$!
  result=$?
  popd > /dev/null 2>&1
  if [ $result -ne 0 ] ; then
    failure
    echo -e "Logstash did not start successfully"
    exit 1
  else
    success
    echo -e "Logstash started successfully"
  fi
}

function stop() {
  echo -n -e "\033[1mStopping logstash...\033[0m"
  # Seconds to wait for a graceful shutdown before escalating.
  if [ -z "$SHUTDOWN_WAIT" ]; then
    SHUTDOWN_WAIT=5
  fi
  if [ ! -z "$PIDFILE" ]; then
    if [ -f "$PIDFILE" ]; then
      # kill -0 only probes whether the process exists.
      kill -0 `cat $PIDFILE` >/dev/null 2>&1
      if [ $? -gt 0 ]; then
        echo "PID file ($PIDFILE) found but no matching process was found. Nothing to do."
        return 0
      fi
    else
      echo "\$PIDFILE was set ($PIDFILE) but the specified file does not exist. Is Logstash running? Assuming it has stopped and pro\
ceeding."
      return 0
    fi
  fi

  # Ask nicely first (SIGTERM).
  kill `cat $PIDFILE` >/dev/null 2>&1

  if [ ! -z "$PIDFILE" ]; then
    if [ -f "$PIDFILE" ]; then
      # Poll once a second until the process exits or the wait expires.
      while [ $SHUTDOWN_WAIT -ge 0 ]; do
        kill -0 `cat $PIDFILE` >/dev/null 2>&1
        if [ $? -gt 0 ]; then
          rm $PIDFILE
          break
        fi
        if [ $SHUTDOWN_WAIT -gt 0 ]; then
          sleep 1
        fi
        SHUTDOWN_WAIT=`expr $SHUTDOWN_WAIT - 1 `
      done
      # still not dead, we may need to resort to drastic measures
      if [ -f "$PIDFILE" ]; then
        kill -0 `cat $PIDFILE` >/dev/null 2>&1
        if [ $? -eq 0 ]; then
          echo "Application still alive, sleeping for 20 seconds before sending SIGKILL"
          sleep 20
          kill -0 `cat $PIDFILE` >/dev/null 2>&1
          if [ $? -eq 0 ]; then
            kill -9 `cat $PIDFILE` >/dev/null 2>&1
            echo "Killed with extreme prejudice"
          else
            echo "Application stopped, no need to use SIGKILL"
          fi
          rm $PIDFILE
        fi
      fi
    fi
  fi
}

restart() {
  stop
  start
}

status() {
  # GOT PIDFILE?
  [ -f $PIDFILE ] && pid=$(cat $PIDFILE)

  # RUNNING
  if [[ $pid && -d "/proc/$pid" ]]; then
    success
    echo -e "Logstash is running with pid $pid"
  fi

  # NOT RUNNING
  if [[ ! $pid || ! -d "/proc/$pid" ]]; then
    echo "Logstash not running"
    exit 3
  fi

  # STALE PID FOUND
  # NOTE(review): this branch is unreachable — a dead pid already exits 3 in
  # the NOT RUNNING branch above, and for a live pid the -d test here fails.
  if [[ ! -d "/proc/$pid" && -f $PIDFILE ]]; then
    echo -e "\033[1;31;40m[!] Stale PID found in $PIDFILE\033[0m"
    exit 1
  fi
}

case "$1" in
  start)
    start
  ;;
  stop)
    stop
  ;;
  restart)
    restart
  ;;
  status)
    status $2
  ;;
  *)
    echo $"Usage: $0 {start|stop|restart|status [-v]|}"
    exit 1
esac

exit $?
{ "pile_set_name": "Github" }
{ "type": "ExportDefaultDeclaration", "start": 62, "end": 91, "loc": { "start": { "line": 3, "column": 0 }, "end": { "line": 3, "column": 29 } }, "range": [ 62, 91 ], "declaration": { "type": "Identifier", "start": 77, "end": 90, "loc": { "start": { "line": 3, "column": 15 }, "end": { "line": 3, "column": 28 } }, "range": [ 77, 90 ], "name": "fnDeclaration" } }
{ "pile_set_name": "Github" }
/* * Copyright (C) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA, * All Rights Reserved. * Copyright (C) 2009 VMware, Inc., Palo Alto, CA., USA, * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sub license, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #include "drmP.h" #include "nouveau_drv.h" int nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma) { struct drm_file *file_priv = filp->private_data; struct drm_nouveau_private *dev_priv = file_priv->minor->dev->dev_private; if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) return drm_mmap(filp, vma); return ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev); } static int nouveau_ttm_mem_global_init(struct drm_global_reference *ref) { return ttm_mem_global_init(ref->object); } static void nouveau_ttm_mem_global_release(struct drm_global_reference *ref) { ttm_mem_global_release(ref->object); } int nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv) { struct drm_global_reference *global_ref; int ret; global_ref = &dev_priv->ttm.mem_global_ref; global_ref->global_type = DRM_GLOBAL_TTM_MEM; global_ref->size = sizeof(struct ttm_mem_global); global_ref->init = &nouveau_ttm_mem_global_init; global_ref->release = &nouveau_ttm_mem_global_release; ret = drm_global_item_ref(global_ref); if (unlikely(ret != 0)) { DRM_ERROR("Failed setting up TTM memory accounting\n"); dev_priv->ttm.mem_global_ref.release = NULL; return ret; } dev_priv->ttm.bo_global_ref.mem_glob = global_ref->object; global_ref = &dev_priv->ttm.bo_global_ref.ref; global_ref->global_type = DRM_GLOBAL_TTM_BO; global_ref->size = sizeof(struct ttm_bo_global); global_ref->init = &ttm_bo_global_init; global_ref->release = &ttm_bo_global_release; ret = drm_global_item_ref(global_ref); if (unlikely(ret != 0)) { DRM_ERROR("Failed setting up TTM BO subsystem\n"); drm_global_item_unref(&dev_priv->ttm.mem_global_ref); dev_priv->ttm.mem_global_ref.release = NULL; return ret; } return 0; } void nouveau_ttm_global_release(struct drm_nouveau_private *dev_priv) { if (dev_priv->ttm.mem_global_ref.release == NULL) return; drm_global_item_unref(&dev_priv->ttm.bo_global_ref.ref); drm_global_item_unref(&dev_priv->ttm.mem_global_ref); dev_priv->ttm.mem_global_ref.release = NULL; }
{ "pile_set_name": "Github" }
[/ Copyright 2017 Nick Thompson Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt). ] [section:catmull_rom Catmull-Rom Splines] [heading Synopsis] `` #include <boost/math/interpolators/catmull_rom.hpp> namespace boost{ namespace math{ template<class Point, class RandomAccessContainer = std::vector<Point> > class catmull_rom { public: catmull_rom(RandomAccessContainer&& points, bool closed = false, Real alpha = (Real) 1/ (Real) 2) catmull_rom(std::initializer_list<Point> l, bool closed = false, typename Point::value_type alpha = (typename Point::value_type) 1/ (typename Point::value_type) 2); Real operator()(Real s) const; Real max_parameter() const; Real parameter_at_point(size_t i) const; Point prime(Real s) const; }; }} `` [heading Description] Catmull-Rom splines are a family of interpolating curves which are commonly used in computer graphics and animation. Catmull-Rom splines enjoy the following properties: * Affine invariance: The interpolant commutes with affine transformations. * Local support of the basis functions: This gives stability and fast evaluation. * /C/[super 2]-smoothness * Interpolation of control points-this means the curve passes through the control points. Many curves (such as B[eacute]zier) are /approximating/ - they do not pass through their control points. This makes them more difficult to use than interpolating splines. The `catmull_rom` class provided by Boost.Math creates a cubic Catmull-Rom spline from an array of points in any dimension. Since there are numerous ways to represent a point in /n/-dimensional space, the class attempts to be flexible by templating on the point type. 
The requirements on the point type are discussed in more detail below, but roughly, it must have a dereference operator defined (e.g., `p[0]` is not a syntax error), it must be able to be dereferenced up to `dimension -1`, and `p[i]` is of type `Real`, define `value_type`, and the free function `size()`. These requirements are met by `std::vector` and `std::array`. The basic usage is shown here:

    std::vector<std::array<Real, 3>> points(4);
    points[0] = {0,0,0};
    points[1] = {1,0,0};
    points[2] = {0,1,0};
    points[3] = {0,0,1};
    catmull_rom<std::array<Real, 3>> cr(std::move(points));
    // Interpolate at s = 0.1:
    auto point = cr(0.1);

The spline can be either open or /closed/, closed meaning that there is some /s > 0/ such that /P(s) = P(0)/. The default is open, but this can be easily changed:

    // closed = true
    catmull_rom<std::array<Real, 3>> cr(std::move(points), true);

In either case, evaluating the interpolator at /s=0/ returns the first point in the list.

If the curve is open, then the first and last segments may have strange behavior. The traditional solution is to prepend a carefully selected control point to the data so that the first data segment (second interpolator segment) has reasonable tangent vectors, and simply ignore the first interpolator segment. A control point is appended to the data using similar criteria. However, we recommend not going through this effort until it proves to be necessary: For most use-cases, the curve is good enough without prepending and appending control points, and responsible selection of non-data control points is difficult.

Inside `catmull_rom`, the curve is represented as closed. This is because an open Catmull-Rom curve is /implicitly closed/, but the closing point is the zero vector.
There is no reason to suppose that the zero vector is a better closing point than the endpoint (or any other point, for that matter), so traditionally Catmull-Rom splines leave the segment between the first and second point undefined, as well as the segment between the second-to-last and last point. We find this property of the traditional implementation of Catmull-Rom splines annoying and confusing to the user. Hence internally, we close the curve so that the first and last segments are defined. Of course, this causes the /tangent/ vectors to the first and last points to be bizarre. This is a "pick your poison" design decision-either the curve cannot interpolate in its first and last segments, or the tangents along the first and last segments are meaningless. In the vast majority of cases, this will be no problem to the user. However, if it becomes a problem, then the user should add one extra point in a position they believe is reasonable and close the curve.

Since the routine internally represents the curve as closed, a question arises: Why does the user have to specify if the curve is open or closed? The answer is that the parameterization is chosen by the routine, so it is of interest to the user to understand the values where a meaningful result is returned.

    Real max_s = cr.max_parameter();

If you attempt to interpolate for `s > max_s`, an exception is thrown. If the curve is closed, then `cr(max_s) = p0`, where `p0` is the first point on the curve. If the curve is open, then `cr(max_s) = pf`, where `pf` is the final point on the curve.

The Catmull-Rom curve admits an infinite number of parameterizations. The default parameterization of the `catmull_rom` class is the so-called /centripetal/ parameterization. This parameterization has been shown to be the only parameterization that does not form cusps or self-intersections within segments.
However, for advanced users, other parameterizations can be chosen using the /alpha/ parameter: // alpha = 1 is the "chordal" parameterization. catmull_rom<std::array<double, 3>> cr(std::move(points), false, 1.0); The alpha parameter must always be in the range `[0,1]`. Finally, the tangent vector to any point of the curve can be computed via double s = 0.1; Point tangent = cr.prime(s); Since the magnitude of the tangent vector is dependent on the parameterization, it is not meaningful (unless the user chooses the chordal parameterization /alpha = 1/ which parameterizes by Euclidean distance between points.) However, its direction is meaningful no matter the parameterization, so the user may wish to normalize this result. [heading Examples] [import ../../example/catmull_rom_example.cpp] [heading Performance] The following performance numbers were generated for a call to the Catmull-Rom interpolation method. The number that follows the slash is the number of points passed to the interpolant. We see that evaluation of the interpolant is [bigo](/log/(/N/)). 
Run on 2700 MHz CPU CPU Caches: L1 Data 32K (x2) L1 Instruction 32K (x2) L2 Unified 262K (x2) L3 Unified 3145K (x1) --------------------------------------------------------- Benchmark Time CPU --------------------------------------------------------- BM_CatmullRom<double>/4 20 ns 20 ns BM_CatmullRom<double>/8 21 ns 21 ns BM_CatmullRom<double>/16 23 ns 23 ns BM_CatmullRom<double>/32 24 ns 24 ns BM_CatmullRom<double>/64 27 ns 27 ns BM_CatmullRom<double>/128 27 ns 27 ns BM_CatmullRom<double>/256 30 ns 30 ns BM_CatmullRom<double>/512 32 ns 31 ns BM_CatmullRom<double>/1024 33 ns 33 ns BM_CatmullRom<double>/2048 34 ns 34 ns BM_CatmullRom<double>/4096 36 ns 36 ns BM_CatmullRom<double>/8192 38 ns 38 ns BM_CatmullRom<double>/16384 39 ns 39 ns BM_CatmullRom<double>/32768 40 ns 40 ns BM_CatmullRom<double>/65536 45 ns 44 ns BM_CatmullRom<double>/131072 46 ns 46 ns BM_CatmullRom<double>/262144 50 ns 50 ns BM_CatmullRom<double>/524288 53 ns 52 ns BM_CatmullRom<double>/1048576 58 ns 57 ns BM_CatmullRom<double>_BigO 2.97 lgN 2.97 lgN BM_CatmullRom<double>_RMS 19 % 19 % [heading Point types] We have already discussed that certain conditions on the `Point` type template argument must be obeyed. The following shows a custom point type in 3D which can be used as a template argument to Catmull-Rom: template<class Real> class mypoint3d { public: // Must define a value_type: typedef Real value_type; // Regular constructor--need not be of this form. 
mypoint3d(Real x, Real y, Real z) {m_vec[0] = x; m_vec[1] = y; m_vec[2] = z; } // Must define a default constructor: mypoint3d() {} // Must define array access: Real operator[](size_t i) const { return m_vec[i]; } // Must define array element assignment: Real& operator[](size_t i) { return m_vec[i]; } private: std::array<Real, 3> m_vec; }; // Must define the free function "size()": template<class Real> constexpr size_t size(const mypoint3d<Real>& c) { return 3; } These conditions are satisfied by both `std::array` and `std::vector`, but it may nonetheless be useful to define your own point class so that (say) you can define geometric distance between them. [heading Caveats] The Catmull-Rom interpolator requires memory for three more points than is provided by the user. This causes the class to call a `resize()` on the input vector. If `v.capacity() >= v.size() + 3`, then no problems arise; there are no reallocs, and in practice this condition is almost always satisfied. However, if `v.capacity() < v.size() + 3`, the `realloc` causes a performance penalty of roughly 20%. [heading Generic Containers] The `Point` type may be stored in a different container than `std::vector`. For example, here is how to store the points in a Boost.uBLAS vector: mypoint3d<Real> p0(0.1, 0.2, 0.3); mypoint3d<Real> p1(0.2, 0.3, 0.4); mypoint3d<Real> p2(0.3, 0.4, 0.5); mypoint3d<Real> p3(0.4, 0.5, 0.6); mypoint3d<Real> p4(0.5, 0.6, 0.7); mypoint3d<Real> p5(0.6, 0.7, 0.8); boost::numeric::ublas::vector<mypoint3d<Real>> u(6); u[0] = p0; u[1] = p1; u[2] = p2; u[3] = p3; u[4] = p4; u[5] = p5; // Tests initializer_list: catmull_rom<mypoint3d<Real>, decltype(u)> cat(std::move(u)); [heading References] * Cem Yuksel, Scott Schaefer, and John Keyser, ['Parameterization and applications of Catmull–Rom curves], Computer-Aided Design 43 (2011) 747–755. * Phillip J. Barry and Ronald N. 
Goldman, ['A Recursive Evaluation Algorithm for a Class of Catmull-Rom Splines], Computer Graphics, Volume 22, Number 4, August 1988 [endsect] [/section:catmull_rom Catmull-Rom Splines]
{ "pile_set_name": "Github" }
// // XLPagerTabStripViewController // XLPagerTabStrip ( https://github.com/xmartlabs/XLPagerTabStrip ) // // Copyright (c) 2015 Xmartlabs ( http://xmartlabs.com ) // // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
#import "XLPagerTabStripViewController.h"

@interface XLPagerTabStripViewController ()

// Index of the currently visible child view controller.
@property (nonatomic) NSUInteger currentIndex;

@end

@implementation XLPagerTabStripViewController
{
    NSUInteger _lastPageNumber;
    CGFloat _lastContentOffset;
    NSUInteger _pageBeforeRotate;
    // Saved child order while a "skip intermediate controllers" animation is
    // in flight; restored in scrollViewDidEndScrollingAnimation:.
    NSArray * _originalPagerTabStripChildViewControllers;
    CGSize _lastSize;
}

@synthesize currentIndex = _currentIndex;

// FIX: was "#pragma maek" (typo), which silently disabled the Xcode mark.
#pragma mark - initializers

-(id)initWithNibName:(NSString *)nibNameOrNil bundle:(NSBundle *)nibBundleOrNil
{
    self = [super initWithNibName:nibNameOrNil bundle:nibBundleOrNil];
    if (self){
        [self pagerTabStripViewControllerInit];
    }
    return self;
}

-(id)initWithCoder:(NSCoder *)aDecoder
{
    self = [super initWithCoder:aDecoder];
    if (self){
        [self pagerTabStripViewControllerInit];
    }
    return self;
}

// Common initialization shared by both designated initializers.
-(void)pagerTabStripViewControllerInit
{
    _currentIndex = 0;
    _delegate = self;
    _dataSource = self;
    _lastContentOffset = 0.0f;
    _isElasticIndicatorLimit = NO;
    _skipIntermediateViewControllers = YES;
    _isProgressiveIndicator = NO;
}

- (void)viewDidLoad
{
    [super viewDidLoad];
    // Create a default paging scroll view if one was not wired up in IB.
    if (!self.containerView){
        self.containerView = [[UIScrollView alloc] initWithFrame:CGRectMake(0, 0, CGRectGetWidth(self.view.bounds), CGRectGetHeight(self.view.bounds))];
        self.containerView.autoresizingMask = UIViewAutoresizingFlexibleWidth | UIViewAutoresizingFlexibleHeight;
        [self.view addSubview:self.containerView];
    }
    self.containerView.bounces = YES;
    [self.containerView setAlwaysBounceHorizontal:YES];
    [self.containerView setAlwaysBounceVertical:NO];
    self.containerView.scrollsToTop = NO;
    self.containerView.delegate = self;
    self.containerView.showsVerticalScrollIndicator = NO;
    self.containerView.showsHorizontalScrollIndicator = NO;
    self.containerView.pagingEnabled = YES;
    if (self.dataSource){
        _pagerTabStripChildViewControllers = [self.dataSource childViewControllersForPagerTabStripViewController:self];
    }
}

-(void)viewWillAppear:(BOOL)animated
{
    [super viewWillAppear:animated];
    _lastSize = self.containerView.bounds.size;
}

-(void)viewDidAppear:(BOOL)animated
{
    [super viewDidAppear:animated];
    [self updateIfNeeded];
}

-(void)viewDidLayoutSubviews
{
    [super viewDidLayoutSubviews];
    [self updateIfNeeded];
    if ([[[UIDevice currentDevice] systemVersion] compare:@"8.0" options:NSNumericSearch] == NSOrderedAscending){ // SYSTEM_VERSION_LESS_THAN 8.0
        [self.view layoutSubviews];
    }
}

#pragma mark - move to another view controller

-(void)moveToViewControllerAtIndex:(NSUInteger)index
{
    [self moveToViewControllerAtIndex:index animated:YES];
}

// Scrolls to the child at `index`. When skipIntermediateViewControllers is
// set and the jump spans more than one page, the destination's neighbour is
// temporarily swapped next to the current page so the animation crosses a
// single page width; the original order is restored once the animation ends.
-(void)moveToViewControllerAtIndex:(NSUInteger)index animated:(bool)animated
{
    if (![self isViewLoaded]){
        self.currentIndex = index;
    }
    else{
        // FIX: the operands are NSUInteger, so (currentIndex - index) wrapped
        // around for index > currentIndex and ABS() was a no-op on the huge
        // unsigned result; moving to the *adjacent* next tab wrongly took the
        // skip path. Cast to NSInteger so the distance is computed correctly.
        if (self.skipIntermediateViewControllers && ABS((NSInteger)self.currentIndex - (NSInteger)index) > 1){
            NSArray * originalPagerTabStripChildViewControllers = self.pagerTabStripChildViewControllers;
            NSMutableArray * tempChildViewControllers = [NSMutableArray arrayWithArray:originalPagerTabStripChildViewControllers];
            UIViewController * currentChildVC = [originalPagerTabStripChildViewControllers objectAtIndex:self.currentIndex];
            NSUInteger fromIndex = (self.currentIndex < index) ? index - 1 : index + 1;
            [tempChildViewControllers setObject:[originalPagerTabStripChildViewControllers objectAtIndex:fromIndex] atIndexedSubscript:self.currentIndex];
            [tempChildViewControllers setObject:currentChildVC atIndexedSubscript:fromIndex];
            _pagerTabStripChildViewControllers = tempChildViewControllers;
            [self.containerView setContentOffset:CGPointMake([self pageOffsetForChildIndex:fromIndex], 0) animated:NO];
            // Block input until the animation ends (re-enabled in
            // scrollViewDidEndScrollingAnimation:).
            if (self.navigationController){
                self.navigationController.view.userInteractionEnabled = NO;
            }
            else{
                self.view.userInteractionEnabled = NO;
            }
            _originalPagerTabStripChildViewControllers = originalPagerTabStripChildViewControllers;
            [self.containerView setContentOffset:CGPointMake([self pageOffsetForChildIndex:index], 0) animated:YES];
        }
        else{
            [self.containerView setContentOffset:CGPointMake([self pageOffsetForChildIndex:index], 0) animated:animated];
        }
    }
}

-(void)moveToViewController:(UIViewController *)viewController
{
    [self moveToViewControllerAtIndex:[self.pagerTabStripChildViewControllers indexOfObject:viewController]];
}

#pragma mark - XLPagerTabStripViewControllerDelegate

// Default no-op delegate implementations; subclasses override to move their
// tab indicator.
-(void)pagerTabStripViewController:(XLPagerTabStripViewController *)pagerTabStripViewController updateIndicatorFromIndex:(NSInteger)fromIndex toIndex:(NSInteger)toIndex{
}

-(void)pagerTabStripViewController:(XLPagerTabStripViewController *)pagerTabStripViewController updateIndicatorFromIndex:(NSInteger)fromIndex toIndex:(NSInteger)toIndex withProgressPercentage:(CGFloat)progressPercentage
{
}

#pragma mark - XLPagerTabStripViewControllerDataSource

-(NSArray *)childViewControllersForPagerTabStripViewController:(XLPagerTabStripViewController *)pagerTabStripViewController
{
    return self.pagerTabStripChildViewControllers;
}

#pragma mark - Helpers

// Re-lays out the child pages only when the container size changed.
-(void)updateIfNeeded
{
    if (!CGSizeEqualToSize(_lastSize, self.containerView.bounds.size)){
        [self updateContent];
    }
}

// Direction of the current scroll relative to the offset captured when
// dragging began.
-(XLPagerTabStripDirection)scrollDirection
{
    if (self.containerView.contentOffset.x > _lastContentOffset){
        return XLPagerTabStripDirectionLeft;
    }
    else if (self.containerView.contentOffset.x < _lastContentOffset){
        return XLPagerTabStripDirectionRight;
    }
    return XLPagerTabStripDirectionNone;
}

-(BOOL)canMoveToIndex:(NSUInteger)index
{
    return (self.currentIndex != index && self.pagerTabStripChildViewControllers.count > index);
}

// Content offset (x) of the page at `index`.
-(CGFloat)pageOffsetForChildIndex:(NSUInteger)index
{
    return (index * CGRectGetWidth(self.containerView.bounds));
}

// Frame origin (x) for the child at `index`, centred when the container is
// wider than the view.
-(CGFloat)offsetForChildIndex:(NSUInteger)index
{
    return (index * CGRectGetWidth(self.containerView.bounds) + ((CGRectGetWidth(self.containerView.bounds) - CGRectGetWidth(self.view.bounds)) * 0.5));
}

-(CGFloat)offsetForChildViewController:(UIViewController *)viewController
{
    NSInteger index = [self.pagerTabStripChildViewControllers indexOfObject:viewController];
    if (index == NSNotFound){
        @throw [NSException exceptionWithName:NSRangeException reason:nil userInfo:nil];
    }
    return [self offsetForChildIndex:index];
}

-(NSUInteger)pageForContentOffset:(CGFloat)contentOffset
{
    NSInteger result = [self virtualPageForContentOffset:contentOffset];
    return [self pageForVirtualPage:result];
}

// Nearest page for an offset; may be -1 or count when bouncing past the ends.
-(NSInteger)virtualPageForContentOffset:(CGFloat)contentOffset
{
    NSInteger result = (contentOffset + (1.5f * [self pageWidth])) / [self pageWidth];
    return result - 1;
}

// Clamps a virtual page into the valid [0, count - 1] range.
-(NSUInteger)pageForVirtualPage:(NSInteger)virtualPage
{
    if (virtualPage < 0){
        return 0;
    }
    if (virtualPage > self.pagerTabStripChildViewControllers.count - 1){
        return self.pagerTabStripChildViewControllers.count - 1;
    }
    return virtualPage;
}

-(CGFloat)pageWidth
{
    return CGRectGetWidth(self.containerView.bounds);
}

// Fraction [0, 1) of the current page that has been scrolled past, oriented
// by scroll direction.
-(CGFloat)scrollPercentage
{
    if ([self scrollDirection] == XLPagerTabStripDirectionLeft || [self scrollDirection] == XLPagerTabStripDirectionNone){
        return fmodf(self.containerView.contentOffset.x, [self pageWidth]) / [self pageWidth];
    }
    return 1 - fmodf(self.containerView.contentOffset.x >= 0 ? self.containerView.contentOffset.x : [self pageWidth] + self.containerView.contentOffset.x, [self pageWidth]) / [self pageWidth];
}

// Core layout pass: sizes the content, adds/removes child controllers so
// that only pages within one page-width of the viewport are attached, then
// notifies the delegate about indicator changes.
-(void)updateContent
{
    if (!CGSizeEqualToSize(_lastSize, self.containerView.bounds.size)){
        _lastSize = self.containerView.bounds.size;
        [self.containerView setContentOffset:CGPointMake([self pageOffsetForChildIndex:self.currentIndex], 0) animated:NO];
    }
    NSArray * childViewControllers = self.pagerTabStripChildViewControllers;
    self.containerView.contentSize = CGSizeMake(CGRectGetWidth(self.containerView.bounds) * childViewControllers.count, self.containerView.contentSize.height);
    [childViewControllers enumerateObjectsUsingBlock:^(id obj, NSUInteger idx, BOOL *stop) {
        UIViewController * childController = (UIViewController *)obj;
        CGFloat pageOffsetForChild = [self pageOffsetForChildIndex:idx];
        if (fabs(self.containerView.contentOffset.x - pageOffsetForChild) < CGRectGetWidth(self.containerView.bounds)){
            if (![childController parentViewController]){
                // Page entered the visible window: attach it via VC containment.
                [self addChildViewController:childController];
                [childController didMoveToParentViewController:self];
                CGFloat childPosition = [self offsetForChildIndex:idx];
                [childController.view setFrame:CGRectMake(childPosition, 0, CGRectGetWidth(self.view.bounds), CGRectGetHeight(self.containerView.bounds))];
                childController.view.autoresizingMask = UIViewAutoresizingFlexibleHeight | UIViewAutoresizingFlexibleWidth;
                [self.containerView addSubview:childController.view];
            }
            else{
                CGFloat childPosition = [self offsetForChildIndex:idx];
                [childController.view setFrame:CGRectMake(childPosition, 0, CGRectGetWidth(self.view.bounds), CGRectGetHeight(self.containerView.bounds))];
                childController.view.autoresizingMask = UIViewAutoresizingFlexibleHeight | UIViewAutoresizingFlexibleWidth;
            }
        }
        else{
            if ([childController parentViewController]){
                // Page left the visible window: detach it.
                [childController.view removeFromSuperview];
                [childController willMoveToParentViewController:nil];
                [childController removeFromParentViewController];
            }
        }
    }];
    NSUInteger oldCurrentIndex = self.currentIndex;
    NSInteger virtualPage = [self virtualPageForContentOffset:self.containerView.contentOffset.x];
    NSUInteger newCurrentIndex = [self pageForVirtualPage:virtualPage];
    self.currentIndex = newCurrentIndex;
    if (self.isProgressiveIndicator){
        if ([self.delegate respondsToSelector:@selector(pagerTabStripViewController:updateIndicatorFromIndex:toIndex:withProgressPercentage:)]){
            CGFloat scrollPercentage = [self scrollPercentage];
            if (scrollPercentage > 0) {
                NSInteger fromIndex = self.currentIndex;
                NSInteger toIndex = self.currentIndex;
                XLPagerTabStripDirection scrollDirection = [self scrollDirection];
                if (scrollDirection == XLPagerTabStripDirectionLeft){
                    if (virtualPage > self.pagerTabStripChildViewControllers.count - 1){
                        fromIndex = self.pagerTabStripChildViewControllers.count - 1;
                        toIndex = self.pagerTabStripChildViewControllers.count;
                    }
                    else{
                        if (scrollPercentage > 0.5f){
                            fromIndex = MAX(toIndex - 1, 0);
                        }
                        else{
                            toIndex = fromIndex + 1;
                        }
                    }
                }
                else if (scrollDirection == XLPagerTabStripDirectionRight) {
                    if (virtualPage < 0){
                        fromIndex = 0;
                        toIndex = -1;
                    }
                    else{
                        if (scrollPercentage > 0.5f){
                            fromIndex = MIN(toIndex + 1, self.pagerTabStripChildViewControllers.count - 1);
                        }
                        else{
                            toIndex = fromIndex - 1;
                        }
                    }
                }
                [self.delegate pagerTabStripViewController:self updateIndicatorFromIndex:fromIndex toIndex:toIndex withProgressPercentage:(self.isElasticIndicatorLimit ? scrollPercentage : ( toIndex < 0 || toIndex >= self.pagerTabStripChildViewControllers.count ? 0 : scrollPercentage ))];
            }
        }
    }
    else{
        if ([self.delegate respondsToSelector:@selector(pagerTabStripViewController:updateIndicatorFromIndex:toIndex:)] && oldCurrentIndex != newCurrentIndex){
            // FIX: was MAX(oldCurrentIndex, count - 1), which always yielded
            // the *last* index (oldCurrentIndex is never greater than
            // count - 1), so the indicator always animated from the last tab.
            // MIN clamps oldCurrentIndex into the valid range as intended.
            [self.delegate pagerTabStripViewController:self updateIndicatorFromIndex:MIN(oldCurrentIndex, self.pagerTabStripChildViewControllers.count - 1) toIndex:newCurrentIndex];
        }
    }
}

// Detaches every child, re-queries the data source and lays out again.
// NOTE(review): if the data source returns an empty array, the
// `count - 1` clamp below wraps around on NSUInteger -- confirm the data
// source never returns zero children.
-(void)reloadPagerTabStripView
{
    if ([self isViewLoaded]){
        [self.pagerTabStripChildViewControllers enumerateObjectsUsingBlock:^(id obj, NSUInteger idx, BOOL *stop) {
            UIViewController * childController = (UIViewController *)obj;
            if ([childController parentViewController]){
                [childController.view removeFromSuperview];
                [childController willMoveToParentViewController:nil];
                [childController removeFromParentViewController];
            }
        }];
        _pagerTabStripChildViewControllers = self.dataSource ? [self.dataSource childViewControllersForPagerTabStripViewController:self] : @[];
        self.containerView.contentSize = CGSizeMake(CGRectGetWidth(self.containerView.bounds) * _pagerTabStripChildViewControllers.count, self.containerView.contentSize.height);
        if (self.currentIndex >= _pagerTabStripChildViewControllers.count){
            self.currentIndex = _pagerTabStripChildViewControllers.count - 1;
        }
        [self.containerView setContentOffset:CGPointMake([self pageOffsetForChildIndex:self.currentIndex], 0) animated:NO];
        [self updateContent];
    }
}

// FIX: mark was misspelled "UIScrollViewDelegte".
#pragma mark - UIScrollViewDelegate

-(void)scrollViewDidScroll:(UIScrollView *)scrollView
{
    if (self.containerView == scrollView){
        [self updateContent];
    }
}

-(void)scrollViewWillBeginDragging:(UIScrollView *)scrollView
{
    if (self.containerView == scrollView){
        _lastPageNumber = [self pageForContentOffset:scrollView.contentOffset.x];
        _lastContentOffset = scrollView.contentOffset.x;
    }
}

// End of a moveToViewControllerAtIndex: "skip" animation: restore the real
// child order and re-enable user interaction.
-(void)scrollViewDidEndScrollingAnimation:(UIScrollView *)scrollView
{
    if (self.containerView == scrollView && _originalPagerTabStripChildViewControllers){
        _pagerTabStripChildViewControllers = _originalPagerTabStripChildViewControllers;
        _originalPagerTabStripChildViewControllers = nil;
        if (self.navigationController){
            self.navigationController.view.userInteractionEnabled = YES;
        }
        else{
            self.view.userInteractionEnabled = YES;
        }
        [self updateContent];
    }
}

#pragma mark - Orientation

// iOS 8+ rotation path: remember the current page and restore it once the
// size transition completes.
- (void)viewWillTransitionToSize:(CGSize)size withTransitionCoordinator:(id<UIViewControllerTransitionCoordinator>)coordinator
{
    [super viewWillTransitionToSize:size withTransitionCoordinator:coordinator];
    _pageBeforeRotate = self.currentIndex;
    __typeof__(self) __weak weakSelf = self;
    UIInterfaceOrientation fromOrientation = [[UIApplication sharedApplication] statusBarOrientation];
    [coordinator animateAlongsideTransition:nil completion:^(id<UIViewControllerTransitionCoordinatorContext> context) {
        [weakSelf didRotateFromInterfaceOrientation:fromOrientation];
    }];
}

// Pre-iOS 8 rotation path.
-(void)willRotateToInterfaceOrientation:(UIInterfaceOrientation)toInterfaceOrientation duration:(NSTimeInterval)duration
{
    _pageBeforeRotate = self.currentIndex;
}

-(void)didRotateFromInterfaceOrientation:(UIInterfaceOrientation)fromInterfaceOrientation
{
    self.currentIndex = _pageBeforeRotate;
    [self updateIfNeeded];
}

@end
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="utf-8"?> <!-- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See LICENSE in the project root for license information. --> <Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> <PropertyGroup> <MSBuildAllProjects>$(MSBuildAllProjects);$(MSBuildThisFileFullPath)</MSBuildAllProjects> <HasSharedItems>true</HasSharedItems> <SharedGUID>280C91F4-96B5-4BDE-9E02-E573E1DEF583</SharedGUID> </PropertyGroup> <PropertyGroup Label="Configuration"> <Import_RootNamespace>SplitButton_TestUI</Import_RootNamespace> </PropertyGroup> <ItemGroup> <Page Include="$(MSBuildThisFileDirectory)DropDownButtonPage.xaml"> <SubType>Designer</SubType> <Generator>MSBuild:Compile</Generator> </Page> </ItemGroup> <ItemGroup> <Compile Include="$(MSBuildThisFileDirectory)DropDownButtonPage.xaml.cs"> <DependentUpon>DropDownButtonPage.xaml</DependentUpon> </Compile> </ItemGroup> </Project>
{ "pile_set_name": "Github" }
// +build linux,cgo package daemon import ( "context" "math" "os" "runtime" "strconv" "sync" "sync/atomic" "time" "gopkg.in/src-d/go-log.v1" "github.com/cenkalti/backoff" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/bblfsh/bblfshd/daemon/protocol" "gopkg.in/src-d/go-errors.v1" ) var ( // DefaultMaxInstancesPerDriver is the maximum number of instances of // the same driver which can be launched following the default // scaling policy (see DefaultScalingPolicy()). // // Can be changed by setting BBLFSHD_MAX_DRIVER_INSTANCES. DefaultMaxInstancesPerDriver = mustEnvInt("BBLFSHD_MAX_DRIVER_INSTANCES", runtime.NumCPU()) // DefaultMinInstancesPerDriver is the minimal number of instances of // the same driver which will be launched following the default // scaling policy (see DefaultScalingPolicy()). // // Can be changed by setting BBLFSHD_MIN_DRIVER_INSTANCES. DefaultMinInstancesPerDriver = mustEnvInt("BBLFSHD_MIN_DRIVER_INSTANCES", 1) // ErrPoolClosed is returned if the pool was already closed or is being closed. ErrPoolClosed = errors.NewKind("driver pool already closed") // ErrPoolRunning is returned if the pool was already running. ErrPoolRunning = errors.NewKind("driver pool already running") errDriverStopped = errors.NewKind("driver stopped") ) const defaultPolicyTargetWindow = 5 // enough to prevent flickering var ( // policyDefaultWindow is a window for the average function used in the default scaling // policy. The window will be divided by policyDefaultTick intervals to calculate the // size of the window buffer, so this should ideally be a multiple of policyDefaultTick. policyDefaultWindow = mustEnvDur("BBLFSHD_POLICY_WINDOW", 5*time.Second) // policyDefaultTick is a tick rate for the goroutine that re-evaluates the scaling // policy for each driver pool. policyDefaultTick = mustEnvDur("BBLFSHD_POLICY_TICK", 500*time.Millisecond) // policyDefaultScale is a default increment for an additive increase scaling. 
// // See AIMD for more details. policyDefaultScale = mustEnvInt("BBLFSHD_POLICY_SCALE_INC", 1) // policyDefaultDownscale is a default multiplier for a multiplicative decrease downscaling. // // See AIMD for more details. policyDefaultDownscale = mustEnvFloat("BBLFSHD_POLICY_DOWNSCALE_MULT", 0.25) ) func mustEnvInt(env string, def int) int { s := os.Getenv(env) if s == "" { return def } v, err := strconv.Atoi(s) if err != nil { panic(err) } return v } func mustEnvFloat(env string, def float64) float64 { s := os.Getenv(env) if s == "" { return def } v, err := strconv.ParseFloat(s, 64) if err != nil { panic(err) } return v } func mustEnvDur(env string, def time.Duration) time.Duration { s := os.Getenv(env) if s == "" { return def } v, err := time.ParseDuration(s) if err != nil { panic(err) } return v } // DriverPool controls a pool of drivers and balances requests among them, // ensuring each driver does not get concurrent requests. The number of driver // instances in the driver pool is controlled by a ScalingPolicy. type DriverPool struct { // ScalingPolicy scaling policy used to scale up the instances. ScalingPolicy ScalingPolicy // Logger used during the live of the driver pool. Logger log.Logger // factory function used to spawn new driver instances. factory FactoryFunction // wg tracks all goroutines owned by the driver pool. wg sync.WaitGroup // poolCtx will be cancelled as a signal that the pool is closing. poolCtx context.Context // stop is called to send a stop channel to the pool. stopped channel will be closed // when the pool is fully stopped. stop func() // stopped is closed when the pool (and the manager goroutine) stops. stopped chan struct{} // get serves as a request-response channel to get an idle driver instance. // This channel is used by clients and is accepted by the manager goroutine. get chan driverRequest // put returns the driver to the pool. The driver must be active. 
put chan Driver // rescale accepts signals passed from the runPolicy goroutine to the manager goroutine. // It allows to re-evaluate scaling conditions when waiting for an idle driver. // The channel must have a buffer and sends to this channel must be used with default. rescale chan struct{} // spawn accepts signals to the spawner goroutine to run a new driver. // The driver is returned on the put channel. spawn chan struct{} // spawnErr optionally communicates the driver creation failures to client goroutines. // The spawn goroutine won't block waiting for this channel to accept an error and will // log the error instead, if no client goroutines are willing to wait for it. spawnErr chan error drivers struct { sync.RWMutex idle map[Driver]struct{} all map[Driver]struct{} } requests atomicInt // requests waiting for a driver running atomicInt // total running instances; synced with len(drivers.all) spawning atomicInt // instances being started targetSize atomicInt // instances wanted exited atomicInt // drivers exited success atomicInt // requests executed successfully errors atomicInt // requests failed metrics struct { scaling struct { total prometheus.Gauge idle prometheus.Gauge load prometheus.Gauge target prometheus.Gauge } spawn struct { total prometheus.Counter err prometheus.Counter kill prometheus.Counter } } } type driverRequest struct { // cancel channel is closes then the client request is cancelled. Set to ctx.Done(). cancel <-chan struct{} // out receives a single Driver value to the client. Channel may also be closed, // signalling that the pool is closing. Either out or err will be triggered. out chan<- Driver // err receives a single error value in case the pool cannot retrieve or create // a driver instance. Either out or err will be triggered. err chan<- error } // FactoryFunction is a factory function that creates new DriverInstance's. 
type FactoryFunction func(ctx context.Context) (Driver, error)

// NewDriverPool creates and starts a new DriverPool. It takes as parameters
// a FactoryFunction, used to instantiate new drivers.
func NewDriverPool(factory FactoryFunction) *DriverPool {
	return &DriverPool{
		ScalingPolicy: DefaultScalingPolicy(),
		Logger:        log.New(nil),

		factory: factory,
	}
}

// SetLabels attaches the language/image labels to the pool's logger and binds
// the pool's Prometheus metrics to those label values.
// Expects labels[0] to be the language and labels[1] the image.
func (dp *DriverPool) SetLabels(labels []string) {
	dp.Logger = log.DefaultLogger.With(log.Fields{
		"language": labels[0],
		"image":    labels[1],
	})
	dp.metrics.spawn.total = driversSpawned.WithLabelValues(labels...)
	dp.metrics.spawn.err = driversSpawnErrors.WithLabelValues(labels...)
	dp.metrics.spawn.kill = driversKilled.WithLabelValues(labels...)

	dp.metrics.scaling.total = driversRunning.WithLabelValues(labels...)
	dp.metrics.scaling.idle = driversIdle.WithLabelValues(labels...)
	dp.metrics.scaling.load = driversRequests.WithLabelValues(labels...)
	dp.metrics.scaling.target = driversTarget.WithLabelValues(labels...)
}

// Start starts the driver pool: it allocates all channels, launches the spawn,
// policy, and manager goroutines, and waits for the first driver instance to
// come up before returning.
func (dp *DriverPool) Start(ctx context.Context) error {
	if dp.poolCtx != nil {
		return ErrPoolRunning.New()
	}
	select {
	case <-dp.stopped:
		return ErrPoolClosed.New()
	default:
	}
	// Yes, it's discouraged to use a long-lived context.
	// But an alternative is to re-implement a root Context, which is even worse.
	dp.poolCtx, dp.stop = context.WithCancel(context.Background())

	// This channel is read by the pool manager goroutine as a signal to rescale.
	// The scaling policy goroutine writes to this channel whenever the allowed number of
	// drivers changes. The channel must have a buffer of at least 1 and sends to the
	// channel should not block.
	dp.rescale = make(chan struct{}, 1)
	dp.stopped = make(chan struct{})
	dp.spawn = make(chan struct{})
	dp.spawnErr = make(chan error)
	dp.get = make(chan driverRequest)
	dp.put = make(chan Driver)
	dp.drivers.idle = make(map[Driver]struct{})
	dp.drivers.all = make(map[Driver]struct{})
	dp.targetSize.Set(1)

	dp.wg.Add(3)
	go func() {
		defer dp.wg.Done()
		dp.runSpawn(dp.poolCtx)
	}()
	go func() {
		defer dp.wg.Done()
		dp.runPolicy(dp.poolCtx)
	}()
	go func() {
		// closing stopped signals that the manager goroutine has fully drained
		defer close(dp.stopped)
		defer dp.wg.Done()
		dp.manageDrivers()
	}()

	// wait for a single instance to come up
	d, err := dp.getDriver(ctx)
	if err != nil {
		_ = dp.Stop()
		return err
	}
	if err := dp.putDriver(d); err != nil {
		return err
	}
	return nil
}

// runPolicy goroutine re-evaluates the scaling policy on a regular time interval and sets
// a target number of instances. The scaling itself will be performed by the manager goroutine.
func (dp *DriverPool) runPolicy(ctx context.Context) {
	ticker := time.NewTicker(policyDefaultTick)
	defer ticker.Stop()

	stop := ctx.Done()
	for {
		select {
		case <-stop:
			return
		case <-ticker.C:
		}
		// snapshot current load/idle counts under a read lock
		dp.drivers.RLock()
		total := dp.running.Value()
		load := dp.requests.Value()
		idle := len(dp.drivers.idle)
		dp.drivers.RUnlock()

		target := dp.ScalingPolicy.Scale(total, idle, load)
		if target < 1 {
			// there should be always at least 1 instance
			// TODO(dennwc): policies must never return 0 instances
			target = 1
		}
		if dp.metrics.scaling.total != nil {
			dp.metrics.scaling.total.Set(float64(total))
			dp.metrics.scaling.load.Set(float64(load))
			dp.metrics.scaling.idle.Set(float64(idle))
			dp.metrics.scaling.target.Set(float64(target))
		}
		old := dp.targetSize.Set(target)
		if old != target {
			// send a signal to the manager goroutine
			select {
			// the channel has buffer of 1 so it acts like a deferred signal
			// the send will fail only if the channel is already full, meaning
			// that the manager goroutine hasn't had time to receive the previous
			// signal yet; our new signal would be redundant, so it is dropped
			case dp.rescale <- struct{}{}:
			default:
			}
		}
	}
}

// spawnOne starts a new driver instance. It will keep trying to run it in case of a failure.
func (dp *DriverPool) spawnOne() {
	dp.spawning.Add(1)
	defer dp.spawning.Add(-1)

	// exponential backoff between consecutive start attempts
	ticker := backoff.NewTicker(backoff.NewExponentialBackOff())
	defer ticker.Stop()

	// keep trying in case of a failure
	ctx := dp.poolCtx
	stop := ctx.Done()
	for {
		if dp.metrics.spawn.total != nil {
			dp.metrics.spawn.total.Add(1)
		}
		d, err := dp.factory(ctx)
		if err == nil {
			dp.drivers.Lock()
			dp.drivers.all[d] = struct{}{}
			dp.running.Add(1)
			dp.drivers.Unlock()

			err = dp.putDriver(d)
			if err == nil {
				return // done
			}
		}
		if dp.metrics.spawn.err != nil {
			dp.metrics.spawn.err.Add(1)
		}
		dp.Logger.Errorf(err, "failed to start a driver")
		select {
		case <-stop:
			return // cancel
		case dp.spawnErr <- err:
			// a client goroutine accepted the error
		case _, ok := <-ticker.C:
			if !ok {
				// backoff gave up: the driver is considered permanently broken
				dp.Logger.Errorf(err, "driver keeps failing, closing the pool; error")
				// can only run in a goroutine, since Stop will wait for runSpawn to return
				go dp.Stop()
				return
			}
		}
	}
}

// runSpawn is a goroutine responsible for spawning new instances in the background.
func (dp *DriverPool) runSpawn(ctx context.Context) {
	stop := ctx.Done()
	for {
		select {
		case <-stop:
			return
		case <-dp.spawn:
			dp.spawnOne()
		}
	}
}

// peekIdle tries to get an idle driver from the pool. It won't wait for the driver to
// become idle, instead it will return false if there are no idle drivers.
func (dp *DriverPool) peekIdle() (Driver, bool) {
	// fast check under a read lock before taking the write lock
	dp.drivers.RLock()
	n := len(dp.drivers.idle)
	dp.drivers.RUnlock()
	if n == 0 {
		return nil, false
	}
	dp.drivers.Lock()
	defer dp.drivers.Unlock()
	// take an arbitrary driver from the idle set
	for d := range dp.drivers.idle {
		delete(dp.drivers.idle, d)
		return d, true
	}
	// the set may have been drained between the two locks
	return nil, false
}

// setIdle returns the driver to an idle state.
func (dp *DriverPool) setIdle(d Driver) {
	dp.drivers.Lock()
	defer dp.drivers.Unlock()
	dp.drivers.idle[d] = struct{}{}
}

// killDriver stops and removes the driver from the queue.
func (dp *DriverPool) killDriver(d Driver, info string, err error) {
	if dp.metrics.spawn.kill != nil {
		dp.metrics.spawn.kill.Add(1)
	}
	if err != nil {
		dp.Logger.Errorf(err, "killDriver(%s): %s", d.ID(), info)
	} else {
		dp.Logger.Infof("killDriver(%s): %s", d.ID(), info)
	}
	// remove from both sets and update counters before actually stopping it
	dp.drivers.Lock()
	delete(dp.drivers.all, d)
	delete(dp.drivers.idle, d)
	dp.running.Add(-1)
	dp.exited.Add(1)
	dp.drivers.Unlock()

	if err := d.Stop(); err != nil {
		dp.Logger.Errorf(err, "error removing stopped driver")
	}
}

// scaleDiff returns current difference between the target number of instances and the
// current number of running instances. This is positive when scaling up, and negative
// when scaling down.
func (dp *DriverPool) scaleDiff() int {
	total := dp.running.Value()
	return dp.targetSize.Value() - total
}

// rescaleLater interrupts the scaling and serves the request first.
// It will make sure to continue scaling later.
func (dp *DriverPool) rescaleLater(req driverRequest) {
	// re-arm the rescale signal so the manager goroutine picks scaling up again
	select {
	case dp.rescale <- struct{}{}:
	default:
	}
	dp.waitOrScale(req)
}

// scale the driver pool to the current target number of instances.
func (dp *DriverPool) scale() {
	dn := dp.scaleDiff()
	if dn == 0 {
		return
	}
	stop := dp.poolCtx.Done()
	if dn < 0 {
		// scale down
		for i := 0; i < -dn; i++ {
			select {
			case <-stop:
				return
			case req := <-dp.get:
				// no idle drivers, and there is a client waiting for us
				// do the scaling "inline" while serving the request
				dp.rescaleLater(req)
				return
			case d := <-dp.put:
				// prefer to kill drivers that are returned by clients instead of idle ones
				// idle map may be accessed without the management goroutine, thus it's more
				// valuable to keep it full
				dp.killDriver(d, "scale down - kill driver returned by client", nil)
				continue
			default:
			}
			// only idle drivers remain - start killing those
			if d, ok := dp.peekIdle(); ok {
				dp.killDriver(d, "scale down - kill idle driver", nil)
				continue
			}
			// no drivers are idle, only way to downscale is to wait for clients to put
			// their drivers back to the pool
			select {
			case <-stop:
				return
			case req := <-dp.get:
				dp.rescaleLater(req)
				return
			case d := <-dp.put:
				dp.killDriver(d, "scale down - no drivers are idle", nil)
			}
		}
		return
	}
	// scale up
	for i := 0; i < dn; i++ {
		select {
		case req := <-dp.get:
			dp.rescaleLater(req)
			return
		case <-stop:
			return
		case d := <-dp.put:
			// received some existing instance
			dp.setIdle(d)
			// an existing instance doesn't count toward this scale-up round;
			// redo this iteration and refresh the target diff
			i--
			dn = dp.scaleDiff()
		case dp.spawn <- struct{}{}:
			// spawn request sent to the spawn goroutine
			select {
			case <-stop:
				return
			case d := <-dp.put:
				dp.setIdle(d)
			case <-dp.spawnErr:
				// ignore - already printed to the log
			}
		}
	}
}

// returnDriver will either return the driver to the client,
// or will return it to the idle driver queue.
func (dp *DriverPool) returnDriver(req driverRequest, d Driver) {
	select {
	case <-req.cancel:
		// client gave up while we were fetching a driver - keep it idle
		dp.setIdle(d)
	case req.out <- d:
	}
}

// scaleUp serves the user request, assuming that the pool is allowed to scale up.
func (dp *DriverPool) scaleUp(req driverRequest) {
	select {
	case d := <-dp.put:
		// another client returned a driver - hand it over directly
		dp.returnDriver(req, d)
	case err := <-dp.spawnErr:
		req.err <- err
	case dp.spawn <- struct{}{}:
		// spawn request sent to the spawn goroutine
		select {
		case d := <-dp.put:
			dp.returnDriver(req, d)
		case err := <-dp.spawnErr:
			req.err <- err
		case <-req.cancel:
		}
	case <-req.cancel:
	}
}

// scaleDown serves the user request, assuming that the pool is scaling down, or not
// allowed to scale up anymore. It returns the flag whether the request was fulfilled.
func (dp *DriverPool) scaleDown(req driverRequest, exact bool) bool {
	select {
	case <-req.cancel:
		return true
	case err := <-dp.spawnErr:
		req.err <- err
		return true
	case d := <-dp.put:
		if exact {
			// exactly the right amount of instances
			dp.returnDriver(req, d)
			return true
		}
		// bad luck - we are scaling down
		// TODO(dennwc): add some metric to track if there are cases when the
		//               scaling policy asks us to drain and then asks to scale
		//               back up - we could have returned this driver to the
		//               client instead
		dp.killDriver(d, "scaleDown", nil)
	case <-dp.rescale:
		// worth to re-evaluate scaling conditions
	}
	return false
}

// waitOrScale is executed on the manager goroutine. It will either scale the pool up,
// wait for an instance to become available, or scale the pool down. The function will
// block until the request is served or cancelled.
func (dp *DriverPool) waitOrScale(req driverRequest) {
	// Note that we don't care about the pool closing in this function.
	// If it happens, the client will receive this signal before we do and will
	// cancel the request anyway. This reduces the number of select statements.

	// loop allows to re-evaluate scaling conditions
	for {
		dn := dp.scaleDiff()
		if dp.running.Value()+dp.spawning.Value() == 0 {
			if dn < 0 {
				// This shouldn't really happen: pool is draining, but there are no running
				// instances. In any case, we don't want to deadlock here, so we will at least
				// cancel the request with an error.
				dp.Logger.Warningf("cannot serve the request: pool is draining")
				req.err <- ErrPoolClosed.New()
				return
			} else if dn == 0 {
				// No instances running and there are running requests in the background,
				// but the policy doesn't allow us to scale up.
				//
				// This may happen when there were no requests for a long time, and the
				// policy is not smart enough to allow us to run even a single instance.
				//
				// So we will pretend we are allowed to run one.
				dn = 1
			}
			// dn > 0
		}
		if d, ok := dp.peekIdle(); ok {
			dp.returnDriver(req, d)
			return
		}
		if dn > 0 {
			// allowed to scale up
			dp.scaleUp(req)
			return
		}
		// dn <= 0
		// not allowed to scale up or we are scaling down
		if dp.scaleDown(req, dn == 0) {
			return
		}
	}
}

// drain waits until all instances are stopped. Should only be called from the
// manager goroutine. It assumes the stop channel already triggered.
func (dp *DriverPool) drain() {
	defer dp.targetSize.Set(0)
	// first kill everything that is already idle
	for {
		d, ok := dp.peekIdle()
		if !ok {
			break
		}
		dp.killDriver(d, "drain-peekIdle", nil)
	}
	// then wait for busy drivers to be returned by clients
	for dp.running.Value() > 0 {
		d := <-dp.put
		dp.killDriver(d, "drain-put", nil)
	}
}

// manageDrivers is the main goroutine responsible for managing drivers.
// It will accept all client requests for drivers if there are no drivers in the idle state.
// It will also take care of draining instances when the pool closes.
func (dp *DriverPool) manageDrivers() {
	defer dp.drain()

	stop := dp.poolCtx.Done()
	for {
		select {
		case d := <-dp.put:
			dp.setIdle(d)
		case req := <-dp.get:
			dp.waitOrScale(req)
		case <-dp.rescale:
			dp.scale()
		case <-stop:
			return
		}
	}
}

// getIdle returns an idle driver from the queue. It won't check the driver status.
// After the driver is returned, it's owned by the caller, but it still counts toward the
// pool scaling limit. The caller should put the instance back to the pool even if the
// driver fails.
func (dp *DriverPool) getIdle(rctx context.Context) (Driver, error) {
	// don't do anything if the request is already cancelled,
	// or we will have to "rollback" it later
	select {
	case <-rctx.Done():
		return nil, rctx.Err()
	default:
	}
	// fast path - get an idle driver directly from the pool
	// this function executes on the current goroutine
	if d, ok := dp.peekIdle(); ok {
		return d, nil
	}
	// slow path - ask the manager goroutine to pick an instance for us

	dp.requests.Add(1)
	defer dp.requests.Add(-1)

	// ensure we can cancel our request on return
	ctx, cancel := context.WithCancel(rctx)
	defer cancel()

	// err channel is buffered so the manager never blocks on reporting a failure
	resp := make(chan Driver)
	errc := make(chan error, 1)
	req := driverRequest{
		out:    resp,
		err:    errc,
		cancel: ctx.Done(),
	}
	stop := dp.poolCtx.Done()
	select {
	case <-req.cancel:
		// parent context cancelled (same as rctx.Done())
		return nil, ctx.Err()
	case err := <-errc:
		return nil, err
	case dp.get <- req:
		// send request to get a driver
		select {
		case <-req.cancel:
			return nil, ctx.Err()
		case err := <-errc:
			return nil, err
		case d, ok := <-resp:
			if ok {
				return d, nil
			}
			// closed resp channel means the pool is shutting down
		case <-stop:
		}
	case <-stop:
	}
	return nil, ErrPoolClosed.New()
}

// putDriver returns the driver to the pool.
func (dp *DriverPool) putDriver(d Driver) error {
	// verify the driver is still alive before handing it back
	if err := dp.checkStatus(d); err != nil {
		return err
	}
	select {
	case <-dp.poolCtx.Done():
		dp.killDriver(d, "putDriver", dp.poolCtx.Err())
		return ErrPoolClosed.New()
	case dp.put <- d:
	}
	return nil
}

// checkStatus will check if driver is still active. If not, the function returns an error
// and removes the driver from the pool.
func (dp *DriverPool) checkStatus(d Driver) error {
	status, err := d.Status()
	if err != nil {
		dp.killDriver(d, "error getting driver status, removing", err)
		return err
	} else if status != protocol.Running {
		dp.killDriver(d, "removing stopped driver", nil)
		return errDriverStopped.New()
	}
	return nil
}

// FunctionCtx is a function to be executed using a given driver.
type FunctionCtx func(ctx context.Context, d Driver) error

// Execute executes the given Function in the first available driver instance.
// It gets a driver from the pool and forwards the request to it. If all drivers
// are busy, it will return an error after the timeout passes. If the DriverPool
// is closed, an error will be returned.
//
// Deprecated: use ExecuteCtx instead.
func (dp *DriverPool) Execute(c FunctionCtx, timeout time.Duration) error {
	if timeout == 0 {
		// historical default deadline for the deprecated entry point
		timeout = 5 * time.Second
	}
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return dp.ExecuteCtx(ctx, c)
}

// ExecuteCtx executes the given Function in the first available driver instance.
// It gets a driver from the pool and forwards the request to it. If all drivers
// are busy, it will return an error after the timeout passes. If the DriverPool
// is closed, an error will be returned.
func (dp *DriverPool) ExecuteCtx(rctx context.Context, c FunctionCtx) error {
	sp, ctx := opentracing.StartSpanFromContext(rctx, "bblfshd.pool.Execute")
	defer sp.Finish()

	d, err := dp.getDriver(ctx)
	if err != nil {
		dp.errors.Add(1)
		return err
	}
	// NOTE(review): putDriver's error is intentionally dropped here; a dead
	// driver is already removed from the pool by putDriver itself.
	defer dp.putDriver(d)

	if err := c(ctx, d); err != nil {
		dp.errors.Add(1)
		return err
	}
	dp.success.Add(1)
	return nil
}

// getDriver returns an idle driver instance. It will ensure that driver is running.
func (dp *DriverPool) getDriver(rctx context.Context) (Driver, error) {
	sp, ctx := opentracing.StartSpanFromContext(rctx, "bblfshd.pool.getDriver")
	defer sp.Finish()

	if dp.poolCtx == nil {
		// not running
		return nil, ErrPoolClosed.New()
	}
	for {
		d, err := dp.getIdle(ctx)
		if ErrPoolClosed.Is(err) {
			return nil, err
		} else if err != nil {
			dp.Logger.Warningf("unable to allocate a driver instance: %s", err)
			return nil, err
		}
		// getIdle doesn't check the driver status, so verify it here;
		// checkStatus removes a dead driver from the pool on failure
		if dp.checkStatus(d) == nil {
			return d, nil
		}
		// retry until the deadline
	}
}

// Current returns a list of the current instances from the pool, it includes
// the running ones and those being stopped.
func (dp *DriverPool) Current() []Driver {
	dp.drivers.RLock()
	defer dp.drivers.RUnlock()

	list := make([]Driver, 0, len(dp.drivers.all))
	for d := range dp.drivers.all {
		list = append(list, d)
	}
	return list
}

// State returns the current state of the driver pool as a snapshot of its counters.
func (dp *DriverPool) State() *protocol.DriverPoolState {
	return &protocol.DriverPoolState{
		Wanted:  dp.targetSize.Value(),
		Running: dp.running.Value(),
		Waiting: dp.requests.Value(),
		Success: dp.success.Value(),
		Errors:  dp.errors.Value(),
		Exited:  dp.exited.Value(),
	}
}

// Stop stops the driver pool, including all its underlying driver instances.
func (dp *DriverPool) Stop() error {
	if dp.poolCtx == nil {
		return nil // not running
	}
	select {
	case <-dp.poolCtx.Done():
		// shutdown already in progress - wait for the manager to finish draining
		<-dp.stopped
		return ErrPoolClosed.New()
	case <-dp.stopped:
		return ErrPoolClosed.New()
	default:
		dp.stop()
		dp.wg.Wait()
		<-dp.stopped
		return nil
	}
}

// atomicInt is a small int counter safe for concurrent use.
type atomicInt struct {
	val int32
}

// Set stores n and returns the previous value.
func (c *atomicInt) Set(n int) int {
	return int(atomic.SwapInt32(&c.val, int32(n)))
}

// Add atomically adds n (which may be negative) to the counter.
func (c *atomicInt) Add(n int) {
	atomic.AddInt32(&c.val, int32(n))
}

// Value atomically loads the current value.
func (c *atomicInt) Value() int {
	return int(atomic.LoadInt32(&c.val))
}

// ScalingPolicy specifies whether instances should be started or stopped to
// cope with load.
type ScalingPolicy interface {
	// Scale takes the total number of active instances, number idle instances and the
	// number of requests waiting to get a driver instance. Idle may not be zero even if
	// number of waiting requests is non-zero.
	// Scale returns the new target number of instances to keep running. This number must
	// not be less than 1.
	Scale(total, idle, waiting int) int
}

// defaultScalingPolicy is the same as DefaultScalingPolicy, but has no window.
func defaultScalingPolicy() ScalingPolicy {
	min := DefaultMinInstancesPerDriver
	if min <= 0 {
		// fall back to the max when no minimum is configured
		min = DefaultMaxInstancesPerDriver
	}
	return MinMax(
		min, DefaultMaxInstancesPerDriver,
		AIMD(policyDefaultScale, policyDefaultDownscale),
	)
}

// DefaultScalingPolicy returns a new instance of the default scaling policy.
// Instances returned by this function should not be reused.
func DefaultScalingPolicy() ScalingPolicy {
	// number of load samples that fit in the policy window
	windowIn := int(policyDefaultWindow / policyDefaultTick)
	return TargetMovingAverage(defaultPolicyTargetWindow,
		MovingAverage(windowIn, defaultScalingPolicy()))
}

// newMovingAverage creates a moving average accumulator holding up to window samples.
func newMovingAverage(window int) *movingAverage {
	return &movingAverage{samples: make([]int, 0, window)}
}

// movingAverage is a fixed-size ring buffer of int samples with a running sum.
// Not safe for concurrent use.
type movingAverage struct {
	samples []int // ring buffer; capacity fixes the window size
	next    int   // index of the next slot to overwrite once the buffer is full
	sum     int   // running sum of all stored samples
}

// AddSample folds v into the window and returns the ceiling of the current average.
func (m *movingAverage) AddSample(v int) int {
	if len(m.samples) < cap(m.samples) {
		// window not yet full - just append
		m.sum += v
		m.samples = append(m.samples, v)
		m.next++
	} else {
		// window full - overwrite the oldest sample and adjust the sum
		next := m.next % cap(m.samples)
		m.sum = (m.sum + v) - m.samples[next]
		m.samples[next] = v
		m.next = next + 1
	}
	return int(math.Ceil(float64(m.sum) / float64(len(m.samples))))
}

type loadMovingAverage struct {
	sub   ScalingPolicy // policy that will use an average
	loads *movingAverage
}

// MovingAverage computes a moving average of the load and forwards it to the
// underlying scaling policy. This policy is stateful and not thread-safe, do not
// reuse its instances for multiple pools.
func MovingAverage(window int, p ScalingPolicy) ScalingPolicy {
	return &loadMovingAverage{
		sub:   p,
		loads: newMovingAverage(window),
	}
}

func (p *loadMovingAverage) Scale(total, idle, load int) int {
	avg := p.loads.AddSample(load)
	return p.sub.Scale(total, idle, avg)
}

type targetMovingAverage struct {
	sub     ScalingPolicy // policy that we will average
	targets *movingAverage
}

// TargetMovingAverage computes a moving average of the target instance count.
// This policy is stateful and not thread-safe, do not reuse its instances for multiple pools.
func TargetMovingAverage(window int, p ScalingPolicy) ScalingPolicy { return &targetMovingAverage{ sub: p, targets: newMovingAverage(window), } } func (p *targetMovingAverage) Scale(total, idle, load int) int { target := p.sub.Scale(total, idle, load) return p.targets.AddSample(target) } type minMax struct { sub ScalingPolicy // policy to take min-max from min, max int } // MinMax wraps a ScalingPolicy and applies a minimum and maximum to the number // of instances. func MinMax(min, max int, p ScalingPolicy) ScalingPolicy { if min < 1 { min = 1 } return &minMax{ sub: p, min: min, max: max, } } func (p *minMax) Scale(total, idle, load int) int { v := p.sub.Scale(total, idle, load) if v > p.max { return p.max } if v < p.min { return p.min } return v } type aimd struct { add int mul float64 } // AIMD returns a ScalingPolicy of additive increase / multiplicative decrease. // Increases are of min(add, load). Decreases are of (unused * mul). func AIMD(add int, mul float64) ScalingPolicy { return &aimd{add: add, mul: mul} } func (p *aimd) Scale(total, idle, waiting int) int { load := waiting - idle if load >= 0 { dn := p.add if p.add > load { dn = load } total += dn } else { unused := -load total -= int(math.Ceil(float64(unused) * p.mul)) } if total < 1 { total = 1 // must not return 0 } return total }
{ "pile_set_name": "Github" }
/*
 * gunicode-win32-uwp.c: UWP unicode support.
 *
 * Copyright 2016 Microsoft
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include <config.h>
#include <glib.h>

#if G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define CODESET 1
#include <windows.h>

extern const char *eg_my_charset;
static gboolean is_utf8;

/*
 * g_get_charset:
 * Reports the process's ANSI code page as a "CP<n>" string, caching the
 * result in eg_my_charset on first call. Returns whether the charset is
 * UTF-8.
 *
 * NOTE(review): is_utf8 is only ever assigned FALSE here, so this always
 * reports non-UTF-8 even when the code page is 65001 - confirm whether that
 * is intended on UWP.
 * NOTE(review): the GetCPInfoExW return value is not checked; on failure
 * cp_info.CodePage would be read uninitialized.
 */
gboolean
g_get_charset (G_CONST_RETURN char **charset)
{
	if (eg_my_charset == NULL) {
		/* "CP" + up to 10 digits of a UINT + NUL fits in 14 bytes */
		static char buf [14];
		CPINFOEXW cp_info;
		GetCPInfoExW (CP_ACP, 0, &cp_info);

		sprintf (buf, "CP%u", cp_info.CodePage);
		eg_my_charset = buf;
		is_utf8 = FALSE;
	}

	if (charset != NULL)
		*charset = eg_my_charset;

	return is_utf8;
}

#else /* G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT) */

#ifdef _MSC_VER
// Quiet Visual Studio linker warning, LNK4221, in cases when this source file intentional ends up empty.
void __mono_win32_gunicode_win32_uwp_quiet_lnk4221(void) {}
#endif
#endif /* G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT) */
{ "pile_set_name": "Github" }
/*
 * This file is part of the PSL software.
 * Copyright 2011-2015 University of Maryland
 * Copyright 2013-2019 The Regents of the University of California
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.linqs.psl.model;

import org.linqs.psl.application.ModelApplication;
import org.linqs.psl.model.rule.Rule;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;

/**
 * A probabilistic soft logic model.
 * Encapsulates a set of {@link Rule Rules}.
 */
public class Model {
    private static final Logger log = LoggerFactory.getLogger(Model.class);

    // Insertion-ordered collection of the model's rules.
    protected final List<Rule> rules;

    public Model() {
        rules = new LinkedList<Rule>();
    }

    /**
     * Returns a read-only view of this model's rules.
     */
    public List<Rule> getRules() {
        return Collections.unmodifiableList(rules);
    }

    /**
     * Adds a Rule to this Model.
     * If the Rule is already in this Model, a warning is logged and the add is skipped.
     * A Rule that requires splitting is replaced by the rules produced by {@link Rule#split()}.
     */
    public void addRule(Rule rule) {
        if (rules.contains(rule)) {
            log.warn("Rule already added to this model, skipping add: " + rule);
            return;
        }

        if (!rule.requiresSplit()) {
            rules.add(rule);
            return;
        }

        log.info("Rule is being split into multiple rules: {}", rule);

        // This rule needs to be split into multiple rules.
        for (Rule splitRule : rule.split()) {
            rules.add(splitRule);
        }
    }

    /**
     * Removes a Rule from this Model.
     *
     * @throws IllegalArgumentException if the Rule is not in this Model.
     */
    public void removeRule(Rule rule) {
        if (!rules.contains(rule)) {
            throw new IllegalArgumentException("Rule (" + rule + ") not in this model.");
        }

        rules.remove(rule);
    }

    /**
     * Removes all rules from this Model.
     */
    public void clear() {
        rules.clear();
    }

    @Override
    public String toString() {
        StringBuilder s = new StringBuilder();
        s.append("Model:\n");
        s.append(asString());
        return s.toString();
    }

    /**
     * Create a model string that can be directly interpreted by the parser.
     */
    public String asString() {
        StringBuilder s = new StringBuilder();

        if (rules.size() > 0) {
            s.append(rules.get(0));
        }

        for (int i = 1; i < rules.size(); i++) {
            s.append("\n").append(rules.get(i));
        }

        return s.toString();
    }
}
{ "pile_set_name": "Github" }
/*
 * Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.tencentcloudapi.vpc.v20170312.models;

import com.tencentcloudapi.common.AbstractModel;
import com.google.gson.annotations.SerializedName;
import com.google.gson.annotations.Expose;
import java.util.HashMap;

/**
 * Request model for modifying an IPv6 translation rule.
 */
public class ModifyIp6RuleRequest extends AbstractModel{

    /**
    * Unique ID of the IPv6 translator instance, e.g. ip6-xxxxxxxx
    */
    @SerializedName("Ip6TranslatorId")
    @Expose
    private String Ip6TranslatorId;

    /**
    * Unique ID of the IPv6 translation rule, e.g. rule6-xxxxxxxx
    */
    @SerializedName("Ip6RuleId")
    @Expose
    private String Ip6RuleId;

    /**
    * New name of the IPv6 translation rule after modification
    */
    @SerializedName("Ip6RuleName")
    @Expose
    private String Ip6RuleName;

    /**
    * New IPv4 address of the IPv6 translation rule after modification
    */
    @SerializedName("Vip")
    @Expose
    private String Vip;

    /**
    * New IPv4 port of the IPv6 translation rule after modification
    */
    @SerializedName("Vport")
    @Expose
    private Long Vport;

    /**
     * Get the unique ID of the IPv6 translator instance, e.g. ip6-xxxxxxxx
     * @return Ip6TranslatorId Unique ID of the IPv6 translator instance, e.g. ip6-xxxxxxxx
     */
    public String getIp6TranslatorId() {
        return this.Ip6TranslatorId;
    }

    /**
     * Set the unique ID of the IPv6 translator instance, e.g. ip6-xxxxxxxx
     * @param Ip6TranslatorId Unique ID of the IPv6 translator instance, e.g. ip6-xxxxxxxx
     */
    public void setIp6TranslatorId(String Ip6TranslatorId) {
        this.Ip6TranslatorId = Ip6TranslatorId;
    }

    /**
     * Get the unique ID of the IPv6 translation rule, e.g. rule6-xxxxxxxx
     * @return Ip6RuleId Unique ID of the IPv6 translation rule, e.g. rule6-xxxxxxxx
     */
    public String getIp6RuleId() {
        return this.Ip6RuleId;
    }

    /**
     * Set the unique ID of the IPv6 translation rule, e.g. rule6-xxxxxxxx
     * @param Ip6RuleId Unique ID of the IPv6 translation rule, e.g. rule6-xxxxxxxx
     */
    public void setIp6RuleId(String Ip6RuleId) {
        this.Ip6RuleId = Ip6RuleId;
    }

    /**
     * Get the new name of the IPv6 translation rule after modification
     * @return Ip6RuleName New name of the IPv6 translation rule after modification
     */
    public String getIp6RuleName() {
        return this.Ip6RuleName;
    }

    /**
     * Set the new name of the IPv6 translation rule after modification
     * @param Ip6RuleName New name of the IPv6 translation rule after modification
     */
    public void setIp6RuleName(String Ip6RuleName) {
        this.Ip6RuleName = Ip6RuleName;
    }

    /**
     * Get the new IPv4 address of the IPv6 translation rule after modification
     * @return Vip New IPv4 address of the IPv6 translation rule after modification
     */
    public String getVip() {
        return this.Vip;
    }

    /**
     * Set the new IPv4 address of the IPv6 translation rule after modification
     * @param Vip New IPv4 address of the IPv6 translation rule after modification
     */
    public void setVip(String Vip) {
        this.Vip = Vip;
    }

    /**
     * Get the new IPv4 port of the IPv6 translation rule after modification
     * @return Vport New IPv4 port of the IPv6 translation rule after modification
     */
    public Long getVport() {
        return this.Vport;
    }

    /**
     * Set the new IPv4 port of the IPv6 translation rule after modification
     * @param Vport New IPv4 port of the IPv6 translation rule after modification
     */
    public void setVport(Long Vport) {
        this.Vport = Vport;
    }

    /**
     * Internal implementation, normal users should not use it.
     */
    public void toMap(HashMap<String, String> map, String prefix) {
        this.setParamSimple(map, prefix + "Ip6TranslatorId", this.Ip6TranslatorId);
        this.setParamSimple(map, prefix + "Ip6RuleId", this.Ip6RuleId);
        this.setParamSimple(map, prefix + "Ip6RuleName", this.Ip6RuleName);
        this.setParamSimple(map, prefix + "Vip", this.Vip);
        this.setParamSimple(map, prefix + "Vport", this.Vport);
    }
}
{ "pile_set_name": "Github" }
#include<libtransistor/display/display.h>
#include<libtransistor/types.h>
#include<libtransistor/util.h>
#include<libtransistor/internal_util.h>
#include<libtransistor/ipc/vi.h>
#include<libtransistor/loader_config.h>

// Process-wide display state shared by all callers of display_init().
static display_t display;
// Reference count pairing display_init()/display_finalize() calls.
static int display_initializations = 0;
// True when the am (applet) services were successfully initialized alongside vi.
static bool display_initialized_am = false;

// Initializes the display pipeline (gpu -> vi -> "Default" display).
// Reference-counted: only the first call does real work. Failure of am_init()
// is tolerated and recorded in display_initialized_am instead of being fatal.
result_t display_init() {
	if(display_initializations++ > 0) {
		return RESULT_OK;
	}
	result_t r;
	if((r = gpu_initialize()) != RESULT_OK) { goto fail; }
	if((r = vi_init()) != RESULT_OK) { goto fail_gpu; }
	if((r = vi_open_display("Default", &display)) != RESULT_OK) { goto fail_vi; }
	display_initialized_am = (r = am_init()) == RESULT_OK;
	return RESULT_OK;

fail_vi:
	vi_finalize();
fail_gpu:
	gpu_finalize();
fail:
	display_initializations--; // undo the increment so a later init can retry
	return r;
}

// Creates a managed layer on the default display and wraps it in *surface.
// Uses am services when they are available and the applet workaround is not
// active; otherwise creates the layer directly through vi and configures the
// layer stacks / z-order itself.
result_t display_open_layer(surface_t *surface) {
	INITIALIZATION_GUARD(display);
	result_t r;
	uint64_t layer_id;
	uint64_t aruid = loader_config.applet_workaround_aruid;
	bool using_am = display_initialized_am && !loader_config.applet_workaround_active;
	if(using_am) {
		r = am_iwc_acquire_foreground_rights();
		if(r != RESULT_OK) { goto fail; }
		r = am_iwc_get_applet_resource_user_id(&aruid);
		if(r != RESULT_OK) { goto fail; }
		r = am_isc_create_managed_display_layer(&layer_id);
		if(r != RESULT_OK) { goto fail; }
	} else {
		if((r = vi_create_managed_layer(1, &display, 0, &layer_id)) != RESULT_OK) { goto fail; }
	}
	igbp_t igbp;
	if((r = vi_open_layer("Default", layer_id, aruid, &igbp)) != RESULT_OK) { goto fail_managed_layer; }
	if((r = surface_create(surface, layer_id, igbp)) != RESULT_OK) { goto fail_igbp; }
	// 2 is an opaque vi scaling-mode constant — TODO confirm its meaning
	// against the vi service documentation.
	if((r = vi_iads_set_layer_scaling_mode(2, layer_id)) != RESULT_OK) { goto fail_surface; }
	if(!using_am) {
		// Without am, the layer must be added to the display's layer stacks and
		// assigned a z-order manually. The stack ids are opaque vi constants.
		uint32_t stacks[] = {0x0, 0x2, 0x4, 0x5, 0xA};
		for(size_t i = 0; i < ARRAY_LENGTH(stacks); i++) {
			if((r = vi_imds_add_to_layer_stack(stacks[i], layer_id)) != RESULT_OK) { goto fail_surface; }
		}
		if((r = vi_isds_set_layer_z(layer_id, 2)) != RESULT_OK) { goto fail_surface; }
	}
	return RESULT_OK;

fail_surface:
	surface_destroy(surface); // surface takes ownership of IGBP and layer
	return r;
fail_igbp:
	vi_adjust_refcount(igbp.igbp_binder.handle, -1, 1);
	vi_close_layer(layer_id);
fail_managed_layer:
	// NOTE(review): this vi call also runs for layers created via
	// am_isc_create_managed_display_layer — confirm that is the intended cleanup.
	vi_destroy_managed_layer(layer_id);
fail:
	return r;
}

// Tears down a layer opened by display_open_layer().
// NOTE(review): surface->igbp and surface->layer_id are read AFTER
// surface_destroy(surface); this is only safe if surface_destroy leaves those
// fields intact — confirm against surface_destroy's implementation.
void display_close_layer(surface_t *surface) {
	INITIALIZATION_GUARD_RETURN_VOID(display);
	surface_destroy(surface);
	vi_adjust_refcount(surface->igbp.igbp_binder.handle, -1, 1);
	vi_close_layer(surface->layer_id);
	vi_destroy_managed_layer(surface->layer_id);
}

// Returns the display's vsync event handle, fetching it lazily on first use.
result_t display_get_vsync_event(revent_h *event) {
	INITIALIZATION_GUARD(display);
	if(display.vsync == 0) {
		result_t r;
		if((r = vi_get_display_vsync_event(&display)) != RESULT_OK) {
			return r;
		}
	}
	*event = display.vsync;
	return RESULT_OK;
}

// Unconditionally releases everything display_init() acquired and resets the
// reference count. Shared by display_finalize() and the module destructor.
static void display_force_finalize() {
	if(display_initialized_am) {
		am_finalize();
		display_initialized_am = false;
	}
	vi_close_display(&display);
	vi_finalize();
	gpu_finalize();
	display_initializations = 0;
}

// Reference-counted teardown; real cleanup happens when the count reaches zero.
void display_finalize() {
	if(--display_initializations == 0) {
		display_force_finalize();
	}
}

// Safety net run at unload: cleans up if some caller leaked an initialization.
static __attribute__((destructor)) void display_destruct() {
	if(display_initializations > 0) {
		display_force_finalize();
	}
}
{ "pile_set_name": "Github" }
/****************************************************** * * * 文件夹: ▲05 数组和广义表\03 RowLinkSparseMatrix * * * * 文件名: RowLinkSparseMatrix.h * * * * 内 容: 行逻辑链接的顺序表(稀疏矩阵)相关操作列表 * * * ******************************************************/ #ifndef ROWLINKSPARSEMATRIX_H #define ROWLINKSPARSEMATRIX_H #include <stdio.h> #include <stdarg.h> //提供宏va_list、va_start、va_arg、va_end #include "../../▲01 绪论/Status.h" //**▲01 绪论**// #include "../../▲01 绪论/Scanf.c" //**▲01 绪论**// /* 宏定义 */ #define MAXSIZE 400 //假设非零元个数的最大值为400 #define MAXRC 20 //各行元素个数的最大值 /* 行逻辑链接的稀疏矩阵类型定义 */ typedef int MElemType_RLSq; typedef struct { int i, j; //该非零元的行下标和列下标 MElemType_RLSq e; }Triple; typedef struct { Triple data[MAXSIZE+1]; //非零元三元组表data[0]未用 int rpos[MAXRC+1]; //各行第一个非零元在三元组表中的位置表 int mu, nu, tu; //矩阵的行数、列数和非零元个数 }RLSMatrix; /* 行逻辑链接的顺序表(稀疏矩阵)基础操作 */ Status CreateSMatrix_RL(FILE *fp, int n, ...); /*━━━━━━━━┓ ┃(01)创建矩阵M。 ┃ ┗━━━━━━━━*/ void DestroySMatrix_RL(RLSMatrix *M); /*━━━━━━━┓ ┃(02)销毁矩阵。┃ ┗━━━━━━━*/ void PrintSMatrix_RL(RLSMatrix M); /*━━━━━━━┓ ┃(03)输出矩阵。┃ ┗━━━━━━━*/ void CopySMatrix_RL(RLSMatrix M, RLSMatrix *T); /*━━━━━━━━┓ ┃(04)矩阵的复制。┃ ┗━━━━━━━━*/ Status AddSMatri_RL(RLSMatrix M, RLSMatrix N, RLSMatrix *Q); /*━━━━━━━━┓ ┃(05)Q = M + N。 ┃ ┗━━━━━━━━*/ Status SubSMatrix_RL(RLSMatrix M, RLSMatrix N, RLSMatrix *Q); /*━━━━━━━━┓ ┃(06)Q = M - N。 ┃ ┗━━━━━━━━*/ Status MultSMatrix_RL(RLSMatrix M, RLSMatrix N, RLSMatrix *Q); /*━━━━━━━━━━━━┓ ┃(07)算法5.3:Q = M * N。┃ ┗━━━━━━━━━━━━*/ void TransposeSMatrix_RL(RLSMatrix M, RLSMatrix *T); /*━━━━━━━┓ ┃(08)矩阵转置。┃ ┗━━━━━━━*/ void FastTransposeSMatrix_RL(RLSMatrix M, RLSMatrix *T); /*━━━━━━━━━┓ ┃(09)矩阵快速转置。┃ ┗━━━━━━━━━*/ #endif
{ "pile_set_name": "Github" }
DROP TABLE IF EXISTS t1; CREATE TABLE t1(c1 CHAR(30) NOT NULL, c2 CHAR(20) NOT NULL, c3 CHAR(10) NOT NULL); SHOW TABLES; Tables_in_test t1 SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` char(30) NOT NULL, `c2` char(20) NOT NULL, `c3` char(10) NOT NULL ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci ALTER TABLE t1 ADD PRIMARY KEY (c1,c2); SHOW TABLES; Tables_in_test t1 SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` char(30) NOT NULL, `c2` char(20) NOT NULL, `c3` char(10) NOT NULL, PRIMARY KEY (`c1`,`c2`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci DROP TABLE t1; SHOW TABLES; Tables_in_test CREATE TABLE t1(c1 VARCHAR(30) NOT NULL, c2 VARCHAR(20) NOT NULL, c3 VARCHAR(10) NOT NULL); SHOW TABLES; Tables_in_test t1 SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` varchar(30) NOT NULL, `c2` varchar(20) NOT NULL, `c3` varchar(10) NOT NULL ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci ALTER TABLE t1 ADD PRIMARY KEY (c1,c2); SHOW TABLES; Tables_in_test t1 SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` varchar(30) NOT NULL, `c2` varchar(20) NOT NULL, `c3` varchar(10) NOT NULL, PRIMARY KEY (`c1`,`c2`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci DROP TABLE t1; SHOW TABLES; Tables_in_test CREATE TABLE t1(c1 BINARY(30) NOT NULL, c2 BINARY(20) NOT NULL, c3 BINARY(10) NOT NULL); SHOW TABLES; Tables_in_test t1 SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` binary(30) NOT NULL, `c2` binary(20) NOT NULL, `c3` binary(10) NOT NULL ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci ALTER TABLE t1 ADD PRIMARY KEY (c1,c2); SHOW TABLES; Tables_in_test t1 SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` binary(30) NOT NULL, `c2` binary(20) NOT NULL, `c3` binary(10) NOT NULL, PRIMARY KEY (`c1`,`c2`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci 
DROP TABLE t1; SHOW TABLES; Tables_in_test CREATE TABLE t1(c1 VARBINARY(30) NOT NULL, c2 VARBINARY(20) NOT NULL, c3 VARBINARY(10) NOT NULL); SHOW TABLES; Tables_in_test t1 SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` varbinary(30) NOT NULL, `c2` varbinary(20) NOT NULL, `c3` varbinary(10) NOT NULL ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci ALTER TABLE t1 ADD PRIMARY KEY (c1,c2); SHOW TABLES; Tables_in_test t1 SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` varbinary(30) NOT NULL, `c2` varbinary(20) NOT NULL, `c3` varbinary(10) NOT NULL, PRIMARY KEY (`c1`,`c2`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci DROP TABLE t1; SHOW TABLES; Tables_in_test
{ "pile_set_name": "Github" }
#region License // Copyright (c) 2016 Tyler Brinkley // // Permission is hereby granted, free of charge, to any person // obtaining a copy of this software and associated documentation // files (the "Software"), to deal in the Software without // restriction, including without limitation the rights to use, // copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following // conditions: // // The above copyright notice and this permission notice shall be // included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES // OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT // HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, // WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR // OTHER DEALINGS IN THE SOFTWARE. 
#endregion

using System;
using System.Collections.Generic;
using System.ComponentModel;

namespace EnumsNET.Unsafe
{
#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member
    /// <summary>
    /// Obsolete backward-compatibility shim: every member delegates one-to-one
    /// to the corresponding *Unsafe method on <see cref="Enums"/>. No logic
    /// lives in this class.
    /// </summary>
    [Obsolete("UnsafeEnums members have moved to the Enums static class")]
    [EditorBrowsable(EditorBrowsableState.Never)]
    public static class UnsafeEnums
    {
        // -- Type information --
        public static Type GetUnderlyingType<TEnum>() => Enums.GetUnderlyingTypeUnsafe<TEnum>();

#if ICONVERTIBLE
        public static TypeCode GetTypeCode<TEnum>() => Enums.GetTypeCodeUnsafe<TEnum>();
#endif

        // -- Member enumeration --
        public static int GetMemberCount<TEnum>() => Enums.GetMemberCountUnsafe<TEnum>();

        public static int GetMemberCount<TEnum>(EnumMemberSelection selection) => Enums.GetMemberCountUnsafe<TEnum>(selection);

        public static IEnumerable<EnumMember<TEnum>> GetMembers<TEnum>() => Enums.GetMembersUnsafe<TEnum>();

        public static IEnumerable<EnumMember<TEnum>> GetMembers<TEnum>(EnumMemberSelection selection) => Enums.GetMembersUnsafe<TEnum>(selection);

        public static IEnumerable<string> GetNames<TEnum>() => Enums.GetNamesUnsafe<TEnum>();

        public static IEnumerable<string> GetNames<TEnum>(EnumMemberSelection selection) => Enums.GetNamesUnsafe<TEnum>(selection);

        public static IEnumerable<TEnum> GetValues<TEnum>() => Enums.GetValuesUnsafe<TEnum>();

        public static IEnumerable<TEnum> GetValues<TEnum>(EnumMemberSelection selection) => Enums.GetValuesUnsafe<TEnum>(selection);

        // -- ToObject (throwing conversions from raw values) --
        public static TEnum ToObject<TEnum>(object value) => Enums.ToObjectUnsafe<TEnum>(value);

        public static TEnum ToObject<TEnum>(object value, EnumValidation validation) => Enums.ToObjectUnsafe<TEnum>(value, validation);

        [CLSCompliant(false)]
        public static TEnum ToObject<TEnum>(sbyte value) => Enums.ToObjectUnsafe<TEnum>(value);

        [CLSCompliant(false)]
        public static TEnum ToObject<TEnum>(sbyte value, EnumValidation validation) => Enums.ToObjectUnsafe<TEnum>(value, validation);

        public static TEnum ToObject<TEnum>(byte value) => Enums.ToObjectUnsafe<TEnum>(value);

        public static TEnum ToObject<TEnum>(byte value, EnumValidation validation) => Enums.ToObjectUnsafe<TEnum>(value, validation);

        public static TEnum ToObject<TEnum>(short value) => Enums.ToObjectUnsafe<TEnum>(value);

        public static TEnum ToObject<TEnum>(short value, EnumValidation validation) => Enums.ToObjectUnsafe<TEnum>(value, validation);

        [CLSCompliant(false)]
        public static TEnum ToObject<TEnum>(ushort value) => Enums.ToObjectUnsafe<TEnum>(value);

        [CLSCompliant(false)]
        public static TEnum ToObject<TEnum>(ushort value, EnumValidation validation) => Enums.ToObjectUnsafe<TEnum>(value, validation);

        public static TEnum ToObject<TEnum>(int value) => Enums.ToObjectUnsafe<TEnum>(value);

        public static TEnum ToObject<TEnum>(int value, EnumValidation validation) => Enums.ToObjectUnsafe<TEnum>(value, validation);

        [CLSCompliant(false)]
        public static TEnum ToObject<TEnum>(uint value) => Enums.ToObjectUnsafe<TEnum>(value);

        [CLSCompliant(false)]
        public static TEnum ToObject<TEnum>(uint value, EnumValidation validation) => Enums.ToObjectUnsafe<TEnum>(value, validation);

        public static TEnum ToObject<TEnum>(long value) => Enums.ToObjectUnsafe<TEnum>(value);

        public static TEnum ToObject<TEnum>(long value, EnumValidation validation) => Enums.ToObjectUnsafe<TEnum>(value, validation);

        [CLSCompliant(false)]
        public static TEnum ToObject<TEnum>(ulong value) => Enums.ToObjectUnsafe<TEnum>(value);

        [CLSCompliant(false)]
        public static TEnum ToObject<TEnum>(ulong value, EnumValidation validation) => Enums.ToObjectUnsafe<TEnum>(value, validation);

        // -- TryToObject (non-throwing conversions from raw values) --
        public static bool TryToObject<TEnum>(object? value, out TEnum result) => Enums.TryToObjectUnsafe(value, out result);

        public static bool TryToObject<TEnum>(object? value, EnumValidation validation, out TEnum result) => Enums.TryToObjectUnsafe(value, out result, validation);

        [CLSCompliant(false)]
        public static bool TryToObject<TEnum>(sbyte value, out TEnum result) => Enums.TryToObjectUnsafe(value, out result);

        [CLSCompliant(false)]
        public static bool TryToObject<TEnum>(sbyte value, EnumValidation validation, out TEnum result) => Enums.TryToObjectUnsafe(value, out result, validation);

        public static bool TryToObject<TEnum>(byte value, out TEnum result) => Enums.TryToObjectUnsafe(value, out result);

        public static bool TryToObject<TEnum>(byte value, EnumValidation validation, out TEnum result) => Enums.TryToObjectUnsafe(value, out result, validation);

        public static bool TryToObject<TEnum>(short value, out TEnum result) => Enums.TryToObjectUnsafe(value, out result);

        public static bool TryToObject<TEnum>(short value, EnumValidation validation, out TEnum result) => Enums.TryToObjectUnsafe(value, out result, validation);

        [CLSCompliant(false)]
        public static bool TryToObject<TEnum>(ushort value, out TEnum result) => Enums.TryToObjectUnsafe(value, out result);

        [CLSCompliant(false)]
        public static bool TryToObject<TEnum>(ushort value, EnumValidation validation, out TEnum result) => Enums.TryToObjectUnsafe(value, out result, validation);

        public static bool TryToObject<TEnum>(int value, out TEnum result) => Enums.TryToObjectUnsafe(value, out result);

        public static bool TryToObject<TEnum>(int value, EnumValidation validation, out TEnum result) => Enums.TryToObjectUnsafe(value, out result, validation);

        [CLSCompliant(false)]
        public static bool TryToObject<TEnum>(uint value, out TEnum result) => Enums.TryToObjectUnsafe(value, out result);

        [CLSCompliant(false)]
        public static bool TryToObject<TEnum>(uint value, EnumValidation validation, out TEnum result) => Enums.TryToObjectUnsafe(value, out result, validation);

        public static bool TryToObject<TEnum>(long value, out TEnum result) => Enums.TryToObjectUnsafe(value, out result);

        public static bool TryToObject<TEnum>(long value, EnumValidation validation, out TEnum result) => Enums.TryToObjectUnsafe(value, out result, validation);

        [CLSCompliant(false)]
        public static bool TryToObject<TEnum>(ulong value, out TEnum result) => Enums.TryToObjectUnsafe(value, out result);

        [CLSCompliant(false)]
        public static bool TryToObject<TEnum>(ulong value, EnumValidation validation, out TEnum result) => Enums.TryToObjectUnsafe(value, out result, validation);

        // -- Validation --
        public static bool IsValid<TEnum>(TEnum value) => Enums.IsValidUnsafe(value);

        public static bool IsValid<TEnum>(TEnum value, EnumValidation validation) => Enums.IsValidUnsafe(value, validation);

        public static bool IsDefined<TEnum>(TEnum value) => Enums.IsDefinedUnsafe(value);

        public static TEnum Validate<TEnum>(TEnum value, string paramName) => Enums.ValidateUnsafe(value, paramName);

        public static TEnum Validate<TEnum>(TEnum value, string paramName, EnumValidation validation) => Enums.ValidateUnsafe(value, paramName, validation);

        // -- String formatting --
        public static string AsString<TEnum>(TEnum value) => Enums.AsStringUnsafe(value);

        public static string AsString<TEnum>(TEnum value, string? format) => Enums.AsStringUnsafe(value, format);

        public static string? AsString<TEnum>(TEnum value, EnumFormat format) => Enums.AsStringUnsafe(value, format);

        public static string? AsString<TEnum>(TEnum value, EnumFormat format0, EnumFormat format1) => Enums.AsStringUnsafe(value, format0, format1);

        public static string? AsString<TEnum>(TEnum value, EnumFormat format0, EnumFormat format1, EnumFormat format2) => Enums.AsStringUnsafe(value, format0, format1, format2);

        public static string? AsString<TEnum>(TEnum value, params EnumFormat[]? formats) => Enums.AsStringUnsafe(value, formats);

        public static string Format<TEnum>(TEnum value, string format) => Enums.FormatUnsafe(value, format);

        public static string? Format<TEnum>(TEnum value, params EnumFormat[] formats) => Enums.FormatUnsafe(value, formats);

        // -- Value conversion --
        public static object GetUnderlyingValue<TEnum>(TEnum value) => Enums.GetUnderlyingValueUnsafe(value);

        [CLSCompliant(false)]
        public static sbyte ToSByte<TEnum>(TEnum value) => Enums.ToSByteUnsafe(value);

        public static byte ToByte<TEnum>(TEnum value) => Enums.ToByteUnsafe(value);

        public static short ToInt16<TEnum>(TEnum value) => Enums.ToInt16Unsafe(value);

        [CLSCompliant(false)]
        public static ushort ToUInt16<TEnum>(TEnum value) => Enums.ToUInt16Unsafe(value);

        public static int ToInt32<TEnum>(TEnum value) => Enums.ToInt32Unsafe(value);

        [CLSCompliant(false)]
        public static uint ToUInt32<TEnum>(TEnum value) => Enums.ToUInt32Unsafe(value);

        public static long ToInt64<TEnum>(TEnum value) => Enums.ToInt64Unsafe(value);

        [CLSCompliant(false)]
        public static ulong ToUInt64<TEnum>(TEnum value) => Enums.ToUInt64Unsafe(value);

        // -- Equality and comparison --
        public static int GetHashCode<TEnum>(TEnum value) => Enums.GetHashCodeUnsafe(value);

        public static bool Equals<TEnum>(TEnum value, TEnum other) => Enums.EqualsUnsafe(value, other);

        public static int CompareTo<TEnum>(TEnum value, TEnum other) => Enums.CompareToUnsafe(value, other);

        // -- Member lookup --
        public static string? GetName<TEnum>(TEnum value) => Enums.GetNameUnsafe(value);

        public static AttributeCollection? GetAttributes<TEnum>(TEnum value) => Enums.GetAttributesUnsafe(value);

        public static EnumMember<TEnum>? GetMember<TEnum>(TEnum value) => Enums.GetMemberUnsafe(value);

        public static EnumMember<TEnum>? GetMember<TEnum>(string name) => Enums.GetMemberUnsafe<TEnum>(name);

        public static EnumMember<TEnum>? GetMember<TEnum>(string name, bool ignoreCase) => Enums.GetMemberUnsafe<TEnum>(name, ignoreCase);

        public static EnumMember<TEnum>? GetMember<TEnum>(string value, EnumFormat format) => Enums.GetMemberUnsafe<TEnum>(value, format);

        public static EnumMember<TEnum>? GetMember<TEnum>(string value, EnumFormat format0, EnumFormat format1) => Enums.GetMemberUnsafe<TEnum>(value, format0, format1);

        public static EnumMember<TEnum>? GetMember<TEnum>(string value, EnumFormat format0, EnumFormat format1, EnumFormat format2) => Enums.GetMemberUnsafe<TEnum>(value, format0, format1, format2);

        public static EnumMember<TEnum>? GetMember<TEnum>(string value, params EnumFormat[]? formats) => Enums.GetMemberUnsafe<TEnum>(value, formats);

        public static EnumMember<TEnum>? GetMember<TEnum>(string value, bool ignoreCase, EnumFormat format) => Enums.GetMemberUnsafe<TEnum>(value, ignoreCase, format);

        public static EnumMember<TEnum>? GetMember<TEnum>(string value, bool ignoreCase, EnumFormat format0, EnumFormat format1) => Enums.GetMemberUnsafe<TEnum>(value, ignoreCase, format0, format1);

        public static EnumMember<TEnum>? GetMember<TEnum>(string value, bool ignoreCase, EnumFormat format0, EnumFormat format1, EnumFormat format2) => Enums.GetMemberUnsafe<TEnum>(value, ignoreCase, format0, format1, format2);

        public static EnumMember<TEnum>? GetMember<TEnum>(string value, bool ignoreCase, params EnumFormat[]? formats) => Enums.GetMemberUnsafe<TEnum>(value, ignoreCase, formats);

        // -- Parse (throwing) --
        public static TEnum Parse<TEnum>(string value) => Enums.ParseUnsafe<TEnum>(value);

        public static TEnum Parse<TEnum>(string value, EnumFormat format) => Enums.ParseUnsafe<TEnum>(value, format);

        public static TEnum Parse<TEnum>(string value, EnumFormat format0, EnumFormat format1) => Enums.ParseUnsafe<TEnum>(value, format0, format1);

        public static TEnum Parse<TEnum>(string value, EnumFormat format0, EnumFormat format1, EnumFormat format2) => Enums.ParseUnsafe<TEnum>(value, format0, format1, format2);

        public static TEnum Parse<TEnum>(string value, params EnumFormat[]? formats) => Enums.ParseUnsafe<TEnum>(value, formats);

        public static TEnum Parse<TEnum>(string value, bool ignoreCase) => Enums.ParseUnsafe<TEnum>(value, ignoreCase);

        public static TEnum Parse<TEnum>(string value, bool ignoreCase, EnumFormat format) => Enums.ParseUnsafe<TEnum>(value, ignoreCase, format);

        public static TEnum Parse<TEnum>(string value, bool ignoreCase, EnumFormat format0, EnumFormat format1) => Enums.ParseUnsafe<TEnum>(value, ignoreCase, format0, format1);

        public static TEnum Parse<TEnum>(string value, bool ignoreCase, EnumFormat format0, EnumFormat format1, EnumFormat format2) => Enums.ParseUnsafe<TEnum>(value, ignoreCase, format0, format1, format2);

        public static TEnum Parse<TEnum>(string value, bool ignoreCase, params EnumFormat[]? formats) => Enums.ParseUnsafe<TEnum>(value, ignoreCase, formats);

        // -- TryParse (non-throwing) --
        public static bool TryParse<TEnum>(string? value, out TEnum result) => Enums.TryParseUnsafe(value, out result);

        public static bool TryParse<TEnum>(string? value, out TEnum result, EnumFormat format) => Enums.TryParseUnsafe(value, out result, format);

        public static bool TryParse<TEnum>(string? value, out TEnum result, EnumFormat format0, EnumFormat format1) => Enums.TryParseUnsafe(value, out result, format0, format1);

        public static bool TryParse<TEnum>(string? value, out TEnum result, EnumFormat format0, EnumFormat format1, EnumFormat format2) => Enums.TryParseUnsafe(value, out result, format0, format1, format2);

        public static bool TryParse<TEnum>(string? value, out TEnum result, params EnumFormat[]? formats) => Enums.TryParseUnsafe(value, out result, formats);

        public static bool TryParse<TEnum>(string? value, bool ignoreCase, out TEnum result) => Enums.TryParseUnsafe(value, ignoreCase, out result);

        public static bool TryParse<TEnum>(string? value, bool ignoreCase, out TEnum result, EnumFormat format) => Enums.TryParseUnsafe(value, ignoreCase, out result, format);

        public static bool TryParse<TEnum>(string? value, bool ignoreCase, out TEnum result, EnumFormat format0, EnumFormat format1) => Enums.TryParseUnsafe(value, ignoreCase, out result, format0, format1);

        public static bool TryParse<TEnum>(string? value, bool ignoreCase, out TEnum result, EnumFormat format0, EnumFormat format1, EnumFormat format2) => Enums.TryParseUnsafe(value, ignoreCase, out result, format0, format1, format2);

        public static bool TryParse<TEnum>(string? value, bool ignoreCase, out TEnum result, params EnumFormat[]? formats) => Enums.TryParseUnsafe(value, ignoreCase, out result, formats);
#pragma warning restore CS1591 // Missing XML comment for publicly visible type or member
    }
}
{ "pile_set_name": "Github" }
// -------------------------------------------------------------------------------------------------------------------- // <copyright file="AnalysisHelperTest.cs" company="https://github.com/StyleCop"> // MS-PL // </copyright> // <license> // This source code is subject to terms and conditions of the Microsoft // Public License. A copy of the license can be found in the License.html // file at the root of this distribution. If you cannot locate the // Microsoft Public License, please send an email to dlr@microsoft.com. // By using this source code in any fashion, you are agreeing to be bound // by the terms of the Microsoft Public License. You must not remove this // notice, or any other, from this software. // </license> // <summary> // This is a test class for AnalysisHelperTest and is intended // to contain all AnalysisHelperTest Unit Tests // </summary> // -------------------------------------------------------------------------------------------------------------------- namespace VSPackageUnitTest { using System; using Microsoft.VisualStudio.TestTools.UnitTesting; using StyleCop; using StyleCop.VisualStudio; using VSPackageUnitTest.Mocks; /// <summary> /// This is a test class for AnalysisHelperTest and is intended /// to contain all AnalysisHelperTest Unit Tests /// </summary> [TestClass] public class AnalysisHelperTest : BasicUnitTest { #region Public Methods /// <summary> /// A test for AnalysisHelper Constructor /// </summary> [TestMethod] public void AnalysisHelperConstructorTest() { IServiceProvider serviceProvider = new MockServiceProvider(); StyleCopCore core = new StyleCopCore(); FileAnalysisHelper specificTarget = new FileAnalysisHelper(serviceProvider, core); AnalysisHelper target = specificTarget; Assert.IsNotNull(target, "Unable to instantiate the AnalysisHelper class"); Assert.IsNotNull(target.Core, "AnalysisHelper.Core was null"); } #endregion } }
{ "pile_set_name": "Github" }
syntax = "proto3";
{ "pile_set_name": "Github" }
# Kodi Media Center language file # Addon Name: VDR VNSI Client # Addon id: pvr.vdr.vnsi # Addon Provider: FernetMenta, Team XBMC msgid "" msgstr "" "Project-Id-Version: XBMC Main\n" "Report-Msgid-Bugs-To: http://trac.xbmc.org/\n" "POT-Creation-Date: YEAR-MO-DA HO:MI+ZONE\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: Kodi Translation Team\n" "Language-Team: Chinese (Traditional) (http://www.transifex.com/projects/p/xbmc-main/language/zh_TW/)\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Language: zh_TW\n" "Plural-Forms: nplurals=1; plural=0;\n" msgctxt "#30000" msgid "VDR Hostname or IP" msgstr "VDR的主機名稱或IP" msgctxt "#30001" msgid "VNSI Port" msgstr "VNSI端口" msgctxt "#30002" msgid "Priority" msgstr "優先順序" msgctxt "#30003" msgid "Character Set Conversion" msgstr "字元集轉換" msgctxt "#30004" msgid "Connect timeout (s)" msgstr "連接超時(秒)" msgctxt "#30005" msgid "Allow VDR Messages" msgstr "允許VDR訊息" msgctxt "#30006" msgid "Read recordings from directory" msgstr "從目錄中讀取錄影檔案" msgctxt "#30007" msgid "VDR recordings directory" msgstr "VDR錄影目錄" msgctxt "#30008" msgid "Channel search" msgstr "頻道搜尋" msgctxt "#30009" msgid "Channel search - Settings" msgstr "頻道搜尋-設定" msgctxt "#30010" msgid "Start Channel search" msgstr "開始頻道搜尋" msgctxt "#30011" msgid "Source Type" msgstr "來源類型" msgctxt "#30012" msgid "TV channels" msgstr "電視頻道" msgctxt "#30013" msgid "Radio channels" msgstr "無線電廣播頻道" msgctxt "#30014" msgid "FTA channels" msgstr "FTA頻道" msgctxt "#30015" msgid "Scrambled channels" msgstr "加密頻道" msgctxt "#30016" msgid "HD channels" msgstr "高清頻道" msgctxt "#30017" msgid "Country" msgstr "國家" msgctxt "#30018" msgid "Cable Inversion" msgstr "電纜倒位" msgctxt "#30019" msgid "Cable Symbolrate" msgstr "電纜符碼率" msgctxt "#30020" msgid "Cable modulation" msgstr "電纜調變器" msgctxt "#30021" msgid "Terr Inversion" msgstr "Terr 倒位" msgctxt "#30022" msgid "Satellite" msgstr "衛星" msgctxt "#30023" msgid "ATSC Type" msgstr "ATSC類型" 
msgctxt "#30024" msgid "Back" msgstr "返回" msgctxt "#30025" msgid "Channel search - running... %i %%" msgstr "頻道搜索 - 運行中... %i %%" msgctxt "#30026" msgid "Type:" msgstr "類型:" msgctxt "#30027" msgid "Device:" msgstr "設備:" msgctxt "#30028" msgid "Scan: %i" msgstr "掃描: %i" msgctxt "#30029" msgid "Signal: %i %%" msgstr "信號: %i %%" msgctxt "#30030" msgid "New channels: %i" msgstr "新頻道: %i" msgctxt "#30031" msgid "All channels: %i" msgstr "所有頻道: %i" msgctxt "#30032" msgid "Analog TV" msgstr "類比電視" msgctxt "#30033" msgid "Analog Radio" msgstr "類比電台" msgctxt "#30034" msgid "Transponder:" msgstr "發射機應答器:" msgctxt "#30035" msgid "New channels" msgstr "新頻道" msgctxt "#30036" msgid "Channel search - Finished" msgstr "頻道搜索-已完成" msgctxt "#30037" msgid "No device available - exiting" msgstr "沒有可用設備-退出中" msgctxt "#30038" msgid "No DVB-S2 device available - trying fallback to DVB-S" msgstr "沒有可用的DVB-S2設備 - 試圖退回使用DVB-S" msgctxt "#30039" msgid "Running" msgstr "正在執行" msgctxt "#30040" msgid "Stopped" msgstr "已停止" msgctxt "#30041" msgid "Finished" msgstr "已完成" msgctxt "#30042" msgid "Channel search - Canceled" msgstr "頻道搜索 - 取消" msgctxt "#30043" msgid "Channel search - Error" msgstr "頻道搜索 - 錯誤" msgctxt "#30044" msgid "Lost connection to VDR Server" msgstr "VDR服務器失去連線" msgctxt "#30045" msgid "Connection to VDR Server restored" msgstr "VDR服務器連線已恢復" msgctxt "#30046" msgid "Create channel groups automatically on the server" msgstr "自動在服務器上建立頻道群組" msgctxt "#30047" msgid "Request Timeshift" msgstr "請求時間平移" msgctxt "#30100" msgid "VDR OSD" msgstr "VDR OSD選單" msgctxt "#30101" msgid "Setup" msgstr "系統設定" msgctxt "#30102" msgid "Controlling OSD - press info to exit" msgstr "控制OSD選單中 - 按 info 鍵離開" msgctxt "#30103" msgid "Select to control OSD" msgstr "選擇控制OSD選單" msgctxt "#30104" msgid "Timeshift Mode" msgstr "時間平移模式" msgctxt "#30105" msgid "Timeshift Buffer (RAM) x100 MB" msgstr "時間平移緩衝 (RAM) x 100MB" msgctxt "#30106" msgid "Timeshift Buffer (File) x1 GB" msgstr "時間平移緩衝 (檔案) x 1 GB" msgctxt 
"#30107" msgid "VDR Admin" msgstr "VDR 管理" msgctxt "#30108" msgid "Channels" msgstr "頻道"
{ "pile_set_name": "Github" }
/*
 * Gretty
 *
 * Copyright (C) 2013-2015 Andrey Hihlovskiy and contributors.
 *
 * See the file "LICENSE" for copying and usage permission.
 * See the file "CONTRIBUTORS" for complete list of contributors.
 */
package org.akhikhl.gretty;

import org.eclipse.jetty.security.ServerAuthException;
import org.eclipse.jetty.security.authentication.DeferredAuthentication;
import org.eclipse.jetty.security.authentication.SessionAuthentication;
import org.eclipse.jetty.security.authentication.SpnegoAuthenticator;
import org.eclipse.jetty.server.Authentication;
import org.eclipse.jetty.server.HttpChannel;
import org.eclipse.jetty.server.Request;
import org.eclipse.jetty.server.UserIdentity;
import org.eclipse.jetty.util.MultiMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpSession;

import static org.eclipse.jetty.security.authentication.FormAuthenticator.*;

/**
 * SPNEGO authenticator with session-based single sign-on: a successful login is
 * cached in the HTTP session (the same mechanism Jetty's FormAuthenticator
 * uses), so later requests in the same session are answered from the cache
 * instead of repeating the SPNEGO exchange.
 *
 * @author akhikhl
 */
public class SSOSpnegoAuthenticator extends SpnegoAuthenticator {

  private static final Logger LOG = LoggerFactory.getLogger(SSOSpnegoAuthenticator.class);

  public SSOSpnegoAuthenticator() {
  }

  public SSOSpnegoAuthenticator(String authMethod) {
    super(authMethod);
  }

  // "login" is copied without changes from FormAuthenticator: on success the
  // authentication result is stored in the session under __J_AUTHENTICATED so
  // validateRequest can reuse it.
  @Override
  public UserIdentity login(String username, Object password, ServletRequest request) {
    UserIdentity user = super.login(username,password,request);
    if (user!=null) {
      HttpSession session = ((HttpServletRequest)request).getSession(true);
      Authentication cached=new SessionAuthentication(getAuthMethod(),user,password);
      session.setAttribute(SessionAuthentication.__J_AUTHENTICATED, cached);
    }
    return user;
  }

  /**
   * First tries to satisfy the request from the session-cached authentication
   * (revalidating it against the login service); only when no valid cached
   * authentication exists does it fall back to the SPNEGO handshake in the
   * superclass.
   */
  @Override
  public Authentication validateRequest(ServletRequest req, ServletResponse res, boolean mandatory) throws ServerAuthException {
    HttpServletRequest request = (HttpServletRequest)req;
    if (!mandatory)
      return new DeferredAuthentication(this);

    // ++ copied from FormAuthenticator
    HttpSession session = request.getSession(true);

    // Look for cached authentication
    Authentication authentication = (Authentication) session.getAttribute(SessionAuthentication.__J_AUTHENTICATED);
    if (authentication != null) {
      // Has authentication been revoked?
      if (authentication instanceof Authentication.User &&
          _loginService!=null &&
          !_loginService.validate(((Authentication.User)authentication).getUserIdentity())) {
        LOG.debug("auth revoked {}",authentication);
        session.removeAttribute(SessionAuthentication.__J_AUTHENTICATED);
      } else {
        synchronized (session) {
          String j_uri=(String)session.getAttribute(__J_URI);
          if (j_uri!=null) {
            //check if the request is for the same url as the original and restore
            //params if it was a post
            LOG.debug("auth retry {}->{}",authentication,j_uri);
            StringBuffer buf = request.getRequestURL();
            if (request.getQueryString() != null)
              buf.append("?").append(request.getQueryString());

            if (j_uri.equals(buf.toString())) {
              // Re-inject the saved POST parameters into the current request so
              // the originally submitted form data is not lost.
              MultiMap<String> j_post = (MultiMap<String>)session.getAttribute(__J_POST);
              if (j_post!=null) {
                LOG.debug("auth rePOST {}->{}",authentication,j_uri);
                Request base_request = HttpChannel.getCurrentHttpChannel().getRequest();
                base_request.setContentParameters(j_post);
              }
              session.removeAttribute(__J_URI);
              session.removeAttribute(__J_METHOD);
              session.removeAttribute(__J_POST);
            }
          }
        }
        LOG.debug("auth {}",authentication);
        return authentication;
      }
    }
    // -- copied from FormAuthenticator

    // No (valid) cached authentication: perform the actual SPNEGO exchange.
    return super.validateRequest(req, res, mandatory);
  }
}
{ "pile_set_name": "Github" }
{ "CVE_data_meta": { "ASSIGNER": "cve@mitre.org", "ID": "CVE-2008-1204", "STATE": "PUBLIC" }, "affects": { "vendor": { "vendor_data": [ { "product": { "product_data": [ { "product_name": "n/a", "version": { "version_data": [ { "version_value": "n/a" } ] } } ] }, "vendor_name": "n/a" } ] } }, "data_format": "MITRE", "data_type": "CVE", "data_version": "4.0", "description": { "description_data": [ { "lang": "eng", "value": "Multiple cross-site scripting (XSS) vulnerabilities in the Administration Console in Sun Java System Access Manager 7.1 and 7 2005Q4 allow remote attackers to inject arbitrary web script or HTML via unspecified vectors related to the (1) Help and (2) Version windows." } ] }, "problemtype": { "problemtype_data": [ { "description": [ { "lang": "eng", "value": "n/a" } ] } ] }, "references": { "reference_data": [ { "name": "28113", "refsource": "BID", "url": "http://www.securityfocus.com/bid/28113" }, { "name": "sun-jsam-adminconsole-xss(41024)", "refsource": "XF", "url": "https://exchange.xforce.ibmcloud.com/vulnerabilities/41024" }, { "name": "29252", "refsource": "SECUNIA", "url": "http://secunia.com/advisories/29252" }, { "name": "201251", "refsource": "SUNALERT", "url": "http://sunsolve.sun.com/search/document.do?assetkey=1-26-201251-1" }, { "name": "ADV-2008-0784", "refsource": "VUPEN", "url": "http://www.vupen.com/english/advisories/2008/0784" } ] } }
{ "pile_set_name": "Github" }
# Event 200 - GetSiloCaps ###### Version: 0 ## Description None ## Data Dictionary |Standard Name|Field Name|Type|Description|Sample Value| |---|---|---|---|---| |TBD|Context|AnsiString|None|`None`| |TBD|Param1|HexInt64|None|`None`| |TBD|Param2|HexInt64|None|`None`| |TBD|Param3|HexInt64|None|`None`| |TBD|Param4|HexInt64|None|`None`| ## Tags * etw_level_Informational * etw_opcode_Start * etw_task_GetSiloCaps
{ "pile_set_name": "Github" }
# Stubs for networkx.utils.union_find (Python 3.5) # # NOTE: This dynamically typed stub was automatically generated by stubgen. from typing import Any, Optional class UnionFind: parents: Any = ... weights: Any = ... def __init__(self, elements: Optional[Any] = ...) -> None: ... def __getitem__(self, object): ... def __iter__(self): ... def to_sets(self): ... def union(self, *objects): ...
{ "pile_set_name": "Github" }
fileFormatVersion: 2 guid: cf1fe50a641faac4691bf49eb32ce333 MonoImporter: externalObjects: {} serializedVersion: 2 defaultReferences: - m_PersistentViewDataDictionary: {instanceID: 0} - LightSkin: {fileID: 11400000, guid: 0c156a7b2f4d450da1716b1625b5441d, type: 2} - DarkSkin: {fileID: 11400000, guid: 9d345c3252c147c89e8b61a249a46a9d, type: 2} - TMPEssentials: {fileID: 102900000, guid: ce4ff17ca867d2b48b5c8a4181611901, type: 3} - TMPExamples: {fileID: 102900000, guid: bc00e25696e4132499f56528d3fed2e3, type: 3} executionOrder: 0 icon: {instanceID: 0} userData: assetBundleName: assetBundleVariant:
{ "pile_set_name": "Github" }
# Prepare this test guest (standard libreswan test-harness setup step).
/testing/guestbin/swan-prep
# Start the IPsec subsystem (the pluto IKE daemon).
ipsec start
# Block until pluto's control socket is up before issuing whack commands.
/testing/pluto/bin/wait-until-pluto-started
# Load the "east-any" connection definition.
ipsec auto --add east-any
# Suppress retransmits so the test output stays deterministic.
ipsec whack --impair suppress-retransmits
# Marker line the test harness waits for to know initialization finished.
echo initdone
{ "pile_set_name": "Github" }
/** * @prettier */ import * as Bluebird from 'bluebird'; import { CoinFamily, BaseCoin as StaticsBaseCoin } from '@bitgo/statics'; const co = Bluebird.coroutine; import * as bitgoAccountLib from '@bitgo/account-lib'; import { BaseCoin, KeyPair, ParsedTransaction, ParseTransactionOptions, SignedTransaction, SignTransactionOptions, VerifyAddressOptions, VerifyTransactionOptions, TransactionFee, TransactionRecipient as Recipient, TransactionPrebuild as BaseTransactionPrebuild, TransactionExplanation, } from '../baseCoin'; import { BitGo } from '../../bitgo'; import { NodeCallback } from '../types'; import { InvalidAddressError, InvalidMemoIdError, MethodNotImplementedError } from '../../errors'; import * as stellar from 'stellar-sdk'; import { SeedValidator } from '../internal/seedValidator'; export interface HbarSignTransactionOptions extends SignTransactionOptions { txPrebuild: TransactionPrebuild; prv: string; } export interface TxInfo { recipients: Recipient[]; from: string; txid: string; } export interface TransactionPrebuild extends BaseTransactionPrebuild { txHex: string; txInfo: TxInfo; feeInfo: TransactionFee; source: string; } export interface ExplainTransactionOptions { txHex?: string; halfSigned?: { txHex: string; }; feeInfo: TransactionFee; // TODO(BG-24809): get the memo from the toJson memo?: { type: string; value: string; }; } interface AddressDetails { address: string; memoId?: string; } export class Hbar extends BaseCoin { protected readonly _staticsCoin: Readonly<StaticsBaseCoin>; constructor(bitgo: BitGo, staticsCoin?: Readonly<StaticsBaseCoin>) { super(bitgo); if (!staticsCoin) { throw new Error('missing required constructor parameter staticsCoin'); } this._staticsCoin = staticsCoin; } getChain() { return this._staticsCoin.name; } getFamily(): CoinFamily { return this._staticsCoin.family; } getFullName() { return this._staticsCoin.fullName; } getBaseFactor() { return Math.pow(10, this._staticsCoin.decimalPlaces); } static createInstance(bitgo: 
BitGo, staticsCoin?: Readonly<StaticsBaseCoin>): BaseCoin { return new Hbar(bitgo, staticsCoin); } /** * Flag for sending value of 0 * @returns {boolean} True if okay to send 0 value, false otherwise */ valuelessTransferAllowed(): boolean { return false; } /** * Checks if this is a valid base58 or hex address * @param address */ isValidAddress(address: string): boolean { try { const addressDetails = this.getAddressDetails(address); if (typeof addressDetails.memoId === 'undefined' || addressDetails.memoId === '') { // we want addresses to normalize without a memoId address = address.replace('?memoId=', ''); } return address === this.normalizeAddress(addressDetails); } catch (e) { return false; } } /** * Generate Hedera Hashgraph key pair * * @param seed * @returns {Object} object with generated pub, prv */ generateKeyPair(seed?: Buffer): KeyPair { const keyPair = seed ? new bitgoAccountLib.Hbar.KeyPair({ seed }) : new bitgoAccountLib.Hbar.KeyPair(); const keys = keyPair.getKeys(); if (!keys.prv) { throw new Error('Keypair generation failed to generate a prv'); } return { pub: keys.pub, prv: keys.prv, }; } parseTransaction( params: ParseTransactionOptions, callback?: NodeCallback<ParsedTransaction> ): Bluebird<ParsedTransaction> { return Bluebird.resolve({}).asCallback(callback); } verifyAddress(params: VerifyAddressOptions): boolean { return true; } verifyTransaction(params: VerifyTransactionOptions, callback?: NodeCallback<boolean>): Bluebird<boolean> { return Bluebird.resolve(true).asCallback(callback); } /** * Assemble keychain and half-sign prebuilt transaction * * @param params * @param params.txPrebuild {Object} prebuild object returned by platform * @param params.prv {String} user prv * @param params.wallet.addressVersion {String} this is the version of the Algorand multisig address generation format * @param callback * @returns Bluebird<SignedTransaction> */ signTransaction( params: HbarSignTransactionOptions, callback?: NodeCallback<SignedTransaction> ): 
Bluebird<SignedTransaction> { const self = this; return co<SignedTransaction>(function*() { const factory = bitgoAccountLib.register(self.getChain(), bitgoAccountLib.Hbar.TransactionBuilderFactory); const txBuilder = factory.from(params.txPrebuild.txHex); txBuilder.sign({ key: params.prv }); const transaction: any = yield txBuilder.build(); if (!transaction) { throw new Error('Invalid messaged passed to signMessage'); } const response = { txHex: transaction.toBroadcastFormat(), }; return transaction.signature.length >= 2 ? response : { halfSigned: response }; }) .call(this) .asCallback(callback); } /** * Sign message with private key * * @param key * @param message * @return {Buffer} A signature over the given message using the given key */ signMessage(key: KeyPair, message: string | Buffer, callback?: NodeCallback<Buffer>): Bluebird<Buffer> { return co<Buffer>(function* cosignMessage() { const msg = Buffer.isBuffer(message) ? message.toString('utf8') : message; // reconstitute keys and sign return new bitgoAccountLib.Hbar.KeyPair({ prv: key.prv }).signMessage(msg); }) .call(this) .asCallback(callback); } /** * Builds a funds recovery transaction without BitGo. 
* We need to do three queries during this: * 1) Node query - how much money is in the account * 2) Build transaction - build our transaction for the amount * 3) Send signed build - send our signed build to a public node * @param params * @param callback */ recover(params: any, callback?: NodeCallback<any>): Bluebird<any> { throw new MethodNotImplementedError(); } /** * Explain a Hedera transaction from txHex * @param params * @param callback */ explainTransaction( params: ExplainTransactionOptions, callback?: NodeCallback<TransactionExplanation> ): Bluebird<TransactionExplanation> { const self = this; return co<TransactionExplanation>(function*() { const txHex = params.txHex || (params.halfSigned && params.halfSigned.txHex); if (!txHex) { throw new Error('missing explain tx parameters'); } if (!params.feeInfo) { throw new Error('missing fee information'); } const factory = bitgoAccountLib.register(self.getChain(), bitgoAccountLib.Hbar.TransactionBuilderFactory); const txBuilder = factory.from(txHex); const tx = yield txBuilder.build(); const txJson = tx.toJson(); if (tx._txBody.data !== 'cryptoTransfer') { // don't explain this throw new Error('Transaction format outside of cryptoTransfer not supported for explanation.'); } const displayOrder = [ 'id', 'outputAmount', 'changeAmount', 'outputs', 'changeOutputs', 'fee', 'timestamp', 'expiration', 'memo', ]; // TODO(BG-24809): get the memo from the toJson let memo = ''; if (params.memo) { memo = params.memo.value; } const outputs = [ { amount: txJson.amount.toString(), address: txJson.to, memo, }, ]; const explanationResult: SignTransactionOptions = { displayOrder, id: txJson.id, outputs, outputAmount: outputs[0].amount, changeOutputs: [], // account based does not use change outputs changeAmount: '0', // account base does not make change fee: params.feeInfo, timestamp: txJson.startTime, expiration: txJson.validDuration, }; return explanationResult; }) .call(this) .asCallback(callback); } /** * Process address into 
address and memo id * * @param address the address * @returns object containing address and memo id */ getAddressDetails(rawAddress: string): AddressDetails { let memoId: string | undefined = undefined; let address = rawAddress; if (rawAddress.includes('?memoId=')) { address = rawAddress.substr(0, rawAddress.indexOf('?')); } // failed to parse OR bad address if (!address || !bitgoAccountLib.Hbar.Utils.isValidAddress(address)) { throw new InvalidAddressError(`invalid address: ${rawAddress}`); } // address doesn't have a memo id - this is ok if (rawAddress === address) { return { address, memoId }; } memoId = rawAddress.substr(rawAddress.indexOf('?memoId=') + 8); // undefined is valid as in has not been specified if (typeof memoId !== 'undefined' && !this.isValidMemoId(memoId)) { throw new InvalidMemoIdError(`invalid address: '${address}', memoId is not valid`); } return { address, memoId }; } /** * Validate and return address with appended memo id * * @param address * @param memoId */ normalizeAddress({ address, memoId }: AddressDetails): string { if (memoId && this.isValidMemoId(memoId)) { return `${address}?memoId=${memoId}`; } return address; } /** * Validates whether a memo is potentially correct in hedera. 
* * @param memoId */ isValidMemoId(memoId: string) { // TODO: change this to account-lib helper once its published if (typeof memoId !== 'undefined' && Buffer.from(memoId).length > 100) { return false; } return true; } isStellarSeed(seed: string): boolean { return SeedValidator.isValidEd25519SeedForCoin(seed, CoinFamily.XLM); } convertFromStellarSeed(seed: string): string | null { // assume this is a trust custodial seed if its a valid ed25519 prv if (!this.isStellarSeed(seed) || SeedValidator.hasCompetingSeedFormats(seed)) { return null; } if (SeedValidator.isValidEd25519SeedForCoin(seed, CoinFamily.XLM)) { const keyFromSeed = new bitgoAccountLib.Hbar.KeyPair({ seed: stellar.StrKey.decodeEd25519SecretSeed(seed) }); const keys = keyFromSeed.getKeys(); if (keys !== undefined && keys.prv) { return keys.prv; } } return null; } isValidPub(pub: string): boolean { return bitgoAccountLib.Hbar.Utils.isValidPublicKey(pub); } }
{ "pile_set_name": "Github" }
(ns chromex.ext.chromeos-info-private
  "  * available since Chrome 35"
  (:refer-clojure :only [defmacro defn apply declare meta let partial])
  (:require [chromex.wrapgen :refer [gen-wrap-helper]]
            [chromex.callgen :refer [gen-call-helper gen-tap-all-events-call]]))

; Forward declarations: both are defined at the bottom of this file, after the
; macros that expand into calls against them.
(declare api-table)
(declare gen-call)

; -- functions --------------------------------------------------------------------------------------------------------------

(defmacro get
  "Fetches customization values for the given property names. See property names in the declaration of the returned
   dictionary.

     |property-names| - Chrome OS Property names

   This function returns a core.async channel of type `promise-chan` which eventually receives a result value.
   Signature of the result value put on the channel is [properties-dictionary] where:

     |properties-dictionary| - Dictionary which contains all requested properties

   In case of an error the channel closes without receiving any value and relevant error object can be obtained via
   chromex.error/get-last-error."
  ([property-names] (gen-call :function ::get &form property-names)))

(defmacro set
  "Sets values for the given system property.

     |property-name| - Chrome OS system property name
     |property-value| - Chrome OS system property value"
  ([property-name property-value] (gen-call :function ::set &form property-name property-value)))

; -- convenience ------------------------------------------------------------------------------------------------------------

(defmacro tap-all-events
  "Taps all valid non-deprecated events in chromex.ext.chromeos-info-private namespace."
  [chan]
  (gen-tap-all-events-call api-table (meta &form) chan))

; ---------------------------------------------------------------------------------------------------------------------------
; -- API TABLE --------------------------------------------------------------------------------------------------------------
; ---------------------------------------------------------------------------------------------------------------------------

; Declarative description of the chrome.chromeosInfoPrivate API consumed by the
; gen-call / gen-wrap code generators below.
(def api-table
  {:namespace "chrome.chromeosInfoPrivate",
   :since "35",
   :functions
   [{:id ::get,
     :name "get",
     :callback? true,
     :params
     [{:name "property-names", :type "[array-of-strings]"}
      {:name "callback", :type :callback, :callback {:params [{:name "properties-dictionary", :type "object"}]}}]}
    {:id ::set,
     :name "set",
     :params
     [{:name "property-name", :type "chromeosInfoPrivate.PropertyName"}
      {:name "property-value", :type "any"}]}]})

; -- helpers ----------------------------------------------------------------------------------------------------------------

; code generation for native API wrapper
(defmacro gen-wrap [kind item-id config & args]
  (apply gen-wrap-helper api-table kind item-id config args))

; code generation for API call-site
(def gen-call (partial gen-call-helper api-table))
{ "pile_set_name": "Github" }
@import url(http://fonts.googleapis.com/earlyaccess/nanumgothic.css); html, body { font-family: "Nanum Gothic"; margin: 0; padding: 0; height: 100%; font-size: 12px; line-height: 1.6em; text-shadow: 1px 1px 1px rgba(0, 0, 0, 0.004); text-stroke: 0.6px; font-smoothing: antialiased; -webkit-font-smoothing: antialiased; color: #5e5e5e; background: #ffffff; } html a, body a { color: #6e7a86; text-decoration: none; } html a:hover, body a:hover { color: #565b61; } html a:active, body a:active { color: #82898f; } html h1, body h1 { font-size: 20px; } html h2, body h2 { font-size: 16px; } html h3, body h3 { font-size: 12px; } .ax-wrap { position: relative; margin: 0 auto; } .H10 { height: 10px; font-size: 1px; } .H20 { height: 20px; font-size: 1px; } .H30 { height: 30px; font-size: 1px; } .H40 { height: 40px; font-size: 1px; } .H50 { height: 50px; font-size: 1px; } #AXPage { font-family: "Nanum Gothic"; font-size: 12px; overflow: hidden; } #AXPage .ax-wrap { margin: 0 auto; } #AXPage .ax-clear { clear: both; } #AXPage .ax-unit { margin: 0 10px; position: relative; } #AXPage div[class^="ax-col-"] { float: left; } #AXPage .ax-col-1 { width: 8.3334%; } #AXPage .ax-col-2 { width: 16.6667%; } #AXPage .ax-col-3 { width: 25%; } #AXPage .ax-col-4 { width: 33.3334%; } #AXPage .ax-col-5 { width: 41.6667%; } #AXPage .ax-col-6 { width: 50%; } #AXPage .ax-col-7 { width: 58.3334%; } #AXPage .ax-col-8 { width: 66.6667%; } #AXPage .ax-col-9 { width: 75%; } #AXPage .ax-col-10 { width: 83.3334%; } #AXPage .ax-col-11 { width: 91.6667%; } #AXPage .ax-col-12 { width: 100%; } #AXPage #AXPageHead { position: relative; z-index: 10; } #AXPage #AXPageHead .ax-layer-1 { position: relative; z-index: 10; } #AXPage #AXPageHead .ax-layer-1 .ax-unit { background: #d9d9d9; margin: 0px; } #AXPage #AXPageHead .ax-layer-1 .ax-unit .navMenu { display: block; height: 40px; overflow: hidden; border-bottom: 1px solid #6c7b8a; } #AXPage #AXPageHead .ax-layer-1 .ax-unit .navMenu h3 { float: left; margin: 0px; 
padding: 0px; line-height: 40px; font-size: 12px; font-weight: normal; border-right: 1px solid #6c7b8a; background: #526271; color: #ffffff; } #AXPage #AXPageHead .ax-layer-1 .ax-unit .navMenu h3 a { display: block; height: 40px; line-height: 40px; } #AXPage #AXPageHead .ax-layer-1 .ax-unit .navMenu #sampleMenuBox { position: absolute; left: 110px; top: 0px; } #AXPage #AXPageHead .ax-layer-1 .ax-unit .navMenu ul { list-style: none; margin: 0px; padding: 0px; } #AXPage #AXPageHead .ax-layer-1 .ax-unit .navMenu ul li { float: left; border-right: 1px solid #6c7b8a; } #AXPage #AXPageHead .ax-layer-1 .ax-unit .navMenu ul li a { font-size: 13px; display: block; line-height: 40px; padding: 0px 10px; background: #d9d9d9; } #AXPage #AXPageHead .ax-layer-1 .ax-unit .navMenu ul li a span { font-size: 10px; margin: 0px 2px; } #AXPage #AXPageHead .ax-layer-1 .ax-unit .navMenu ul li a:hover { color: #526271; background: #c6cdd3; } #AXPage #AXPageHead .ax-layer-1 .ax-unit .navMenu ul li a.on { color: #526271; background: #c6cdd3; } #AXPage #AXPageHead .ax-layer-1 .ax-unit .navMenu .parentMenu { position: relative; } #AXPage #AXPageHead .ax-layer-1 .ax-unit .navMenu .childMenu { position: absolute; line-height: 25px; font-size: 12px; display: none; } #AXPage #AXPageHead .ax-layer-1 .ax-unit .navMenu .childMenu ul { padding: 0px; margin: 0px; display: block; position: relative; border-top: 1px solid #6c7b8a; } #AXPage #AXPageHead .ax-layer-1 .ax-unit .navMenu .childMenu ul li { float: none; padding: 0px; margin: 0px; border: 1px solid #6c7b8a; border-top: 0px none; } #AXPage #AXPageHead .ax-layer-1 .ax-unit .navMenu .childMenu ul li a { display: block; _width: 100%; padding: 0px 10px; height: 30px; line-height: 30px; background: #d9d9d9; text-decoration: none; white-space: nowrap; } #AXPage #AXPageHead .ax-layer-1 .ax-unit .navMenu .childMenu ul li a:hover { background: #c6cdd3; } #AXPage #AXPageHead .ax-layer-1 .ax-unit .navMenu .childMenu ul li a.on { background: #c6cdd3; } 
#AXPage #AXPageHead .ax-layer-1 .ax-unit .navMenu .childsMenu { position: absolute; line-height: 20px; font-size: 12px; } #AXPage #AXPageHead .ax-layer-2 { position: relative; z-index: 9; } #AXPage #AXPageHead .ax-layer-2 .ax-unit { margin: 0px; } #AXPage #AXPageHead .ax-layer-2 .ax-unit .themeInfo { display: block; background: #d7dde3; height: 70px; } #AXPage #AXPageHead .ax-layer-2 .ax-unit .themeInfo .logo { overflow: hidden; position: absolute; top: 5px; left: 5px; width: 60px; height: 60px; background: #eaeaea; border-radius: 30px; } #AXPage #AXPageHead .ax-layer-2 .ax-unit .themeInfo h1 { position: absolute; margin-left: 80px; font-size: 14px; margin-top: 16px; font-weight: bold; color: #5e5e5e; } #AXPage #AXPageHead .ax-layer-2 .ax-unit .themeInfo h1 .support { font-size: 10px; background: #d9d9d9; color: #526271; border-radius: 10px; padding: 2px 6px; margin-left: 2px; } #AXPage #AXPageHead .ax-layer-2 .ax-unit .themeInfo h2 { position: absolute; margin-left: 80px; font-size: 11px; margin-top: 38px; font-weight: normal; color: #5e5e5e; } #AXPage #AXPageHead .ax-layer-2 .ax-unit .themeInfo h2 a { color: #6e7a86; } #AXPage #AXPageHead .ax-layer-2 .ax-unit .themeInfo h2 a:hover { color: #565b61; text-decoration: underline; } #AXPage #AXPageHead .ax-layer-2 .ax-unit .themeInfo .facebook { position: absolute; right: 5px; top: 5px; font-size: 11px; } #AXPage #AXPageHead .ax-layer-2 .ax-unit .themeInfo .github { position: absolute; right: 5px; top: 25px; font-size: 11px; } #AXPage #AXPageHead .ax-layer-2 .ax-unit .themeInfo .api { position: absolute; right: 5px; top: 45px; font-size: 11px; } #AXPage #AXPageHead .ax-layer-3 .ax-unit { margin: 0px; } #AXPage #AXPageHead .ax-layer-3 .ax-unit .AXdemoPageTabTarget { background: #d7dde3; } #AXPage #AXPageHead #devCentermobileMenu { z-index: 10; display: none; position: absolute; top: 0px; right: 0px; } #AXPage #AXPageBody .title h1 { text-indent: 10px; } #AXPage #AXPageBody .ax-wrap { margin-bottom: 30px; } #AXPage 
#AXPageBody .ax-wrap .ax-layer-1 .secBlock { border: 2px solid #d9d9d9; margin: 10px; overflow: hidden; } #AXPage #AXPageBody .ax-wrap .ax-layer-1 .secBlock h3 { text-align: center; background: #d9d9d9; margin: 4px; padding: 0px; height: 40px; line-height: 40px; } #AXPage #AXPageBody .ax-wrap .ax-layer-1 .secBlock ul { font-size: 14px; margin: 10px; padding: 0px; list-style-position: inside; } #AXPage #AXPageBody .ax-wrap .ax-layer-1 .secBlock ul li { line-height: 180%; } #AXPage #AXPageBody .ax-wrap .ax-layer-1 .secBlock ul li a:hover { text-decoration: underline; } #AXPage #AXPageBody.SampleAXButton label { display: inline-block; margin-right: 10px; vertical-align: middle; width: 60px; text-align: right; } #AXPage #AXPageBody.SampleAXButton .withIcons button { margin-bottom: 10px; } #AXPage #AXPageBody.SampleAXInput label { display: inline-block; margin-right: 10px; vertical-align: middle; width: 80px; text-align: right; } #AXPage #AXPageBody.SampleAXInput .bindCheckbox label { text-align: left; } #AXPage #AXPageFoot { position: fixed; bottom: 0px; width: 100%; z-index: 100; } #AXPage #AXPageFoot .ax-layer-1 .ax-unit { background: #d9d9d9; margin: 0px; } #AXPage #AXPageFoot .ax-layer-1 .ax-unit .navTheme { height: 30px; overflow: hidden; border-top: 1px solid #6c7b8a; } #AXPage #AXPageFoot .ax-layer-1 .ax-unit .navTheme h3 { float: left; margin: 0px; padding: 0px; line-height: 30px; font-size: 12px; font-weight: normal; padding: 0 20px; border-right: 1px solid #6c7b8a; background: #526271; color: #ffffff; } #AXPage #AXPageFoot .ax-layer-1 .ax-unit .navTheme ul { list-style: none; margin: 0px; padding: 0px; } #AXPage #AXPageFoot .ax-layer-1 .ax-unit .navTheme ul li { float: left; border-right: 1px solid #6c7b8a; } #AXPage #AXPageFoot .ax-layer-1 .ax-unit .navTheme ul li a { font-size: 10px; display: block; line-height: 30px; padding: 0px 10px; background: #d9d9d9; } #AXPage #AXPageFoot .ax-layer-1 .ax-unit .navTheme ul li a span { font-size: 10px; margin: 0px 2px; 
} #AXPage #AXPageFoot .ax-layer-1 .ax-unit .navTheme ul li a:hover { color: #526271; background: #c6cdd3; } #AXPage #AXPageFoot .ax-layer-1 .ax-unit .navTheme ul li a.on { color: #526271; background: #c6cdd3; font-size: 13px; } @media only screen and (min-width: 768px) and (max-width: 1023px) { #AXPageBody .ax-wrap { width: 100%; } #AXPageBody .ax-col-3 { width: 50%; } } @media (max-width: 767px) { #AXPageBody .ax-wrap { width: 100%; } #AXPageBody .ax-col-3 { width: 100%; } #sampleMenuBox { display: none; } #AXPage #AXPageHead #devCentermobileMenu { display: block; position: absolute; top: 5px; right: 10px; } } /* { oldCode Css -------------------------------------------------------------------- */ .AXdemoPageTabTarget { padding: 5px 0px 0px 0px; } .AXdemoPageContent { padding: 0px 10px 50px 10px; } .clear { clear: both; } .Hspace1 { font-size: 1px; line-height: 1px; height: 1px; } .Hspace2 { font-size: 1px; line-height: 1px; height: 2px; } .Hspace3 { font-size: 1px; line-height: 1px; height: 3px; } .Hspace5 { font-size: 1px; line-height: 1px; height: 5px; } .Hspace10 { font-size: 1px; line-height: 1px; height: 10px; } .Hspace15 { font-size: 1px; line-height: 1px; height: 15px; } .Hspace20 { font-size: 1px; line-height: 1px; height: 20px; } .Hspace30 { font-size: 1px; line-height: 1px; height: 30px; } .description { padding-left: 20px; color: #888; } .method { position: relative; border: 1px solid #6c7b8a; border-radius: 4px; overflow: hidden; line-height: 1.6em; } .method .methodName { padding: 10px; font-size: 14px; font-weight: bold; border-bottom: 1px solid #6c7b8a; background: #d9d9d9; } .method .methodDesc { padding: 10px; padding-left: 10px; } .method .methodDesc .desc { color: #5e5e5e; padding-bottom: 5px; font-style: italic; } .method .methodDesc ul { padding: 0px; margin: 0px; } .method .methodDesc ul li { padding: 3px; font-size: 14px; } .method .methodDesc ul ul { padding-left: 20px; } .method .methodDesc .methodDescContent { padding: 5px 10px; color: 
/* Continuation of `.method .methodDesc .methodDescContent` opened on the previous
   line. The original source read `color: 1px solid #6c7b8a;` — an invalid value
   for `color`, which browsers drop, so the text color was inherited from `body`
   (#5e5e5e). Making that explicit yields valid CSS with identical rendering. */
#5e5e5e;
  border: 1px solid #6c7b8a;
  border-radius: 5px;
  background: #c6cdd3;
}
.method .methodExam {
  padding: 10px;
  border-top: 1px solid #6c7b8a;
  background: #c6cdd3;
  position: relative;
}
.method table {
  border-top: 1px solid #E3E3E3;
  width: 100%;
}
.method table caption {
  text-align: left;
  padding: 10px;
  color: #0080C0;
}
.method table th {
  border-right: 1px solid #E3E3E3;
  background: #E1F0FF;
}
.method table th,
.method table td {
  border-bottom: 1px solid #E3E3E3;
}
.contentContainer {
  padding: 20px;
}
table.setConfig {
  border: 1px solid #E3E3E3;
  width: 100%;
  border-collapse: collapse;
}
table.setConfig caption {
  text-align: left;
  padding: 10px;
  color: #0080C0;
}
table.setConfig th {
  border: 1px solid #E3E3E3;
  background: #E1F0FF;
}
table.setConfig td {
  border: 1px solid #E3E3E3;
}
table.setConfig td.ti5 {
  text-indent: 5px;
}
table.setConfig td.ti10 {
  text-indent: 10px;
}
table.setConfig td.ti20 {
  text-indent: 20px;
}
table.setConfig pre {
  margin: 0px;
}
.tac {
  text-align: center;
}
.fieldsetBody {
  width: 100%;
  background: #eeeeee;
  border: 0px none;
  padding: 0px;
  font-size: 12px;
  line-height: 1.8em;
}
/* -------------------------------------------------------------------- oldCode Css} */
.prettyprint {
  border: 1px solid #ccc;
  background: #f3f3f3;
  border-radius: 10px;
  padding: 20px;
}
{ "pile_set_name": "Github" }
using System;
using System.Linq;
using System.Collections.Generic;
using GoogleMapsApi.Entities.Common;
using GoogleMapsApi.StaticMaps.Entities;
using GoogleMapsApi.StaticMaps.Enums;

namespace GoogleMapsApi.StaticMaps
{
	/// <summary>
	/// Creates a URL to Google's static map service from a properly filled StaticMapRequest.
	/// http://code.google.com/apis/maps/documentation/staticmaps/
	/// </summary>
	public class StaticMapsEngine
	{
		protected static readonly string BaseUrl;

		// The only "scale" values the Static Maps API accepts.
		private static readonly int[] ValidScales = { 1, 2, 4 };

		static StaticMapsEngine()
		{
			BaseUrl = @"maps.google.com/maps/api/staticmap";
		}

		/// <summary>
		/// Serializes <paramref name="request"/> into an absolute static-map image URL.
		/// </summary>
		/// <param name="request">The request to serialize. <c>Size</c> is mandatory.</param>
		/// <returns>An absolute http(s) URL for the requested static map image.</returns>
		/// <exception cref="ArgumentException">
		/// Size is missing, Scale is not 1/2/4, or a marker/path style has an empty color.
		/// </exception>
		/// <exception cref="ArgumentOutOfRangeException">An enum value in the request is unrecognized.</exception>
		public string GenerateStaticMapURL(StaticMapRequest request)
		{
			string scheme = request.IsSSL ? "https://" : "http://";

			var parametersList = new QueryStringParametersList();

			if (!string.IsNullOrEmpty(request.ApiKey))
			{
				parametersList.Add("key", request.ApiKey);
			}

			if (request.Center != null)
			{
				ILocationString center = request.Center;
				parametersList.Add("center", center.LocationString);
			}

			if (request.Zoom != default(int))
			{
				parametersList.Add("zoom", request.Zoom.ToString());
			}

			if (request.Scale != default(int))
			{
				if (!ValidScales.Contains(request.Scale))
				{
					throw new ArgumentException("Scale is invalid; must be a value of 1, 2 or 4");
				}
				parametersList.Add("scale", request.Scale.ToString());
			}

			// Size is the only mandatory parameter of the Static Maps API.
			if (request.Size.Width != default(int) || request.Size.Height != default(int))
			{
				ImageSize imageSize = request.Size;
				parametersList.Add("size", string.Format("{0}x{1}", imageSize.Width, imageSize.Height));
			}
			else
			{
				throw new ArgumentException("Size is invalid");
			}

			if (request.ImageFormat != default(ImageFormat))
			{
				parametersList.Add("format", GetImageFormatValue(request.ImageFormat));
			}

			if (request.MapType != null)
			{
				string type;
				switch (request.MapType)
				{
					case MapType.Roadmap:
						type = "roadmap";
						break;
					case MapType.Satellite:
						type = "satellite";
						break;
					case MapType.Terrain:
						type = "terrain";
						break;
					case MapType.Hybrid:
						type = "hybrid";
						break;
					default:
						throw new ArgumentOutOfRangeException("MapType");
				}
				parametersList.Add("maptype", type);
			}

			if (request.Style != null)
			{
				parametersList.Add("style", string.Join("|", GetStyleComponents(request.Style)));
			}

			IList<Marker> markers = request.Markers;
			if (markers != null)
			{
				foreach (Marker marker in markers)
				{
					parametersList.Add("markers", GetMarkerParameter(marker));
				}
			}

			IList<Path> pathes = request.Pathes;
			if (pathes != null)
			{
				foreach (Path path in pathes)
				{
					parametersList.Add("path", GetPathParameter(path));
				}
			}

			return scheme + BaseUrl + "?" + parametersList.GetQueryStringPostfix();
		}

		// Maps an ImageFormat enum value to the API's "format" parameter value.
		private static string GetImageFormatValue(ImageFormat imageFormat)
		{
			switch (imageFormat)
			{
				case ImageFormat.PNG8:
					return "png8";
				case ImageFormat.PNG32:
					return "png32";
				case ImageFormat.GIF:
					return "gif";
				case ImageFormat.JPG:
					return "jpg";
				case ImageFormat.JPG_baseline:
					return "jpg-baseline";
				default:
					throw new ArgumentOutOfRangeException("ImageFormat");
			}
		}

		// Serializes a MapStyle into the "|"-joined components of the "style" parameter.
		private static List<string> GetStyleComponents(MapStyle style)
		{
			var styleComponents = new List<string>();

			if (style.MapFeature != default(MapFeature))
			{
				string mapFeature;
				switch (style.MapFeature)
				{
					case MapFeature.All:
						mapFeature = "all";
						break;
					case MapFeature.Road:
						mapFeature = "road";
						break;
					case MapFeature.Landscape:
						mapFeature = "landscape";
						break;
					default:
						throw new ArgumentOutOfRangeException();
				}
				styleComponents.Add("feature:" + mapFeature);
			}

			if (style.MapElement != default(MapElement))
			{
				string element;
				switch (style.MapElement)
				{
					case MapElement.All:
						element = "all";
						break;
					case MapElement.Geometry:
						element = "geometry";
						break;
					case MapElement.Labels:
						// BUG FIX: was "lables", a misspelling the Static Maps API does not recognize.
						element = "labels";
						break;
					default:
						throw new ArgumentOutOfRangeException();
				}
				styleComponents.Add("element:" + element);
			}

			string hue = style.HUE;
			if (hue != null)
			{
				styleComponents.Add("hue:" + hue);
			}

			float? lightness = style.Lightness;
			if (lightness != null)
			{
				styleComponents.Add("lightness:" + lightness);
			}

			float? saturation = style.Saturation;
			if (saturation != null)
			{
				styleComponents.Add("saturation:" + saturation);
			}

			float? gamma = style.Gamma;
			if (gamma != null)
			{
				styleComponents.Add("gamma:" + gamma);
			}

			if (style.InverseLightness)
			{
				// BUG FIX: was "inverse_lightnes" (missing an 's'), silently ignored by the API.
				styleComponents.Add("inverse_lightness:true");
			}

			MapVisibility mapVisibility = style.MapVisibility;
			if (mapVisibility != default(MapVisibility))
			{
				string visibility;
				switch (mapVisibility)
				{
					case MapVisibility.On:
						visibility = "on";
						break;
					case MapVisibility.Off:
						visibility = "off";
						break;
					case MapVisibility.Simplified:
						visibility = "simplified";
						break;
					default:
						throw new ArgumentOutOfRangeException();
				}
				styleComponents.Add("visibility:" + visibility);
			}

			return styleComponents;
		}

		// Builds one "markers" parameter value: "styleComponent|...|location|...".
		private static string GetMarkerParameter(Marker marker)
		{
			var markerStyleParams = new List<string>();

			MarkerStyle markerStyle = marker.Style;
			if (markerStyle != null)
			{
				if (string.IsNullOrWhiteSpace(markerStyle.Color))
				{
					throw new ArgumentException("Marker style color can't be empty");
				}
				markerStyleParams.Add("color:" + markerStyle.Color);

				if (!string.IsNullOrWhiteSpace(markerStyle.Label))
				{
					markerStyleParams.Add("label:" + markerStyle.Label);
				}

				if (markerStyle.Size != default(MarkerSize))
				{
					switch (markerStyle.Size)
					{
						case MarkerSize.Mid:
							markerStyleParams.Add("size:mid");
							break;
						case MarkerSize.Tiny:
							markerStyleParams.Add("size:tiny");
							break;
						case MarkerSize.Small:
							markerStyleParams.Add("size:small");
							break;
						default:
							throw new ArgumentOutOfRangeException();
					}
				}
			}

			string styleString = string.Join("|", markerStyleParams);
			string locations = string.Join("|", marker.Locations.Select(location => location.LocationString));
			return string.Format("{0}|{1}", styleString, locations);
		}

		// Builds one "path" parameter value: "styleComponent|...|location|...".
		private static string GetPathParameter(Path path)
		{
			var pathStyleParams = new List<string>();

			PathStyle pathStyle = path.Style;
			if (pathStyle != null)
			{
				if (string.IsNullOrWhiteSpace(pathStyle.Color))
				{
					throw new ArgumentException("Path style color can't be empty");
				}
				pathStyleParams.Add("color:" + pathStyle.Color);

				if (!string.IsNullOrWhiteSpace(pathStyle.FillColor))
				{
					pathStyleParams.Add("fillcolor:" + pathStyle.FillColor);
				}

				if (pathStyle.Weight != default(int))
				{
					pathStyleParams.Add("weight:" + pathStyle.Weight);
				}
			}

			string styleString = string.Join("|", pathStyleParams);
			string locations = string.Join("|", path.Locations.Select(location => location.LocationString));
			return string.Format("{0}|{1}", styleString, locations);
		}
	}
}
{ "pile_set_name": "Github" }
{ "id": "35518949", "url": "https:\/\/collection.cooperhewitt.org\/types\/35518949\/", "name": "Sarong (kain songket)", "count_objects": "1", "supersedes": "0", "superseded_by": "0" }
{ "pile_set_name": "Github" }
/* ----------------------------------------------------------------------------
 *         ATMEL Microcontroller Software Support
 * ----------------------------------------------------------------------------
 * Copyright (c) 2008, Atmel Corporation
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the disclaimer below.
 *
 * Atmel's name may not be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
 * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * ----------------------------------------------------------------------------
 */

/**
 * \file
 *
 * USB Device Framework configurations.
 *
 */

#ifndef USBD_CONFIG_H
#define USBD_CONFIG_H

/*----------------------------------------------------------------------------
 *      Headers
 *----------------------------------------------------------------------------*/

/** \addtogroup usbd_config
 *@{
 */

/*----------------------------------------------------------------------------
 *      Constants
 *----------------------------------------------------------------------------*/

/** \addtogroup usbd_ids USBD Device IDs
 *  @{
 */
#define USBD_VID_ATMEL        0x03EB /**< Vendor ID: Atmel */
#define USBD_PID_ENUM         0x0001 /**< Product ID: Enum (Core) */
#define USBD_PID_CDCDSERIAL   0x6119 /**< Product ID: CDC Serial */
#define USBD_PID_HIDKEYBOARD  0x6127 /**< Product ID: HID Keyboard */
#define USBD_PID_AUDIO        0x6128 /**< Product ID: Audio devices */
#define USBD_PID_MSD          0x6129 /**< Product ID: Mass storage */
#define USBD_PID_CDCHID       0x6130 /**< Product ID: composite */
#define USBD_PID_CDCAUDIO     0x6131 /**< Product ID: composite */
#define USBD_PID_CDCMSD       0x6132 /**< Product ID: composite */
#define USBD_PID_CDCCDC       0x6133 /**< Product ID: composite */
#define USBD_PID_HIDAUDIO     0x6134 /**< Product ID: composite */
#define USBD_PID_HIDMSD       0x6135 /**< Product ID: composite */
#define USBD_PID_HIDMOUSE     0x6200 /**< Product ID: HID Mouse */
#define USBD_PID_HIDTRANSFER  0x6201 /**< Product ID: HID Transfer */
#define USBD_PID_CCID         0x6203 /**< Product ID: CCID */
#define USBD_RELEASE_1_00     0x0100 /**< Release: 1.00 */
/** @}*/

/** \addtogroup usbd_general_config USBD General Configure
 *  @{
 * This page lists general configurations for all USB device drivers.
 * - \ref USBD_BMATTRIBUTES
 */
/** default USB Device attributes configuration descriptor
 *  (bus or self powered, remote wakeup) */
#define USBD_BMATTRIBUTES   BOARD_USB_BMATTRIBUTES
/** @}*/

/*----------------------------------------------------------------------------
 *      USB Device - Mass storage
 *----------------------------------------------------------------------------*/

/** \addtogroup usbd_msdd_config USB MassStorage Configure
 *  @{
 * This page lists the defines used by the Mass Storage driver.
 *
 * \section msd_ep_addr Endpoint Addresses
 * - \ref MSDDriverDescriptors_BULKOUT
 * - \ref MSDDriverDescriptors_BULKIN
 */
/** Address of the Mass Storage bulk-out endpoint.*/
#define MSDDriverDescriptors_BULKOUT                1
/** Address of the Mass Storage bulk-in endpoint.*/
#define MSDDriverDescriptors_BULKIN                 2
/** @}*/

/*----------------------------------------------------------------------------
 *      USB Device - CDC Serial
 *----------------------------------------------------------------------------*/

/** \addtogroup usbd_cdc_serial_config USB CDC Serial Configure
 *  @{
 * This page lists the defines used by the CDC Serial Device Driver.
 *
 * \section cdcd_ep_addr Endpoint Addresses
 * - \ref CDCDSerialDriverDescriptors_DATAOUT
 * - \ref CDCDSerialDriverDescriptors_DATAIN
 * - \ref CDCDSerialDriverDescriptors_NOTIFICATION
 */
/** Data OUT endpoint number */
#define CDCDSerialDriverDescriptors_DATAOUT             1
/** Data IN endpoint number */
#define CDCDSerialDriverDescriptors_DATAIN              2
/** Notification endpoint number */
#define CDCDSerialDriverDescriptors_NOTIFICATION        3
/** @}*/

/*----------------------------------------------------------------------------
 *      USB Device - Audio
 *----------------------------------------------------------------------------*/

/** \addtogroup usbd_audio_config USB Audio General Configure
 *  @{
 * This page lists definitions for USB Audio Devices Drivers.
 */
#if defined(at91sam7s) || defined(at91sam9xe)
/** Sample rate in Hz. */
#define AUDDevice_SAMPLERATE        32000UL
/** Number of channels in audio stream. */
#define AUDDevice_NUMCHANNELS       1
/** Number of bytes in one sample. */
#define AUDDevice_BYTESPERSAMPLE    2
#else
/** Sample rate in Hz. */
#define AUDDevice_SAMPLERATE        48000UL
/** Number of channels in audio stream. */
#define AUDDevice_NUMCHANNELS       2
/** Number of bytes in one sample. */
#define AUDDevice_BYTESPERSAMPLE    2
#endif
/** Number of bits in one sample. */
#define AUDDevice_BITSPERSAMPLE     (AUDDevice_BYTESPERSAMPLE * 8)
/** Number of bytes in one USB subframe. */
#define AUDDevice_BYTESPERSUBFRAME  (AUDDevice_NUMCHANNELS * \
                                     AUDDevice_BYTESPERSAMPLE)
/** Number of samples in one USB frame. */
#define AUDDevice_SAMPLESPERFRAME   (AUDDevice_SAMPLERATE / 1000 \
                                     * AUDDevice_NUMCHANNELS)
/** Number of bytes in one USB frame. */
#define AUDDevice_BYTESPERFRAME     (AUDDevice_SAMPLESPERFRAME * \
                                     AUDDevice_BYTESPERSAMPLE)
/** @}*/

/*----------------------------------------------------------------------------
 *      USB Device - Audio - Desktop Speaker
 *----------------------------------------------------------------------------*/

/** \addtogroup usbd_audio_speaker_config USB Speaker Configure
 *  @{
 * This page lists the definitions for USB Audio Speaker Device Driver.
 * - \ref AUDDSpeakerDriverDescriptors_DATAOUT
 * - \ref AUDDSpeakerDriverDescriptors_FS_INTERVAL
 * - \ref AUDDSpeakerDriverDescriptors_HS_INTERVAL
 *
 * \note for UDP, uses IN EPs that support double buffer; for UDPHS, uses
 *       IN EPs that support DMA and High bandwidth.
 */
/** Data out endpoint number. */
#define AUDDSpeakerDriverDescriptors_DATAOUT            0x04
/** Endpoint polling interval 2^(x-1) * 125us */
#define AUDDSpeakerDriverDescriptors_HS_INTERVAL        0x04
/** Endpoint polling interval 2^(x-1) * ms */
#define AUDDSpeakerDriverDescriptors_FS_INTERVAL        0x01
/** @}*/

/*----------------------------------------------------------------------------
 *      USB Device - Audio - Speaker Phone
 *----------------------------------------------------------------------------*/

/** \addtogroup usbd_audio_speakerphone_config USB Speaker Phone Configure
 *  @{
 * This page lists the definitions for USB Audio Speaker Phone Device Driver.
 * - \ref AUDDSpeakerPhoneDriverDescriptors_DATAOUT
 * - \ref AUDDSpeakerPhoneDriverDescriptors_DATAIN
 * - \ref AUDDSpeakerPhoneDriverDescriptors_HS_INTERVAL
 * - \ref AUDDSpeakerPhoneDriverDescriptors_FS_INTERVAL
 */
#if defined(at91sam7s) || defined(at91sam9xe)
/** Data out endpoint number, size 64B */
#define AUDDSpeakerPhoneDriverDescriptors_DATAOUT       0x01
/** Data in endpoint number, size 64B */
#define AUDDSpeakerPhoneDriverDescriptors_DATAIN        0x02
#elif defined(CHIP_USB_UDP)
/** Data out endpoint number, size 192B */
#define AUDDSpeakerPhoneDriverDescriptors_DATAOUT       0x04
/** Data in endpoint number, size 192B */
#define AUDDSpeakerPhoneDriverDescriptors_DATAIN        0x05
#elif defined(at91sam9m10ek)
/** Data out endpoint number, size 192B */
#define AUDDSpeakerPhoneDriverDescriptors_DATAOUT       0x01
/** Data in endpoint number, size 192B */
#define AUDDSpeakerPhoneDriverDescriptors_DATAIN        0x06
#else
/** Data out endpoint number, size 192B */
#define AUDDSpeakerPhoneDriverDescriptors_DATAOUT       0x05
/** Data in endpoint number, size 192B */
#define AUDDSpeakerPhoneDriverDescriptors_DATAIN        0x06
#endif
/** Endpoint polling interval 2^(x-1) * 125us */
#define AUDDSpeakerPhoneDriverDescriptors_HS_INTERVAL   0x04
/** Endpoint polling interval 2^(x-1) * ms */
#define AUDDSpeakerPhoneDriverDescriptors_FS_INTERVAL   0x01
/** @}*/

/*----------------------------------------------------------------------------
 *      USB Device - HID - Keyboard
 *----------------------------------------------------------------------------*/

/** \addtogroup usbd_hid_keyboard_config USB HID Keyboard Device Configure
 *  @{
 * This page lists the defines used by the HID Keyboard Device Driver.
 *
 * \section hidd_k_ep_addr Endpoint Addresses
 * - \ref HIDDKeyboardDriverDescriptors_INTERRUPTIN
 * - \ref HIDDKeyboardDriverDescriptors_INTERRUPTOUT
 * \section hidd_k_ep_polling Endpoint Polling Rate
 * - \ref HIDDKeyboardDriverDescriptors_INTERRUPTIN_POLLING
 * - \ref HIDDKeyboardDriverDescriptors_INTERRUPTOUT_POLLING
 */
/** Interrupt IN endpoint number */
#define HIDDKeyboardDriverDescriptors_INTERRUPTIN           1
/** Interrupt IN endpoint polling rate (in milliseconds) */
#define HIDDKeyboardDriverDescriptors_INTERRUPTIN_POLLING   10
/** Interrupt OUT endpoint number */
#define HIDDKeyboardDriverDescriptors_INTERRUPTOUT          2
/** Interrupt OUT endpoint polling rate (in milliseconds) */
#define HIDDKeyboardDriverDescriptors_INTERRUPTOUT_POLLING  10
/** @}*/

/*----------------------------------------------------------------------------
 *      USB Device - HID - Mouse
 *----------------------------------------------------------------------------*/

/** \addtogroup usbd_hid_mouse_config USB HID Mouse Device Configure
 *  @{
 * This page lists the defines used by the HID Mouse Device Driver.
 *
 * \section hidd_m_ep_addr Endpoint Addresses
 * - \ref HIDDMouseDriverDescriptors_INTERRUPTIN
 * \section hidd_m_ep_polling Endpoint Polling Rate
 * - \ref HIDDMouseDriverDescriptors_INTERRUPTIN_POLLING
 */
/** Interrupt IN endpoint number */
#define HIDDMouseDriverDescriptors_INTERRUPTIN              1
/** Interrupt IN endpoint polling rate (in milliseconds) */
#define HIDDMouseDriverDescriptors_INTERRUPTIN_POLLING      10
/** @}*/

/*----------------------------------------------------------------------------
 *      USB Device - HID - Transfer (Customize device)
 *----------------------------------------------------------------------------*/

/** \addtogroup usbd_hid_xfr_config USB HID Transfer Device Configure
 *  @{
 * This page lists the defines used by the HID Transfer Device Driver.
 *
 * \section hidd_t_ep_addr Endpoint Addresses
 * - \ref HIDDTransferDriverDescriptors_INTERRUPTIN
 * - \ref HIDDTransferDriverDescriptors_INTERRUPTOUT
 * \section hidd_t_ep_polling Endpoint Polling Rate
 * - \ref HIDDTransferDriverDescriptors_INTERRUPTIN_POLLING
 * - \ref HIDDTransferDriverDescriptors_INTERRUPTOUT_POLLING
 */
/** Interrupt IN endpoint number. */
#define HIDDTransferDriverDescriptors_INTERRUPTIN           1
/** Interrupt IN endpoint polling rate (in milliseconds). */
#define HIDDTransferDriverDescriptors_INTERRUPTIN_POLLING   50
/** Interrupt OUT endpoint number. */
#define HIDDTransferDriverDescriptors_INTERRUPTOUT          2
/** Interrupt OUT endpoint polling rate (in milliseconds). */
#define HIDDTransferDriverDescriptors_INTERRUPTOUT_POLLING  50
/** @}*/

/*----------------------------------------------------------------------------
 *      USB Device - Composite
 *----------------------------------------------------------------------------*/

/** \addtogroup usbd_composite_config USB Composite Device Configure
 *  @{
 */
/** @}*/

/**@}*/

#endif //#ifndef USBD_CONFIG_H
{ "pile_set_name": "Github" }
/**
 * @author MadhavBahlMD
 * @date 16/01/2019
 */

/* ========================================= */
/* ===== Array Partition Problem In JS ===== */
/* ========================================= */

/**
 * Splits an array into consecutive chunks of the given size.
 * The last chunk holds the remainder when the array length is not an
 * exact multiple of `size`.
 *
 * @param {Array} array - the array to split (not mutated)
 * @param {number} size - desired chunk length; must be a positive integer
 * @returns {Array<Array>} the chunks, preserving element order
 * @throws {RangeError} when `size` is not a positive integer
 */
function partition (array, size) {
    // Robustness fix: the previous implementation silently returned the whole
    // array as a single chunk for size <= 0 (because `i % 0` is NaN).
    if (!Number.isInteger(size) || size <= 0) {
        throw new RangeError('size must be a positive integer');
    }

    const partitionedArray = [];
    for (let start = 0; start < array.length; start += size) {
        // slice() clamps its end index to array.length, so the trailing
        // partial chunk needs no special casing.
        partitionedArray.push(array.slice(start, start + size));
    }
    return partitionedArray;
}

// Test our partition function
console.log(partition ([1,2,3,4,5,6,7,8], 2));
console.log(partition ([1,2,3,4,5,6,7], 2));
console.log(partition ([1,2,5,3,4,6,7,1,2,4,6,4,5], 3));
console.log(partition ([1,2,5,3,4,6,7,1,2,4,6,4,5], 4));

/**
 * Required Output
 * [ [ 1, 2 ], [ 3, 4 ], [ 5, 6 ], [ 7, 8 ] ]
 * [ [ 1, 2 ], [ 3, 4 ], [ 5, 6 ], [ 7 ] ]
 * [ [ 1, 2, 5 ], [ 3, 4, 6 ], [ 7, 1, 2 ], [ 4, 6, 4 ], [ 5 ] ]
 * [ [ 1, 2, 5, 3 ], [ 4, 6, 7, 1 ], [ 2, 4, 6, 4 ], [ 5 ] ]
 */
{ "pile_set_name": "Github" }
import types import weakref from .lock import allocate_lock class BaseTypeByIdentity(object): is_array_type = False is_raw_function = False def get_c_name(self, replace_with='', context='a C file'): result = self.c_name_with_marker assert result.count('&') == 1 # some logic duplication with ffi.getctype()... :-( replace_with = replace_with.strip() if replace_with: if replace_with.startswith('*') and '&[' in result: replace_with = '(%s)' % replace_with elif not replace_with[0] in '[(': replace_with = ' ' + replace_with result = result.replace('&', replace_with) if '$' in result: from .ffiplatform import VerificationError raise VerificationError( "cannot generate '%s' in %s: unknown type name" % (self._get_c_name(), context)) return result def _get_c_name(self): return self.c_name_with_marker.replace('&', '') def has_c_name(self): return '$' not in self._get_c_name() def get_cached_btype(self, ffi, finishlist, can_delay=False): try: BType = ffi._cached_btypes[self] except KeyError: BType = self.build_backend_type(ffi, finishlist) BType2 = ffi._cached_btypes.setdefault(self, BType) assert BType2 is BType return BType def __repr__(self): return '<%s>' % (self._get_c_name(),) def _get_items(self): return [(name, getattr(self, name)) for name in self._attrs_] class BaseType(BaseTypeByIdentity): def __eq__(self, other): return (self.__class__ == other.__class__ and self._get_items() == other._get_items()) def __ne__(self, other): return not self == other def __hash__(self): return hash((self.__class__, tuple(self._get_items()))) class VoidType(BaseType): _attrs_ = () def __init__(self): self.c_name_with_marker = 'void&' def build_backend_type(self, ffi, finishlist): return global_cache(self, ffi, 'new_void_type') void_type = VoidType() class PrimitiveType(BaseType): _attrs_ = ('name',) ALL_PRIMITIVE_TYPES = { 'char': 'c', 'short': 'i', 'int': 'i', 'long': 'i', 'long long': 'i', 'signed char': 'i', 'unsigned char': 'i', 'unsigned short': 'i', 'unsigned int': 'i', 'unsigned 
long': 'i', 'unsigned long long': 'i', 'float': 'f', 'double': 'f', 'long double': 'f', '_Bool': 'i', # the following types are not primitive in the C sense 'wchar_t': 'c', 'int8_t': 'i', 'uint8_t': 'i', 'int16_t': 'i', 'uint16_t': 'i', 'int32_t': 'i', 'uint32_t': 'i', 'int64_t': 'i', 'uint64_t': 'i', 'intptr_t': 'i', 'uintptr_t': 'i', 'ptrdiff_t': 'i', 'size_t': 'i', 'ssize_t': 'i', } def __init__(self, name): assert name in self.ALL_PRIMITIVE_TYPES self.name = name self.c_name_with_marker = name + '&' def is_char_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'c' def is_integer_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'i' def is_float_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'f' def build_backend_type(self, ffi, finishlist): return global_cache(self, ffi, 'new_primitive_type', self.name) class BaseFunctionType(BaseType): _attrs_ = ('args', 'result', 'ellipsis') def __init__(self, args, result, ellipsis): self.args = args self.result = result self.ellipsis = ellipsis # reprargs = [arg._get_c_name() for arg in self.args] if self.ellipsis: reprargs.append('...') reprargs = reprargs or ['void'] replace_with = self._base_pattern % (', '.join(reprargs),) self.c_name_with_marker = ( self.result.c_name_with_marker.replace('&', replace_with)) class RawFunctionType(BaseFunctionType): # Corresponds to a C type like 'int(int)', which is the C type of # a function, but not a pointer-to-function. The backend has no # notion of such a type; it's used temporarily by parsing. _base_pattern = '(&)(%s)' is_raw_function = True def build_backend_type(self, ffi, finishlist): from . 
import api raise api.CDefError("cannot render the type %r: it is a function " "type, not a pointer-to-function type" % (self,)) def as_function_pointer(self): return FunctionPtrType(self.args, self.result, self.ellipsis) class FunctionPtrType(BaseFunctionType): _base_pattern = '(*&)(%s)' def build_backend_type(self, ffi, finishlist): result = self.result.get_cached_btype(ffi, finishlist) args = [] for tp in self.args: args.append(tp.get_cached_btype(ffi, finishlist)) return global_cache(self, ffi, 'new_function_type', tuple(args), result, self.ellipsis) class PointerType(BaseType): _attrs_ = ('totype',) _base_pattern = " *&" _base_pattern_array = "(*&)" def __init__(self, totype): self.totype = totype if totype.is_array_type: extra = self._base_pattern_array else: extra = self._base_pattern self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra) def build_backend_type(self, ffi, finishlist): BItem = self.totype.get_cached_btype(ffi, finishlist, can_delay=True) return global_cache(self, ffi, 'new_pointer_type', BItem) voidp_type = PointerType(void_type) class ConstPointerType(PointerType): _base_pattern = " const *&" _base_pattern_array = "(const *&)" const_voidp_type = ConstPointerType(void_type) class NamedPointerType(PointerType): _attrs_ = ('totype', 'name') def __init__(self, totype, name): PointerType.__init__(self, totype) self.name = name self.c_name_with_marker = name + '&' class ArrayType(BaseType): _attrs_ = ('item', 'length') is_array_type = True def __init__(self, item, length): self.item = item self.length = length # if length is None: brackets = '&[]' elif length == '...': brackets = '&[/*...*/]' else: brackets = '&[%d]' % length self.c_name_with_marker = ( self.item.c_name_with_marker.replace('&', brackets)) def resolve_length(self, newlength): return ArrayType(self.item, newlength) def build_backend_type(self, ffi, finishlist): if self.length == '...': from . 
import api raise api.CDefError("cannot render the type %r: unknown length" % (self,)) self.item.get_cached_btype(ffi, finishlist) # force the item BType BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) char_array_type = ArrayType(PrimitiveType('char'), None) class StructOrUnionOrEnum(BaseTypeByIdentity): _attrs_ = ('name',) forcename = None def build_c_name_with_marker(self): name = self.forcename or '%s %s' % (self.kind, self.name) self.c_name_with_marker = name + '&' def force_the_name(self, forcename): self.forcename = forcename self.build_c_name_with_marker() def get_official_name(self): assert self.c_name_with_marker.endswith('&') return self.c_name_with_marker[:-1] class StructOrUnion(StructOrUnionOrEnum): fixedlayout = None completed = False partial = False packed = False def __init__(self, name, fldnames, fldtypes, fldbitsize): self.name = name self.fldnames = fldnames self.fldtypes = fldtypes self.fldbitsize = fldbitsize self.build_c_name_with_marker() def enumfields(self): for name, type, bitsize in zip(self.fldnames, self.fldtypes, self.fldbitsize): if name == '' and isinstance(type, StructOrUnion): # nested anonymous struct/union for result in type.enumfields(): yield result else: yield (name, type, bitsize) def force_flatten(self): # force the struct or union to have a declaration that lists # directly all fields returned by enumfields(), flattening # nested anonymous structs/unions. 
names = [] types = [] bitsizes = [] for name, type, bitsize in self.enumfields(): names.append(name) types.append(type) bitsizes.append(bitsize) self.fldnames = tuple(names) self.fldtypes = tuple(types) self.fldbitsize = tuple(bitsizes) def get_cached_btype(self, ffi, finishlist, can_delay=False): BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist, can_delay) if not can_delay: self.finish_backend_type(ffi, finishlist) return BType def finish_backend_type(self, ffi, finishlist): if self.completed: if self.completed != 2: raise NotImplementedError("recursive structure declaration " "for '%s'" % (self.name,)) return BType = ffi._cached_btypes[self] if self.fldtypes is None: return # not completing it: it's an opaque struct # self.completed = 1 # if self.fixedlayout is None: fldtypes = [tp.get_cached_btype(ffi, finishlist) for tp in self.fldtypes] lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) sflags = 0 if self.packed: sflags = 8 # SF_PACKED ffi._backend.complete_struct_or_union(BType, lst, self, -1, -1, sflags) # else: fldtypes = [] fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout for i in range(len(self.fldnames)): fsize = fieldsize[i] ftype = self.fldtypes[i] # if isinstance(ftype, ArrayType) and ftype.length == '...': # fix the length to match the total size BItemType = ftype.item.get_cached_btype(ffi, finishlist) nlen, nrest = divmod(fsize, ffi.sizeof(BItemType)) if nrest != 0: self._verification_error( "field '%s.%s' has a bogus size?" 
% ( self.name, self.fldnames[i] or '{}')) ftype = ftype.resolve_length(nlen) self.fldtypes = (self.fldtypes[:i] + (ftype,) + self.fldtypes[i+1:]) # BFieldType = ftype.get_cached_btype(ffi, finishlist) if isinstance(ftype, ArrayType) and ftype.length is None: assert fsize == 0 else: bitemsize = ffi.sizeof(BFieldType) if bitemsize != fsize: self._verification_error( "field '%s.%s' is declared as %d bytes, but is " "really %d bytes" % (self.name, self.fldnames[i] or '{}', bitemsize, fsize)) fldtypes.append(BFieldType) # lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs)) ffi._backend.complete_struct_or_union(BType, lst, self, totalsize, totalalignment) self.completed = 2 def _verification_error(self, msg): from .ffiplatform import VerificationError raise VerificationError(msg) def check_not_partial(self): if self.partial and self.fixedlayout is None: from . import ffiplatform raise ffiplatform.VerificationMissing(self._get_c_name()) def build_backend_type(self, ffi, finishlist): self.check_not_partial() finishlist.append(self) # return global_cache(self, ffi, 'new_%s_type' % self.kind, self.get_official_name(), key=self) class StructType(StructOrUnion): kind = 'struct' class UnionType(StructOrUnion): kind = 'union' class EnumType(StructOrUnionOrEnum): kind = 'enum' partial = False partial_resolved = False def __init__(self, name, enumerators, enumvalues, baseinttype=None): self.name = name self.enumerators = enumerators self.enumvalues = enumvalues self.baseinttype = baseinttype self.build_c_name_with_marker() def force_the_name(self, forcename): StructOrUnionOrEnum.force_the_name(self, forcename) if self.forcename is None: name = self.get_official_name() self.forcename = '$' + name.replace(' ', '_') def check_not_partial(self): if self.partial and not self.partial_resolved: from . 
import ffiplatform raise ffiplatform.VerificationMissing(self._get_c_name()) def build_backend_type(self, ffi, finishlist): self.check_not_partial() base_btype = self.build_baseinttype(ffi, finishlist) return global_cache(self, ffi, 'new_enum_type', self.get_official_name(), self.enumerators, self.enumvalues, base_btype, key=self) def build_baseinttype(self, ffi, finishlist): if self.baseinttype is not None: return self.baseinttype.get_cached_btype(ffi, finishlist) # if self.enumvalues: smallest_value = min(self.enumvalues) largest_value = max(self.enumvalues) else: smallest_value = 0 largest_value = 0 if smallest_value < 0: # needs a signed type sign = 1 candidate1 = PrimitiveType("int") candidate2 = PrimitiveType("long") else: sign = 0 candidate1 = PrimitiveType("unsigned int") candidate2 = PrimitiveType("unsigned long") btype1 = candidate1.get_cached_btype(ffi, finishlist) btype2 = candidate2.get_cached_btype(ffi, finishlist) size1 = ffi.sizeof(btype1) size2 = ffi.sizeof(btype2) if (smallest_value >= ((-1) << (8*size1-1)) and largest_value < (1 << (8*size1-sign))): return btype1 if (smallest_value >= ((-1) << (8*size2-1)) and largest_value < (1 << (8*size2-sign))): return btype2 raise api.CDefError("%s values don't all fit into either 'long' " "or 'unsigned long'" % self._get_c_name()) def unknown_type(name, structname=None): if structname is None: structname = '$%s' % name tp = StructType(structname, None, None, None) tp.force_the_name(name) return tp def unknown_ptr_type(name, structname=None): if structname is None: structname = '*$%s' % name tp = StructType(structname, None, None, None) return NamedPointerType(tp, name) global_lock = allocate_lock() def global_cache(srctype, ffi, funcname, *args, **kwds): key = kwds.pop('key', (funcname, args)) assert not kwds try: return ffi._backend.__typecache[key] except KeyError: pass except AttributeError: # initialize the __typecache attribute, either at the module level # if ffi._backend is a module, or at the class 
level if ffi._backend # is some instance. if isinstance(ffi._backend, types.ModuleType): ffi._backend.__typecache = weakref.WeakValueDictionary() else: type(ffi._backend).__typecache = weakref.WeakValueDictionary() try: res = getattr(ffi._backend, funcname)(*args) except NotImplementedError as e: raise NotImplementedError("%s: %r: %s" % (funcname, srctype, e)) # note that setdefault() on WeakValueDictionary is not atomic # and contains a rare bug (http://bugs.python.org/issue19542); # we have to use a lock and do it ourselves cache = ffi._backend.__typecache with global_lock: res1 = cache.get(key) if res1 is None: cache[key] = res return res else: return res1 def pointer_cache(ffi, BType): return global_cache('?', ffi, 'new_pointer_type', BType) def attach_exception_info(e, name): if e.args and type(e.args[0]) is str: e.args = ('%s: %s' % (name, e.args[0]),) + e.args[1:]
{ "pile_set_name": "Github" }
package io

import (
	"fmt"

	"github.com/stellar/go/xdr"
)

// StatsLedgerTransactionProcessor is a state processor that counts
// transactions (total / successful / failed) and the number of operations,
// broken down by operation type.
type StatsLedgerTransactionProcessor struct {
	results StatsLedgerTransactionProcessorResults
}

// StatsLedgerTransactionProcessorResults contains results after running StatsLedgerTransactionProcessor.
type StatsLedgerTransactionProcessorResults struct {
	Transactions           int64
	TransactionsSuccessful int64
	TransactionsFailed     int64

	Operations             int64
	OperationsInSuccessful int64
	OperationsInFailed     int64

	OperationsCreateAccount            int64
	OperationsPayment                  int64
	OperationsPathPaymentStrictReceive int64
	OperationsManageSellOffer          int64
	OperationsCreatePassiveSellOffer   int64
	OperationsSetOptions               int64
	OperationsChangeTrust              int64
	OperationsAllowTrust               int64
	OperationsAccountMerge             int64
	OperationsInflation                int64
	OperationsManageData               int64
	OperationsBumpSequence             int64
	OperationsManageBuyOffer           int64
	OperationsPathPaymentStrictSend    int64
}

// ProcessTransaction updates the counters for a single ledger transaction.
// It panics on an operation type it does not know about, which indicates the
// switch below is out of date with the xdr package.
func (p *StatsLedgerTransactionProcessor) ProcessTransaction(transaction LedgerTransaction) error {
	// Fetch the operations once instead of calling Operations() twice.
	operations := transaction.Envelope.Operations()
	ops := int64(len(operations))

	p.results.Transactions++
	p.results.Operations += ops

	if transaction.Result.Successful() {
		p.results.TransactionsSuccessful++
		p.results.OperationsInSuccessful += ops
	} else {
		p.results.TransactionsFailed++
		p.results.OperationsInFailed += ops
	}

	for _, op := range operations {
		switch op.Body.Type {
		case xdr.OperationTypeCreateAccount:
			p.results.OperationsCreateAccount++
		case xdr.OperationTypePayment:
			p.results.OperationsPayment++
		case xdr.OperationTypePathPaymentStrictReceive:
			p.results.OperationsPathPaymentStrictReceive++
		case xdr.OperationTypeManageSellOffer:
			p.results.OperationsManageSellOffer++
		case xdr.OperationTypeCreatePassiveSellOffer:
			p.results.OperationsCreatePassiveSellOffer++
		case xdr.OperationTypeSetOptions:
			p.results.OperationsSetOptions++
		case xdr.OperationTypeChangeTrust:
			p.results.OperationsChangeTrust++
		case xdr.OperationTypeAllowTrust:
			p.results.OperationsAllowTrust++
		case xdr.OperationTypeAccountMerge:
			p.results.OperationsAccountMerge++
		case xdr.OperationTypeInflation:
			p.results.OperationsInflation++
		case xdr.OperationTypeManageData:
			p.results.OperationsManageData++
		case xdr.OperationTypeBumpSequence:
			p.results.OperationsBumpSequence++
		case xdr.OperationTypeManageBuyOffer:
			p.results.OperationsManageBuyOffer++
		case xdr.OperationTypePathPaymentStrictSend:
			p.results.OperationsPathPaymentStrictSend++
		default:
			// Fixed typo in the panic message ("Unkown" -> "Unknown").
			panic(fmt.Sprintf("Unknown operation type: %d", op.Body.Type))
		}
	}

	return nil
}

// GetResults returns a copy of the accumulated counters.
func (p *StatsLedgerTransactionProcessor) GetResults() StatsLedgerTransactionProcessorResults {
	return p.results
}

// Map flattens the results into a map keyed by stable "stats_*" names,
// e.g. for structured logging.
func (stats *StatsLedgerTransactionProcessorResults) Map() map[string]interface{} {
	return map[string]interface{}{
		"stats_transactions":            stats.Transactions,
		"stats_transactions_successful": stats.TransactionsSuccessful,
		"stats_transactions_failed":     stats.TransactionsFailed,

		"stats_operations":               stats.Operations,
		"stats_operations_in_successful": stats.OperationsInSuccessful,
		"stats_operations_in_failed":     stats.OperationsInFailed,

		"stats_operations_create_account":              stats.OperationsCreateAccount,
		"stats_operations_payment":                     stats.OperationsPayment,
		"stats_operations_path_payment_strict_receive": stats.OperationsPathPaymentStrictReceive,
		"stats_operations_manage_sell_offer":           stats.OperationsManageSellOffer,
		"stats_operations_create_passive_sell_offer":   stats.OperationsCreatePassiveSellOffer,
		"stats_operations_set_options":                 stats.OperationsSetOptions,
		"stats_operations_change_trust":                stats.OperationsChangeTrust,
		"stats_operations_allow_trust":                 stats.OperationsAllowTrust,
		"stats_operations_account_merge":               stats.OperationsAccountMerge,
		"stats_operations_inflation":                   stats.OperationsInflation,
		"stats_operations_manage_data":                 stats.OperationsManageData,
		"stats_operations_bump_sequence":               stats.OperationsBumpSequence,
		"stats_operations_manage_buy_offer":            stats.OperationsManageBuyOffer,
		"stats_operations_path_payment_strict_send":    stats.OperationsPathPaymentStrictSend,
	}
}
{ "pile_set_name": "Github" }
<div class="apiDetail"> <div> <h2><span>Function(event, treeId, treeNode)</span><span class="path">setting.callback.</span>onCheck</h2> <h3>概述<span class="h3_info">[ 依赖 <span class="highlight_green">jquery.ztree.excheck</span> 扩展 js ]</span></h3> <div class="desc"> <p></p> <div class="longdesc"> <p>用于捕获 checkbox / radio 被勾选 或 取消勾选的事件回调函数</p> <p class="highlight_red">如果设置了 setting.callback.beforeCheck 方法,且返回 false,将无法触发 onCheck 事件回调函数。</p> <p>默认值:null</p> </div> </div> <h3>Function 参数说明</h3> <div class="desc"> <h4><b>event</b><span>js event 对象</span></h4> <p>标准的 js event 对象</p> <h4 class="topLine"><b>treeId</b><span>String</span></h4> <p>对应 zTree 的 <b class="highlight_red">treeId</b>,便于用户操控</p> <h4 class="topLine"><b>treeNode</b><span>JSON</span></h4> <p>被勾选 或 取消勾选的节点 JSON 数据对象</p> </div> <h3>setting & function 举例</h3> <h4>1. 每次点击 checkbox 或 radio 后, 弹出该节点的 tId、name 以及当前勾选状态的信息</h4> <pre xmlns=""><code>function zTreeOnCheck(event, treeId, treeNode) { alert(treeNode.tId + ", " + treeNode.name + "," + treeNode.checked); }; var setting = { callback: { onCheck: zTreeOnCheck } }; ......</code></pre> </div> </div>
{ "pile_set_name": "Github" }
/* * Copyright (c) 2014 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ #include <clc/clc.h> _CLC_OVERLOAD _CLC_DEF float length(float p) { return fabs(p); } #define V_FLENGTH(p) \ float l2 = dot(p, p); \ \ if (l2 < FLT_MIN) { \ p *= 0x1.0p+86F; \ return sqrt(dot(p, p)) * 0x1.0p-86F; \ } else if (l2 == INFINITY) { \ p *= 0x1.0p-65F; \ return sqrt(dot(p, p)) * 0x1.0p+65F; \ } \ \ return sqrt(l2); _CLC_OVERLOAD _CLC_DEF float length(float2 p) { V_FLENGTH(p); } _CLC_OVERLOAD _CLC_DEF float length(float3 p) { V_FLENGTH(p); } _CLC_OVERLOAD _CLC_DEF float length(float4 p) { V_FLENGTH(p); } #ifdef cl_khr_fp64 #pragma OPENCL EXTENSION cl_khr_fp64 : enable _CLC_OVERLOAD _CLC_DEF double length(double p){ return fabs(p); } #define V_DLENGTH(p) \ double l2 = dot(p, p); \ \ if (l2 < DBL_MIN) { \ p *= 0x1.0p+563; \ return sqrt(dot(p, p)) * 0x1.0p-563; \ } else if (l2 == INFINITY) { \ p *= 0x1.0p-513; \ return sqrt(dot(p, p)) * 0x1.0p+513; \ } \ \ return sqrt(l2); _CLC_OVERLOAD _CLC_DEF double length(double2 p) { V_DLENGTH(p); } _CLC_OVERLOAD _CLC_DEF double length(double3 p) { V_DLENGTH(p); } _CLC_OVERLOAD _CLC_DEF double length(double4 p) { V_DLENGTH(p); } #endif #ifdef cl_khr_fp16 #pragma OPENCL EXTENSION cl_khr_fp16 : enable _CLC_OVERLOAD _CLC_DEF half length(half p){ return fabs(p); } // Only available in CLC1.2 #ifndef HALF_MIN #define HALF_MIN 0x1.0p-14h #endif #define V_HLENGTH(p) \ half l2 = dot(p, p); \ \ if (l2 < HALF_MIN) { \ p *= 0x1.0p+12h; \ return sqrt(dot(p, p)) * 0x1.0p-12h; \ } else if (l2 == INFINITY) { \ p *= 0x1.0p-7h; \ return sqrt(dot(p, p)) * 0x1.0p+7h; \ } \ \ return sqrt(l2); _CLC_OVERLOAD _CLC_DEF half length(half2 p) { V_HLENGTH(p); } _CLC_OVERLOAD _CLC_DEF half length(half3 p) { V_HLENGTH(p); } _CLC_OVERLOAD _CLC_DEF half length(half4 p) { V_HLENGTH(p); } #endif
{ "pile_set_name": "Github" }
// // Generated by class-dump 3.5 (64 bit). // // class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by Steve Nygard. // @class NSString, Protocol; @protocol NSObject - (NSString *)description; - (unsigned long long)retainCount; - (id)autorelease; - (oneway void)release; - (id)retain; - (BOOL)respondsToSelector:(SEL)arg1; - (BOOL)conformsToProtocol:(Protocol *)arg1; - (BOOL)isMemberOfClass:(Class)arg1; - (BOOL)isKindOfClass:(Class)arg1; - (BOOL)isProxy; - (id)performSelector:(SEL)arg1 withObject:(id)arg2 withObject:(id)arg3; - (id)performSelector:(SEL)arg1 withObject:(id)arg2; - (id)performSelector:(SEL)arg1; - (struct _NSZone *)zone; - (id)self; - (Class)class; - (Class)superclass; - (unsigned long long)hash; - (BOOL)isEqual:(id)arg1; @optional - (NSString *)debugDescription; @end
{ "pile_set_name": "Github" }
/* $Id: um_xdi.h,v 1.1.2.2 2002/10/02 14:38:38 armin Exp $ */ #ifndef __DIVA_USER_MODE_XDI_H__ #define __DIVA_USER_MODE_XDI_H__ /* Contains declaratiom of structures shared between application and user mode idi driver */ typedef struct _diva_um_idi_adapter_features { dword type; dword features; dword channels; dword serial_number; char name[128]; } diva_um_idi_adapter_features_t; #define DIVA_UM_IDI_REQ_MASK 0x0000FFFF #define DIVA_UM_IDI_REQ_TYPE_MASK (~(DIVA_UM_IDI_REQ_MASK)) #define DIVA_UM_IDI_GET_FEATURES 1 /* trigger features indication */ #define DIVA_UM_IDI_REQ 2 #define DIVA_UM_IDI_REQ_TYPE_MAN 0x10000000 #define DIVA_UM_IDI_REQ_TYPE_SIG 0x20000000 #define DIVA_UM_IDI_REQ_TYPE_NET 0x30000000 #define DIVA_UM_IDI_REQ_MAN (DIVA_UM_IDI_REQ | DIVA_UM_IDI_REQ_TYPE_MAN) #define DIVA_UM_IDI_REQ_SIG (DIVA_UM_IDI_REQ | DIVA_UM_IDI_REQ_TYPE_SIG) #define DIVA_UM_IDI_REQ_NET (DIVA_UM_IDI_REQ | DIVA_UM_IDI_REQ_TYPE_NET) /* data_length bytes will follow this structure */ typedef struct _diva_um_idi_req_hdr { dword type; dword Req; dword ReqCh; dword data_length; } diva_um_idi_req_hdr_t; typedef struct _diva_um_idi_ind_parameters { dword Ind; dword IndCh; } diva_um_idi_ind_parameters_t; typedef struct _diva_um_idi_rc_parameters { dword Rc; dword RcCh; } diva_um_idi_rc_parameters_t; typedef union _diva_um_idi_ind { diva_um_idi_adapter_features_t features; diva_um_idi_ind_parameters_t ind; diva_um_idi_rc_parameters_t rc; } diva_um_idi_ind_t; #define DIVA_UM_IDI_IND_FEATURES 1 /* features indication */ #define DIVA_UM_IDI_IND 2 #define DIVA_UM_IDI_IND_RC 3 /* data_length bytes of data follow this structure */ typedef struct _diva_um_idi_ind_hdr { dword type; diva_um_idi_ind_t hdr; dword data_length; } diva_um_idi_ind_hdr_t; #endif
{ "pile_set_name": "Github" }
var, f, cada_bra, what = 1, 2, 3, 4 a = f"my string {var}, but also {f} and {cada_bra}"
{ "pile_set_name": "Github" }
From fork-admin@xent.com Thu Jul 25 11:08:29 2002 Return-Path: <fork-admin@xent.com> Delivered-To: yyyy@localhost.netnoteinc.com Received: from localhost (localhost [127.0.0.1]) by phobos.labs.netnoteinc.com (Postfix) with ESMTP id 49D5E440D7 for <jm@localhost>; Thu, 25 Jul 2002 06:07:49 -0400 (EDT) Received: from phobos [127.0.0.1] by localhost with IMAP (fetchmail-5.9.0) for jm@localhost (single-drop); Thu, 25 Jul 2002 11:07:49 +0100 (IST) Received: from xent.com ([64.161.22.236]) by dogma.slashnull.org (8.11.6/8.11.6) with ESMTP id g6OIna401332 for <jm@jmason.org>; Wed, 24 Jul 2002 19:49:36 +0100 Received: from lair.xent.com (localhost [127.0.0.1]) by xent.com (Postfix) with ESMTP id D0D45294156; Wed, 24 Jul 2002 11:48:05 -0700 (PDT) Delivered-To: fork@spamassassin.taint.org Received: from cats.ucsc.edu (cats-mx2.ucsc.edu [128.114.129.35]) by xent.com (Postfix) with ESMTP id 523D8294152 for <fork@xent.com>; Wed, 24 Jul 2002 11:47:10 -0700 (PDT) Received: from Tycho (dhcp-60-118.cse.ucsc.edu [128.114.60.118]) by cats.ucsc.edu (8.10.1/8.10.1) with SMTP id g6OIku026813 for <fork@xent.com>; Wed, 24 Jul 2002 11:46:57 -0700 (PDT) From: "Jim Whitehead" <ejw@cse.ucsc.edu> To: <fork@spamassassin.taint.org> Subject: RE: USA USA WE ARE NUMBER ....six. 
Message-Id: <AMEPKEBLDJJCCDEJHAMIGEPAFCAA.ejw@cse.ucsc.edu> MIME-Version: 1.0 Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit X-Priority: 3 (Normal) X-Msmail-Priority: Normal X-Mailer: Microsoft Outlook IMO, Build 9.0.2416 (9.0.2911.0) Importance: Normal In-Reply-To: <Pine.BSO.4.44.0207241312010.357-100000@crank.slack.net> X-Mimeole: Produced By Microsoft MimeOLE V5.50.4133.2400 X-Ucsc-Cats-Mailscanner: Found to be clean Sender: fork-admin@xent.com Errors-To: fork-admin@xent.com X-Beenthere: fork@spamassassin.taint.org X-Mailman-Version: 2.0.11 Precedence: bulk List-Help: <mailto:fork-request@xent.com?subject=help> List-Post: <mailto:fork@spamassassin.taint.org> List-Subscribe: <http://xent.com/mailman/listinfo/fork>, <mailto:fork-request@xent.com?subject=subscribe> List-Id: Friends of Rohit Khare <fork.xent.com> List-Unsubscribe: <http://xent.com/mailman/listinfo/fork>, <mailto:fork-request@xent.com?subject=unsubscribe> List-Archive: <http://xent.com/pipermail/fork/> Date: Wed, 24 Jul 2002 11:45:12 -0700 > "Around the world, there is a growing sense that democracy has not > delivered development" Sakiko Fukuda-Parr UN report author As opposed to some other form of government which has? - Jim http://xent.com/mailman/listinfo/fork
{ "pile_set_name": "Github" }
/* * trace-event-perl. Feed perf script events to an embedded Perl interpreter. * * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include <errno.h> #include <linux/bitmap.h> #include <linux/time64.h> #include "../util.h" #include <EXTERN.h> #include <perl.h> #include "../../perf.h" #include "../callchain.h" #include "../machine.h" #include "../thread.h" #include "../event.h" #include "../trace-event.h" #include "../evsel.h" #include "../debug.h" void boot_Perf__Trace__Context(pTHX_ CV *cv); void boot_DynaLoader(pTHX_ CV *cv); typedef PerlInterpreter * INTERP; void xs_init(pTHX); void xs_init(pTHX) { const char *file = __FILE__; dXSUB_SYS; newXS("Perf::Trace::Context::bootstrap", boot_Perf__Trace__Context, file); newXS("DynaLoader::boot_DynaLoader", boot_DynaLoader, file); } INTERP my_perl; #define TRACE_EVENT_TYPE_MAX \ ((1 << (sizeof(unsigned short) * 8)) - 1) static DECLARE_BITMAP(events_defined, TRACE_EVENT_TYPE_MAX); extern struct scripting_context *scripting_context; static char *cur_field_name; static int zero_flag_atom; static void define_symbolic_value(const char *ev_name, const char *field_name, const char *field_value, const char *field_str) { unsigned long long value; dSP; value = 
eval_flag(field_value); ENTER; SAVETMPS; PUSHMARK(SP); XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); XPUSHs(sv_2mortal(newSVpv(field_name, 0))); XPUSHs(sv_2mortal(newSVuv(value))); XPUSHs(sv_2mortal(newSVpv(field_str, 0))); PUTBACK; if (get_cv("main::define_symbolic_value", 0)) call_pv("main::define_symbolic_value", G_SCALAR); SPAGAIN; PUTBACK; FREETMPS; LEAVE; } static void define_symbolic_values(struct print_flag_sym *field, const char *ev_name, const char *field_name) { define_symbolic_value(ev_name, field_name, field->value, field->str); if (field->next) define_symbolic_values(field->next, ev_name, field_name); } static void define_symbolic_field(const char *ev_name, const char *field_name) { dSP; ENTER; SAVETMPS; PUSHMARK(SP); XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); XPUSHs(sv_2mortal(newSVpv(field_name, 0))); PUTBACK; if (get_cv("main::define_symbolic_field", 0)) call_pv("main::define_symbolic_field", G_SCALAR); SPAGAIN; PUTBACK; FREETMPS; LEAVE; } static void define_flag_value(const char *ev_name, const char *field_name, const char *field_value, const char *field_str) { unsigned long long value; dSP; value = eval_flag(field_value); ENTER; SAVETMPS; PUSHMARK(SP); XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); XPUSHs(sv_2mortal(newSVpv(field_name, 0))); XPUSHs(sv_2mortal(newSVuv(value))); XPUSHs(sv_2mortal(newSVpv(field_str, 0))); PUTBACK; if (get_cv("main::define_flag_value", 0)) call_pv("main::define_flag_value", G_SCALAR); SPAGAIN; PUTBACK; FREETMPS; LEAVE; } static void define_flag_values(struct print_flag_sym *field, const char *ev_name, const char *field_name) { define_flag_value(ev_name, field_name, field->value, field->str); if (field->next) define_flag_values(field->next, ev_name, field_name); } static void define_flag_field(const char *ev_name, const char *field_name, const char *delim) { dSP; ENTER; SAVETMPS; PUSHMARK(SP); XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); XPUSHs(sv_2mortal(newSVpv(field_name, 0))); XPUSHs(sv_2mortal(newSVpv(delim, 0))); PUTBACK; if 
(get_cv("main::define_flag_field", 0)) call_pv("main::define_flag_field", G_SCALAR); SPAGAIN; PUTBACK; FREETMPS; LEAVE; } static void define_event_symbols(struct event_format *event, const char *ev_name, struct print_arg *args) { if (args == NULL) return; switch (args->type) { case PRINT_NULL: break; case PRINT_ATOM: define_flag_value(ev_name, cur_field_name, "0", args->atom.atom); zero_flag_atom = 0; break; case PRINT_FIELD: free(cur_field_name); cur_field_name = strdup(args->field.name); break; case PRINT_FLAGS: define_event_symbols(event, ev_name, args->flags.field); define_flag_field(ev_name, cur_field_name, args->flags.delim); define_flag_values(args->flags.flags, ev_name, cur_field_name); break; case PRINT_SYMBOL: define_event_symbols(event, ev_name, args->symbol.field); define_symbolic_field(ev_name, cur_field_name); define_symbolic_values(args->symbol.symbols, ev_name, cur_field_name); break; case PRINT_HEX: case PRINT_HEX_STR: define_event_symbols(event, ev_name, args->hex.field); define_event_symbols(event, ev_name, args->hex.size); break; case PRINT_INT_ARRAY: define_event_symbols(event, ev_name, args->int_array.field); define_event_symbols(event, ev_name, args->int_array.count); define_event_symbols(event, ev_name, args->int_array.el_size); break; case PRINT_BSTRING: case PRINT_DYNAMIC_ARRAY: case PRINT_DYNAMIC_ARRAY_LEN: case PRINT_STRING: case PRINT_BITMASK: break; case PRINT_TYPE: define_event_symbols(event, ev_name, args->typecast.item); break; case PRINT_OP: if (strcmp(args->op.op, ":") == 0) zero_flag_atom = 1; define_event_symbols(event, ev_name, args->op.left); define_event_symbols(event, ev_name, args->op.right); break; case PRINT_FUNC: default: pr_err("Unsupported print arg type\n"); /* we should warn... 
*/ return; } if (args->next) define_event_symbols(event, ev_name, args->next); } static SV *perl_process_callchain(struct perf_sample *sample, struct perf_evsel *evsel, struct addr_location *al) { AV *list; list = newAV(); if (!list) goto exit; if (!symbol_conf.use_callchain || !sample->callchain) goto exit; if (thread__resolve_callchain(al->thread, &callchain_cursor, evsel, sample, NULL, NULL, scripting_max_stack) != 0) { pr_err("Failed to resolve callchain. Skipping\n"); goto exit; } callchain_cursor_commit(&callchain_cursor); while (1) { HV *elem; struct callchain_cursor_node *node; node = callchain_cursor_current(&callchain_cursor); if (!node) break; elem = newHV(); if (!elem) goto exit; if (!hv_stores(elem, "ip", newSVuv(node->ip))) { hv_undef(elem); goto exit; } if (node->sym) { HV *sym = newHV(); if (!sym) { hv_undef(elem); goto exit; } if (!hv_stores(sym, "start", newSVuv(node->sym->start)) || !hv_stores(sym, "end", newSVuv(node->sym->end)) || !hv_stores(sym, "binding", newSVuv(node->sym->binding)) || !hv_stores(sym, "name", newSVpvn(node->sym->name, node->sym->namelen)) || !hv_stores(elem, "sym", newRV_noinc((SV*)sym))) { hv_undef(sym); hv_undef(elem); goto exit; } } if (node->map) { struct map *map = node->map; const char *dsoname = "[unknown]"; if (map && map->dso) { if (symbol_conf.show_kernel_path && map->dso->long_name) dsoname = map->dso->long_name; else dsoname = map->dso->name; } if (!hv_stores(elem, "dso", newSVpv(dsoname,0))) { hv_undef(elem); goto exit; } } callchain_cursor_advance(&callchain_cursor); av_push(list, newRV_noinc((SV*)elem)); } exit: return newRV_noinc((SV*)list); } static void perl_process_tracepoint(struct perf_sample *sample, struct perf_evsel *evsel, struct addr_location *al) { struct thread *thread = al->thread; struct event_format *event = evsel->tp_format; struct format_field *field; static char handler[256]; unsigned long long val; unsigned long s, ns; int pid; int cpu = sample->cpu; void *data = sample->raw_data; unsigned 
long long nsecs = sample->time; const char *comm = thread__comm_str(thread); dSP; if (evsel->attr.type != PERF_TYPE_TRACEPOINT) return; if (!event) { pr_debug("ug! no event found for type %" PRIu64, (u64)evsel->attr.config); return; } pid = raw_field_value(event, "common_pid", data); sprintf(handler, "%s::%s", event->system, event->name); if (!test_and_set_bit(event->id, events_defined)) define_event_symbols(event, handler, event->print_fmt.args); s = nsecs / NSEC_PER_SEC; ns = nsecs - s * NSEC_PER_SEC; scripting_context->event_data = data; scripting_context->pevent = evsel->tp_format->pevent; ENTER; SAVETMPS; PUSHMARK(SP); XPUSHs(sv_2mortal(newSVpv(handler, 0))); XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context)))); XPUSHs(sv_2mortal(newSVuv(cpu))); XPUSHs(sv_2mortal(newSVuv(s))); XPUSHs(sv_2mortal(newSVuv(ns))); XPUSHs(sv_2mortal(newSViv(pid))); XPUSHs(sv_2mortal(newSVpv(comm, 0))); XPUSHs(sv_2mortal(perl_process_callchain(sample, evsel, al))); /* common fields other than pid can be accessed via xsub fns */ for (field = event->format.fields; field; field = field->next) { if (field->flags & FIELD_IS_STRING) { int offset; if (field->flags & FIELD_IS_DYNAMIC) { offset = *(int *)(data + field->offset); offset &= 0xffff; } else offset = field->offset; XPUSHs(sv_2mortal(newSVpv((char *)data + offset, 0))); } else { /* FIELD_IS_NUMERIC */ val = read_size(event, data + field->offset, field->size); if (field->flags & FIELD_IS_SIGNED) { XPUSHs(sv_2mortal(newSViv(val))); } else { XPUSHs(sv_2mortal(newSVuv(val))); } } } PUTBACK; if (get_cv(handler, 0)) call_pv(handler, G_SCALAR); else if (get_cv("main::trace_unhandled", 0)) { XPUSHs(sv_2mortal(newSVpv(handler, 0))); XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context)))); XPUSHs(sv_2mortal(newSVuv(cpu))); XPUSHs(sv_2mortal(newSVuv(nsecs))); XPUSHs(sv_2mortal(newSViv(pid))); XPUSHs(sv_2mortal(newSVpv(comm, 0))); XPUSHs(sv_2mortal(perl_process_callchain(sample, evsel, al))); call_pv("main::trace_unhandled", G_SCALAR); } 
SPAGAIN; PUTBACK; FREETMPS; LEAVE; } static void perl_process_event_generic(union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel) { dSP; if (!get_cv("process_event", 0)) return; ENTER; SAVETMPS; PUSHMARK(SP); XPUSHs(sv_2mortal(newSVpvn((const char *)event, event->header.size))); XPUSHs(sv_2mortal(newSVpvn((const char *)&evsel->attr, sizeof(evsel->attr)))); XPUSHs(sv_2mortal(newSVpvn((const char *)sample, sizeof(*sample)))); XPUSHs(sv_2mortal(newSVpvn((const char *)sample->raw_data, sample->raw_size))); PUTBACK; call_pv("process_event", G_SCALAR); SPAGAIN; PUTBACK; FREETMPS; LEAVE; } static void perl_process_event(union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct addr_location *al) { perl_process_tracepoint(sample, evsel, al); perl_process_event_generic(event, sample, evsel); } static void run_start_sub(void) { dSP; /* access to Perl stack */ PUSHMARK(SP); if (get_cv("main::trace_begin", 0)) call_pv("main::trace_begin", G_DISCARD | G_NOARGS); } /* * Start trace script */ static int perl_start_script(const char *script, int argc, const char **argv) { const char **command_line; int i, err = 0; command_line = malloc((argc + 2) * sizeof(const char *)); command_line[0] = ""; command_line[1] = script; for (i = 2; i < argc + 2; i++) command_line[i] = argv[i - 2]; my_perl = perl_alloc(); perl_construct(my_perl); if (perl_parse(my_perl, xs_init, argc + 2, (char **)command_line, (char **)NULL)) { err = -1; goto error; } if (perl_run(my_perl)) { err = -1; goto error; } if (SvTRUE(ERRSV)) { err = -1; goto error; } run_start_sub(); free(command_line); return 0; error: perl_free(my_perl); free(command_line); return err; } static int perl_flush_script(void) { return 0; } /* * Stop trace script */ static int perl_stop_script(void) { dSP; /* access to Perl stack */ PUSHMARK(SP); if (get_cv("main::trace_end", 0)) call_pv("main::trace_end", G_DISCARD | G_NOARGS); perl_destruct(my_perl); perl_free(my_perl); return 0; } static 
int perl_generate_script(struct pevent *pevent, const char *outfile) { struct event_format *event = NULL; struct format_field *f; char fname[PATH_MAX]; int not_first, count; FILE *ofp; sprintf(fname, "%s.pl", outfile); ofp = fopen(fname, "w"); if (ofp == NULL) { fprintf(stderr, "couldn't open %s\n", fname); return -1; } fprintf(ofp, "# perf script event handlers, " "generated by perf script -g perl\n"); fprintf(ofp, "# Licensed under the terms of the GNU GPL" " License version 2\n\n"); fprintf(ofp, "# The common_* event handler fields are the most useful " "fields common to\n"); fprintf(ofp, "# all events. They don't necessarily correspond to " "the 'common_*' fields\n"); fprintf(ofp, "# in the format files. Those fields not available as " "handler params can\n"); fprintf(ofp, "# be retrieved using Perl functions of the form " "common_*($context).\n"); fprintf(ofp, "# See Context.pm for the list of available " "functions.\n\n"); fprintf(ofp, "use lib \"$ENV{'PERF_EXEC_PATH'}/scripts/perl/" "Perf-Trace-Util/lib\";\n"); fprintf(ofp, "use lib \"./Perf-Trace-Util/lib\";\n"); fprintf(ofp, "use Perf::Trace::Core;\n"); fprintf(ofp, "use Perf::Trace::Context;\n"); fprintf(ofp, "use Perf::Trace::Util;\n\n"); fprintf(ofp, "sub trace_begin\n{\n\t# optional\n}\n\n"); fprintf(ofp, "sub trace_end\n{\n\t# optional\n}\n"); fprintf(ofp, "\n\ sub print_backtrace\n\ {\n\ my $callchain = shift;\n\ for my $node (@$callchain)\n\ {\n\ if(exists $node->{sym})\n\ {\n\ printf( \"\\t[\\%%x] \\%%s\\n\", $node->{ip}, $node->{sym}{name});\n\ }\n\ else\n\ {\n\ printf( \"\\t[\\%%x]\\n\", $node{ip});\n\ }\n\ }\n\ }\n\n\ "); while ((event = trace_find_next_event(pevent, event))) { fprintf(ofp, "sub %s::%s\n{\n", event->system, event->name); fprintf(ofp, "\tmy ("); fprintf(ofp, "$event_name, "); fprintf(ofp, "$context, "); fprintf(ofp, "$common_cpu, "); fprintf(ofp, "$common_secs, "); fprintf(ofp, "$common_nsecs,\n"); fprintf(ofp, "\t $common_pid, "); fprintf(ofp, "$common_comm, "); fprintf(ofp, 
"$common_callchain,\n\t "); not_first = 0; count = 0; for (f = event->format.fields; f; f = f->next) { if (not_first++) fprintf(ofp, ", "); if (++count % 5 == 0) fprintf(ofp, "\n\t "); fprintf(ofp, "$%s", f->name); } fprintf(ofp, ") = @_;\n\n"); fprintf(ofp, "\tprint_header($event_name, $common_cpu, " "$common_secs, $common_nsecs,\n\t " "$common_pid, $common_comm, $common_callchain);\n\n"); fprintf(ofp, "\tprintf(\""); not_first = 0; count = 0; for (f = event->format.fields; f; f = f->next) { if (not_first++) fprintf(ofp, ", "); if (count && count % 4 == 0) { fprintf(ofp, "\".\n\t \""); } count++; fprintf(ofp, "%s=", f->name); if (f->flags & FIELD_IS_STRING || f->flags & FIELD_IS_FLAG || f->flags & FIELD_IS_SYMBOLIC) fprintf(ofp, "%%s"); else if (f->flags & FIELD_IS_SIGNED) fprintf(ofp, "%%d"); else fprintf(ofp, "%%u"); } fprintf(ofp, "\\n\",\n\t "); not_first = 0; count = 0; for (f = event->format.fields; f; f = f->next) { if (not_first++) fprintf(ofp, ", "); if (++count % 5 == 0) fprintf(ofp, "\n\t "); if (f->flags & FIELD_IS_FLAG) { if ((count - 1) % 5 != 0) { fprintf(ofp, "\n\t "); count = 4; } fprintf(ofp, "flag_str(\""); fprintf(ofp, "%s::%s\", ", event->system, event->name); fprintf(ofp, "\"%s\", $%s)", f->name, f->name); } else if (f->flags & FIELD_IS_SYMBOLIC) { if ((count - 1) % 5 != 0) { fprintf(ofp, "\n\t "); count = 4; } fprintf(ofp, "symbol_str(\""); fprintf(ofp, "%s::%s\", ", event->system, event->name); fprintf(ofp, "\"%s\", $%s)", f->name, f->name); } else fprintf(ofp, "$%s", f->name); } fprintf(ofp, ");\n\n"); fprintf(ofp, "\tprint_backtrace($common_callchain);\n"); fprintf(ofp, "}\n\n"); } fprintf(ofp, "sub trace_unhandled\n{\n\tmy ($event_name, $context, " "$common_cpu, $common_secs, $common_nsecs,\n\t " "$common_pid, $common_comm, $common_callchain) = @_;\n\n"); fprintf(ofp, "\tprint_header($event_name, $common_cpu, " "$common_secs, $common_nsecs,\n\t $common_pid, " "$common_comm, $common_callchain);\n"); fprintf(ofp, 
"\tprint_backtrace($common_callchain);\n"); fprintf(ofp, "}\n\n"); fprintf(ofp, "sub print_header\n{\n" "\tmy ($event_name, $cpu, $secs, $nsecs, $pid, $comm) = @_;\n\n" "\tprintf(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \",\n\t " "$event_name, $cpu, $secs, $nsecs, $pid, $comm);\n}\n"); fprintf(ofp, "\n# Packed byte string args of process_event():\n" "#\n" "# $event:\tunion perf_event\tutil/event.h\n" "# $attr:\tstruct perf_event_attr\tlinux/perf_event.h\n" "# $sample:\tstruct perf_sample\tutil/event.h\n" "# $raw_data:\tperf_sample->raw_data\tutil/event.h\n" "\n" "sub process_event\n" "{\n" "\tmy ($event, $attr, $sample, $raw_data) = @_;\n" "\n" "\tmy @event\t= unpack(\"LSS\", $event);\n" "\tmy @attr\t= unpack(\"LLQQQQQLLQQ\", $attr);\n" "\tmy @sample\t= unpack(\"QLLQQQQQLL\", $sample);\n" "\tmy @raw_data\t= unpack(\"C*\", $raw_data);\n" "\n" "\tuse Data::Dumper;\n" "\tprint Dumper \\@event, \\@attr, \\@sample, \\@raw_data;\n" "}\n"); fclose(ofp); fprintf(stderr, "generated Perl script: %s\n", fname); return 0; } struct scripting_ops perl_scripting_ops = { .name = "Perl", .start_script = perl_start_script, .flush_script = perl_flush_script, .stop_script = perl_stop_script, .process_event = perl_process_event, .generate_script = perl_generate_script, };
{ "pile_set_name": "Github" }
#ifndef DataFormats_Common_HandleBase_h
#define DataFormats_Common_HandleBase_h

/*----------------------------------------------------------------------

Handle: Non-owning "smart pointer" for reference to products and
their provenances.

This is a very preliminary version, and lacks safety features and
elegance.

If the pointed-to product or provenance is destroyed, use of the
Handle becomes undefined. There is no way to query the Handle to
discover if this has happened.

Handles can have:
  -- Product and Provenance pointers both null;
  -- Both pointers valid

To check validity, one can use the isValid() function.

If failedToGet() returns true then the requested data is not available
If failedToGet() returns false but isValid() is also false then no attempt
to get data has occurred

----------------------------------------------------------------------*/

#include <cassert>
#include "DataFormats/Provenance/interface/ProductID.h"
#include "DataFormats/Provenance/interface/ProvenanceFwd.h"
#include "DataFormats/Common/interface/HandleExceptionFactory.h"

#include <algorithm>
#include <memory>

namespace cms {
  class Exception;
}
namespace edm {
  class HandleBase {
  public:
    // Default-constructed handles are invalid: both pointers null, no
    // failure factory stored.
    HandleBase() : product_(nullptr), prov_(nullptr) {}

    // Both pointers must be non-null; the handle does NOT take ownership
    // of either the product or the provenance.
    HandleBase(void const* prod, Provenance const* prov) : product_(prod), prov_(prov) {
      assert(prod);
      assert(prov);
    }

    ~HandleBase() {}

    // Reset to the default (invalid) state, dropping any stored failure
    // factory as well.
    void clear() {
      product_ = nullptr;
      prov_ = nullptr;
      whyFailedFactory_.reset();
    }

    // Member-wise swap; never throws (pointer and shared_ptr swaps only).
    void swap(HandleBase& other) {
      using std::swap;
      swap(product_, other.product_);
      std::swap(prov_, other.prov_);
      swap(whyFailedFactory_, other.whyFailedFactory_);
    }

    // Copy-and-swap assignment: gives the strong exception guarantee.
    HandleBase& operator=(HandleBase const& rhs) {
      HandleBase temp(rhs);
      this->swap(temp);
      return *this;
    }

    // A handle is valid only when both the product and its provenance
    // are set; a failed get leaves both null and isValid() false.
    bool isValid() const { return product_ && prov_; }

    // True when an attempt to get data failed (a failure factory was
    // recorded); false when no attempt has been made yet.
    bool failedToGet() const { return bool(whyFailedFactory_); }

    Provenance const* provenance() const { return prov_; }

    ProductID id() const;

    HandleBase(HandleBase const&) = default;

    ///Used when the attempt to get the data failed
    HandleBase(std::shared_ptr<HandleExceptionFactory const>&& iWhyFailed)
        : product_(), prov_(nullptr), whyFailedFactory_(iWhyFailed) {}

    // Move assignment. NOTE(review): rhs's raw pointers are copied, not
    // cleared — the moved-from handle still points at the same product.
    HandleBase& operator=(HandleBase&& rhs) {
      product_ = rhs.product_;
      prov_ = rhs.prov_;
      whyFailedFactory_ = std::move(rhs.whyFailedFactory_);
      return *this;
    }

    // Build the exception explaining why the get failed; returns an empty
    // shared_ptr when no failure was recorded.
    std::shared_ptr<cms::Exception> whyFailed() const {
      if (whyFailedFactory_.get()) {
        return whyFailedFactory_->make();
      }
      return std::shared_ptr<cms::Exception>();
    }

    std::shared_ptr<HandleExceptionFactory const> const& whyFailedFactory() const { return whyFailedFactory_; }

    explicit operator bool() const { return isValid(); }

    bool operator!() const { return not isValid(); }

  protected:
    void const* productStorage() const;

  private:
    void const* product_;   // non-owning pointer to the product
    Provenance const* prov_;  // non-owning pointer to the provenance
    std::shared_ptr<HandleExceptionFactory const> whyFailedFactory_;
  };

  // Free swap function
  inline void swap(HandleBase& a, HandleBase& b) { a.swap(b); }
}  // namespace edm

#endif
{ "pile_set_name": "Github" }
/*
 * Copyright (c) 2017 MediaTek Inc.
 * Author: Kevin Chen <kevin-cw.chen@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk-provider.h>
#include <linux/platform_device.h>

#include "clk-mtk.h"
#include "clk-gate.h"

#include <dt-bindings/clock/mt6797-clk.h>

/* VENCSYS clock-gate register layout: set/clear/status registers. */
static const struct mtk_gate_regs venc_cg_regs = {
	.set_ofs = 0x0004,
	.clr_ofs = 0x0008,
	.sta_ofs = 0x0000,
};

#define GATE_VENC(_id, _name, _parent, _shift) {	\
		.id = _id,				\
		.name = _name,				\
		.parent_name = _parent,			\
		.regs = &venc_cg_regs,			\
		.shift = _shift,			\
		.ops = &mtk_clk_gate_ops_setclr_inv,	\
	}

static const struct mtk_gate venc_clks[] = {
	GATE_VENC(CLK_VENC_0, "venc_0", "mm_sel", 0),
	GATE_VENC(CLK_VENC_1, "venc_1", "venc_sel", 4),
	GATE_VENC(CLK_VENC_2, "venc_2", "venc_sel", 8),
	GATE_VENC(CLK_VENC_3, "venc_3", "venc_sel", 12),
};

static const struct of_device_id of_match_clk_mt6797_venc[] = {
	{ .compatible = "mediatek,mt6797-vencsys", },
	{}
};

/*
 * Register the VENCSYS clock gates and expose them to consumers via a
 * onecell clock provider on the device node.
 */
static int clk_mt6797_venc_probe(struct platform_device *pdev)
{
	struct clk_onecell_data *clk_data;
	int r;
	struct device_node *node = pdev->dev.of_node;

	clk_data = mtk_alloc_clk_data(CLK_VENC_NR);
	if (!clk_data)
		/* BUGFIX: clk_data was previously dereferenced unchecked */
		return -ENOMEM;

	mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks),
			       clk_data);

	r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
	if (r)
		dev_err(&pdev->dev,
			"could not register clock provider: %s: %d\n",
			pdev->name, r);

	return r;
}

static struct platform_driver clk_mt6797_venc_drv = {
	.probe = clk_mt6797_venc_probe,
	.driver = {
		.name = "clk-mt6797-venc",
		.of_match_table = of_match_clk_mt6797_venc,
	},
};

builtin_platform_driver(clk_mt6797_venc_drv);
{ "pile_set_name": "Github" }
// Copyright (C) 2014 Vicente J. Botet Escriba
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

#ifndef BOOST_THREAD_EXECUTORS_GENERIC_EXECUTOR_REF_HPP
#define BOOST_THREAD_EXECUTORS_GENERIC_EXECUTOR_REF_HPP

#include <boost/thread/detail/config.hpp>
#include <boost/thread/detail/delete.hpp>
#include <boost/thread/detail/move.hpp>
#include <boost/thread/executors/executor.hpp>

#include <boost/shared_ptr.hpp>

#include <boost/config/abi_prefix.hpp>

namespace boost
{
  namespace executors
  {

  /// Adapter that presents a concrete Executor (held by reference) through
  /// the abstract \c executor interface. The referenced executor must
  /// outlive this wrapper.
  template <class Executor>
  class executor_ref : public executor
  {
    Executor& ex;  // non-owning reference to the wrapped executor
  public:
    /// type-erasure to store the works to do
    typedef executors::work work;

    /// executor is not copyable.
    BOOST_THREAD_NO_COPYABLE(executor_ref)
    executor_ref(Executor& ex) : ex(ex) {}

    /**
     * \par Effects
     * Destroys the executor.
     *
     * \par Synchronization
     * The completion of all the closures happen before the completion of the
     * executor destructor.
     */
    ~executor_ref() {};

    /**
     * \par Effects
     * Close the \c executor for submissions.
     * The worker threads will work until there are no more closures to run.
     */
    void close() { ex.close(); }

    /**
     * \par Returns
     * Whether the pool is closed for submissions.
     */
    bool closed() { return ex.closed(); }

    /**
     * \par Effects
     * The specified closure will be scheduled for execution at some point in
     * the future. If invoked closure throws an exception the executor will
     * call std::terminate, as is the case with threads.
     *
     * \par Synchronization
     * Completion of closure on a particular thread happens before destruction
     * of thread's thread local variables.
     *
     * \par Throws
     * \c sync_queue_is_closed if the thread pool is closed.
     * Whatever exception that can be thrown while storing the closure.
     */
    void submit(BOOST_THREAD_RV_REF(work) closure) {
      ex.submit(boost::move(closure));
    }
//    void submit(work& closure) {
//      ex.submit(closure);
//    }

    /**
     * \par Effects
     * Try to execute one task.
     *
     * \par Returns
     * Whether a task has been executed.
     *
     * \par Throws
     * Whatever the current task constructor throws or the task() throws.
     */
    bool try_executing_one() { return ex.try_executing_one(); }

  };

  /// Type-erased executor handle: owns (via shared_ptr) an executor_ref
  /// wrapper around any concrete Executor passed to the constructor.
  class generic_executor_ref
  {
    shared_ptr<executor> ex;  // owns the type-erasing wrapper, not the executor
  public:
    /// type-erasure to store the works to do
    typedef executors::work work;

    template<typename Executor>
    generic_executor_ref(Executor& ex)
    //: ex(make_shared<executor_ref<Executor> >(ex)) // todo check why this doesn't work with C++03
    : ex( new executor_ref<Executor>(ex) )
    {}

    //generic_executor_ref(generic_executor_ref const& other) noexcept    {}
    //generic_executor_ref& operator=(generic_executor_ref const& other) noexcept    {}

    /**
     * \par Effects
     * Close the \c executor for submissions.
     * The worker threads will work until there are no more closures to run.
     */
    void close() { ex->close(); }

    /**
     * \par Returns
     * Whether the pool is closed for submissions.
     */
    bool closed() { return ex->closed(); }

    /**
     * \par Requires
     * \c Closure is a model of Callable(void()) and a model of
     * CopyConstructible/MoveConstructible.
     *
     * \par Effects
     * The specified closure will be scheduled for execution at some point in
     * the future. If invoked closure throws an exception the thread pool will
     * call std::terminate, as is the case with threads.
     *
     * \par Synchronization
     * Completion of closure on a particular thread happens before destruction
     * of thread's thread local variables.
     *
     * \par Throws
     * \c sync_queue_is_closed if the thread pool is closed.
     * Whatever exception that can be thrown while storing the closure.
     */
    void submit(BOOST_THREAD_RV_REF(work) closure)
    {
      ex->submit(boost::move(closure));
    }

#if defined(BOOST_NO_CXX11_RVALUE_REFERENCES)
    // C++03 fallback: lvalue closures are copied into a work object.
    template <typename Closure>
    void submit(Closure & closure)
    {
      //work w ((closure));
      //submit(boost::move(w));
      submit(work(closure));
    }
#endif
    // Plain function pointers need an explicit work wrapper.
    void submit(void (*closure)())
    {
      work w ((closure));
      submit(boost::move(w));
      //submit(work(closure));
    }

    template <typename Closure>
    void submit(BOOST_THREAD_FWD_REF(Closure) closure)
    {
      work w((boost::forward<Closure>(closure)));
      submit(boost::move(w));
    }

//    size_t num_pending_closures() const
//    {
//      return ex->num_pending_closures();
//    }

    /**
     * \par Effects
     * Try to execute one task.
     *
     * \par Returns
     * Whether a task has been executed.
     *
     * \par Throws
     * Whatever the current task constructor throws or the task() throws.
     */
    bool try_executing_one() { return ex->try_executing_one(); }

    /**
     * \par Requires
     * This must be called from a scheduled task.
     *
     * \par Effects
     * reschedule functions until pred()
     */
    template <typename Pred>
    bool reschedule_until(Pred const& pred)
    {
      do {
        //schedule_one_or_yield();
        if ( ! try_executing_one()) {
          return false;
        }
      } while (! pred());
      return true;
    }

  };
  }
  using executors::executor_ref;
  using executors::generic_executor_ref;
}

#include <boost/config/abi_suffix.hpp>

#endif
{ "pile_set_name": "Github" }
var tap = require('tap')
var fs = require('fs')
var server = require('./lib/server.js')
var common = require('./lib/common.js')

var auth = {
  username: 'username',
  password: '%1234@asdf%',
  email: 'i@izs.me',
  alwaysAuth: true
}

var client = common.freshClient()

// Exercises the "publish over an existing package" flow: the registry mock
// answers the first PUT with a 409 conflict, the client then GETs the
// current document (?write=true), and retries the PUT with the fetched _rev.
tap.test('publish again', function (t) {
  // not really a tarball, but doesn't matter
  var bodyPath = require.resolve('../package.json')
  var tarball = fs.createReadStream(bodyPath)
  var pd = fs.readFileSync(bodyPath)
  var pkg = require('../package.json')
  var lastTime = null

  // 1st request: initial PUT. Validate the uploaded document and attachment,
  // then reject with 409 so the client performs the fetch-and-retry dance.
  server.expect('/npm-registry-client', function (req, res) {
    t.equal(req.method, 'PUT')
    var b = ''
    req.setEncoding('utf8')
    req.on('data', function (d) {
      b += d
    })

    req.on('end', function () {
      // keep the parsed body so the later handlers can serve/verify it
      var o = lastTime = JSON.parse(b)
      t.equal(o._id, 'npm-registry-client')
      t.equal(o['dist-tags'].latest, pkg.version)
      t.has(o.versions[pkg.version], pkg)
      t.same(o.maintainers, [ { name: 'username', email: 'i@izs.me' } ])
      var att = o._attachments[ pkg.name + '-' + pkg.version + '.tgz' ]
      // attachment must be the base64 of the exact bytes we streamed up
      t.same(att.data, pd.toString('base64'))
      res.statusCode = 409
      res.json({reason: 'must supply latest _rev to update existing package'})
    })
  })

  // 2nd request: the client fetches the existing document. Serve back the
  // captured body, but renamed to version 0.0.2 and with a _rev, so the
  // retry must merge against it.
  server.expect('/npm-registry-client?write=true', function (req, res) {
    t.equal(req.method, 'GET')
    t.ok(lastTime)
    for (var i in lastTime.versions) {
      var v = lastTime.versions[i]
      delete lastTime.versions[i]
      lastTime.versions['0.0.2'] = v
      lastTime['dist-tags'] = { latest: '0.0.2' }
    }
    lastTime._rev = 'asdf'
    res.json(lastTime)
  })

  // 3rd request: the retried PUT must carry the _rev from the GET and keep
  // the previously-served version intact alongside the new one.
  server.expect('/npm-registry-client', function (req, res) {
    t.equal(req.method, 'PUT')
    t.ok(lastTime)

    var b = ''
    req.setEncoding('utf8')
    req.on('data', function (d) {
      b += d
    })

    req.on('end', function () {
      var o = JSON.parse(b)
      t.equal(o._rev, 'asdf')
      t.deepEqual(o.versions['0.0.2'], o.versions[pkg.version])
      res.statusCode = 201
      res.json({created: true})
    })
  })

  var params = {
    metadata: pkg,
    access: 'public',
    body: tarball,
    auth: auth
  }
  // Kick off the publish; it should transparently survive the 409 above.
  client.publish('http://localhost:1337/', params, function (er, data) {
    if (er) throw er

    t.deepEqual(data, { created: true })
    server.close()
    t.end()
  })
})
{ "pile_set_name": "Github" }
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using RuriLib.Models;

namespace RuriLib.Interfaces
{
    /// <summary>
    /// Interface for a class that manages a collection of wordlists.
    /// </summary>
    public interface IWordlistManager
    {
        /// <summary>
        /// Adds a wordlist to the collection.
        /// </summary>
        /// <param name="wordlist">The wordlist to add</param>
        void Add(Wordlist wordlist);

        /// <summary>
        /// The collection of available wordlists.
        /// </summary>
        IEnumerable<Wordlist> Wordlists { get; }

        /// <summary>
        /// Updates a wordlist. How the existing entry is matched to
        /// <paramref name="wordlist"/> is implementation-defined.
        /// </summary>
        /// <param name="wordlist">The updated wordlist</param>
        void Update(Wordlist wordlist);

        /// <summary>
        /// Removes a given wordlist from the collection.
        /// </summary>
        /// <param name="wordlist">The wordlist to remove</param>
        void Remove(Wordlist wordlist);

        /// <summary>
        /// Deletes wordlists that reference a missing file from the collection.
        /// </summary>
        void DeleteNotFound();

        /// <summary>
        /// Removes all wordlists from the collection.
        /// </summary>
        void RemoveAll();
    }
}
{ "pile_set_name": "Github" }
namespace OpenQA.Selenium.DevTools.Network
{
    using System;
    using Newtonsoft.Json;

    /// <summary>
    /// Fired when WebSocket message error occurs.
    /// Event payload for the DevTools protocol's Network.webSocketFrameError
    /// event; properties are deserialized from the raw protocol JSON.
    /// </summary>
    public sealed class WebSocketFrameErrorEventArgs : EventArgs
    {
        /// <summary>
        /// Request identifier.
        /// </summary>
        [JsonProperty("requestId")]
        public string RequestId
        {
            get;
            set;
        }
        /// <summary>
        /// Timestamp. NOTE(review): presumably a DevTools MonotonicTime in
        /// seconds — confirm against the protocol definition.
        /// </summary>
        [JsonProperty("timestamp")]
        public double Timestamp
        {
            get;
            set;
        }
        /// <summary>
        /// WebSocket error message.
        /// </summary>
        [JsonProperty("errorMessage")]
        public string ErrorMessage
        {
            get;
            set;
        }
    }
}
{ "pile_set_name": "Github" }
{ "id": "my-melody-dresser", "name": "My Melody Dresser", "category": "Furniture", "games": { "nl": { "orderable": false, "interiorThemes": [ "Fairy Tale" ], "set": "Welcome amiibo Update", "rvs": [ "chelsea" ], "buyPrices": [ { "currency": "meow", "value": 3 } ] } } }
{ "pile_set_name": "Github" }
# Source: https://github.com/davidtvs/PyTorch-ENet (MIT) """ Implementation of `ENet: A Deep Neural Network Architecture for Real-Time Semantic Segmentation <https://arxiv.org/abs/1606.02147>`_ """ import torch.nn as nn import torch __all__ = ['ENet'] class InitialBlock(nn.Module): """The initial block is composed of two branches: 1. a main branch which performs a regular convolution with stride 2; 2. an extension branch which performs max-pooling. Doing both operations in parallel and concatenating their results allows for efficient downsampling and expansion. The main branch outputs 13 feature maps while the extension branch outputs 3, for a total of 16 feature maps after concatenation. Keyword arguments: - in_channels (int): the number of input channels. - out_channels (int): the number output channels. - kernel_size (int, optional): the kernel size of the filters used in the convolution layer. Default: 3. - padding (int, optional): zero-padding added to both sides of the input. Default: 0. - bias (bool, optional): Adds a learnable bias to the output if ``True``. Default: False. - relu (bool, optional): When ``True`` ReLU is used as the activation function; otherwise, PReLU is used. Default: True. 
""" def __init__(self, in_channels, out_channels, kernel_size=3, padding=0, bias=False, relu=True): super().__init__() if relu: activation = nn.ReLU() else: activation = nn.PReLU() # Main branch - As stated above the number of output channels for this # branch is the total minus 3, since the remaining channels come from # the extension branch self.main_branch = nn.Conv2d( in_channels, out_channels - 3, kernel_size=kernel_size, stride=2, padding=padding, bias=bias) # Extension branch self.ext_branch = nn.MaxPool2d(kernel_size, stride=2, padding=padding) # Initialize batch normalization to be used after concatenation self.batch_norm = nn.BatchNorm2d(out_channels) # PReLU layer to apply after concatenating the branches self.out_prelu = activation def forward(self, x): main = self.main_branch(x) ext = self.ext_branch(x) # Concatenate branches out = torch.cat((main, ext), 1) # Apply batch normalization out = self.batch_norm(out) return self.out_prelu(out) class RegularBottleneck(nn.Module): """Regular bottlenecks are the main building block of ENet. Main branch: 1. Shortcut connection. Extension branch: 1. 1x1 convolution which decreases the number of channels by ``internal_ratio``, also called a projection; 2. regular, dilated or asymmetric convolution; 3. 1x1 convolution which increases the number of channels back to ``channels``, also called an expansion; 4. dropout as a regularizer. Keyword arguments: - channels (int): the number of input and output channels. - internal_ratio (int, optional): a scale factor applied to ``channels`` used to compute the number of channels after the projection. eg. given ``channels`` equal to 128 and internal_ratio equal to 2 the number of channels after the projection is 64. Default: 4. - kernel_size (int, optional): the kernel size of the filters used in the convolution layer described above in item 2 of the extension branch. Default: 3. - padding (int, optional): zero-padding added to both sides of the input. Default: 0. 
- dilation (int, optional): spacing between kernel elements for the convolution described in item 2 of the extension branch. Default: 1. asymmetric (bool, optional): flags if the convolution described in item 2 of the extension branch is asymmetric or not. Default: False. - dropout_prob (float, optional): probability of an element to be zeroed. Default: 0 (no dropout). - bias (bool, optional): Adds a learnable bias to the output if ``True``. Default: False. - relu (bool, optional): When ``True`` ReLU is used as the activation function; otherwise, PReLU is used. Default: True. """ def __init__(self, channels, internal_ratio=4, kernel_size=3, padding=0, dilation=1, asymmetric=False, dropout_prob=0, bias=False, relu=True): super().__init__() # Check in the internal_scale parameter is within the expected range # [1, channels] if internal_ratio <= 1 or internal_ratio > channels: raise RuntimeError("Value out of range. Expected value in the " "interval [1, {0}], got internal_scale={1}." .format(channels, internal_ratio)) internal_channels = channels // internal_ratio if relu: activation = nn.ReLU() else: activation = nn.PReLU() # Main branch - shortcut connection # Extension branch - 1x1 convolution, followed by a regular, dilated or # asymmetric convolution, followed by another 1x1 convolution, and, # finally, a regularizer (spatial dropout). Number of channels is constant. # 1x1 projection convolution self.ext_conv1 = nn.Sequential( nn.Conv2d( channels, internal_channels, kernel_size=1, stride=1, bias=bias), nn.BatchNorm2d(internal_channels), activation) # If the convolution is asymmetric we split the main convolution in # two. Eg. for a 5x5 asymmetric convolution we have two convolution: # the first is 5x1 and the second is 1x5. 
if asymmetric: self.ext_conv2 = nn.Sequential( nn.Conv2d( internal_channels, internal_channels, kernel_size=(kernel_size, 1), stride=1, padding=(padding, 0), dilation=dilation, bias=bias), nn.BatchNorm2d(internal_channels), activation, nn.Conv2d( internal_channels, internal_channels, kernel_size=(1, kernel_size), stride=1, padding=(0, padding), dilation=dilation, bias=bias), nn.BatchNorm2d(internal_channels), activation) else: self.ext_conv2 = nn.Sequential( nn.Conv2d( internal_channels, internal_channels, kernel_size=kernel_size, stride=1, padding=padding, dilation=dilation, bias=bias), nn.BatchNorm2d(internal_channels), activation) # 1x1 expansion convolution self.ext_conv3 = nn.Sequential( nn.Conv2d( internal_channels, channels, kernel_size=1, stride=1, bias=bias), nn.BatchNorm2d(channels), activation) self.ext_regul = nn.Dropout2d(p=dropout_prob) # PReLU layer to apply after adding the branches self.out_prelu = activation def forward(self, x): # Main branch shortcut main = x # Extension branch ext = self.ext_conv1(x) ext = self.ext_conv2(ext) ext = self.ext_conv3(ext) ext = self.ext_regul(ext) # Add main and extension branches out = main + ext return self.out_prelu(out) class DownsamplingBottleneck(nn.Module): """Downsampling bottlenecks further downsample the feature map size. Main branch: 1. max pooling with stride 2; indices are saved to be used for unpooling later. Extension branch: 1. 2x2 convolution with stride 2 that decreases the number of channels by ``internal_ratio``, also called a projection; 2. regular convolution (by default, 3x3); 3. 1x1 convolution which increases the number of channels to ``out_channels``, also called an expansion; 4. dropout as a regularizer. Keyword arguments: - in_channels (int): the number of input channels. - out_channels (int): the number of output channels. - internal_ratio (int, optional): a scale factor applied to ``channels`` used to compute the number of channels after the projection. eg. 
given ``channels`` equal to 128 and internal_ratio equal to 2 the number of channels after the projection is 64. Default: 4. - kernel_size (int, optional): the kernel size of the filters used in the convolution layer described above in item 2 of the extension branch. Default: 3. - padding (int, optional): zero-padding added to both sides of the input. Default: 0. - dilation (int, optional): spacing between kernel elements for the convolution described in item 2 of the extension branch. Default: 1. - asymmetric (bool, optional): flags if the convolution described in item 2 of the extension branch is asymmetric or not. Default: False. - return_indices (bool, optional): if ``True``, will return the max indices along with the outputs. Useful when unpooling later. - dropout_prob (float, optional): probability of an element to be zeroed. Default: 0 (no dropout). - bias (bool, optional): Adds a learnable bias to the output if ``True``. Default: False. - relu (bool, optional): When ``True`` ReLU is used as the activation function; otherwise, PReLU is used. Default: True. """ def __init__(self, in_channels, out_channels, internal_ratio=4, kernel_size=3, padding=0, return_indices=False, dropout_prob=0, bias=False, relu=True): super().__init__() # Store parameters that are needed later self.return_indices = return_indices # Check in the internal_scale parameter is within the expected range # [1, channels] if internal_ratio <= 1 or internal_ratio > in_channels: raise RuntimeError("Value out of range. Expected value in the " "interval [1, {0}], got internal_scale={1}. 
" .format(in_channels, internal_ratio)) internal_channels = in_channels // internal_ratio if relu: activation = nn.ReLU() else: activation = nn.PReLU() # Main branch - max pooling followed by feature map (channels) padding self.main_max1 = nn.MaxPool2d( kernel_size, stride=2, padding=padding, return_indices=return_indices) # Extension branch - 2x2 convolution, followed by a regular, dilated or # asymmetric convolution, followed by another 1x1 convolution. Number # of channels is doubled. # 2x2 projection convolution with stride 2 self.ext_conv1 = nn.Sequential( nn.Conv2d( in_channels, internal_channels, kernel_size=2, stride=2, bias=bias), nn.BatchNorm2d(internal_channels), activation) # Convolution self.ext_conv2 = nn.Sequential( nn.Conv2d( internal_channels, internal_channels, kernel_size=kernel_size, stride=1, padding=padding, bias=bias), nn.BatchNorm2d(internal_channels), activation) # 1x1 expansion convolution self.ext_conv3 = nn.Sequential( nn.Conv2d( internal_channels, out_channels, kernel_size=1, stride=1, bias=bias), nn.BatchNorm2d(out_channels), activation) self.ext_regul = nn.Dropout2d(p=dropout_prob) # PReLU layer to apply after concatenating the branches self.out_prelu = activation def forward(self, x): # Main branch shortcut if self.return_indices: main, max_indices = self.main_max1(x) else: main = self.main_max1(x) # Extension branch ext = self.ext_conv1(x) ext = self.ext_conv2(ext) ext = self.ext_conv3(ext) ext = self.ext_regul(ext) # Main branch channel padding n, ch_ext, h, w = ext.size() ch_main = main.size()[1] padding = torch.zeros(n, ch_ext - ch_main, h, w) # Before concatenating, check if main is on the CPU or GPU and # convert padding accordingly if main.is_cuda: padding = padding.cuda() # Concatenate main = torch.cat((main, padding), 1) # Add main and extension branches out = main + ext return self.out_prelu(out), max_indices class UpsamplingBottleneck(nn.Module): """The upsampling bottlenecks upsample the feature map resolution using max 
pooling indices stored from the corresponding downsampling bottleneck. Main branch: 1. 1x1 convolution with stride 1 that decreases the number of channels by ``internal_ratio``, also called a projection; 2. max unpool layer using the max pool indices from the corresponding downsampling max pool layer. Extension branch: 1. 1x1 convolution with stride 1 that decreases the number of channels by ``internal_ratio``, also called a projection; 2. transposed convolution (by default, 3x3); 3. 1x1 convolution which increases the number of channels to ``out_channels``, also called an expansion; 4. dropout as a regularizer. Keyword arguments: - in_channels (int): the number of input channels. - out_channels (int): the number of output channels. - internal_ratio (int, optional): a scale factor applied to ``in_channels`` used to compute the number of channels after the projection. eg. given ``in_channels`` equal to 128 and ``internal_ratio`` equal to 2 the number of channels after the projection is 64. Default: 4. - kernel_size (int, optional): the kernel size of the filters used in the convolution layer described above in item 2 of the extension branch. Default: 3. - padding (int, optional): zero-padding added to both sides of the input. Default: 0. - dropout_prob (float, optional): probability of an element to be zeroed. Default: 0 (no dropout). - bias (bool, optional): Adds a learnable bias to the output if ``True``. Default: False. - relu (bool, optional): When ``True`` ReLU is used as the activation function; otherwise, PReLU is used. Default: True. """ def __init__(self, in_channels, out_channels, internal_ratio=4, kernel_size=3, padding=0, dropout_prob=0, bias=False, relu=True): super().__init__() # Check in the internal_scale parameter is within the expected range # [1, channels] if internal_ratio <= 1 or internal_ratio > in_channels: raise RuntimeError("Value out of range. Expected value in the " "interval [1, {0}], got internal_scale={1}. 
" .format(in_channels, internal_ratio)) internal_channels = in_channels // internal_ratio if relu: activation = nn.ReLU() else: activation = nn.PReLU() # Main branch - max pooling followed by feature map (channels) padding self.main_conv1 = nn.Sequential( nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias), nn.BatchNorm2d(out_channels)) # Remember that the stride is the same as the kernel_size, just like # the max pooling layers self.main_unpool1 = nn.MaxUnpool2d(kernel_size=2) # Extension branch - 1x1 convolution, followed by a regular, dilated or # asymmetric convolution, followed by another 1x1 convolution. Number # of channels is doubled. # 1x1 projection convolution with stride 1 self.ext_conv1 = nn.Sequential( nn.Conv2d( in_channels, internal_channels, kernel_size=1, bias=bias), nn.BatchNorm2d(internal_channels), activation) # Transposed convolution self.ext_conv2 = nn.Sequential( nn.ConvTranspose2d( internal_channels, internal_channels, kernel_size=kernel_size, stride=2, padding=padding, output_padding=1, bias=bias), nn.BatchNorm2d(internal_channels), activation) # 1x1 expansion convolution self.ext_conv3 = nn.Sequential( nn.Conv2d( internal_channels, out_channels, kernel_size=1, bias=bias), nn.BatchNorm2d(out_channels), activation) self.ext_regul = nn.Dropout2d(p=dropout_prob) # PReLU layer to apply after concatenating the branches self.out_prelu = activation def forward(self, x, max_indices): # Main branch shortcut main = self.main_conv1(x) main = self.main_unpool1(main, max_indices) # Extension branch ext = self.ext_conv1(x) ext = self.ext_conv2(ext) ext = self.ext_conv3(ext) ext = self.ext_regul(ext) # Add main and extension branches out = main + ext return self.out_prelu(out) class ENet(nn.Module): """Generate the ENet model. :param num_classes: (int): the number of classes to segment. :param encoder_relu: (bool, optional): When ``True`` ReLU is used as the activation function in the encoder blocks/layers; otherwise, PReLU is used. 
Default: False. :param decoder_relu: (bool, optional): When ``True`` ReLU is used as the activation function in the decoder blocks/layers; otherwise, PReLU is used. Default: True. """ def __init__(self, num_classes, encoder_relu=False, decoder_relu=True, **kwargs): super().__init__() self.initial_block = InitialBlock(3, 16, padding=1, relu=encoder_relu) # Stage 1 - Encoder self.downsample1_0 = DownsamplingBottleneck( 16, 64, padding=1, return_indices=True, dropout_prob=0.01, relu=encoder_relu) self.regular1_1 = RegularBottleneck( 64, padding=1, dropout_prob=0.01, relu=encoder_relu) self.regular1_2 = RegularBottleneck( 64, padding=1, dropout_prob=0.01, relu=encoder_relu) self.regular1_3 = RegularBottleneck( 64, padding=1, dropout_prob=0.01, relu=encoder_relu) self.regular1_4 = RegularBottleneck( 64, padding=1, dropout_prob=0.01, relu=encoder_relu) # Stage 2 - Encoder self.downsample2_0 = DownsamplingBottleneck( 64, 128, padding=1, return_indices=True, dropout_prob=0.1, relu=encoder_relu) self.regular2_1 = RegularBottleneck( 128, padding=1, dropout_prob=0.1, relu=encoder_relu) self.dilated2_2 = RegularBottleneck( 128, dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu) self.asymmetric2_3 = RegularBottleneck( 128, kernel_size=5, padding=2, asymmetric=True, dropout_prob=0.1, relu=encoder_relu) self.dilated2_4 = RegularBottleneck( 128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu) self.regular2_5 = RegularBottleneck( 128, padding=1, dropout_prob=0.1, relu=encoder_relu) self.dilated2_6 = RegularBottleneck( 128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu) self.asymmetric2_7 = RegularBottleneck( 128, kernel_size=5, asymmetric=True, padding=2, dropout_prob=0.1, relu=encoder_relu) self.dilated2_8 = RegularBottleneck( 128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu) # Stage 3 - Encoder self.regular3_0 = RegularBottleneck( 128, padding=1, dropout_prob=0.1, relu=encoder_relu) self.dilated3_1 = RegularBottleneck( 128, 
dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu) self.asymmetric3_2 = RegularBottleneck( 128, kernel_size=5, padding=2, asymmetric=True, dropout_prob=0.1, relu=encoder_relu) self.dilated3_3 = RegularBottleneck( 128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu) self.regular3_4 = RegularBottleneck( 128, padding=1, dropout_prob=0.1, relu=encoder_relu) self.dilated3_5 = RegularBottleneck( 128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu) self.asymmetric3_6 = RegularBottleneck( 128, kernel_size=5, asymmetric=True, padding=2, dropout_prob=0.1, relu=encoder_relu) self.dilated3_7 = RegularBottleneck( 128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu) # Stage 4 - Decoder self.upsample4_0 = UpsamplingBottleneck( 128, 64, padding=1, dropout_prob=0.1, relu=decoder_relu) self.regular4_1 = RegularBottleneck( 64, padding=1, dropout_prob=0.1, relu=decoder_relu) self.regular4_2 = RegularBottleneck( 64, padding=1, dropout_prob=0.1, relu=decoder_relu) # Stage 5 - Decoder self.upsample5_0 = UpsamplingBottleneck( 64, 16, padding=1, dropout_prob=0.1, relu=decoder_relu) self.regular5_1 = RegularBottleneck( 16, padding=1, dropout_prob=0.1, relu=decoder_relu) self.transposed_conv = nn.ConvTranspose2d( 16, num_classes, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False) def forward(self, x): # Initial block x = self.initial_block(x) # Stage 1 - Encoder x, max_indices1_0 = self.downsample1_0(x) x = self.regular1_1(x) x = self.regular1_2(x) x = self.regular1_3(x) x = self.regular1_4(x) # Stage 2 - Encoder x, max_indices2_0 = self.downsample2_0(x) x = self.regular2_1(x) x = self.dilated2_2(x) x = self.asymmetric2_3(x) x = self.dilated2_4(x) x = self.regular2_5(x) x = self.dilated2_6(x) x = self.asymmetric2_7(x) x = self.dilated2_8(x) # Stage 3 - Encoder x = self.regular3_0(x) x = self.dilated3_1(x) x = self.asymmetric3_2(x) x = self.dilated3_3(x) x = self.regular3_4(x) x = self.dilated3_5(x) x = self.asymmetric3_6(x) x = 
self.dilated3_7(x) # Stage 4 - Decoder x = self.upsample4_0(x, max_indices2_0) x = self.regular4_1(x) x = self.regular4_2(x) # Stage 5 - Decoder x = self.upsample5_0(x, max_indices1_0) x = self.regular5_1(x) x = self.transposed_conv(x) return x
{ "pile_set_name": "Github" }
/******************************************************************************
 * Copyright (C) 2013 by Jerome Maye                                          *
 * jerome.maye@gmail.com                                                      *
 *                                                                            *
 * This program is free software; you can redistribute it and/or modify      *
 * it under the terms of the Lesser GNU General Public License as published by*
 * the Free Software Foundation; either version 3 of the License, or          *
 * (at your option) any later version.                                        *
 *                                                                            *
 * This program is distributed in the hope that it will be useful,            *
 * but WITHOUT ANY WARRANTY; without even the implied warranty of             *
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the               *
 * Lesser GNU General Public License for more details.                        *
 *                                                                            *
 * You should have received a copy of the Lesser GNU General Public License   *
 * along with this program. If not, see <http://www.gnu.org/licenses/>.       *
 ******************************************************************************/

/** \file DiscreteDistribution1v.h
    \brief This file contains an interface to the univariate discrete
           distributions
  */

// FIX: added an include guard — this header had none, so a translation unit
// including it twice (directly or transitively) would redefine the class.
#ifndef ASLAM_CALIBRATION_STATISTICS_DISCRETEDISTRIBUTION1V_H
#define ASLAM_CALIBRATION_STATISTICS_DISCRETEDISTRIBUTION1V_H

#include "aslam/calibration/functions/DiscreteFunction.h"
#include "aslam/calibration/statistics/Distribution.h"

namespace aslam {
  namespace calibration {

    /** The DiscreteDistribution1v class represents an interface to the
        univariate discrete distributions.
        \brief Univariate discrete distribution
      */
    template <typename X> class DiscreteDistribution<X> :
      public DiscreteFunction<double, X>,
      public virtual Distribution<X> {
    public:
      /** \name Types
        @{
        */
      /// Distribution type
      typedef DiscreteDistribution<X> DistributionType;
      /// Random variable type
      typedef X RandomVariable;
      /// Mean type
      typedef double Mean;
      /// Variance type
      typedef double Variance;
      /// Mode type
      typedef X Mode;
      /// Median type
      typedef double Median;
      /** @}
        */

      /** \name Constructors/Destructor
        @{
        */
      /// Destructor
      virtual ~DiscreteDistribution();
      /** @}
        */

      /** \name Accessors
        @{
        */
      /// Access the probability of the random variable to take on the given
      /// value (probability mass function); must be supplied by subclasses.
      virtual double pmf(const RandomVariable& value) const = 0;
      /// Interface to function: forwards evaluation to the distribution
      virtual double getValue(const X& argument) const;
      /** @}
        */

    };

  }
}

#include "aslam/calibration/statistics/DiscreteDistribution1v.tpp"

#endif // ASLAM_CALIBRATION_STATISTICS_DISCRETEDISTRIBUTION1V_H
{ "pile_set_name": "Github" }
{ "images" : [ { "idiom" : "universal", "filename" : "menu7_h@2x-2.png", "scale" : "1x" }, { "idiom" : "universal", "filename" : "menu7_h@2x-1.png", "scale" : "2x" }, { "idiom" : "universal", "filename" : "menu7_h@2x.png", "scale" : "3x" } ], "info" : { "version" : 1, "author" : "xcode" } }
{ "pile_set_name": "Github" }
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * $Id: AttributeIterator.java 468655 2006-10-28 07:12:06Z minchau $
 */
package org.apache.xpath.axes;

import org.apache.xml.dtm.DTM;
import org.apache.xpath.compiler.Compiler;

/**
 * This class implements an optimized iterator for
 * attribute axes patterns. Unlike the parent ChildTestIterator, it walks
 * the DTM attribute list of the context node directly.
 * @see org.apache.xpath.axes.ChildTestIterator
 * @xsl.usage advanced
 */
public class AttributeIterator extends ChildTestIterator
{
    /** Serialization version identifier. */
    static final long serialVersionUID = -8417986700712229686L;

  /**
   * Create an AttributeIterator object.
   *
   * @param compiler A reference to the Compiler that contains the op map.
   * @param opPos The position within the op map, which contains the
   * location path expression for this iterator.
   *
   * @throws javax.xml.transform.TransformerException
   */
  AttributeIterator(Compiler compiler, int opPos, int analysis)
          throws javax.xml.transform.TransformerException
  {
    super(compiler, opPos, analysis);
  }

  /**
   * Get the next node via getFirstAttribute &amp;&amp; getNextAttribute.
   * A DTM.NULL value of m_lastFetched means iteration has not started yet,
   * so the first attribute of the context node is fetched; otherwise the
   * walk continues from the previously fetched attribute.
   *
   * @return the next attribute node handle, or DTM.NULL when exhausted.
   */
  protected int getNextNode()
  {
    m_lastFetched = (DTM.NULL == m_lastFetched)
                     ? m_cdtm.getFirstAttribute(m_context)
                     : m_cdtm.getNextAttribute(m_lastFetched);

    return m_lastFetched;
  }

  /**
   * Returns the axis being iterated, if it is known.
   *
   * @return Axis.CHILD, etc., or -1 if the axis is not known or is of multiple
   * types.
   */
  public int getAxis()
  {
    return org.apache.xml.dtm.Axis.ATTRIBUTE;
  }
}
{ "pile_set_name": "Github" }
--- layout: 'default' hljs: 'light' component: 'prompt' prop: 'autoReset' propType: 'p' label: '{Boolean}' --- <section class="blue"> <div class="content"> <div class="grid two"> <div class="column"> <h1> Prompt Dialog</h1> A prompt dialog is often used if you want the user to input a value. When a prompt dialog pops up, the user will have to click either "OK" or "Cancel" to proceed after entering an input value. </div> <div class="right column"> <%- @partial('ad') %> </div> </div> </div> </section> <section class="lic"> <div class="content"> Looking for a commercial license ? Keep your source code proprietary and <a href="https://www.uplabs.com/posts/alertifyjs" target="_blank"> Buy a Commercial License Today!</a> </div> </section> <section class="dark"> <div class="content"> <!--Settings--> <div class="segment has-menu"> <%- @partial('nomotion') %> <%- @partial('segment',false, @getDataItem(@document.component, @document.prop)) %> </div> <!--//Settings--> <%- @partial('menu', true, @getDataItem(@document.component, @document.prop)) %> </div> </section>
{ "pile_set_name": "Github" }
package src.test.java;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.openqa.selenium.By;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.firefox.FirefoxDriver;

import java.util.Set;

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.CoreMatchers.*;

/**
 * Selenium examples for switching between browser windows, run against
 * the-internet.herokuapp.com. Two variants are shown: a naive one that
 * indexes into the window-handle collection, and a robust one that matches
 * handles against the known original window.
 */
public class MultipleWindows {
    WebDriver driver;

    @Before
    public void setUp() throws Exception {
        driver = new FirefoxDriver();
    }

    @After
    public void tearDown() throws Exception {
        driver.quit();
    }

    @Test
    public void multipleWindows() throws InterruptedException {
        driver.get("http://the-internet.herokuapp.com/windows");
        driver.findElement(By.cssSelector(".example a")).click();
        Thread.sleep(2000);
        // NOTE(review): getWindowHandles() returns a Set, so the order of
        // allWindows[0]/allWindows[1] is not guaranteed — this test can be
        // flaky. The Redux variant below avoids the assumption by matching
        // handles against the known first window.
        Object[] allWindows = driver.getWindowHandles().toArray();
        driver.switchTo().window(allWindows[0].toString());
        assertThat(driver.getTitle(), is(not("New Window")));
        driver.switchTo().window(allWindows[1].toString());
        assertThat(driver.getTitle(), is("New Window"));
    }

    @Test
    public void multipleWindowsRedux() throws InterruptedException {
        driver.get("http://the-internet.herokuapp.com/windows");
        // Get initial window handle
        String firstWindow = driver.getWindowHandle();
        // Create a newWindow variable
        String newWindow = "";
        // Trigger new window to open
        driver.findElement(By.cssSelector(".example a")).click();
        // Add delay to account for new window load time
        Thread.sleep(2000);
        // Grab all window handles
        Set<String> allWindows = driver.getWindowHandles();
        // Iterate through window handles collection
        // Find the new window handle, storing it in the newWindow variable
        for (String window : allWindows) {
            if (!window.equals(firstWindow)) {
                newWindow = window;
            }
        }
        // Switch to the first window & verify
        driver.switchTo().window(firstWindow);
        assertThat(driver.getTitle(), is(not(equalTo("New Window"))));
        // Switch to the new window & verify
        driver.switchTo().window(newWindow);
        assertThat(driver.getTitle(), is(equalTo("New Window")));
    }
}
{ "pile_set_name": "Github" }
//===- llvm/unittest/ADT/SetVector.cpp ------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // SetVector unit tests. // //===----------------------------------------------------------------------===// #include "llvm/ADT/SetVector.h" #include "gtest/gtest.h" using namespace llvm; TEST(SetVector, EraseTest) { SetVector<int> S; S.insert(0); S.insert(1); S.insert(2); auto I = S.erase(std::next(S.begin())); // Test that the returned iterator is the expected one-after-erase // and the size/contents is the expected sequence {0, 2}. EXPECT_EQ(std::next(S.begin()), I); EXPECT_EQ(2u, S.size()); EXPECT_EQ(0, *S.begin()); EXPECT_EQ(2, *std::next(S.begin())); }
{ "pile_set_name": "Github" }
import React from 'react' // eslint-disable-next-line let context = React.createContext({}) if (typeof document !== 'undefined') { context = React.createContext(window.__routeInfo) } export default context
{ "pile_set_name": "Github" }
var React = require('react-native')

// Thin wrapper around the React Native View component that applies a white
// background by default and still honors any caller-supplied styles
// (caller styles are appended, so they win over the default).
// NOTE: uses the legacy createClass / string-ref API.
var View = React.createClass({
  // Forward imperative native-prop updates to the underlying View.
  setNativeProps() {
    var {view} = this.refs
    view.setNativeProps.apply(view, arguments)
  },
  render() {
    return (
      <React.View
        {...this.props}
        ref="view"
        style={[styles.view].concat(this.props.style || [])}
      />
    )
  }
})

var styles = React.StyleSheet.create({
  view: {
    backgroundColor: 'white',
  },
})

module.exports = View
{ "pile_set_name": "Github" }
using System;
using System.Runtime.InteropServices;

namespace ezTransXP.ExtProtocol
{
   /// <summary>
   /// Owns a handle to a natively loaded (unmanaged) library and resolves
   /// exported functions into managed delegates. The handle is released on
   /// Dispose or finalization.
   /// </summary>
   public sealed class UnmanagedLibraryLoader : IDisposable
   {
      // Native module handle; IntPtr.Zero means no library is loaded.
      private IntPtr _libraryPointer;
      private bool _disposed = false;

      /// <summary>
      /// Loads the native library at <paramref name="path"/>.
      /// </summary>
      /// <exception cref="Exception">Thrown when the library cannot be loaded.</exception>
      public void LoadLibrary( string path )
      {
         // FIX: calling LoadLibrary twice previously leaked the first module
         // handle; release any existing handle before overwriting it.
         if( _libraryPointer != IntPtr.Zero )
         {
            Kernel32.FreeLibrary( _libraryPointer );
            _libraryPointer = IntPtr.Zero;
         }

         _libraryPointer = Kernel32.LoadLibrary( path );
         if( _libraryPointer == IntPtr.Zero )
            throw new Exception( $"Could not load the unmanaged library '{path}'." );
      }

      /// <summary>
      /// Resolves the exported function <paramref name="name"/> from the
      /// loaded library and wraps it as a delegate of type
      /// <typeparamref name="TDelegate"/>.
      /// </summary>
      /// <exception cref="Exception">Thrown when the export cannot be found.</exception>
      public TDelegate LoadFunction<TDelegate>( string name )
      {
         var addr = Kernel32.GetProcAddress( _libraryPointer, name );
         if( addr == IntPtr.Zero )
            throw new Exception( $"Could not find the function pointer for '{name}'." );

         return (TDelegate)(object)Marshal.GetDelegateForFunctionPointer( addr, typeof( TDelegate ) );
      }

      #region IDisposable Support
      void Dispose( bool disposing )
      {
         if( !_disposed )
         {
            // FIX: previously FreeLibrary was invoked unconditionally, even
            // when LoadLibrary was never called (or failed) and the handle
            // was IntPtr.Zero, which is an invalid argument to FreeLibrary.
            if( _libraryPointer != IntPtr.Zero )
            {
               Kernel32.FreeLibrary( _libraryPointer );
               _libraryPointer = IntPtr.Zero;
            }

            _disposed = true;
         }
      }

      ~UnmanagedLibraryLoader()
      {
         Dispose( false );
      }

      public void Dispose()
      {
         Dispose( true );
         GC.SuppressFinalize( this );
      }
      #endregion
   }
}
{ "pile_set_name": "Github" }
Integrating and Migrating Existing Logging ========================================== If you have an existing code base, you likely have existing log messages. This document will explain how to migrate and integrate existing logging into your new Eliot log setup. In particular, this will focus on the Python standard library ``logging`` package, but the same principles apply to other logging libraries. .. _migrating: Route existing logs to Eliot ---------------------------- Eliot includes a ``logging.Handler`` that can take standard library log messages and route them into Eliot. These log messages will *automatically* appear in the correct place in the action tree! Once you add actions to your code these log messages will automatically benefit from Eliot's causal information. To begin with, however, we'll just add routing of log messages to Eliot: .. code-block:: python # Add Eliot Handler to root Logger. You may wish to only route specific # Loggers to Eliot. import logging from eliot.stdlib import EliotHandler logging.getLogger().addHandler(EliotHandler()) Add actions at entry points and other key points ------------------------------------------------ Simply by adding a few key actions—the entry points to the code, as well as key sub-actions—you can start getting value from Eliot's functionality while still getting information from your existing logs. You can leave existing log messages in place, replacing them with Eliot logging opportunistically; they will still be included in your output. .. literalinclude:: ../../../examples/stdlib.py The stdlib logging messages will be included in the correct part of the tree: .. 
code-block:: shell-session $ python examples/stdlib.py | eliot-tree 3f465ee3-7fa9-40e2-8b20-9c0595612a8b └── mypackage:main/1 ⇒ started ├── timestamp: 2018-07-15 16:50:39.230467 ├── mypackage:do_a_thing/2/1 ⇒ started │ ├── timestamp: 2018-07-15 16:50:39.230709 │ └── mypackage:do_a_thing/2/2 ⇒ succeeded │ └── timestamp: 2018-07-15 16:50:39.230836 ├── mypackage:do_a_thing/3/1 ⇒ started │ ├── timestamp: 2018-07-15 16:50:39.230980 │ ├── eliot:stdlib/3/2 │ │ ├── log_level: ERROR │ │ ├── logger: mypackage │ │ ├── message: The number 3 is a bad number, don't use it. │ │ └── timestamp: 2018-07-15 16:50:39.231157 │ └── mypackage:do_a_thing/3/3 ⇒ failed │ ├── exception: builtins.ValueError │ ├── reason: I hate the number 3 │ └── timestamp: 2018-07-15 16:50:39.231364 ├── eliot:stdlib/4 │ ├── log_level: INFO │ ├── logger: mypackage │ ├── message: Number 3 was rejected. │ └── timestamp: 2018-07-15 16:50:39.231515 └── mypackage:main/5 ⇒ succeeded └── timestamp: 2018-07-15 16:50:39.231641
{ "pile_set_name": "Github" }
// RUN: %empty-directory(%t) // RUN: %target-clang %s -all_load %test-resource-dir/%target-sdk-name/libswiftCompatibility50.a %test-resource-dir/%target-sdk-name/libswiftCompatibility51.a -lobjc -o %t/main // RUN: %target-codesign %t/main // RUN: %target-run %t/main // REQUIRES: objc_interop // REQUIRES: executable_test // The compatibility library needs to have no build-time dependencies on // libswiftCore so it can be linked into a program that doesn't link // libswiftCore, but will load it at runtime, such as xctest. // // Test this by linking it into a plain C program and making sure it builds. int main(void) {}
{ "pile_set_name": "Github" }
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ proxy/cluster/Cluster.h is now iocore/cluster/P_ClusterCache.h Clustering compiles but only works if this module is compiled with the entire TS because there are a lot of stuff required for clustering in proxy/http. cache ifdefs FIXME_HOSTDB -> Store ifdef to be turned off when HostDB stops using Store FIXME_MGMT -> Interface for warnings on the manager FIXME_CLUSTER_ARGUMENTS -> runtime arguments for clustering etc. Needs to be added by Eric cluster ifdefs FIXME_WIRELESS_API -> RPC API's for wireless which is probably not going to be used in the future
{ "pile_set_name": "Github" }
/******************************************************
 *
 * canvasdollarzero - implementation file
 *
 * copyleft (c) IOhannes m zmölnig
 *
 *   2007:forum::für::umläute:2007
 *
 * institute of electronic music and acoustics (iem)
 *
 ******************************************************
 *
 * based on iemlib2
 * copyright (c) 2000-2006 Thomas Musil, IEM, KUG, Graz/Austria
 *
 ******************************************************
 *
 * license: GNU General Public License v.2 (or later)
 *
 ******************************************************/

#include "iemguts.h"
#include "g_canvas.h"

/* -------------- canvasdollarzero --------------- */
/* -- receives the $0 value of the parent canvas --*/

static t_class *canvasdollarzero_class;

typedef struct _canvasdollarzero
{
  t_object  x_obj;
  /* the resolved "$0" symbol of the targeted canvas; 0 if none was found */
  t_symbol *s_dollzero;
} t_canvasdollarzero;

/* bang: output the cached $0 symbol (does nothing if resolution failed) */
static void canvasdollarzero_bang(t_canvasdollarzero *x)
{
  if(x->s_dollzero)
    outlet_symbol(x->x_obj.ob_outlet, x->s_dollzero);
}

/* constructor: the optional float argument selects how many levels to walk
 * up the canvas hierarchy (0 = the canvas the object lives on, 1 = its
 * owner, ...); negative values are clamped to 0.  The targeted canvas's
 * "$0" is resolved once here and cached in s_dollzero. */
static void *canvasdollarzero_new(t_floatarg f)
{
  t_canvasdollarzero *x = (t_canvasdollarzero *)pd_new(canvasdollarzero_class);
  t_glist *glist=(t_glist *)canvas_getcurrent();
  t_canvas *canvas=(t_canvas*)glist_getcanvas(glist);
  int depth=(int)f;

  if(depth<0)depth=0;

  /* climb the owner chain; stops early if we run out of parents */
  while(depth && canvas) {
    canvas=canvas->gl_owner;
    depth--;
  }

  x->s_dollzero=0;
  if(canvas) {
    x->s_dollzero = canvas_realizedollar(canvas, gensym("$0"));
  }

  outlet_new(&x->x_obj, &s_symbol);
  return (x);
}

/* setup: register the class with one optional float creation argument */
void canvasdollarzero_setup(void)
{
  iemguts_boilerplate("[canvasdollarzero]", 0);
  canvasdollarzero_class = class_new(gensym("canvasdollarzero"),
                                     (t_newmethod)canvasdollarzero_new, 0,
                                     sizeof(t_canvasdollarzero), 0,
                                     A_DEFFLOAT, 0);
  class_addbang(canvasdollarzero_class, (t_method)canvasdollarzero_bang);
}
{ "pile_set_name": "Github" }
/*
 * Created by SharpDevelop.
 * User: Alexander Petrovskiy
 * Date: 4/12/2012
 * Time: 8:27 PM
 *
 * To change this template use Tools | Options | Coding | Edit Standard Headers.
 */

namespace UIAutomationTest.Commands.Profile
{
    using System.Management.Automation;

    /// <summary>
    /// Test fixture scaffolding for the Set-UiaCurrentTestProfile cmdlet.
    /// Currently contains only per-test setup/teardown of the PowerShell
    /// runspace; attributes support both MbUnit and NUnit runners.
    /// </summary>
    [Cmdlet(VerbsCommon.Set, "UiaCurrentTestProfile")]
    public class SetUiaCurrentTestProfileCommandTestFixture
    {
        // Runs before each test: create the runspace and reset any state
        // left over in the shared CurrentData store.
        [MbUnit.Framework.SetUp][NUnit.Framework.SetUp]
        public void PrepareRunspace()
        {
            MiddleLevelCode.PrepareRunspace();
            CmdletUnitTest.TestRunspace.RunPSCode(
                @"[void]([UIAutomation.CurrentData]::ResetData());");
            CmdletUnitTest.TestRunspace.RunPSCode(
                @"[void]([UIAutomation.CurrentData]::Profiles.Clear());");
        }

        // Runs after each test: tear down the runspace.
        [MbUnit.Framework.TearDown][NUnit.Framework.TearDown]
        public void DisposeRunspace()
        {
            MiddleLevelCode.DisposeRunspace();
        }
    }
}
{ "pile_set_name": "Github" }
/*
 * SK's Minecraft Launcher
 * Copyright (C) 2010-2014 Albert Pham <http://www.sk89q.com> and contributors
 * Please see LICENSE.txt for license information.
 */

package com.skcraft.launcher.auth;

import java.util.Map;

/**
 * Represents an authenticated (or virtual/offline) session.
 */
public interface Session {

    /**
     * Get the user's UUID.
     *
     * @return the user's UUID
     */
    String getUuid();

    /**
     * Get the user's game username.
     *
     * @return the username
     */
    String getName();

    /**
     * Get the client token.
     *
     * @return client token
     */
    String getClientToken();

    /**
     * Get the access token.
     *
     * @return the access token
     */
    String getAccessToken();

    /**
     * Get a map of user properties.
     *
     * @return the map of user properties
     */
    Map<String, String> getUserProperties();

    /**
     * Get the session token string, which is in the form of
     * <code>token:accessToken:uuid</code> for authenticated players, and
     * simply <code>-</code> for offline players.
     *
     * @return the session token
     */
    String getSessionToken();

    /**
     * Get the user type.
     *
     * @return the user type
     */
    UserType getUserType();

    /**
     * Return true if the user is in an online session.
     *
     * @return true if online
     */
    boolean isOnline();

}
{ "pile_set_name": "Github" }
<?php
/**
 * Smarty Internal Plugin Compile Function Plugin
 *
 * Compiles code for the execution of function plugin
 *
 * @package Smarty
 * @subpackage Compiler
 * @author Uwe Tews
 */

/**
 * Smarty Internal Plugin Compile Function Plugin Class
 *
 * @package Smarty
 * @subpackage Compiler
 */
class Smarty_Internal_Compile_Private_Function_Plugin extends Smarty_Internal_CompileBase
{
    /**
     * Attribute definition: Overwrites base class.
     *
     * @var array
     * @see Smarty_Internal_CompileBase
     */
    public $required_attributes = array();
    /**
     * Attribute definition: Overwrites base class.
     *
     * @var array
     * @see Smarty_Internal_CompileBase
     */
    public $optional_attributes = array('_any');

    /**
     * Compiles code for the execution of function plugin
     *
     * @param array  $args      array with attributes from parser
     * @param object $compiler  compiler object
     * @param array  $parameter array with compilation parameter
     * @param string $tag       name of function plugin
     * @param string $function  PHP function name
     * @return string compiled code
     */
    public function compile($args, $compiler, $parameter, $tag, $function)
    {
        // This tag does create output
        $compiler->has_output = true;

        // check and get attributes
        $_attr = $this->getAttributes($compiler, $args);
        // the special 'nocache' attribute is consumed here (it flags the
        // compiler) and must not be forwarded to the plugin function
        if ($_attr['nocache'] === true) {
            $compiler->tag_nocache = true;
        }
        unset($_attr['nocache']);
        // convert attributes into parameter array string; positional
        // (integer-keyed) attributes keep their numeric key unquoted
        $_paramsArray = array();
        foreach ($_attr as $_key => $_value) {
            if (is_int($_key)) {
                $_paramsArray[] = "$_key=>$_value";
            } else {
                $_paramsArray[] = "'$_key'=>$_value";
            }
        }
        $_params = 'array(' . implode(",", $_paramsArray) . ')';
        // compile code: emit a PHP echo of the plugin call at render time
        $output = "<?php echo {$function}({$_params},\$_smarty_tpl);?>\n";
        return $output;
    }
}

?>
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="UTF-8" standalone="yes"?> <!-- Licensed to Apereo under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. Apereo licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at the following location: http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <xs:schema xmlns="https://source.jasig.org/schemas/uportal/io/portlet-definition" targetNamespace="https://source.jasig.org/schemas/uportal/io/portlet-definition" xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:up="https://source.jasig.org/schemas/uportal" xmlns:io="https://source.jasig.org/schemas/uportal/io" elementFormDefault="qualified" attributeFormDefault="unqualified" version="1.0"> <xs:import namespace="https://source.jasig.org/schemas/uportal" schemaLocation="common-types-4.0.xsd"/> <xs:import namespace="https://source.jasig.org/schemas/uportal/io" schemaLocation="common-io-types-4.0.xsd"/> <xs:element name="portlet-definition"> <xs:complexType> <xs:complexContent> <xs:extension base="io:basePortalDataType50"> <xs:sequence> <xs:element name="title" type="xs:string"/> <!-- 'title' is the value that will be displayed in the portlet chrome and in navigation menus. 'name' is intended primarily for internal or admin usage. 'name' can be used to distinguish between similar portlets that may share a title. 
--> <xs:element name="name" type="xs:string"/> <xs:element name="fname" type="up:fname-type"/> <xs:element name="desc" type="xs:string" minOccurs="0"/> <xs:element name="type" type="xs:string"/> <xs:element name="timeout" type="xs:positiveInteger"/> <xs:element name="actionTimeout" type="xs:positiveInteger" minOccurs="0"/> <xs:element name="eventTimeout" type="xs:positiveInteger" minOccurs="0"/> <xs:element name="renderTimeout" type="xs:positiveInteger" minOccurs="0"/> <xs:element name="resourceTimeout" type="xs:positiveInteger" minOccurs="0"/> <xs:element name="portlet-descriptor" type="up:portlet-descriptor"/> <xs:element name="lifecycle" type="lifecycle" minOccurs="0" maxOccurs="1"/> <xs:element name="category" type="xs:string" minOccurs="0" maxOccurs="unbounded"/> <xs:element name="group" type="xs:string" minOccurs="0" maxOccurs="unbounded"/> <xs:element name="user" type="xs:string" minOccurs="0" maxOccurs="unbounded"/> <xs:element name="permissions" type="externalPermissions" minOccurs="0" maxOccurs="1"/> <xs:element name="parameter" type="externalPortletParameter" minOccurs="0" maxOccurs="unbounded"/> <xs:element name="portlet-preference" type="externalPortletPreference" minOccurs="0" maxOccurs="unbounded"/> </xs:sequence> </xs:extension> </xs:complexContent> </xs:complexType> <xs:unique name="unique-category"> <xs:selector xpath="category"/> <xs:field xpath="."/> </xs:unique> <xs:unique name="unique-group"> <xs:selector xpath="group"/> <xs:field xpath="."/> </xs:unique> <xs:unique name="unique-user"> <xs:selector xpath="user"/> <xs:field xpath="."/> </xs:unique> <xs:unique name="unique-parameter"> <xs:selector xpath="parameter"/> <xs:field xpath="name"/> </xs:unique> <xs:unique name="unique-preference"> <xs:selector xpath="portlet-preference"/> <xs:field xpath="name"/> </xs:unique> <xs:unique name="unique-permission-group"> <xs:selector xpath="permissions/permission/group"/> <xs:field xpath="group"/> </xs:unique> </xs:element> <xs:complexType 
name="lifecycle"> <xs:sequence> <xs:element name="entry" type="lifecycleEntry" minOccurs="0" maxOccurs="unbounded" /> </xs:sequence> </xs:complexType> <xs:complexType name="lifecycleEntry"> <xs:simpleContent> <!-- Date-time of the format [-]CCYY-MM-DDThh:mm:ss[Z|(+|-)hh:mm]; see http://books.xmlschemata.org/relaxng/ch19-77049.html --> <xs:extension base="xs:dateTime"> <xs:attribute name="name" type="xs:string"/> <!-- user attribute is optional. --> <xs:attribute name="user" type="xs:string"/> </xs:extension> </xs:simpleContent> </xs:complexType> <xs:complexType name="externalPortletParameter"> <xs:sequence> <xs:element name="name" type="xs:string"/> <xs:element name="value" type="xs:string" minOccurs="0"/> <xs:element name="description" type="xs:string" minOccurs="0"/> </xs:sequence> </xs:complexType> <xs:complexType name="externalPortletPreference"> <xs:sequence> <xs:element name="name" type="xs:string"/> <xs:element name="readOnly" type="xs:boolean" default="false" minOccurs="0"/> <xs:element name="value" type="xs:string" minOccurs="0" maxOccurs="unbounded"/> </xs:sequence> </xs:complexType> <!-- Portlet specific grants: - system: The permission manager name - activity: The name of the permission to grant - group: the list of groups that will be granted the permission --> <xs:complexType name="externalPermissionMemberList"> <xs:sequence> <xs:element name="group" type="xs:string" minOccurs="0" maxOccurs="unbounded"/> </xs:sequence> <xs:attribute name="system" type="xs:string" use="required"/> <xs:attribute name="activity" type="xs:string" use="required"/> </xs:complexType> <!-- optional top level permissions tag --> <xs:complexType name="externalPermissions"> <xs:sequence> <xs:element name="permission" type="externalPermissionMemberList" minOccurs="0" maxOccurs="unbounded"/> </xs:sequence> </xs:complexType> </xs:schema>
{ "pile_set_name": "Github" }
const Suite = require("./default-suite").Suite; const Immutable = require("immutable"); const Denque = require("denque"); const L = require("../../dist/index"); const Finger = require("@paldepind/finger-tree"); const { Cons } = require("./list"); const n = 10000; module.exports = Suite("prepend") .add("Array", function() { let arr = []; for (let i = 0; i < n; ++i) { arr.unshift(i); } return arr.length === n; }) .add("Pure array", function() { let arr = []; for (let i = 0; i < n; ++i) { arr = [i].concat(arr); } return arr.length === n; }) .add("Immutable.js", function() { let list = new Immutable.List(); for (let i = 0; i < n; ++i) { list = list.unshift(i); } return list.size === n; }) .add("Denque", function() { let denque = new Denque(); for (let i = 0; i < n; ++i) { denque.unshift(i); } return denque.length === n; }) .add("Cons", function() { let cons = undefined; for (let i = 0; i < n; ++i) { cons = new Cons(i, cons); } return cons.value === n - 1; }) .add("List", function() { let list = L.empty(); for (let i = 0; i < n; ++i) { list = L.prepend(i, list); } return list.length === n - 1; }) .add("Finger", function() { let tree = Finger.nil; for (let i = 0; i < n; ++i) { tree = Finger.prepend(i, tree); } return tree.suffix.c === n - 1; }) .run({ async: true });
{ "pile_set_name": "Github" }
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/task_manager/tab_contents_information.h" #include <stddef.h> #include "base/callback.h" #include "base/macros.h" #include "base/strings/utf_string_conversions.h" #include "chrome/browser/browser_process.h" #include "chrome/browser/devtools/devtools_window.h" #include "chrome/browser/favicon/favicon_utils.h" #include "chrome/browser/prerender/prerender_manager.h" #include "chrome/browser/prerender/prerender_manager_factory.h" #include "chrome/browser/profiles/profile.h" #include "chrome/browser/profiles/profile_manager.h" #include "chrome/browser/task_manager/renderer_resource.h" #include "chrome/browser/task_manager/task_manager_util.h" #include "chrome/browser/ui/browser.h" #include "chrome/browser/ui/browser_finder.h" #include "chrome/browser/ui/tab_contents/tab_contents_iterator.h" #include "components/favicon/content/content_favicon_driver.h" #include "content/public/browser/render_process_host.h" #include "content/public/browser/web_contents.h" #include "extensions/browser/extension_registry.h" #include "extensions/browser/process_map.h" #include "extensions/common/constants.h" #include "extensions/common/extension_set.h" #include "grit/theme_resources.h" #include "ui/base/l10n/l10n_util.h" #include "ui/base/resource/resource_bundle.h" #include "ui/gfx/image/image_skia.h" using content::WebContents; namespace task_manager { namespace { // Returns true if the WebContents is currently owned by the prerendering // manager. 
bool IsContentsPrerendering(WebContents* web_contents) {
  Profile* profile =
      Profile::FromBrowserContext(web_contents->GetBrowserContext());
  prerender::PrerenderManager* prerender_manager =
      prerender::PrerenderManagerFactory::GetForProfile(profile);
  // A profile may have no prerender manager; treat that as "not prerendering".
  return prerender_manager &&
         prerender_manager->IsWebContentsPrerendering(web_contents, NULL);
}

}  // namespace

// Tracks a single tab contents, prerendered page, or Instant page.
class TabContentsResource : public RendererResource {
 public:
  explicit TabContentsResource(content::WebContents* web_contents);
  ~TabContentsResource() override;

  // Resource methods:
  Type GetType() const override;
  base::string16 GetTitle() const override;
  gfx::ImageSkia GetIcon() const override;
  content::WebContents* GetWebContents() const override;

 private:
  // Returns true if contains content rendered by an extension.
  bool HostsExtension() const;

  // Shared by all instances; lazily loaded in the constructor.
  static gfx::ImageSkia* prerender_icon_;
  content::WebContents* web_contents_;
  Profile* profile_;

  DISALLOW_COPY_AND_ASSIGN(TabContentsResource);
};

gfx::ImageSkia* TabContentsResource::prerender_icon_ = NULL;

TabContentsResource::TabContentsResource(WebContents* web_contents)
    : RendererResource(web_contents->GetRenderProcessHost()->GetHandle(),
                       web_contents->GetRenderViewHost()),
      web_contents_(web_contents),
      profile_(Profile::FromBrowserContext(web_contents->GetBrowserContext())) {
  // Cache the prerender icon the first time any resource is created.
  if (!prerender_icon_) {
    ResourceBundle& rb = ResourceBundle::GetSharedInstance();
    prerender_icon_ = rb.GetImageSkiaNamed(IDR_PRERENDER);
  }
}

TabContentsResource::~TabContentsResource() {}

bool TabContentsResource::HostsExtension() const {
  return web_contents_->GetURL().SchemeIs(extensions::kExtensionScheme);
}

Resource::Type TabContentsResource::GetType() const {
  // A tab that loads an extension URL is considered to be an extension for
  // these purposes, although it's tracked as a TabContentsResource.
  return HostsExtension() ? EXTENSION : RENDERER;
}

base::string16 TabContentsResource::GetTitle() const {
  // Fall back on the URL if there's no title.
  GURL url = web_contents_->GetURL();
  base::string16 tab_title = util::GetTitleFromWebContents(web_contents_);

  // Only classify as an app if the URL is an app and the tab is hosting an
  // extension process. (It's possible to be showing the URL from before it
  // was installed as an app.)
  extensions::ProcessMap* process_map = extensions::ProcessMap::Get(profile_);
  bool is_app =
      extensions::ExtensionRegistry::Get(profile_)
          ->enabled_extensions().GetAppByURL(url) != NULL &&
      process_map->Contains(web_contents_->GetRenderProcessHost()->GetID());

  int message_id = util::GetMessagePrefixID(
      is_app, HostsExtension(), profile_->IsOffTheRecord(),
      IsContentsPrerendering(web_contents_), false);  // is_background
  return l10n_util::GetStringFUTF16(message_id, tab_title);
}

gfx::ImageSkia TabContentsResource::GetIcon() const {
  // Prerendered contents are shown with the static prerender icon instead of
  // a favicon.
  if (IsContentsPrerendering(web_contents_))
    return *prerender_icon_;
  favicon::CreateContentFaviconDriverForWebContents(web_contents_);
  return favicon::ContentFaviconDriver::FromWebContents(web_contents_)
      ->GetFavicon()
      .AsImageSkia();
}

WebContents* TabContentsResource::GetWebContents() const {
  return web_contents_;
}

TabContentsInformation::TabContentsInformation() {}

TabContentsInformation::~TabContentsInformation() {}

// Owned if the contents belongs to a browser tab strip, is a DevTools
// window, or is currently prerendering.
bool TabContentsInformation::CheckOwnership(
    content::WebContents* web_contents) {
  return chrome::FindBrowserWithWebContents(web_contents) ||
         DevToolsWindow::IsDevToolsWindow(web_contents) ||
         IsContentsPrerendering(web_contents);
}

void TabContentsInformation::GetAll(const NewWebContentsCallback& callback) {
  // Report every tab, plus the in-tab DevTools contents attached to it.
  for (TabContentsIterator iterator; !iterator.done(); iterator.Next()) {
    callback.Run(*iterator);
    WebContents* devtools =
        DevToolsWindow::GetInTabWebContents(*iterator, NULL);
    if (devtools)
      callback.Run(devtools);
  }

  // Because a WebContents* may start its life as a prerender, and later be
  // put into a tab, this class tracks the prerender contents in the same
  // way as the tab contents.
  std::vector<Profile*> profiles(
      g_browser_process->profile_manager()->GetLoadedProfiles());
  for (size_t i = 0; i < profiles.size(); ++i) {
    prerender::PrerenderManager* prerender_manager =
        prerender::PrerenderManagerFactory::GetForProfile(profiles[i]);
    if (prerender_manager) {
      const std::vector<content::WebContents*> contentses =
          prerender_manager->GetAllPrerenderingContents();
      for (size_t j = 0; j < contentses.size(); ++j)
        callback.Run(contentses[j]);
    }
  }
}

std::unique_ptr<RendererResource> TabContentsInformation::MakeResource(
    content::WebContents* web_contents) {
  return std::unique_ptr<RendererResource>(
      new TabContentsResource(web_contents));
}

}  // namespace task_manager
{ "pile_set_name": "Github" }
{ "eggPlugin": { "name": "c" } }
{ "pile_set_name": "Github" }
57--Angler/57_Angler_peoplefishing_57_53.jpg 1 780.7754375 293.9236875 32.4326875 35.1625625 0.997288227081
{ "pile_set_name": "Github" }
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin,arm,!go1.12

package unix

// Getdirentries is not implemented on darwin/arm for toolchains older than
// Go 1.12 (see the build constraint above); it always reports ENOSYS.
func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
	return 0, ENOSYS
}
{ "pile_set_name": "Github" }
{% set version = "1.26.0" %} {% set name = "flowCL" %} {% set bioc = "3.11" %} package: name: 'bioconductor-{{ name|lower }}' version: '{{ version }}' source: url: - 'https://bioconductor.org/packages/{{ bioc }}/bioc/src/contrib/{{ name }}_{{ version }}.tar.gz' - 'https://bioarchive.galaxyproject.org/{{ name }}_{{ version }}.tar.gz' - 'https://depot.galaxyproject.org/software/bioconductor-{{ name|lower }}/bioconductor-{{ name|lower }}_{{ version }}_src_all.tar.gz' md5: 6cc6b019fc5df5a6c2ced85f7d29d7ba build: number: 0 rpaths: - lib/R/lib/ - lib/ noarch: generic # Suggests: RUnit, BiocGenerics requirements: host: - 'bioconductor-graph >=1.66.0,<1.67.0' - 'bioconductor-rgraphviz >=2.32.0,<2.33.0' - r-base - r-sparql run: - 'bioconductor-graph >=1.66.0,<1.67.0' - 'bioconductor-rgraphviz >=2.32.0,<2.33.0' - r-base - r-sparql test: commands: - '$R -e "library(''{{ name }}'')"' about: home: 'https://bioconductor.org/packages/{{ bioc }}/bioc/html/{{ name }}.html' license: Artistic-2.0 summary: 'Semantic labelling of flow cytometric cell populations' description: 'Semantic labelling of flow cytometric cell populations.' extra: identifiers: - biotools:flowcl - doi:10.1093/bioinformatics/btu807 parent_recipe: name: bioconductor-flowcl path: recipes/bioconductor-flowcl version: 1.18.1
{ "pile_set_name": "Github" }
/*
 * Copyright 1999-2018 Alibaba Group Holding Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.alibaba.csp.sentinel.dashboard.controller.gateway;

import com.alibaba.csp.sentinel.dashboard.auth.AuthorizationInterceptor;
import com.alibaba.csp.sentinel.dashboard.auth.FakeAuthServiceImpl;
import com.alibaba.csp.sentinel.dashboard.client.SentinelApiClient;
import com.alibaba.csp.sentinel.dashboard.datasource.entity.gateway.GatewayFlowRuleEntity;
import com.alibaba.csp.sentinel.dashboard.datasource.entity.gateway.GatewayParamFlowItemEntity;
import com.alibaba.csp.sentinel.dashboard.discovery.AppManagement;
import com.alibaba.csp.sentinel.dashboard.discovery.SimpleMachineDiscovery;
import com.alibaba.csp.sentinel.dashboard.domain.Result;
import com.alibaba.csp.sentinel.dashboard.domain.vo.gateway.rule.AddFlowRuleReqVo;
import com.alibaba.csp.sentinel.dashboard.domain.vo.gateway.rule.GatewayParamFlowItemVo;
import com.alibaba.csp.sentinel.dashboard.domain.vo.gateway.rule.UpdateFlowRuleReqVo;
import com.alibaba.csp.sentinel.dashboard.repository.gateway.InMemGatewayFlowRuleStore;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.TypeReference;
import org.apache.commons.lang3.time.DateUtils;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.autoconfigure.web.servlet.WebMvcTest;
import org.springframework.boot.test.mock.mockito.MockBean;
import org.springframework.context.annotation.Import;
import org.springframework.http.MediaType;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.test.web.servlet.MockMvc;
import org.springframework.test.web.servlet.MvcResult;
import org.springframework.test.web.servlet.request.MockHttpServletRequestBuilder;
import org.springframework.test.web.servlet.request.MockMvcRequestBuilders;
import org.springframework.test.web.servlet.result.MockMvcResultHandlers;
import org.springframework.test.web.servlet.result.MockMvcResultMatchers;

import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.concurrent.CompletableFuture;

import static com.alibaba.csp.sentinel.slots.block.RuleConstant.*;
import static com.alibaba.csp.sentinel.adapter.gateway.common.SentinelGatewayConstants.*;
import static org.junit.Assert.*;
import static org.mockito.BDDMockito.*;

/**
 * Test cases for {@link GatewayFlowRuleController}.
 *
 * <p>Each test drives the controller through MockMvc, stubs the remote
 * {@code SentinelApiClient} with BDDMockito, and checks both the JSON
 * response and the state of the in-memory rule repository.</p>
 *
 * @author cdfive
 */
@RunWith(SpringRunner.class)
@WebMvcTest(GatewayFlowRuleController.class)
@Import({FakeAuthServiceImpl.class, InMemGatewayFlowRuleStore.class,
        AppManagement.class, SimpleMachineDiscovery.class, AuthorizationInterceptor.class })
public class GatewayFlowRuleControllerTest {

    private static final String TEST_APP = "test_app";

    private static final String TEST_IP = "localhost";

    private static final Integer TEST_PORT = 8719;

    @Autowired
    private MockMvc mockMvc;

    @Autowired
    private InMemGatewayFlowRuleStore repository;

    @MockBean
    private SentinelApiClient sentinelApiClient;

    @Before
    public void before() {
        // Start every test from an empty in-memory rule store.
        repository.clearAll();
    }

    @Test
    public void testQueryFlowRules() throws Exception {
        String path = "/gateway/flow/list.json";

        List<GatewayFlowRuleEntity> entities = new ArrayList<>();

        // Mock two entities
        GatewayFlowRuleEntity entity = new GatewayFlowRuleEntity();
        entity.setId(1L);
        entity.setApp(TEST_APP);
        entity.setIp(TEST_IP);
        entity.setPort(TEST_PORT);
        entity.setResource("httpbin_route");
        entity.setResourceMode(RESOURCE_MODE_ROUTE_ID);
        entity.setGrade(FLOW_GRADE_QPS);
        entity.setCount(5D);
        entity.setInterval(30L);
        entity.setIntervalUnit(GatewayFlowRuleEntity.INTERVAL_UNIT_SECOND);
        entity.setControlBehavior(CONTROL_BEHAVIOR_DEFAULT);
        entity.setBurst(0);
        entity.setMaxQueueingTimeoutMs(0);
        GatewayParamFlowItemEntity itemEntity = new GatewayParamFlowItemEntity();
        entity.setParamItem(itemEntity);
        itemEntity.setParseStrategy(PARAM_PARSE_STRATEGY_CLIENT_IP);
        entities.add(entity);

        GatewayFlowRuleEntity entity2 = new GatewayFlowRuleEntity();
        entity2.setId(2L);
        entity2.setApp(TEST_APP);
        entity2.setIp(TEST_IP);
        entity2.setPort(TEST_PORT);
        entity2.setResource("some_customized_api");
        entity2.setResourceMode(RESOURCE_MODE_CUSTOM_API_NAME);
        entity2.setCount(30D);
        entity2.setInterval(2L);
        entity2.setIntervalUnit(GatewayFlowRuleEntity.INTERVAL_UNIT_MINUTE);
        entity2.setControlBehavior(CONTROL_BEHAVIOR_DEFAULT);
        entity2.setBurst(0);
        entity2.setMaxQueueingTimeoutMs(0);
        GatewayParamFlowItemEntity itemEntity2 = new GatewayParamFlowItemEntity();
        entity2.setParamItem(itemEntity2);
        itemEntity2.setParseStrategy(PARAM_PARSE_STRATEGY_CLIENT_IP);
        entities.add(entity2);

        // Stub the async fetch so the controller receives the two mocked rules.
        CompletableFuture<List<GatewayFlowRuleEntity>> completableFuture = mock(CompletableFuture.class);
        given(completableFuture.get()).willReturn(entities);
        given(sentinelApiClient.fetchGatewayFlowRules(TEST_APP, TEST_IP, TEST_PORT)).willReturn(completableFuture);

        MockHttpServletRequestBuilder requestBuilder = MockMvcRequestBuilders.get(path);
        requestBuilder.param("app", TEST_APP);
        requestBuilder.param("ip", TEST_IP);
        requestBuilder.param("port", String.valueOf(TEST_PORT));

        // Do controller logic
        MvcResult mvcResult = mockMvc.perform(requestBuilder)
                .andExpect(MockMvcResultMatchers.status().isOk()).andDo(MockMvcResultHandlers.print()).andReturn();

        // Verify the fetchGatewayFlowRules method has been called
        verify(sentinelApiClient).fetchGatewayFlowRules(TEST_APP, TEST_IP, TEST_PORT);

        // Verify if two same entities are got
        Result<List<GatewayFlowRuleEntity>> result = JSONObject.parseObject(mvcResult.getResponse().getContentAsString(), new TypeReference<Result<List<GatewayFlowRuleEntity>>>(){});
        assertTrue(result.isSuccess());
        List<GatewayFlowRuleEntity> data = result.getData();
        assertEquals(2, data.size());
        assertEquals(entities, data);

        // Verify the entities are added into the memory repository
        List<GatewayFlowRuleEntity> entitiesInMem = repository.findAllByApp(TEST_APP);
        assertEquals(2, entitiesInMem.size());
        assertEquals(entities, entitiesInMem);
    }

    @Test
    public void testAddFlowRule() throws Exception {
        String path = "/gateway/flow/new.json";

        AddFlowRuleReqVo reqVo = new AddFlowRuleReqVo();
        reqVo.setApp(TEST_APP);
        reqVo.setIp(TEST_IP);
        reqVo.setPort(TEST_PORT);
        reqVo.setResourceMode(RESOURCE_MODE_ROUTE_ID);
        reqVo.setResource("httpbin_route");
        reqVo.setGrade(FLOW_GRADE_QPS);
        reqVo.setCount(5D);
        reqVo.setInterval(30L);
        reqVo.setIntervalUnit(GatewayFlowRuleEntity.INTERVAL_UNIT_SECOND);
        reqVo.setControlBehavior(CONTROL_BEHAVIOR_DEFAULT);
        reqVo.setBurst(0);
        reqVo.setMaxQueueingTimeoutMs(0);

        given(sentinelApiClient.modifyGatewayFlowRules(eq(TEST_APP), eq(TEST_IP), eq(TEST_PORT), any())).willReturn(true);

        MockHttpServletRequestBuilder requestBuilder = MockMvcRequestBuilders.post(path);
        requestBuilder.content(JSON.toJSONString(reqVo)).contentType(MediaType.APPLICATION_JSON);

        // Do controller logic
        MvcResult mvcResult = mockMvc.perform(requestBuilder)
                .andExpect(MockMvcResultMatchers.status().isOk())
                .andDo(MockMvcResultHandlers.print()).andReturn();

        // Verify the modifyGatewayFlowRules method has been called
        verify(sentinelApiClient).modifyGatewayFlowRules(eq(TEST_APP), eq(TEST_IP), eq(TEST_PORT), any());

        Result<GatewayFlowRuleEntity> result = JSONObject.parseObject(mvcResult.getResponse().getContentAsString(), new TypeReference<Result<GatewayFlowRuleEntity>>() {});
        assertTrue(result.isSuccess());

        // Verify the result
        GatewayFlowRuleEntity entity = result.getData();
        assertNotNull(entity);
        assertEquals(TEST_APP, entity.getApp());
        assertEquals(TEST_IP, entity.getIp());
        assertEquals(TEST_PORT, entity.getPort());
        assertEquals(RESOURCE_MODE_ROUTE_ID, entity.getResourceMode().intValue());
        assertEquals("httpbin_route", entity.getResource());
        assertNotNull(entity.getId());
        assertNotNull(entity.getGmtCreate());
        assertNotNull(entity.getGmtModified());

        // Verify the entity which is added in the memory repository
        List<GatewayFlowRuleEntity> entitiesInMem = repository.findAllByApp(TEST_APP);
        assertEquals(1, entitiesInMem.size());
        assertEquals(entity, entitiesInMem.get(0));
    }

    @Test
    public void testUpdateFlowRule() throws Exception {
        String path = "/gateway/flow/save.json";

        // Add one entity into memory repository for update
        GatewayFlowRuleEntity addEntity = new GatewayFlowRuleEntity();
        addEntity.setId(1L);
        addEntity.setApp(TEST_APP);
        addEntity.setIp(TEST_IP);
        addEntity.setPort(TEST_PORT);
        addEntity.setResource("httpbin_route");
        addEntity.setResourceMode(RESOURCE_MODE_ROUTE_ID);
        addEntity.setGrade(FLOW_GRADE_QPS);
        addEntity.setCount(5D);
        addEntity.setInterval(30L);
        addEntity.setIntervalUnit(GatewayFlowRuleEntity.INTERVAL_UNIT_SECOND);
        addEntity.setControlBehavior(CONTROL_BEHAVIOR_DEFAULT);
        addEntity.setBurst(0);
        addEntity.setMaxQueueingTimeoutMs(0);
        Date date = new Date();
        // To make the gmtModified different when do update
        date = DateUtils.addSeconds(date, -1);
        addEntity.setGmtCreate(date);
        addEntity.setGmtModified(date);
        GatewayParamFlowItemEntity addItemEntity = new GatewayParamFlowItemEntity();
        addEntity.setParamItem(addItemEntity);
        addItemEntity.setParseStrategy(PARAM_PARSE_STRATEGY_CLIENT_IP);
        repository.save(addEntity);

        UpdateFlowRuleReqVo reqVo = new UpdateFlowRuleReqVo();
        reqVo.setId(addEntity.getId());
        reqVo.setApp(TEST_APP);
        reqVo.setGrade(FLOW_GRADE_QPS);
        reqVo.setCount(6D);
        reqVo.setInterval(2L);
        reqVo.setIntervalUnit(GatewayFlowRuleEntity.INTERVAL_UNIT_MINUTE);
        reqVo.setControlBehavior(CONTROL_BEHAVIOR_RATE_LIMITER);
        reqVo.setMaxQueueingTimeoutMs(500);
        GatewayParamFlowItemVo itemVo = new GatewayParamFlowItemVo();
        reqVo.setParamItem(itemVo);
        itemVo.setParseStrategy(PARAM_PARSE_STRATEGY_URL_PARAM);
        itemVo.setFieldName("pa");

        given(sentinelApiClient.modifyGatewayFlowRules(eq(TEST_APP), eq(TEST_IP), eq(TEST_PORT), any())).willReturn(true);

        MockHttpServletRequestBuilder requestBuilder = MockMvcRequestBuilders.post(path);
        requestBuilder.content(JSON.toJSONString(reqVo)).contentType(MediaType.APPLICATION_JSON);

        // Do controller logic
        MvcResult mvcResult = mockMvc.perform(requestBuilder)
                .andExpect(MockMvcResultMatchers.status().isOk())
                .andDo(MockMvcResultHandlers.print()).andReturn();

        // Verify the modifyGatewayFlowRules method has been called
        verify(sentinelApiClient).modifyGatewayFlowRules(eq(TEST_APP), eq(TEST_IP), eq(TEST_PORT), any());

        Result<GatewayFlowRuleEntity> result = JSONObject.parseObject(mvcResult.getResponse().getContentAsString(), new TypeReference<Result<GatewayFlowRuleEntity>>() {
        });
        assertTrue(result.isSuccess());

        GatewayFlowRuleEntity entity = result.getData();
        assertNotNull(entity);
        assertEquals(RESOURCE_MODE_ROUTE_ID, entity.getResourceMode().intValue());
        assertEquals("httpbin_route", entity.getResource());
        assertEquals(6D, entity.getCount().doubleValue(), 0);
        assertEquals(2L, entity.getInterval().longValue());
        assertEquals(GatewayFlowRuleEntity.INTERVAL_UNIT_MINUTE, entity.getIntervalUnit().intValue());
        assertEquals(CONTROL_BEHAVIOR_RATE_LIMITER, entity.getControlBehavior().intValue());
        assertEquals(0, entity.getBurst().intValue());
        assertEquals(500, entity.getMaxQueueingTimeoutMs().intValue());
        assertEquals(date, entity.getGmtCreate());
        // To make sure gmtModified has been set and it's different from gmtCreate
        assertNotNull(entity.getGmtModified());
        assertNotEquals(entity.getGmtCreate(), entity.getGmtModified());

        // Verify the entity which is updated in the memory repository
        GatewayParamFlowItemEntity itemEntity = entity.getParamItem();
        assertEquals(PARAM_PARSE_STRATEGY_URL_PARAM, itemEntity.getParseStrategy().intValue());
        assertEquals("pa", itemEntity.getFieldName());
    }

    @Test
    public void testDeleteFlowRule() throws Exception {
        String path = "/gateway/flow/delete.json";

        // Add one entity into memory repository for delete
        GatewayFlowRuleEntity addEntity = new GatewayFlowRuleEntity();
        addEntity.setId(1L);
        addEntity.setApp(TEST_APP);
        addEntity.setIp(TEST_IP);
        addEntity.setPort(TEST_PORT);
        addEntity.setResource("httpbin_route");
        addEntity.setResourceMode(RESOURCE_MODE_ROUTE_ID);
        addEntity.setGrade(FLOW_GRADE_QPS);
        addEntity.setCount(5D);
        addEntity.setInterval(30L);
        addEntity.setIntervalUnit(GatewayFlowRuleEntity.INTERVAL_UNIT_SECOND);
        addEntity.setControlBehavior(CONTROL_BEHAVIOR_DEFAULT);
        addEntity.setBurst(0);
        addEntity.setMaxQueueingTimeoutMs(0);
        Date date = new Date();
        date = DateUtils.addSeconds(date, -1);
        addEntity.setGmtCreate(date);
        addEntity.setGmtModified(date);
        GatewayParamFlowItemEntity addItemEntity = new GatewayParamFlowItemEntity();
        addEntity.setParamItem(addItemEntity);
        addItemEntity.setParseStrategy(PARAM_PARSE_STRATEGY_CLIENT_IP);
        repository.save(addEntity);

        given(sentinelApiClient.modifyGatewayFlowRules(eq(TEST_APP), eq(TEST_IP), eq(TEST_PORT), any())).willReturn(true);

        MockHttpServletRequestBuilder requestBuilder = MockMvcRequestBuilders.post(path);
        requestBuilder.param("id", String.valueOf(addEntity.getId()));

        // Do controller logic
        MvcResult mvcResult = mockMvc.perform(requestBuilder)
                .andExpect(MockMvcResultMatchers.status().isOk()).andDo(MockMvcResultHandlers.print()).andReturn();

        // Verify the modifyGatewayFlowRules method has been called
        verify(sentinelApiClient).modifyGatewayFlowRules(eq(TEST_APP), eq(TEST_IP), eq(TEST_PORT), any());

        // Verify the result
        Result<Long> result = JSONObject.parseObject(mvcResult.getResponse().getContentAsString(), new TypeReference<Result<Long>>() {});
        assertTrue(result.isSuccess());
        assertEquals(addEntity.getId(), result.getData());

        // Now no entities in memory
        List<GatewayFlowRuleEntity> entitiesInMem = repository.findAllByApp(TEST_APP);
        assertEquals(0, entitiesInMem.size());
    }
}
{ "pile_set_name": "Github" }
// moment.js language configuration
// language : bosnian (bs)
// author : Nedim Cholich : https://github.com/frontyard
// based on (hr) translation by Bojan Marković
(function (factory) {
    // UMD wrapper: register with AMD, CommonJS, or the browser global.
    if (typeof define === 'function' && define.amd) {
        define(['moment'], factory); // AMD
    } else if (typeof exports === 'object') {
        module.exports = factory(require('../moment')); // Node
    } else {
        factory(window.moment); // Browser global
    }
}(function (moment) {
    // Builds the correctly pluralized Bosnian phrase for a relative-time
    // unit. Bosnian uses one form for 1, another for 2-4, and a third for
    // everything else.
    function translate(number, withoutSuffix, key) {
        var result = number + " ";
        switch (key) {
        case 'm':
            return withoutSuffix ? 'jedna minuta' : 'jedne minute';
        case 'mm':
            if (number === 1) {
                result += 'minuta';
            } else if (number === 2 || number === 3 || number === 4) {
                result += 'minute';
            } else {
                result += 'minuta';
            }
            return result;
        case 'h':
            return withoutSuffix ? 'jedan sat' : 'jednog sata';
        case 'hh':
            if (number === 1) {
                result += 'sat';
            } else if (number === 2 || number === 3 || number === 4) {
                result += 'sata';
            } else {
                result += 'sati';
            }
            return result;
        case 'dd':
            if (number === 1) {
                result += 'dan';
            } else {
                result += 'dana';
            }
            return result;
        case 'MM':
            if (number === 1) {
                result += 'mjesec';
            } else if (number === 2 || number === 3 || number === 4) {
                result += 'mjeseca';
            } else {
                result += 'mjeseci';
            }
            return result;
        case 'yy':
            if (number === 1) {
                result += 'godina';
            } else if (number === 2 || number === 3 || number === 4) {
                result += 'godine';
            } else {
                result += 'godina';
            }
            return result;
        }
    }

    return moment.lang('bs', {
        months : "januar_februar_mart_april_maj_juni_juli_avgust_septembar_oktobar_novembar_decembar".split("_"),
        monthsShort : "jan._feb._mar._apr._maj._jun._jul._avg._sep._okt._nov._dec.".split("_"),
        weekdays : "nedjelja_ponedjeljak_utorak_srijeda_četvrtak_petak_subota".split("_"),
        weekdaysShort : "ned._pon._uto._sri._čet._pet._sub.".split("_"),
        weekdaysMin : "ne_po_ut_sr_če_pe_su".split("_"),
        longDateFormat : {
            LT : "H:mm",
            L : "DD. MM. YYYY",
            LL : "D. MMMM YYYY",
            LLL : "D. MMMM YYYY LT",
            LLLL : "dddd, D. MMMM YYYY LT"
        },
        calendar : {
            sameDay : '[danas u] LT',
            nextDay : '[sutra u] LT',
            // Weekday-specific phrasing for the upcoming week.
            nextWeek : function () {
                switch (this.day()) {
                case 0:
                    return '[u] [nedjelju] [u] LT';
                case 3:
                    return '[u] [srijedu] [u] LT';
                case 6:
                    return '[u] [subotu] [u] LT';
                case 1:
                case 2:
                case 4:
                case 5:
                    return '[u] dddd [u] LT';
                }
            },
            lastDay : '[jučer u] LT',
            // Weekday-specific phrasing for the previous week.
            lastWeek : function () {
                switch (this.day()) {
                case 0:
                case 3:
                    return '[prošlu] dddd [u] LT';
                case 6:
                    return '[prošle] [subote] [u] LT';
                case 1:
                case 2:
                case 4:
                case 5:
                    return '[prošli] dddd [u] LT';
                }
            },
            sameElse : 'L'
        },
        relativeTime : {
            future : "za %s",
            past : "prije %s",
            s : "par sekundi",
            m : translate,
            mm : translate,
            h : translate,
            hh : translate,
            d : "dan",
            dd : translate,
            M : "mjesec",
            MM : translate,
            y : "godinu",
            yy : translate
        },
        ordinal : '%d.',
        week : {
            dow : 1, // Monday is the first day of the week.
            doy : 7  // The week that contains Jan 1st is the first week of the year.
        }
    });
}));
{ "pile_set_name": "Github" }
CREATE TABLE t1(id INT NOT NULL); ########################################## # Shutdown ########################################## ########################################## # Restart ########################################## ########################################## # Run plugin ########################################## INSTALL PLUGIN test_session_info SONAME 'TEST_SESSION_INFO'; ########################################## # Stop plugin ########################################## UNINSTALL PLUGIN test_session_info; ########################################## # Plugin log ########################################## ============================================================================================ Test in a server thread Opening Session 1 Opening Session 2 ============================================================================================ Session 1 : srv_session_info_get_thd and srv_session_info_get_session_id ============================================================================================ Thread handler id IS equal to session id returned by srv_session_info_get_session_id(Session_1) ============================================================================================ Session 2 : srv_session_info_get_thd and srv_session_info_get_session_id ============================================================================================ Thread handler id IS equal to session id returned by srv_session_info_get_session_id(Session_2) SELECT name,type,processlist_id,processlist_user,processlist_host,processlist_db,processlist_command,processlist_state,processlist_info,`role`,instrumented,history,connection_type FROM performance_schema.threads WHERE processlist_id = 9 name type processlist_id processlist_user processlist_host processlist_db processlist_command processlist_state processlist_info role instrumented history connection_type thread/sql/one_connection FOREGROUND 9 root localhost test Query executing SELECT 
name,type,processlist_id,processlist_user,processlist_host,processlist_db,processlist_command,processlist_state,processlist_info,`role`,instrumented,history,connection_type FROM performance_schema.threads WHERE processlist_id = 9 [NULL] YES YES Plugin num_cols : 13 nb rows : 1 affected rows : 0 server status : 2 warn count : 0 SELECT name,type,processlist_id,processlist_user,processlist_host,processlist_db,processlist_command,processlist_state,processlist_info,`role`,instrumented,history,connection_type FROM performance_schema.threads WHERE processlist_id = 10 name type processlist_id processlist_user processlist_host processlist_db processlist_command processlist_state processlist_info role instrumented history connection_type thread/sql/one_connection FOREGROUND 10 root localhost test Query executing SELECT name,type,processlist_id,processlist_user,processlist_host,processlist_db,processlist_command,processlist_state,processlist_info,`role`,instrumented,history,connection_type FROM performance_schema.threads WHERE processlist_id = 10 [NULL] YES YES Plugin num_cols : 13 nb rows : 1 affected rows : 0 server status : 2 warn count : 0 ============================================================================================ Session 1 : srv_session_info_get_current_db ============================================================================================ /*Session_1*/ SHOW TABLES LIKE '%slave%' error : 1046 error msg : No database selected current_db before init_db : (null) current_db after init_db : mysql /*Session_1*/ SHOW TABLES LIKE '%slave%' Tables_in_mysql (%slave%) slave_master_info slave_relay_log_info slave_worker_info num_cols : 1 nb rows : 3 affected rows : 0 server status : 2 warn count : 0 /*Session_1*/ USE information_schema affected rows : 0 server status : 2 warn count : 0 current_db after 'USE db_name' command : information_schema current_db before init_db : information_schema current_db after init_db : test /*Session_1*/ SHOW TABLES 
Tables_in_test t1 num_cols : 1 nb rows : 1 affected rows : 0 server status : 2 warn count : 0 ============================================================================================ Session 2 : srv_session_info_get_current_db ============================================================================================ /*Session_2*/ SHOW TABLES LIKE '%slave%' error : 1046 error msg : No database selected current_db before init_db : (null) current_db after init_db : mysql Session 2's view /*Session_2*/ SHOW TABLES LIKE '%slave%' Tables_in_mysql (%slave%) slave_master_info slave_relay_log_info slave_worker_info num_cols : 1 nb rows : 3 affected rows : 0 server status : 2 warn count : 0 Session 2's view /*Session_2*/ USE information_schema affected rows : 0 server status : 2 warn count : 0 current_db after 'USE db_name' command : information_schema current_db before init_db : information_schema current_db after init_db : test Session 2's view /*Session_2*/ SHOW TABLES Tables_in_test t1 num_cols : 1 nb rows : 1 affected rows : 0 server status : 2 warn count : 0 ============================================================================================ Session 1 : srv_session_info_set/get_client_port ============================================================================================ Port before srv_session_info_set_client_port : 0 Port after srv_session_info_set_client_port : 100 Session 1's view /*Session_1*/ SELECT host FROM INFORMATION_SCHEMA.PROCESSLIST WHERE info LIKE 'PLUGIN%' ORDER BY id host localhost:100 localhost num_cols : 1 nb rows : 2 affected rows : 0 server status : 34 warn count : 0 Session 2's view /*Session_2*/ SELECT host FROM INFORMATION_SCHEMA.PROCESSLIST WHERE info LIKE 'PLUGIN%' ORDER BY id host localhost:100 localhost num_cols : 1 nb rows : 2 affected rows : 0 server status : 34 warn count : 0 ============================================================================================ Session 2 : srv_session_info_set/get_client_port 
============================================================================================ Port before srv_session_info_set_client_port : 0 Port after srv_session_info_set_client_port : 200 Session 1's view /*Session_1*/ SELECT host FROM INFORMATION_SCHEMA.PROCESSLIST WHERE info LIKE 'PLUGIN%' ORDER BY id host localhost:100 localhost:200 num_cols : 1 nb rows : 2 affected rows : 0 server status : 34 warn count : 0 Session 2's view /*Session_2*/ SELECT host FROM INFORMATION_SCHEMA.PROCESSLIST WHERE info LIKE 'PLUGIN%' ORDER BY id host localhost:100 localhost:200 num_cols : 1 nb rows : 2 affected rows : 0 server status : 34 warn count : 0 ============================================================================================ Session 1 : srv_session_info_set_connection_type ============================================================================================ Session 1's view SELECT CONNECTION_TYPE, CONNECTION_TYPE IS NULL FROM performance_schema.threads WHERE PROCESSLIST_ID = 9 /*session_1_id*/ CONNECTION_TYPE CONNECTION_TYPE IS NULL Plugin 0 num_cols : 2 nb rows : 1 affected rows : 0 server status : 2 warn count : 0 Setting NO_VIO_TYPE on session_1 Session 1's view SELECT CONNECTION_TYPE FROM performance_schema.threads WHERE PROCESSLIST_ID = 9 /*session_1_id*/ CONNECTION_TYPE Plugin num_cols : 1 nb rows : 1 affected rows : 0 server status : 2 warn count : 0 Setting VIO_TYPE_TCPIP on session_1 Session 1's view SELECT CONNECTION_TYPE FROM performance_schema.threads WHERE PROCESSLIST_ID = 9 /*session_1_id*/ CONNECTION_TYPE TCP/IP num_cols : 1 nb rows : 1 affected rows : 0 server status : 2 warn count : 0 Setting VIO_TYPE_NAMEDPIPE on session_1 Session 1's view SELECT CONNECTION_TYPE FROM performance_schema.threads WHERE PROCESSLIST_ID = 9 /*session_1_id*/ CONNECTION_TYPE Named Pipe num_cols : 1 nb rows : 1 affected rows : 0 server status : 2 warn count : 0 ============================================================================================ Session 2 
: srv_session_info_set_connection_type ============================================================================================ Session 2's view SELECT CONNECTION_TYPE FROM performance_schema.threads WHERE PROCESSLIST_ID = 10 /*session_2_id*/ CONNECTION_TYPE Plugin num_cols : 1 nb rows : 1 affected rows : 0 server status : 2 warn count : 0 Setting VIO_TYPE_SSL on session_2 Setting VIO_TYPE_TCPIP on session_1 Session 2's view SELECT CONNECTION_TYPE FROM performance_schema.threads WHERE PROCESSLIST_ID = 10 /*session_2_id*/ CONNECTION_TYPE SSL/TLS num_cols : 1 nb rows : 1 affected rows : 0 server status : 2 warn count : 0 Session 2's view SELECT CONNECTION_TYPE FROM performance_schema.threads WHERE PROCESSLIST_ID = 10 /*session_2_id*/ CONNECTION_TYPE Shared Memory num_cols : 1 nb rows : 1 affected rows : 0 server status : 2 warn count : 0 ============================================================================================ BEFORE kill of Session 1 ============================================================================================ SELECT ID, USER, HOST, DB, COMMAND, INFO FROM INFORMATION_SCHEMA.PROCESSLIST WHERE info LIKE 'PLUGIN%' ORDER BY id ID USER HOST DB COMMAND INFO 9 root localhost:100 test Query PLUGIN: SELECT ID, USER, HOST, DB, COMMAND, INFO FROM INFORMATION_SCHEMA.PROCESSLIST WHERE info LIKE 'PLUGIN%' ORDER BY id 10 root localhost:200 test Sleep PLUGIN num_cols : 6 nb rows : 2 affected rows : 0 server status : 34 warn count : 0 ============================================================================================ srv_session_info_killed(Session_1) : 0 srv_session_info_killed(Session_2) : 0 ============================================================================================ Killing Session 1 KILL CONNECTION 9 /*session_1_id*/ affected rows : 0 server status : 2 warn count : 0 ============================================================================================ AFTER kill of Session 1 
============================================================================================ SELECT ID, USER, HOST, DB, COMMAND, INFO FROM INFORMATION_SCHEMA.PROCESSLIST WHERE info LIKE 'PLUGIN%' ORDER BY id ============================================================================================ srv_session_info_killed(Session 1) : 1 srv_session_info_killed(Session 2) : 0 ============================================================================================ Closing Session 1 Closing Session 1 failed as expected. It was already closed by EXEC_TEST_CMD ============================================================================================ Get/Set session info with closed session(Session 1) ============================================================================================ srv_session_info_get_thd : 0 srv_session_info_get_session_id : 0 srv_session_info_set_client_port : 1 srv_session_info_get_client_port : 0 srv_session_info_get_current_db : (null) srv_session_info_set_connection_type : 1 ============================================================================================ SELECT ID, USER, HOST, DB, COMMAND, INFO FROM INFORMATION_SCHEMA.PROCESSLIST WHERE info LIKE 'PLUGIN%' ORDER BY id ============================================================================================ Perform KILL QUERY and suicide (KILL CONNECTION) on Session 2 ============================================================================================ KILL QUERY 10 /*session_2_id*/ srv_session_info_killed(Session 2) : 0 KILL CONNECTION 10 /*session_2_id*/ srv_session_info_killed(Session 2) : 1 ============================================================================================ Get/Set session info with killed session(Session 2) ============================================================================================ srv_session_info_get_thd : 1 srv_session_info_get_session_id : 10 srv_session_info_set_client_port : 0 
srv_session_info_get_client_port : 11111 srv_session_info_get_current_db : test srv_session_info_set_connection_type : 0 ============================================================================================ Session 2 got killed but not closed, thus it will appear in the processlist as Killed ============================================================================================ /*Session 3*/SELECT ID, USER, HOST, DB, COMMAND, INFO FROM INFORMATION_SCHEMA.PROCESSLIST WHERE info LIKE 'PLUGIN%' ORDER BY id ID USER HOST DB COMMAND INFO 10 root localhost:11111 test Killed PLUGIN 11 root localhost [NULL] Query PLUGIN: /*Session 3*/SELECT ID, USER, HOST, DB, COMMAND, INFO FROM INFORMATION_SCHEMA.PROCESSLIST WHERE info LIKE 'PLUGIN%' ORDER BY id num_cols : 6 nb rows : 2 affected rows : 0 server status : 34 warn count : 0 Closing Session 2 Closing Session 3 Follows threaded run ============================================================================================ init thread Opening Session 1 Opening Session 2 ============================================================================================ Session 1 : srv_session_info_get_thd and srv_session_info_get_session_id ============================================================================================ Thread handler id IS equal to session id returned by srv_session_info_get_session_id(Session_1) ============================================================================================ Session 2 : srv_session_info_get_thd and srv_session_info_get_session_id ============================================================================================ Thread handler id IS equal to session id returned by srv_session_info_get_session_id(Session_2) SELECT name,type,processlist_id,processlist_user,processlist_host,processlist_db,processlist_command,processlist_state,processlist_info,`role`,instrumented,history,connection_type FROM performance_schema.threads WHERE processlist_id = 12 name type 
processlist_id processlist_user processlist_host processlist_db processlist_command processlist_state processlist_info role instrumented history connection_type thread/test_service_sql/session_info FOREGROUND 12 root localhost [NULL] Query executing SELECT name,type,processlist_id,processlist_user,processlist_host,processlist_db,processlist_command,processlist_state,processlist_info,`role`,instrumented,history,connection_type FROM performance_schema.threads WHERE processlist_id = 12 [NULL] YES YES Plugin num_cols : 13 nb rows : 1 affected rows : 0 server status : 2 warn count : 0 SELECT name,type,processlist_id,processlist_user,processlist_host,processlist_db,processlist_command,processlist_state,processlist_info,`role`,instrumented,history,connection_type FROM performance_schema.threads WHERE processlist_id = 13 name type processlist_id processlist_user processlist_host processlist_db processlist_command processlist_state processlist_info role instrumented history connection_type thread/test_service_sql/session_info FOREGROUND 13 root localhost [NULL] Query executing SELECT name,type,processlist_id,processlist_user,processlist_host,processlist_db,processlist_command,processlist_state,processlist_info,`role`,instrumented,history,connection_type FROM performance_schema.threads WHERE processlist_id = 13 [NULL] YES YES Plugin num_cols : 13 nb rows : 1 affected rows : 0 server status : 2 warn count : 0 ============================================================================================ Session 1 : srv_session_info_get_current_db ============================================================================================ /*Session_1*/ SHOW TABLES LIKE '%slave%' error : 1046 error msg : No database selected current_db before init_db : (null) current_db after init_db : mysql /*Session_1*/ SHOW TABLES LIKE '%slave%' Tables_in_mysql (%slave%) slave_master_info slave_relay_log_info slave_worker_info num_cols : 1 nb rows : 3 affected rows : 0 server status : 2 warn 
count : 0 /*Session_1*/ USE information_schema affected rows : 0 server status : 2 warn count : 0 current_db after 'USE db_name' command : information_schema current_db before init_db : information_schema current_db after init_db : test /*Session_1*/ SHOW TABLES Tables_in_test t1 num_cols : 1 nb rows : 1 affected rows : 0 server status : 2 warn count : 0 ============================================================================================ Session 2 : srv_session_info_get_current_db ============================================================================================ /*Session_2*/ SHOW TABLES LIKE '%slave%' error : 1046 error msg : No database selected current_db before init_db : (null) current_db after init_db : mysql Session 2's view /*Session_2*/ SHOW TABLES LIKE '%slave%' Tables_in_mysql (%slave%) slave_master_info slave_relay_log_info slave_worker_info num_cols : 1 nb rows : 3 affected rows : 0 server status : 2 warn count : 0 Session 2's view /*Session_2*/ USE information_schema affected rows : 0 server status : 2 warn count : 0 current_db after 'USE db_name' command : information_schema current_db before init_db : information_schema current_db after init_db : test Session 2's view /*Session_2*/ SHOW TABLES Tables_in_test t1 num_cols : 1 nb rows : 1 affected rows : 0 server status : 2 warn count : 0 ============================================================================================ Session 1 : srv_session_info_set/get_client_port ============================================================================================ Port before srv_session_info_set_client_port : 0 Port after srv_session_info_set_client_port : 100 Session 1's view /*Session_1*/ SELECT host FROM INFORMATION_SCHEMA.PROCESSLIST WHERE info LIKE 'PLUGIN%' ORDER BY id host localhost:100 localhost num_cols : 1 nb rows : 2 affected rows : 0 server status : 34 warn count : 0 Session 2's view /*Session_2*/ SELECT host FROM INFORMATION_SCHEMA.PROCESSLIST WHERE info LIKE 
'PLUGIN%' ORDER BY id host localhost:100 localhost num_cols : 1 nb rows : 2 affected rows : 0 server status : 34 warn count : 0 ============================================================================================ Session 2 : srv_session_info_set/get_client_port ============================================================================================ Port before srv_session_info_set_client_port : 0 Port after srv_session_info_set_client_port : 200 Session 1's view /*Session_1*/ SELECT host FROM INFORMATION_SCHEMA.PROCESSLIST WHERE info LIKE 'PLUGIN%' ORDER BY id host localhost:100 localhost:200 num_cols : 1 nb rows : 2 affected rows : 0 server status : 34 warn count : 0 Session 2's view /*Session_2*/ SELECT host FROM INFORMATION_SCHEMA.PROCESSLIST WHERE info LIKE 'PLUGIN%' ORDER BY id host localhost:100 localhost:200 num_cols : 1 nb rows : 2 affected rows : 0 server status : 34 warn count : 0 ============================================================================================ Session 1 : srv_session_info_set_connection_type ============================================================================================ Session 1's view SELECT CONNECTION_TYPE, CONNECTION_TYPE IS NULL FROM performance_schema.threads WHERE PROCESSLIST_ID = 12 /*session_1_id*/ CONNECTION_TYPE CONNECTION_TYPE IS NULL Plugin 0 num_cols : 2 nb rows : 1 affected rows : 0 server status : 2 warn count : 0 Setting NO_VIO_TYPE on session_1 Session 1's view SELECT CONNECTION_TYPE FROM performance_schema.threads WHERE PROCESSLIST_ID = 12 /*session_1_id*/ CONNECTION_TYPE Plugin num_cols : 1 nb rows : 1 affected rows : 0 server status : 2 warn count : 0 Setting VIO_TYPE_TCPIP on session_1 Session 1's view SELECT CONNECTION_TYPE FROM performance_schema.threads WHERE PROCESSLIST_ID = 12 /*session_1_id*/ CONNECTION_TYPE TCP/IP num_cols : 1 nb rows : 1 affected rows : 0 server status : 2 warn count : 0 Setting VIO_TYPE_NAMEDPIPE on session_1 Session 1's view SELECT CONNECTION_TYPE FROM 
performance_schema.threads WHERE PROCESSLIST_ID = 12 /*session_1_id*/ CONNECTION_TYPE Named Pipe num_cols : 1 nb rows : 1 affected rows : 0 server status : 2 warn count : 0 ============================================================================================ Session 2 : srv_session_info_set_connection_type ============================================================================================ Session 2's view SELECT CONNECTION_TYPE FROM performance_schema.threads WHERE PROCESSLIST_ID = 13 /*session_2_id*/ CONNECTION_TYPE Plugin num_cols : 1 nb rows : 1 affected rows : 0 server status : 2 warn count : 0 Setting VIO_TYPE_SSL on session_2 Setting VIO_TYPE_TCPIP on session_1 Session 2's view SELECT CONNECTION_TYPE FROM performance_schema.threads WHERE PROCESSLIST_ID = 13 /*session_2_id*/ CONNECTION_TYPE SSL/TLS num_cols : 1 nb rows : 1 affected rows : 0 server status : 2 warn count : 0 Session 2's view SELECT CONNECTION_TYPE FROM performance_schema.threads WHERE PROCESSLIST_ID = 13 /*session_2_id*/ CONNECTION_TYPE Shared Memory num_cols : 1 nb rows : 1 affected rows : 0 server status : 2 warn count : 0 ============================================================================================ BEFORE kill of Session 1 ============================================================================================ SELECT ID, USER, HOST, DB, COMMAND, INFO FROM INFORMATION_SCHEMA.PROCESSLIST WHERE info LIKE 'PLUGIN%' ORDER BY id ID USER HOST DB COMMAND INFO 12 root localhost:100 test Query PLUGIN: SELECT ID, USER, HOST, DB, COMMAND, INFO FROM INFORMATION_SCHEMA.PROCESSLIST WHERE info LIKE 'PLUGIN%' ORDER BY id 13 root localhost:200 test Sleep PLUGIN num_cols : 6 nb rows : 2 affected rows : 0 server status : 34 warn count : 0 ============================================================================================ srv_session_info_killed(Session_1) : 0 srv_session_info_killed(Session_2) : 0 
============================================================================================ Killing Session 1 KILL CONNECTION 12 /*session_1_id*/ affected rows : 0 server status : 2 warn count : 0 ============================================================================================ AFTER kill of Session 1 ============================================================================================ SELECT ID, USER, HOST, DB, COMMAND, INFO FROM INFORMATION_SCHEMA.PROCESSLIST WHERE info LIKE 'PLUGIN%' ORDER BY id ============================================================================================ srv_session_info_killed(Session 1) : 1 srv_session_info_killed(Session 2) : 0 ============================================================================================ Closing Session 1 Closing Session 1 failed as expected. It was already closed by EXEC_TEST_CMD ============================================================================================ Get/Set session info with closed session(Session 1) ============================================================================================ srv_session_info_get_thd : 0 srv_session_info_get_session_id : 0 srv_session_info_set_client_port : 1 srv_session_info_get_client_port : 0 srv_session_info_get_current_db : (null) srv_session_info_set_connection_type : 1 ============================================================================================ SELECT ID, USER, HOST, DB, COMMAND, INFO FROM INFORMATION_SCHEMA.PROCESSLIST WHERE info LIKE 'PLUGIN%' ORDER BY id ============================================================================================ Perform KILL QUERY and suicide (KILL CONNECTION) on Session 2 ============================================================================================ KILL QUERY 13 /*session_2_id*/ srv_session_info_killed(Session 2) : 0 KILL CONNECTION 13 /*session_2_id*/ srv_session_info_killed(Session 2) : 1 
============================================================================================ Get/Set session info with killed session(Session 2) ============================================================================================ srv_session_info_get_thd : 1 srv_session_info_get_session_id : 13 srv_session_info_set_client_port : 0 srv_session_info_get_client_port : 11111 srv_session_info_get_current_db : test srv_session_info_set_connection_type : 0 ============================================================================================ Session 2 got killed but not closed, thus it will appear in the processlist as Killed ============================================================================================ /*Session 3*/SELECT ID, USER, HOST, DB, COMMAND, INFO FROM INFORMATION_SCHEMA.PROCESSLIST WHERE info LIKE 'PLUGIN%' ORDER BY id ID USER HOST DB COMMAND INFO 13 root localhost:11111 test Killed PLUGIN 14 root localhost [NULL] Query PLUGIN: /*Session 3*/SELECT ID, USER, HOST, DB, COMMAND, INFO FROM INFORMATION_SCHEMA.PROCESSLIST WHERE info LIKE 'PLUGIN%' ORDER BY id num_cols : 6 nb rows : 2 affected rows : 0 server status : 34 warn count : 0 Closing Session 2 Closing Session 3 deinit thread ########################################## # Cleanup ########################################## DROP TABLE t1;
{ "pile_set_name": "Github" }
//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto package runtime // import "github.com/docker/docker/api/types/swarm/runtime"
{ "pile_set_name": "Github" }
graph G { graph [bb="0,0,54,36"]; node [label="\N"]; a [height=0.5, label="", pos="27,18", shape=ellipse, width=0.75]; }
{ "pile_set_name": "Github" }
/* * Tencent is pleased to support the open source community by making BK-CI 蓝鲸持续集成平台 available. * * Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. * * BK-CI 蓝鲸持续集成平台 is licensed under the MIT license. * * A copy of the MIT License is included in this file. * * * Terms of the MIT License: * --------------------------------------------------- * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, * modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT * LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN * NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ package com.tencent.devops.artifactory.resources import com.tencent.devops.artifactory.api.service.ServiceImageManageResource import com.tencent.devops.common.api.pojo.Result import com.tencent.devops.common.web.RestResource import net.coobird.thumbnailator.Thumbnails import org.apache.commons.codec.binary.Base64 import org.slf4j.LoggerFactory import org.springframework.beans.factory.annotation.Autowired import java.net.URL import java.nio.file.Files @RestResource class ServiceImageManageResourceImpl @Autowired constructor() : ServiceImageManageResource { private val logger = LoggerFactory.getLogger(ServiceImageManageResourceImpl::class.java) /** * 按照规定大小压缩图片 */ override fun compressImage(imageUrl: String, compressWidth: Int, compressHeight: Int): Result<String> { val file = Files.createTempFile("random_" + System.currentTimeMillis(), ".png").toFile() val url = URL(imageUrl) val bytes: ByteArray? try { Thumbnails.of(url) .size(compressWidth, compressHeight) .outputFormat("png") .toFile(file) bytes = Files.readAllBytes(file.toPath()) } finally { file.delete() } val data = "data:image/png;base64," + Base64.encodeBase64String(bytes) logger.info("the compressImage base64 data is:$data") return Result(data) } }
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> <dict> <key>CFBundleDevelopmentRegion</key> <string>$(DEVELOPMENT_LANGUAGE)</string> <key>CFBundleExecutable</key> <string>$(EXECUTABLE_NAME)</string> <key>CFBundleIdentifier</key> <string>$(PRODUCT_BUNDLE_IDENTIFIER)</string> <key>CFBundleInfoDictionaryVersion</key> <string>6.0</string> <key>CFBundleName</key> <string>$(PRODUCT_NAME)</string> <key>CFBundlePackageType</key> <string>$(PRODUCT_BUNDLE_PACKAGE_TYPE)</string> <key>CFBundleShortVersionString</key> <string>1.0</string> <key>CFBundleVersion</key> <string>1</string> <key>LSRequiresIPhoneOS</key> <true/> <key>UILaunchStoryboardName</key> <string>LaunchScreen</string> <key>UIMainStoryboardFile</key> <string>Main</string> <key>UIRequiredDeviceCapabilities</key> <array> <string>armv7</string> </array> <key>UISupportedInterfaceOrientations</key> <array> <string>UIInterfaceOrientationPortrait</string> <string>UIInterfaceOrientationLandscapeLeft</string> <string>UIInterfaceOrientationLandscapeRight</string> </array> <key>UISupportedInterfaceOrientations~ipad</key> <array> <string>UIInterfaceOrientationPortrait</string> <string>UIInterfaceOrientationPortraitUpsideDown</string> <string>UIInterfaceOrientationLandscapeLeft</string> <string>UIInterfaceOrientationLandscapeRight</string> </array> </dict> </plist>
{ "pile_set_name": "Github" }