code
stringlengths
3
1.01M
repo_name
stringlengths
5
116
path
stringlengths
3
311
language
stringclasses
30 values
license
stringclasses
15 values
size
int64
3
1.01M
--- layout: vakit_dashboard title: KONYA, TÜRKİYE için iftar, namaz vakitleri ve hava durumu - ilçe/eyalet seç permalink: /TÜRKİYE/KONYA/AKÖREN --- <script type="text/javascript"> var GLOBAL_COUNTRY = 'TÜRKİYE'; var GLOBAL_CITY = 'KONYA'; var GLOBAL_STATE = 'AKÖREN'; var lat = 72; var lon = 21; </script>
hakanu/iftar
_posts_/vakit/TÜRKİYE/KONYA/AKÖREN/2017-02-01-AKÖREN.markdown
Markdown
apache-2.0
328
# coding: utf-8 """ Copyright 2015 SmartBear Software Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Ref: https://github.com/swagger-api/swagger-codegen """ from datetime import datetime from pprint import pformat from six import iteritems class BuildRecordSetSingleton(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self): """ BuildRecordSetSingleton - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. """ self.swagger_types = { 'content': 'BuildRecordSetRest' } self.attribute_map = { 'content': 'content' } self._content = None @property def content(self): """ Gets the content of this BuildRecordSetSingleton. :return: The content of this BuildRecordSetSingleton. :rtype: BuildRecordSetRest """ return self._content @content.setter def content(self, content): """ Sets the content of this BuildRecordSetSingleton. :param content: The content of this BuildRecordSetSingleton. 
:type: BuildRecordSetRest """ self._content = content def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, datetime): result[attr] = str(value.date()) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str()
jianajavier/pnc-cli
pnc_cli/swagger_client/models/build_record_set_singleton.py
Python
apache-2.0
2,941
<div class="panel panel-default mdw-panel"> <div class="panel-heading mdw-heading"> <div class="mdw-heading-label">Add New Attribute</div> </div> <div class="form-horizontal mdw-section"> <div class="form-group mdw-static-group"> <label class="control-label col-xs-2 mdw-required" for="{{attribute.name}}">Name:</label> <div class="col-xs-8"> <input id='newAttr-Name' type="text" ng-model="attribute.name" class="form-control" /> </div> </div> <div class="form-group mdw-static-group"> <label class="control-label col-xs-2 mdw-required" for="{{attribute.value}}">Value:</label> <div class="col-xs-8"> <input id='newAttr-Value' type="text" ng-model="attribute.value" class="form-control" /> </div> <div class="col-xs-2"> <button class="btn btn-primary mdw-btn" type="button" ng-click="addAttribute()"> <span class="glyphicon glyphicon-add"></span> Add </button> </div> </div> </div> </div> <br /> <!-- pre>{{user.attributes|json}}</pre -->
CenturyLinkCloud/mdw
mdw-hub/web/users/userAttributes.html
HTML
apache-2.0
1,199
/* * Hibernate Validator, declare and validate application constraints * * License: Apache License, Version 2.0 * See the license.txt file in the root directory or <http://www.apache.org/licenses/LICENSE-2.0>. */ package org.hibernate.validator.ap.testmodel.annotationparameters; import javax.validation.constraints.Pattern; /** * @author Marko Bekhta */ public class ValidPatternParameters { @Pattern(regexp = "test") private String strings1; @Pattern(regexp = "[test]") private String strings2; @Pattern(regexp = "\\.") private String strings3; @Pattern.List({ @Pattern(regexp = "\\."), @Pattern(regexp = "[test]"), @Pattern(regexp = "test") }) private String strings4; public ValidPatternParameters( @Pattern(regexp = "\\.") String strings1, @Pattern(regexp = "[test]") String strings2, @Pattern(regexp = "test") String strings3 ) { } public void doSomething( @Pattern(regexp = "\\.") String strings1, @Pattern(regexp = "[test]") String strings2, @Pattern(regexp = "test") String strings3 ) { } @Pattern(regexp = "\\.") public String doSomething() { return ""; } }
shahramgdz/hibernate-validator
annotation-processor/src/test/java/org/hibernate/validator/ap/testmodel/annotationparameters/ValidPatternParameters.java
Java
apache-2.0
1,124
package at.forsyte.apalache.infra.passes class TerminalPassWithTlaModule extends TerminalPass with TlaModuleMixin
konnov/apalache
mod-infra/src/main/scala/at/forsyte/apalache/infra/passes/TerminalPassWithTlaModule.scala
Scala
apache-2.0
115
// Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. using System; using System.Collections.Generic; using System.Collections.Immutable; namespace Microsoft.CodeAnalysis.Diagnostics { internal abstract partial class CompilerDiagnosticAnalyzer : DiagnosticAnalyzer { private const string Origin = "Origin"; private const string Syntactic = "Syntactic"; private const string Declaration = "Declaration"; private static readonly ImmutableDictionary<string, string> s_syntactic = ImmutableDictionary<string, string>.Empty.Add(Origin, Syntactic); private static readonly ImmutableDictionary<string, string> s_declaration = ImmutableDictionary<string, string>.Empty.Add(Origin, Declaration); /// <summary> /// Per-compilation DiagnosticAnalyzer for compiler's syntax/semantic/compilation diagnostics. /// </summary> private class CompilationAnalyzer { private readonly Compilation _compilation; public CompilationAnalyzer(Compilation compilation) { _compilation = compilation; } public void AnalyzeSyntaxTree(SyntaxTreeAnalysisContext context) { var semanticModel = _compilation.GetSemanticModel(context.Tree); var diagnostics = semanticModel.GetSyntaxDiagnosticsIncludingSuppressions(cancellationToken: context.CancellationToken); ReportDiagnostics(diagnostics, context.ReportDiagnostic, IsSourceLocation, s_syntactic); } public static void AnalyzeSemanticModel(SemanticModelAnalysisContext context) { var declDiagnostics = context.SemanticModel.GetDeclarationDiagnosticsIncludingSuppressions(cancellationToken: context.CancellationToken); ReportDiagnostics(declDiagnostics, context.ReportDiagnostic, IsSourceLocation, s_declaration); var bodyDiagnostics = context.SemanticModel.GetMethodBodyDiagnosticsIncludingSuppressions(cancellationToken: context.CancellationToken); ReportDiagnostics(bodyDiagnostics, context.ReportDiagnostic, IsSourceLocation); } public static void 
AnalyzeCompilation(CompilationAnalysisContext context) { var diagnostics = context.Compilation.GetDeclarationDiagnosticsIncludingSuppressions(cancellationToken: context.CancellationToken); ReportDiagnostics(diagnostics, context.ReportDiagnostic, location => !IsSourceLocation(location), s_declaration); } private static bool IsSourceLocation(Location location) { return location != null && location.Kind == LocationKind.SourceFile; } private static void ReportDiagnostics( ImmutableArray<Diagnostic> diagnostics, Action<Diagnostic> reportDiagnostic, Func<Location, bool> locationFilter, ImmutableDictionary<string, string> properties = null) { foreach (var diagnostic in diagnostics) { if (locationFilter(diagnostic.Location) && diagnostic.Severity != DiagnosticSeverity.Hidden) { var current = properties == null ? diagnostic : new CompilerDiagnostic(diagnostic, properties); reportDiagnostic(current); } } } private class CompilerDiagnostic : Diagnostic { private readonly Diagnostic _original; private readonly ImmutableDictionary<string, string> _properties; public CompilerDiagnostic(Diagnostic original, ImmutableDictionary<string, string> properties) { _original = original; _properties = properties; } #pragma warning disable RS0013 // we are delegating so it is okay here public override DiagnosticDescriptor Descriptor => _original.Descriptor; #pragma warning restore RS0013 public override string Id => _original.Id; public override DiagnosticSeverity Severity => _original.Severity; public override int WarningLevel => _original.WarningLevel; public override Location Location => _original.Location; public override IReadOnlyList<Location> AdditionalLocations => _original.AdditionalLocations; public override bool IsSuppressed => _original.IsSuppressed; public override ImmutableDictionary<string, string> Properties => _properties; public override string GetMessage(IFormatProvider formatProvider = null) { return _original.GetMessage(formatProvider); } public override bool 
Equals(object obj) { return _original.Equals(obj); } public override int GetHashCode() { return _original.GetHashCode(); } public override bool Equals(Diagnostic obj) { return _original.Equals(obj); } internal override Diagnostic WithLocation(Location location) { return new CompilerDiagnostic(_original.WithLocation(location), _properties); } internal override Diagnostic WithSeverity(DiagnosticSeverity severity) { return new CompilerDiagnostic(_original.WithSeverity(severity), _properties); } internal override Diagnostic WithIsSuppressed(bool isSuppressed) { return new CompilerDiagnostic(_original.WithIsSuppressed(isSuppressed), _properties); } } } } }
russpowers/roslyn
src/Compilers/Core/Portable/DiagnosticAnalyzer/CompilerDiagnosticAnalyzer.CompilationAnalyzer.cs
C#
apache-2.0
6,159
#region Apache License Version 2.0 /*---------------------------------------------------------------- Copyright 2019 Jeffrey Su & Suzhou Senparc Network Technology Co.,Ltd. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Detail: https://github.com/JeffreySu/WeiXinMPSDK/blob/master/license.md ----------------------------------------------------------------*/ #endregion Apache License Version 2.0 /*---------------------------------------------------------------- Copyright (C) 2019 Senparc 文件名:WiFiStatisticsResultJson.cs 文件功能描述:数据统计返回结果 创建标识:Senparc - 20150709 ----------------------------------------------------------------*/ using System.Collections.Generic; using Senparc.Weixin.Entities; namespace Senparc.Weixin.MP.AdvancedAPIs.WiFi { /// <summary> /// 数据统计返回结果 /// </summary> public class GetStatisticsResult : WxJsonResult { public List<GetStatistics_Data> date { get; set; } } public class GetStatistics_Data { /// <summary> /// 门店ID,-1为总统计 /// </summary> public string shop_id { get; set; } /// <summary> /// 统计时间,单位为毫秒 /// </summary> public long statis_time { get; set; } /// <summary> /// 微信连wifi成功人数 /// </summary> public int total_user { get; set; } /// <summary> /// 商家主页访问人数 /// </summary> public int homepage_uv { get; set; } /// <summary> /// 新增公众号关注人数 /// </summary> public int new_fans { get; set; } /// <summary> /// 累计公众号关注人数 /// </summary> public int total_fans { get; set; } } }
lishewen/WeiXinMPSDK
src/Senparc.Weixin.MP/Senparc.Weixin.MP/AdvancedAPIs/WiFi/WiFiJson/WiFiStatisticsResultJson.cs
C#
apache-2.0
2,320
/**************************************************************** * Licensed to the AOS Community (AOS) under one or more * * contributor license agreements. See the NOTICE file * * distributed with this work for additional information * * regarding copyright ownership. The AOS licenses this file * * to you under the Apache License, Version 2.0 (the * * "License"); you may not use this file except in compliance * * with the License. You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, * * software distributed under the License is distributed on an * * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * * KIND, either express or implied. See the License for the * * specific language governing permissions and limitations * * under the License. * ****************************************************************/ /* * @(#)BullsEye.java 1.22 10/03/23 * * Copyright (c) 2006, Oracle and/or its affiliates. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * -Redistribution of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * -Redistribution in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * Neither the name of Oracle or the names of contributors may * be used to endorse or promote products derived from this software without * specific prior written permission. * * This software is provided "AS IS," without a warranty of any kind. ALL * EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING * ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE * OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. 
SUN MICROSYSTEMS, INC. ("SUN") * AND ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE * AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS * DERIVATIVES. IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST * REVENUE, PROFIT OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, * INCIDENTAL OR PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY * OF LIABILITY, ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, * EVEN IF SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. * * You acknowledge that this software is not designed, licensed or intended * for use in the design, construction, operation or maintenance of any * nuclear facility. */ /* * @(#)BullsEye.java 1.22 10/03/23 */ package java2d.demos.Colors; import static java.awt.Color.*; import java.awt.*; import java.awt.geom.Ellipse2D; import java2d.Surface; /** * Creating colors with an alpha value. */ public class BullsEye extends Surface { public BullsEye() { setBackground(WHITE); } public void render(int w, int h, Graphics2D g2) { Color reds[] = { RED.darker(), RED }; for (int N = 0; N < 18; N++) { float i = (N + 2) / 2.0f; float x = (float) (5+i*(w/2/10)); float y = (float) (5+i*(h/2/10)); float ew = (w-10)-(i*w/10); float eh = (h-10)-(i*h/10); float alpha = (N == 0) ? 0.1f : 1.0f / (19.0f - N); if ( N >= 16 ) g2.setColor(reds[N-16]); else g2.setColor(new Color(0f, 0f, 0f, alpha)); g2.fill(new Ellipse2D.Float(x,y,ew,eh)); } } public static void main(String s[]) { createDemoFrame(new BullsEye()); } }
XClouded/t4f-core
java/jdk/demo/plugin/jfc/Java2D/src/java2d/demos/Colors/BullsEye.java
Java
apache-2.0
4,069
/* * Copyright 2011 JBoss Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kie.fluent; import org.kie.io.Resource; import org.kie.io.ResourceConfiguration; import org.kie.io.ResourceType; public interface FluentKnowledgeBuilder<T> { T add(Resource resource, ResourceType type); T add(Resource resource, ResourceType type, ResourceConfiguration configuration); }
mswiderski/droolsjbpm-knowledge
kie-internal/src/main/java/org/kie/fluent/FluentKnowledgeBuilder.java
Java
apache-2.0
941
/* * Copyright © 2016 Cask Data, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package co.cask.tephra.hbase10cdh.coprocessor; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterBase; import java.io.IOException; import java.util.List; /** * {@link Filter} that encapsulates another {@link Filter}. It remembers the last {@link KeyValue} * for which the underlying filter returned the {@link ReturnCode#NEXT_COL} or {@link ReturnCode#INCLUDE_AND_NEXT_COL}, * so that when {@link #filterKeyValue} is called again for the same {@link KeyValue} with different * version, it returns {@link ReturnCode#NEXT_COL} directly without consulting the underlying {@link Filter}. * Please see TEPHRA-169 for more details. */ public class CellSkipFilter extends FilterBase { private final Filter filter; // remember the previous keyvalue processed by filter when the return code was NEXT_COL or INCLUDE_AND_NEXT_COL private KeyValue skipColumn = null; public CellSkipFilter(Filter filter) { this.filter = filter; } /** * Determines whether the current cell should be skipped. The cell will be skipped * if the previous keyvalue had the same key as the current cell. This means filter already responded * for the previous keyvalue with ReturnCode.NEXT_COL or ReturnCode.INCLUDE_AND_NEXT_COL. 
* @param cell the {@link Cell} to be tested for skipping * @return true is current cell should be skipped, false otherwise */ private boolean skipCellVersion(Cell cell) { return skipColumn != null && CellUtil.matchingRow(cell, skipColumn.getRowArray(), skipColumn.getRowOffset(), skipColumn.getRowLength()) && CellUtil.matchingFamily(cell, skipColumn.getFamilyArray(), skipColumn.getFamilyOffset(), skipColumn.getFamilyLength()) && CellUtil.matchingQualifier(cell, skipColumn.getQualifierArray(), skipColumn.getQualifierOffset(), skipColumn.getQualifierLength()); } @Override public ReturnCode filterKeyValue(Cell cell) throws IOException { if (skipCellVersion(cell)) { return ReturnCode.NEXT_COL; } ReturnCode code = filter.filterKeyValue(cell); if (code == ReturnCode.NEXT_COL || code == ReturnCode.INCLUDE_AND_NEXT_COL) { // only store the reference to the keyvalue if we are returning NEXT_COL or INCLUDE_AND_NEXT_COL skipColumn = KeyValueUtil.createFirstOnRow(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()); } else { skipColumn = null; } return code; } @Override public boolean filterRow() throws IOException { return filter.filterRow(); } @Override public Cell transformCell(Cell cell) throws IOException { return filter.transformCell(cell); } @Override public void reset() throws IOException { filter.reset(); } @Override public boolean filterRowKey(byte[] buffer, int offset, int length) throws IOException { return filter.filterRowKey(buffer, offset, length); } @Override public boolean filterAllRemaining() throws IOException { return filter.filterAllRemaining(); } @Override public void filterRowCells(List<Cell> kvs) throws IOException { filter.filterRowCells(kvs); } @Override public boolean hasFilterRow() { return filter.hasFilterRow(); } @SuppressWarnings("deprecation") @Override public KeyValue 
getNextKeyHint(KeyValue currentKV) throws IOException { return filter.getNextKeyHint(currentKV); } @Override public Cell getNextCellHint(Cell currentKV) throws IOException { return filter.getNextCellHint(currentKV); } @Override public boolean isFamilyEssential(byte[] name) throws IOException { return filter.isFamilyEssential(name); } @Override public byte[] toByteArray() throws IOException { return filter.toByteArray(); } }
cdapio/tephra
tephra-hbase-compat-1.0-cdh/src/main/java/co/cask/tephra/hbase10cdh/coprocessor/CellSkipFilter.java
Java
apache-2.0
4,910
using Base.Dto; namespace Base.ActivityInterface { public interface ICoreFlowProvider { ActivityOutput SendEmail(EmailDto dto); } }
kanewanggit/tcsworkflow
Core/Base/ActivityInterface/ICoreFlowProvider.cs
C#
apache-2.0
156
<script type="text/javascript"> console.log('calleed!!'); function initialize() { var mapOptions = { center: new google.maps.LatLng(-34.397, 150.644), zoom: 8 }; var map = new google.maps.Map(document.getElementById("map-canvas"), mapOptions); } initialize(); </script> <div id="map-canvas"></div>
SurvivingMondayUMich/officialdemo
views/test.html
HTML
apache-2.0
317
var searchData= [ ['includequery',['includeQuery',['../classcom_1_1example_1_1servlets_1_1_atakan_servlet_1_1_mathematician.html#a2ded5a2636fe235baef719d9f16ad2e1',1,'com::example::servlets::AtakanServlet::Mathematician']]] ];
bounswe/bounswe2016group5
TestWebProject/html/search/functions_5.js
JavaScript
apache-2.0
229
package chapter6.concurrentutils; import java.util.Random; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; /** * CountDownLatch犹如倒计时器 ,调用CountDownLatch对象的countdown方法就将计数器减1 ,当计数到达0时 ,则所有等待者或单个等待者开始执行 */ public class CountDownLatchDemo { public static void main(String args[]) { //线程池 ExecutorService service = Executors.newCachedThreadPool(); //倒计时器 final CountDownLatch mainOrder = new CountDownLatch(1); final CountDownLatch subAnswer = new CountDownLatch(3); for (int x = 0; x < 3; x++) { service.execute(new Runnable() { public void run() { try { System.out.println("将军:" + Thread.currentThread().getName() + "正在等待命定"); Thread.sleep(new Random().nextInt(10000)); mainOrder.await(); } catch (InterruptedException e) { // TODO Auto-generated catch block e.printStackTrace(); } System.out.println("将军" + Thread.currentThread().getName() + "正在执行任务"); try { Thread.sleep(new Random().nextInt(10000)); System.out.println("将军" + Thread.currentThread().getName() + "执行任务完毕 上报了任务结果"); subAnswer.countDown(); } catch (InterruptedException e) { e.printStackTrace(); } } }); } try { System.out.println("国王 即将发布命令"); mainOrder.countDown(); System.out.println("国王 已经发布命令"); System.out.println("国王 在等待命定的执行结果"); subAnswer.await(); System.out.println("国王 收到了将军们的命定的执行结果 万事大吉!!!"); } catch (Exception e) { e.printStackTrace(); } service.shutdown(); } }
Ztiany/CodeRepository
Java/Java-Concurrent/Java-Concurrent-Art/src/main/java/chapter6/concurrentutils/CountDownLatchDemo.java
Java
apache-2.0
2,283
/* * Copyright 2009 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.openehealth.ipf.commons.ihe.xds.core.transform.ebxml.ebxml30; import org.openehealth.ipf.commons.ihe.xds.core.ebxml.EbXMLFactory; import org.openehealth.ipf.commons.ihe.xds.core.ebxml.ebxml30.EbXMLFactory30; import org.openehealth.ipf.commons.ihe.xds.core.transform.ebxml.SubmissionSetTransformer; import org.openehealth.ipf.commons.ihe.xds.core.transform.ebxml.SubmissionSetTransformerTestBase; /** * Tests for {@link SubmissionSetTransformer}. * @author Jens Riemschneider */ public class SubmissionSetTransformerTest extends SubmissionSetTransformerTestBase { @Override public EbXMLFactory createFactory() { return new EbXMLFactory30(); } }
oehf/ipf
commons/ihe/xds/src/test/java/org/openehealth/ipf/commons/ihe/xds/core/transform/ebxml/ebxml30/SubmissionSetTransformerTest.java
Java
apache-2.0
1,338
#!/bin/bash # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. . /opt/cloud/bin/setup/common.sh check_reboot_vmware() { if [ "$HYPERVISOR" != "vmware" ]; then return fi if [ -n "$MGMTNET" ]; then MGMT_GW=$(echo $MGMTNET | awk -F "." '{print $1"."$2"."$3".1"}') if ping -n -c 1 -W 3 $MGMT_GW &> /dev/null; then log_it "Management gateway pingable, skipping VR reboot" return fi fi log_it "Management gateway not pingable, rebooting VR" sync reboot } setup_router() { # To save router public interface and gw ip information touch /var/cache/cloud/ifaceGwIp oldmd5= [ -f "/etc/udev/rules.d/70-persistent-net.rules" ] && oldmd5=$(md5sum "/etc/udev/rules.d/70-persistent-net.rules" | awk '{print $1}') if [ -n "$ETH2_IP" ]; then setup_common eth0 eth1 eth2 if [ -n "$EXTRA_PUBNICS" ]; then for ((i = 3; i < 3 + $EXTRA_PUBNICS; i++)); do setup_interface "$i" "0.0.0.0" "255.255.255.255" $GW "force" done fi else setup_common eth0 eth1 if [ -n "$EXTRA_PUBNICS" ]; then for ((i = 2; i < 2 + $EXTRA_PUBNICS; i++)); do setup_interface "$i" "0.0.0.0" "255.255.255.255" $GW "force" done fi fi log_it "Checking udev NIC assignment order changes" if [ "$NIC_MACS" != "" ] then init_interfaces_orderby_macs "$NIC_MACS" "/tmp/interfaces" "/tmp/udev-rules" newmd5=$(md5sum 
"/tmp/udev-rules" | awk '{print $1}') rm /tmp/interfaces rm /tmp/udev-rules if [ "$oldmd5" != "$newmd5" ] then log_it "Reloading udev for new udev NIC assignment" udevadm control --reload-rules && udevadm trigger check_reboot_vmware fi fi setup_aesni setup_dnsmasq setup_apache2 $ETH0_IP sed -i /$NAME/d /etc/hosts echo "$ETH0_IP $NAME" >> /etc/hosts enable_irqbalance 1 disable_rpfilter_domR enable_fwding 1 enable_rpsrfs 1 enable_passive_ftp 1 cp /etc/iptables/iptables-router /etc/iptables/rules.v4 setup_sshd $ETH1_IP "eth1" # Only allow DNS service for current network sed -i "s/-A INPUT -i eth0 -p udp -m udp --dport 53 -j ACCEPT/-A INPUT -i eth0 -p udp -m udp --dport 53 -s $DHCP_RANGE\/$CIDR_SIZE -j ACCEPT/g" /etc/iptables/rules.v4 sed -i "s/-A INPUT -i eth0 -p tcp -m tcp --dport 53 -j ACCEPT/-A INPUT -i eth0 -p tcp -m tcp --dport 53 -s $DHCP_RANGE\/$CIDR_SIZE -j ACCEPT/g" /etc/iptables/rules.v4 # Setup hourly logrotate if [ -f /etc/cron.daily/logrotate ]; then mv -n /etc/cron.daily/logrotate /etc/cron.hourly 2>&1 fi } routing_svcs if [ $? -gt 0 ] then log_it "Failed to execute routing_svcs" exit 1 fi setup_router
DaanHoogland/cloudstack
systemvm/debian/opt/cloud/bin/setup/router.sh
Shell
apache-2.0
3,355
/* * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software;Designed and Developed mainly by many Chinese * opensource volunteers. you can redistribute it and/or modify it under the * terms of the GNU General Public License version 2 only, as published by the * Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Any questions about this component can be directed to it's project Web address * https://code.google.com/p/opencloudb/. * */ package org.opencloudb.route.function; import junit.framework.Assert; import org.junit.Test; public class PartitionByPrefixPatternTest { @Test public void test() { /** * ASCII编码: * 48-57=0-9阿拉伯数字 * 64、65-90=@、A-Z * 97-122=a-z * */ PartitionByPrefixPattern autoPartition=new PartitionByPrefixPattern(); autoPartition.setPatternValue(32); autoPartition.setPrefixLength(5); autoPartition.setMapFile("partition_prefix_pattern.txt"); autoPartition.init(); String idVal="gf89f9a"; Assert.assertEquals(true, 0==autoPartition.calculate(idVal)); idVal="8df99a"; Assert.assertEquals(true, 4==autoPartition.calculate(idVal)); idVal="8dhdf99a"; Assert.assertEquals(true, 3==autoPartition.calculate(idVal)); } }
youngor/openclouddb
MyCAT_1.2/src/test/java/org/opencloudb/route/function/PartitionByPrefixPatternTest.java
Java
apache-2.0
1,939
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.river.test.spec.discoverymanager; import org.apache.river.qa.harness.QAConfig; import org.apache.river.qa.harness.Test; /** * This class verifies that the <code>LookupDiscoveryManager</code> utility * handles, in a manner consistent with the specification, the changed * events that occur as a result of the replacement of one or more of the * member groups of discovered lookup services. * * The environment in which this class expects to operate is as follows: * <p><ul> * <li> N lookup services having locator L0i, and belonging to groups * {G0i,G1i,G2i}, where i = 0 ... N * <li> one lookup discovery manager configured to discover all of the * lookups by only group discovery * <li> one instance of DiscoveryChangeListener registered with the lookup * discovery manager * <li> after all of the lookup services are successfully discovered, * every other element of each lookup's set of member groups is * replaced with a new element * </ul><p> * * If the lookup discovery manager functions as specified, then the client's * listener will receive the expected number of changed events, with the * expected contents. 
* * Related bug ids: 4292957 */ public class GroupsMulticastMonitorChange extends MulticastMonitorChange { /** Performs actions necessary to prepare for execution of the * current test (refer to the description of this method in the * parent class). */ public Test construct(QAConfig config) throws Exception { super.construct(config); groupsToDiscover = toGroupsToDiscover(getInitLookupsToStart(), AbstractBaseTest.ALL_BY_GROUP); locatorsToDiscover = toLocatorsToDiscover (getInitLookupsToStart(), AbstractBaseTest.ALL_BY_GROUP); return this; }//end construct }//end class GroupsMulticastMonitorChange
pfirmstone/JGDMS
qa/src/org/apache/river/test/spec/discoverymanager/GroupsMulticastMonitorChange.java
Java
apache-2.0
2,831
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock

from keystone import catalog
from keystone.common import manager
from keystone.tests import unit


class TestCreateLegacyDriver(unit.BaseTestCase):
    """Checks deprecation reporting for drivers built by create_legacy_driver."""

    @mock.patch('oslo_log.versionutils.report_deprecated_feature')
    def test_class_is_properly_deprecated(self, mock_reporter):
        legacy_cls = manager.create_legacy_driver(catalog.CatalogDriverV8)

        # Subvert the abstract-method requirement so the generated class can
        # be instantiated without implementing every abstract method.
        legacy_cls.__abstractmethods__ = set()
        instance = legacy_cls()

        expected_details = {
            'as_of': 'Liberty',
            'what': 'keystone.catalog.core.Driver',
            'in_favor_of': 'keystone.catalog.core.CatalogDriverV8',
            'remove_in': mock.ANY,
        }
        mock_reporter.assert_called_with(mock.ANY, mock.ANY, expected_details)
        # The third positional argument carries the details dict; 'remove_in'
        # is expected to name a release starting with 'N'.
        self.assertEqual('N', mock_reporter.call_args[0][2]['remove_in'][0])

        self.assertIsInstance(instance, catalog.CatalogDriverV8)
cernops/keystone
keystone/tests/unit/common/test_manager.py
Python
apache-2.0
1,531
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hive.serde2.lazy;

import java.io.IOException;
import java.io.OutputStream;

import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyLongObjectInspector;
import org.apache.hadoop.io.LongWritable;

/**
 * LazyObject for storing a value of Long.
 *
 * <p>
 * Part of the code is adapted from Apache Harmony Project.
 *
 * As with the specification, this implementation relied on code laid out in <a
 * href="http://www.hackersdelight.org/">Henry S. Warren, Jr.'s Hacker's
 * Delight, (Addison Wesley, 2002)</a> as well as <a
 * href="http://aggregate.org/MAGIC/">The Aggregate's Magic Algorithms</a>.
 * </p>
 *
 */
public class LazyLong extends
    LazyPrimitive<LazyLongObjectInspector, LongWritable> {

  /** Creates an uninitialized LazyLong backed by a fresh LongWritable. */
  public LazyLong(LazyLongObjectInspector oi) {
    super(oi);
    data = new LongWritable();
  }

  /** Copy constructor: duplicates the wrapped long value. */
  public LazyLong(LazyLong copy) {
    super(copy);
    data = new LongWritable(copy.data.get());
  }

  /**
   * Parses the bytes in [start, start+length) as a base-10 long and stores
   * the result; on any parse failure, marks this value as null and logs the
   * offending input instead of throwing.
   */
  @Override
  public void init(ByteArrayRef bytes, int start, int length) {
    // Cheap pre-check: bail out fast on inputs that can't be numeric at all.
    if (!LazyUtils.isNumberMaybe(bytes.getData(), start, length)) {
      isNull = true;
      return;
    }
    try {
      data.set(parseLong(bytes.getData(), start, length, 10));
      isNull = false;
    } catch (NumberFormatException e) {
      isNull = true;
      logExceptionMessage(bytes, start, length, "BIGINT");
    }
  }

  /**
   * Parses the string argument as if it was a long value and returns the
   * result. Throws NumberFormatException if the string does not represent a
   * long quantity. Base 10 is assumed.
   *
   * @param bytes
   * @param start
   * @param length
   *          a UTF-8 encoded string representation of a long quantity.
   * @return long the value represented by the argument
   * @exception NumberFormatException
   *              if the argument could not be parsed as a long quantity.
   */
  public static long parseLong(byte[] bytes, int start, int length) {
    return parseLong(bytes, start, length, 10);
  }

  /**
   * Parses the string argument as if it was an long value and returns the
   * result. Throws NumberFormatException if the string does not represent an
   * long quantity. The second argument specifies the radix to use when parsing
   * the value.
   *
   * @param bytes
   * @param start
   * @param length
   *          a UTF-8 encoded string representation of a long quantity.
   * @param radix
   *          the base to use for conversion.
   * @return the value represented by the argument
   * @exception NumberFormatException
   *              if the argument could not be parsed as an long quantity.
   */
  public static long parseLong(byte[] bytes, int start, int length, int radix) {
    if (bytes == null) {
      throw new NumberFormatException("String is null");
    }
    if (radix < Character.MIN_RADIX || radix > Character.MAX_RADIX) {
      throw new NumberFormatException("Invalid radix: " + radix);
    }
    if (length == 0) {
      throw new NumberFormatException("Empty string!");
    }
    int offset = start;
    boolean negative = bytes[start] == '-';
    if (negative || bytes[start] == '+') {
      // Skip the sign character; a bare sign with no digits is malformed.
      offset++;
      if (length == 1) {
        throw new NumberFormatException(LazyUtils.convertToString(bytes,
            start, length));
      }
    }

    return parse(bytes, start, length, offset, radix, negative);
  }

  /**
   * Parses the string argument as if it was an long value and returns the
   * result. Throws NumberFormatException if the string does not represent an
   * long quantity. The second argument specifies the radix to use when parsing
   * the value.
   *
   * <p>Accumulation is done in the negative range (result &lt;= 0) so that
   * Long.MIN_VALUE, whose magnitude exceeds Long.MAX_VALUE, can be parsed
   * without overflow; the sign is flipped at the end for positive inputs.
   * A decimal separator ('.') truncates the integer part; the fractional
   * part is still validated for well-formedness.</p>
   *
   * @param bytes
   * @param start
   * @param length
   *          a UTF-8 encoded string representation of a long quantity.
   * @param offset
   *          the starting position after the sign (if exists)
   * @param radix
   *          the base to use for conversion.
   * @param negative
   *          whether the number is negative.
   * @return the value represented by the argument
   * @exception NumberFormatException
   *              if the argument could not be parsed as an long quantity.
   */
  private static long parse(byte[] bytes, int start, int length, int offset,
      int radix, boolean negative) {
    byte separator = '.';
    // Most negative value whose product with radix still fits in a long.
    long max = Long.MIN_VALUE / radix;
    long result = 0, end = start + length;
    while (offset < end) {
      int digit = LazyUtils.digit(bytes[offset++], radix);
      // digit == -1: non-digit character; max > result: next multiply by
      // radix would overflow.
      if (digit == -1 || max > result) {
        if (bytes[offset-1] == separator) {
          // We allow decimals and will return a truncated integer in that case.
          // Therefore we won't throw an exception here (checking the fractional
          // part happens below.)
          break;
        }
        throw new NumberFormatException(LazyUtils.convertToString(bytes,
            start, length));
      }
      long next = result * radix - digit;
      // result is non-positive, so next must not exceed it; if it does, the
      // subtraction of the final digit overflowed.
      if (next > result) {
        throw new NumberFormatException(LazyUtils.convertToString(bytes,
            start, length));
      }
      result = next;
    }
    // This is the case when we've encountered a decimal separator. The fractional
    // part will not change the number, but we will verify that the fractional
    // part is well formed.
    while (offset < end) {
      int digit = LazyUtils.digit(bytes[offset++], radix);
      if (digit == -1) {
        throw new NumberFormatException(LazyUtils.convertToString(bytes,
            start, length));
      }
    }
    if (!negative) {
      // Flip the sign back for positive inputs; if the flip wraps negative,
      // the magnitude was Long.MIN_VALUE without a '-' sign, i.e. overflow.
      result = -result;
      if (result < 0) {
        throw new NumberFormatException(LazyUtils.convertToString(bytes,
            start, length));
      }
    }
    return result;
  }

  /**
   * Writes out the text representation of an integer using base 10 to an
   * OutputStream in UTF-8 encoding.
   *
   * Note: division by a constant (like 10) is much faster than division by a
   * variable. That's one of the reasons that we don't make radix a parameter
   * here.
   *
   * @param out
   *          the outputstream to write to
   * @param i
   *          the long value to write out
   * @throws IOException
   */
  public static void writeUTF8(OutputStream out, long i) throws IOException {
    if (i == 0) {
      out.write('0');
      return;
    }

    boolean negative = i < 0;
    if (negative) {
      out.write('-');
    } else {
      // negative range is bigger than positive range, so there is no risk
      // of overflow here.
      i = -i;
    }

    // Largest power of ten that fits in a long; scan down to the first
    // power that divides into a nonzero leading digit.
    long start = 1000000000000000000L;
    while (i / start == 0) {
      start /= 10;
    }

    while (start > 0) {
      // i is negative here, so (i / start) % 10 is in [-9, 0] and
      // '0' - digit yields the correct ASCII digit character.
      out.write('0' - (int) ((i / start) % 10));
      start /= 10;
    }
  }

  /** Same as {@link #writeUTF8} but wraps IOException in RuntimeException. */
  public static void writeUTF8NoException(OutputStream out, long i) {
    try {
      writeUTF8(out, i);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

}
BUPTAnderson/apache-hive-2.1.1-src
serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyLong.java
Java
apache-2.0
7,631
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.resourcemanager; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.classification.VisibleForTesting; import com.sun.jersey.spi.container.servlet.ServletContainer; import org.apache.hadoop.yarn.metrics.GenericEventTypeMetrics; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.Marker; import org.slf4j.MarkerFactory; import org.apache.curator.framework.AuthInfo; import org.apache.curator.framework.CuratorFramework; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ha.HAServiceProtocol; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.http.HttpServer2; import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.source.JvmMetrics; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import 
org.apache.hadoop.security.authorize.ProxyUsers; import org.apache.hadoop.service.CompositeService; import org.apache.hadoop.service.Service; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.GenericOptionsParser; import org.apache.hadoop.util.JvmPauseMonitor; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.curator.ZKCuratorManager; import org.apache.hadoop.util.VersionInfo; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.conf.ConfigurationProvider; import org.apache.hadoop.yarn.conf.ConfigurationProviderFactory; import org.apache.hadoop.yarn.conf.HAUtil; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventDispatcher; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.nodelabels.NodeAttributesManager; import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher; import org.apache.hadoop.yarn.server.resourcemanager.federation.FederationStateStoreService; import org.apache.hadoop.yarn.server.resourcemanager.metrics.CombinedSystemMetricsPublisher; import org.apache.hadoop.yarn.server.resourcemanager.metrics.NoOpSystemMetricPublisher; import 
org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher; import org.apache.hadoop.yarn.server.resourcemanager.metrics.TimelineServiceV1Publisher; import org.apache.hadoop.yarn.server.resourcemanager.metrics.TimelineServiceV2Publisher; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NodeAttributesManagerImpl; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMDelegatedNodeLabelsUpdater; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.recovery.NullRMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStoreFactory; import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable; import org.apache.hadoop.yarn.server.resourcemanager.reservation.AbstractReservationSystem; import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem; import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceProfilesManager; import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceProfilesManagerImpl; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.monitor.RMAppLifetimeMonitor; import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.YarnConfigurationStore; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.YarnConfigurationStoreFactory; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.MemoryPlacementConstraintManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManagerService; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.MultiNodeSortingManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfScheduler; import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer; import org.apache.hadoop.yarn.server.resourcemanager.security.ProxyCAManager; import org.apache.hadoop.yarn.server.resourcemanager.security.QueueACLsManager; import org.apache.hadoop.yarn.server.resourcemanager.timelineservice.RMTimelineCollectorManager; import org.apache.hadoop.yarn.server.resourcemanager.volume.csi.VolumeManager; import org.apache.hadoop.yarn.server.resourcemanager.volume.csi.VolumeManagerImpl; import 
org.apache.hadoop.yarn.server.resourcemanager.volume.csi.processor.VolumeAMSProcessor; import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebApp; import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebAppUtil; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.server.service.SystemServiceManager; import org.apache.hadoop.yarn.server.webproxy.AppReportFetcher; import org.apache.hadoop.yarn.server.webproxy.ProxyCA; import org.apache.hadoop.yarn.server.webproxy.ProxyUriUtils; import org.apache.hadoop.yarn.server.webproxy.WebAppProxy; import org.apache.hadoop.yarn.server.webproxy.WebAppProxyServlet; import org.apache.hadoop.yarn.webapp.WebApp; import org.apache.hadoop.yarn.webapp.WebApps; import org.apache.hadoop.yarn.webapp.WebApps.Builder; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; import org.apache.zookeeper.server.auth.DigestAuthenticationProvider; import org.eclipse.jetty.webapp.WebAppContext; import java.io.IOException; import java.io.InputStream; import java.io.PrintStream; import java.lang.management.ManagementFactory; import java.lang.management.ThreadMXBean; import java.net.InetSocketAddress; import java.net.URI; import java.net.URL; import java.nio.charset.Charset; import java.security.PrivilegedExceptionAction; import java.security.SecureRandom; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; /** * The ResourceManager is the main class that is a set of components. * "I am the ResourceManager. All your resources belong to us..." * */ @SuppressWarnings("unchecked") public class ResourceManager extends CompositeService implements Recoverable, ResourceManagerMXBean { /** * Priority of the ResourceManager shutdown hook. 
*/ public static final int SHUTDOWN_HOOK_PRIORITY = 30; /** * Used for generation of various ids. */ public static final int EPOCH_BIT_SHIFT = 40; private static final Logger LOG = LoggerFactory.getLogger(ResourceManager.class); private static final Marker FATAL = MarkerFactory.getMarker("FATAL"); private static long clusterTimeStamp = System.currentTimeMillis(); /* * UI2 webapp name */ public static final String UI2_WEBAPP_NAME = "/ui2"; /** * "Always On" services. Services that need to run always irrespective of * the HA state of the RM. */ @VisibleForTesting protected RMContextImpl rmContext; private Dispatcher rmDispatcher; @VisibleForTesting protected AdminService adminService; /** * "Active" services. Services that need to run only on the Active RM. * These services are managed (initialized, started, stopped) by the * {@link CompositeService} RMActiveServices. * * RM is active when (1) HA is disabled, or (2) HA is enabled and the RM is * in Active state. */ protected RMActiveServices activeServices; protected RMSecretManagerService rmSecretManagerService; protected ResourceScheduler scheduler; protected ReservationSystem reservationSystem; private ClientRMService clientRM; protected ApplicationMasterService masterService; protected NMLivelinessMonitor nmLivelinessMonitor; protected NodesListManager nodesListManager; protected RMAppManager rmAppManager; protected ApplicationACLsManager applicationACLsManager; protected QueueACLsManager queueACLsManager; private FederationStateStoreService federationStateStoreService; private ProxyCAManager proxyCAManager; private WebApp webApp; private AppReportFetcher fetcher = null; protected ResourceTrackerService resourceTracker; private JvmMetrics jvmMetrics; private boolean curatorEnabled = false; private ZKCuratorManager zkManager; private final String zkRootNodePassword = Long.toString(new SecureRandom().nextLong()); private boolean recoveryEnabled; @VisibleForTesting protected String webAppAddress; private 
ConfigurationProvider configurationProvider = null; /** End of Active services */ private Configuration conf; private UserGroupInformation rmLoginUGI; public ResourceManager() { super("ResourceManager"); } public RMContext getRMContext() { return this.rmContext; } public static long getClusterTimeStamp() { return clusterTimeStamp; } public String getRMLoginUser() { return rmLoginUGI.getShortUserName(); } private RMInfo rmStatusInfoBean; @VisibleForTesting protected static void setClusterTimeStamp(long timestamp) { clusterTimeStamp = timestamp; } @VisibleForTesting Dispatcher getRmDispatcher() { return rmDispatcher; } @VisibleForTesting protected ResourceProfilesManager createResourceProfileManager() { ResourceProfilesManager resourceProfilesManager = new ResourceProfilesManagerImpl(); return resourceProfilesManager; } @Override protected void serviceInit(Configuration conf) throws Exception { this.conf = conf; UserGroupInformation.setConfiguration(conf); this.rmContext = new RMContextImpl(); rmContext.setResourceManager(this); rmContext.setYarnConfiguration(conf); rmStatusInfoBean = new RMInfo(this); rmStatusInfoBean.register(); // Set HA configuration should be done before login this.rmContext.setHAEnabled(HAUtil.isHAEnabled(this.conf)); if (this.rmContext.isHAEnabled()) { HAUtil.verifyAndSetConfiguration(this.conf); } // Set UGI and do login // If security is enabled, use login user // If security is not enabled, use current user this.rmLoginUGI = UserGroupInformation.getCurrentUser(); try { doSecureLogin(); } catch(IOException ie) { throw new YarnRuntimeException("Failed to login", ie); } this.configurationProvider = ConfigurationProviderFactory.getConfigurationProvider(conf); this.configurationProvider.init(this.conf); rmContext.setConfigurationProvider(configurationProvider); // load core-site.xml loadConfigurationXml(YarnConfiguration.CORE_SITE_CONFIGURATION_FILE); // Do refreshSuperUserGroupsConfiguration with loaded core-site.xml // Or use RM specific 
configurations to overwrite the common ones first // if they exist RMServerUtils.processRMProxyUsersConf(conf); ProxyUsers.refreshSuperUserGroupsConfiguration(this.conf); // load yarn-site.xml loadConfigurationXml(YarnConfiguration.YARN_SITE_CONFIGURATION_FILE); validateConfigs(this.conf); // register the handlers for all AlwaysOn services using setupDispatcher(). rmDispatcher = setupDispatcher(); addIfService(rmDispatcher); rmContext.setDispatcher(rmDispatcher); // The order of services below should not be changed as services will be // started in same order // As elector service needs admin service to be initialized and started, // first we add admin service then elector service adminService = createAdminService(); addService(adminService); rmContext.setRMAdminService(adminService); // elector must be added post adminservice if (this.rmContext.isHAEnabled()) { // If the RM is configured to use an embedded leader elector, // initialize the leader elector. if (HAUtil.isAutomaticFailoverEnabled(conf) && HAUtil.isAutomaticFailoverEmbedded(conf)) { EmbeddedElector elector = createEmbeddedElector(); addIfService(elector); rmContext.setLeaderElectorService(elector); } } createAndInitActiveServices(false); webAppAddress = WebAppUtils.getWebAppBindURL(this.conf, YarnConfiguration.RM_BIND_HOST, WebAppUtils.getRMWebAppURLWithoutScheme(this.conf)); RMApplicationHistoryWriter rmApplicationHistoryWriter = createRMApplicationHistoryWriter(); addService(rmApplicationHistoryWriter); rmContext.setRMApplicationHistoryWriter(rmApplicationHistoryWriter); // initialize the RM timeline collector first so that the system metrics // publisher can bind to it if (YarnConfiguration.timelineServiceV2Enabled(this.conf)) { RMTimelineCollectorManager timelineCollectorManager = createRMTimelineCollectorManager(); addService(timelineCollectorManager); rmContext.setRMTimelineCollectorManager(timelineCollectorManager); } SystemMetricsPublisher systemMetricsPublisher = 
createSystemMetricsPublisher(); addIfService(systemMetricsPublisher); rmContext.setSystemMetricsPublisher(systemMetricsPublisher); registerMXBean(); super.serviceInit(this.conf); } private void loadConfigurationXml(String configurationFile) throws YarnException, IOException { InputStream configurationInputStream = this.configurationProvider.getConfigurationInputStream(this.conf, configurationFile); if (configurationInputStream != null) { this.conf.addResource(configurationInputStream, configurationFile); } } protected EmbeddedElector createEmbeddedElector() throws IOException { EmbeddedElector elector; curatorEnabled = conf.getBoolean(YarnConfiguration.CURATOR_LEADER_ELECTOR, YarnConfiguration.DEFAULT_CURATOR_LEADER_ELECTOR_ENABLED); if (curatorEnabled) { this.zkManager = createAndStartZKManager(conf); elector = new CuratorBasedElectorService(this); } else { elector = new ActiveStandbyElectorBasedElectorService(this); } return elector; } /** * Get ZooKeeper Curator manager, creating and starting if not exists. * @param config Configuration for the ZooKeeper curator. * @return ZooKeeper Curator manager. * @throws IOException If it cannot create the manager. 
*/ public ZKCuratorManager createAndStartZKManager(Configuration config) throws IOException { ZKCuratorManager manager = new ZKCuratorManager(config); // Get authentication List<AuthInfo> authInfos = new ArrayList<>(); if (HAUtil.isHAEnabled(config) && HAUtil.getConfValueForRMInstance( YarnConfiguration.ZK_RM_STATE_STORE_ROOT_NODE_ACL, config) == null) { String zkRootNodeUsername = HAUtil.getConfValueForRMInstance( YarnConfiguration.RM_ADDRESS, YarnConfiguration.DEFAULT_RM_ADDRESS, config); String defaultFencingAuth = zkRootNodeUsername + ":" + zkRootNodePassword; byte[] defaultFencingAuthData = defaultFencingAuth.getBytes(Charset.forName("UTF-8")); String scheme = new DigestAuthenticationProvider().getScheme(); AuthInfo authInfo = new AuthInfo(scheme, defaultFencingAuthData); authInfos.add(authInfo); } manager.start(authInfos); return manager; } public ZKCuratorManager getZKManager() { return zkManager; } public CuratorFramework getCurator() { if (this.zkManager == null) { return null; } return this.zkManager.getCurator(); } public String getZkRootNodePassword() { return this.zkRootNodePassword; } protected QueueACLsManager createQueueACLsManager(ResourceScheduler scheduler, Configuration conf) { return QueueACLsManager.getQueueACLsManager(scheduler, conf); } @VisibleForTesting protected void setRMStateStore(RMStateStore rmStore) { rmStore.setRMDispatcher(rmDispatcher); rmStore.setResourceManager(this); rmContext.setStateStore(rmStore); } protected EventHandler<SchedulerEvent> createSchedulerEventDispatcher() { String dispatcherName = "SchedulerEventDispatcher"; EventDispatcher dispatcher; int threadMonitorRate = conf.getInt( YarnConfiguration.YARN_DISPATCHER_CPU_MONITOR_SAMPLES_PER_MIN, YarnConfiguration.DEFAULT_YARN_DISPATCHER_CPU_MONITOR_SAMPLES_PER_MIN); if (threadMonitorRate > 0) { dispatcher = new SchedulerEventDispatcher(dispatcherName, threadMonitorRate); ClusterMetrics.getMetrics().setRmEventProcMonitorEnable(true); } else { dispatcher = new 
EventDispatcher(this.scheduler, dispatcherName); } dispatcher. setMetrics(GenericEventTypeMetricsManager. create(dispatcher.getName(), SchedulerEventType.class)); return dispatcher; } protected Dispatcher createDispatcher() { AsyncDispatcher dispatcher = new AsyncDispatcher("RM Event dispatcher"); // Add 4 busy event types. GenericEventTypeMetrics nodesListManagerEventTypeMetrics = GenericEventTypeMetricsManager. create(dispatcher.getName(), NodesListManagerEventType.class); dispatcher.addMetrics(nodesListManagerEventTypeMetrics, nodesListManagerEventTypeMetrics .getEnumClass()); GenericEventTypeMetrics rmNodeEventTypeMetrics = GenericEventTypeMetricsManager. create(dispatcher.getName(), RMNodeEventType.class); dispatcher.addMetrics(rmNodeEventTypeMetrics, rmNodeEventTypeMetrics .getEnumClass()); GenericEventTypeMetrics rmAppEventTypeMetrics = GenericEventTypeMetricsManager. create(dispatcher.getName(), RMAppEventType.class); dispatcher.addMetrics(rmAppEventTypeMetrics, rmAppEventTypeMetrics .getEnumClass()); GenericEventTypeMetrics rmAppAttemptEventTypeMetrics = GenericEventTypeMetricsManager. 
create(dispatcher.getName(), RMAppAttemptEventType.class); dispatcher.addMetrics(rmAppAttemptEventTypeMetrics, rmAppAttemptEventTypeMetrics .getEnumClass()); return dispatcher; } protected ResourceScheduler createScheduler() { String schedulerClassName = conf.get(YarnConfiguration.RM_SCHEDULER, YarnConfiguration.DEFAULT_RM_SCHEDULER); LOG.info("Using Scheduler: " + schedulerClassName); try { Class<?> schedulerClazz = Class.forName(schedulerClassName); if (ResourceScheduler.class.isAssignableFrom(schedulerClazz)) { return (ResourceScheduler) ReflectionUtils.newInstance(schedulerClazz, this.conf); } else { throw new YarnRuntimeException("Class: " + schedulerClassName + " not instance of " + ResourceScheduler.class.getCanonicalName()); } } catch (ClassNotFoundException e) { throw new YarnRuntimeException("Could not instantiate Scheduler: " + schedulerClassName, e); } } protected ReservationSystem createReservationSystem() { String reservationClassName = conf.get(YarnConfiguration.RM_RESERVATION_SYSTEM_CLASS, AbstractReservationSystem.getDefaultReservationSystem(scheduler)); if (reservationClassName == null) { return null; } LOG.info("Using ReservationSystem: " + reservationClassName); try { Class<?> reservationClazz = Class.forName(reservationClassName); if (ReservationSystem.class.isAssignableFrom(reservationClazz)) { return (ReservationSystem) ReflectionUtils.newInstance( reservationClazz, this.conf); } else { throw new YarnRuntimeException("Class: " + reservationClassName + " not instance of " + ReservationSystem.class.getCanonicalName()); } } catch (ClassNotFoundException e) { throw new YarnRuntimeException( "Could not instantiate ReservationSystem: " + reservationClassName, e); } } protected SystemServiceManager createServiceManager() { String schedulerClassName = YarnConfiguration.DEFAULT_YARN_API_SYSTEM_SERVICES_CLASS; LOG.info("Using SystemServiceManager: " + schedulerClassName); try { Class<?> schedulerClazz = Class.forName(schedulerClassName); if 
(SystemServiceManager.class.isAssignableFrom(schedulerClazz)) { return (SystemServiceManager) ReflectionUtils .newInstance(schedulerClazz, this.conf); } else { throw new YarnRuntimeException( "Class: " + schedulerClassName + " not instance of " + SystemServiceManager.class.getCanonicalName()); } } catch (ClassNotFoundException e) { throw new YarnRuntimeException( "Could not instantiate SystemServiceManager: " + schedulerClassName, e); } } protected ApplicationMasterLauncher createAMLauncher() { return new ApplicationMasterLauncher(this.rmContext); } private NMLivelinessMonitor createNMLivelinessMonitor() { return new NMLivelinessMonitor(this.rmContext .getDispatcher()); } protected AMLivelinessMonitor createAMLivelinessMonitor() { return new AMLivelinessMonitor(this.rmDispatcher); } protected RMNodeLabelsManager createNodeLabelManager() throws InstantiationException, IllegalAccessException { return new RMNodeLabelsManager(); } protected NodeAttributesManager createNodeAttributesManager() { NodeAttributesManagerImpl namImpl = new NodeAttributesManagerImpl(); namImpl.setRMContext(rmContext); return namImpl; } protected AllocationTagsManager createAllocationTagsManager() { return new AllocationTagsManager(this.rmContext); } protected PlacementConstraintManagerService createPlacementConstraintManager() { // Use the in memory Placement Constraint Manager. 
return new MemoryPlacementConstraintManager(); } protected DelegationTokenRenewer createDelegationTokenRenewer() { return new DelegationTokenRenewer(); } protected RMAppManager createRMAppManager() { return new RMAppManager(this.rmContext, this.scheduler, this.masterService, this.applicationACLsManager, this.conf); } protected RMApplicationHistoryWriter createRMApplicationHistoryWriter() { return new RMApplicationHistoryWriter(); } private RMTimelineCollectorManager createRMTimelineCollectorManager() { return new RMTimelineCollectorManager(this); } private FederationStateStoreService createFederationStateStoreService() { return new FederationStateStoreService(rmContext); } protected MultiNodeSortingManager<SchedulerNode> createMultiNodeSortingManager() { return new MultiNodeSortingManager<SchedulerNode>(); } protected SystemMetricsPublisher createSystemMetricsPublisher() { List<SystemMetricsPublisher> publishers = new ArrayList<SystemMetricsPublisher>(); if (YarnConfiguration.timelineServiceV1Enabled(conf) && YarnConfiguration.systemMetricsPublisherEnabled(conf)) { SystemMetricsPublisher publisherV1 = new TimelineServiceV1Publisher(); publishers.add(publisherV1); } if (YarnConfiguration.timelineServiceV2Enabled(conf) && YarnConfiguration.systemMetricsPublisherEnabled(conf)) { // we're dealing with the v.2.x publisher LOG.info("system metrics publisher with the timeline service V2 is " + "configured"); SystemMetricsPublisher publisherV2 = new TimelineServiceV2Publisher( rmContext.getRMTimelineCollectorManager()); publishers.add(publisherV2); } if (publishers.isEmpty()) { LOG.info("TimelineServicePublisher is not configured"); SystemMetricsPublisher noopPublisher = new NoOpSystemMetricPublisher(); publishers.add(noopPublisher); } for (SystemMetricsPublisher publisher : publishers) { addIfService(publisher); } SystemMetricsPublisher combinedPublisher = new CombinedSystemMetricsPublisher(publishers); return combinedPublisher; } // sanity check for configurations 
/**
 * Validates RM configuration at startup and fails fast on invalid values:
 * AM max attempts (per-RM and global) must be positive, and the NM expiry
 * interval must be at least the NM heartbeat interval.
 *
 * @param conf the configuration to validate
 * @throws YarnRuntimeException if any of the checked values is invalid
 */
protected static void validateConfigs(Configuration conf) {
  // validate max-attempts
  int rmMaxAppAttempts = conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
      YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
  if (rmMaxAppAttempts <= 0) {
    throw new YarnRuntimeException("Invalid rm am max attempts configuration"
        + ", " + YarnConfiguration.RM_AM_MAX_ATTEMPTS + "="
        + rmMaxAppAttempts + ", it should be a positive integer.");
  }
  // The global limit defaults to the per-RM limit when it is not set.
  int globalMaxAppAttempts = conf.getInt(
      YarnConfiguration.GLOBAL_RM_AM_MAX_ATTEMPTS,
      conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
          YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS));
  if (globalMaxAppAttempts <= 0) {
    throw new YarnRuntimeException("Invalid global max attempts configuration"
        + ", " + YarnConfiguration.GLOBAL_RM_AM_MAX_ATTEMPTS + "="
        + globalMaxAppAttempts + ", it should be a positive integer.");
  }
  // validate expireIntvl >= heartbeatIntvl
  long expireIntvl = conf.getLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS,
      YarnConfiguration.DEFAULT_RM_NM_EXPIRY_INTERVAL_MS);
  long heartbeatIntvl =
      conf.getLong(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS,
          YarnConfiguration.DEFAULT_RM_NM_HEARTBEAT_INTERVAL_MS);
  if (expireIntvl < heartbeatIntvl) {
    throw new YarnRuntimeException("Nodemanager expiry interval should be no"
        + " less than heartbeat interval, "
        + YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS + "=" + expireIntvl
        + ", " + YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS + "="
        + heartbeatIntvl);
  }
}

/**
 * RMActiveServices handles all the Active services in the RM.
*/
@Private
public class RMActiveServices extends CompositeService {

  private DelegationTokenRenewer delegationTokenRenewer;
  private EventHandler<SchedulerEvent> schedulerDispatcher;
  private ApplicationMasterLauncher applicationMasterLauncher;
  private ContainerAllocationExpirer containerAllocationExpirer;
  private ResourceManager rm;
  // True when this init is a re-activation (HA failback) rather than first start.
  private boolean fromActive = false;
  private StandByTransitionRunnable standByTransitionRunnable;
  private RMNMInfo rmnmInfo;
  // Periodically samples event-queue sizes into ClusterMetrics.
  private ScheduledThreadPoolExecutor eventQueueMetricExecutor;

  RMActiveServices(ResourceManager rm) {
    super("RMActiveServices");
    this.rm = rm;
  }

  /**
   * Creates and registers every service that runs only while this RM is
   * active: secret managers, liveness monitors, label/attribute/constraint
   * managers, the state store (with recovery), the scheduler and its event
   * dispatcher, RPC services, and optional federation/volume/reservation
   * components. NOTE: registration order matters — several services read
   * fields (e.g. scheduler, masterService) set earlier in this method.
   */
  @Override
  protected void serviceInit(Configuration configuration) throws Exception {
    standByTransitionRunnable = new StandByTransitionRunnable();

    rmSecretManagerService = createRMSecretManagerService();
    addService(rmSecretManagerService);

    containerAllocationExpirer = new ContainerAllocationExpirer(rmDispatcher);
    addService(containerAllocationExpirer);
    rmContext.setContainerAllocationExpirer(containerAllocationExpirer);

    // Two AM liveness monitors: one for running AMs, one for finishing AMs.
    AMLivelinessMonitor amLivelinessMonitor = createAMLivelinessMonitor();
    addService(amLivelinessMonitor);
    rmContext.setAMLivelinessMonitor(amLivelinessMonitor);

    AMLivelinessMonitor amFinishingMonitor = createAMLivelinessMonitor();
    addService(amFinishingMonitor);
    rmContext.setAMFinishingMonitor(amFinishingMonitor);

    RMAppLifetimeMonitor rmAppLifetimeMonitor = createRMAppLifetimeMonitor();
    addService(rmAppLifetimeMonitor);
    rmContext.setRMAppLifetimeMonitor(rmAppLifetimeMonitor);

    RMNodeLabelsManager nlm = createNodeLabelManager();
    nlm.setRMContext(rmContext);
    addService(nlm);
    rmContext.setNodeLabelManager(nlm);

    NodeAttributesManager nam = createNodeAttributesManager();
    addService(nam);
    rmContext.setNodeAttributesManager(nam);

    AllocationTagsManager allocationTagsManager =
        createAllocationTagsManager();
    rmContext.setAllocationTagsManager(allocationTagsManager);

    PlacementConstraintManagerService placementConstraintManager =
        createPlacementConstraintManager();
    addService(placementConstraintManager);
    rmContext.setPlacementConstraintManager(placementConstraintManager);

    // add resource profiles here because it's used by AbstractYarnScheduler
    ResourceProfilesManager resourceProfilesManager =
        createResourceProfileManager();
    resourceProfilesManager.init(conf);
    rmContext.setResourceProfilesManager(resourceProfilesManager);

    MultiNodeSortingManager<SchedulerNode> multiNodeSortingManager =
        createMultiNodeSortingManager();
    multiNodeSortingManager.setRMContext(rmContext);
    addService(multiNodeSortingManager);
    rmContext.setMultiNodeSortingManager(multiNodeSortingManager);

    RMDelegatedNodeLabelsUpdater delegatedNodeLabelsUpdater =
        createRMDelegatedNodeLabelsUpdater();
    if (delegatedNodeLabelsUpdater != null) {
      addService(delegatedNodeLabelsUpdater);
      rmContext.setRMDelegatedNodeLabelsUpdater(delegatedNodeLabelsUpdater);
    }

    recoveryEnabled = conf.getBoolean(YarnConfiguration.RECOVERY_ENABLED,
        YarnConfiguration.DEFAULT_RM_RECOVERY_ENABLED);

    RMStateStore rmStore = null;
    if (recoveryEnabled) {
      rmStore = RMStateStoreFactory.getStore(conf);
      boolean isWorkPreservingRecoveryEnabled = conf.getBoolean(
          YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED,
          YarnConfiguration.DEFAULT_RM_WORK_PRESERVING_RECOVERY_ENABLED);
      rmContext
          .setWorkPreservingRecoveryEnabled(isWorkPreservingRecoveryEnabled);
    } else {
      // Recovery disabled: use a store that persists nothing.
      rmStore = new NullRMStateStore();
    }

    try {
      rmStore.setResourceManager(rm);
      rmStore.init(conf);
      rmStore.setRMDispatcher(rmDispatcher);
    } catch (Exception e) {
      // the Exception from stateStore.init() needs to be handled for
      // HA and we need to give up master status if we got fenced
      LOG.error("Failed to init state store", e);
      throw e;
    }
    rmContext.setStateStore(rmStore);

    if (UserGroupInformation.isSecurityEnabled()) {
      delegationTokenRenewer = createDelegationTokenRenewer();
      rmContext.setDelegationTokenRenewer(delegationTokenRenewer);
    }

    // Register event handler for NodesListManager
    nodesListManager = new NodesListManager(rmContext);
    rmDispatcher.register(NodesListManagerEventType.class, nodesListManager);
    addService(nodesListManager);
    rmContext.setNodesListManager(nodesListManager);

    // Initialize the scheduler
    scheduler = createScheduler();
    scheduler.setRMContext(rmContext);
    addIfService(scheduler);
    rmContext.setScheduler(scheduler);

    schedulerDispatcher = createSchedulerEventDispatcher();
    addIfService(schedulerDispatcher);
    rmDispatcher.register(SchedulerEventType.class, schedulerDispatcher);

    // Register event handler for RmAppEvents
    rmDispatcher.register(RMAppEventType.class,
        new ApplicationEventDispatcher(rmContext));

    // Register event handler for RmAppAttemptEvents
    rmDispatcher.register(RMAppAttemptEventType.class,
        new ApplicationAttemptEventDispatcher(rmContext));

    // Register event handler for RmNodes
    rmDispatcher.register(
        RMNodeEventType.class, new NodeEventDispatcher(rmContext));

    nmLivelinessMonitor = createNMLivelinessMonitor();
    addService(nmLivelinessMonitor);

    resourceTracker = createResourceTrackerService();
    addService(resourceTracker);
    rmContext.setResourceTrackerService(resourceTracker);

    MetricsSystem ms = DefaultMetricsSystem.initialize("ResourceManager");
    if (fromActive) {
      // Re-activation after standby: reattach existing JVM metrics sources.
      JvmMetrics.reattach(ms, jvmMetrics);
      UserGroupInformation.reattachMetrics();
    } else {
      jvmMetrics = JvmMetrics.initSingleton("ResourceManager", null);
    }

    JvmPauseMonitor pauseMonitor = new JvmPauseMonitor();
    addService(pauseMonitor);
    jvmMetrics.setPauseMonitor(pauseMonitor);

    // Initialize the Reservation system
    if (conf.getBoolean(YarnConfiguration.RM_RESERVATION_SYSTEM_ENABLE,
        YarnConfiguration.DEFAULT_RM_RESERVATION_SYSTEM_ENABLE)) {
      reservationSystem = createReservationSystem();
      if (reservationSystem != null) {
        reservationSystem.setRMContext(rmContext);
        addIfService(reservationSystem);
        rmContext.setReservationSystem(reservationSystem);
        LOG.info("Initialized Reservation system");
      }
    }

    masterService = createApplicationMasterService();
    createAndRegisterOpportunisticDispatcher(masterService);
    addService(masterService);
    rmContext.setApplicationMasterService(masterService);

    applicationACLsManager = new ApplicationACLsManager(conf);

    queueACLsManager = createQueueACLsManager(scheduler, conf);

    rmAppManager = createRMAppManager();
    // Register event handler for RMAppManagerEvents
    rmDispatcher.register(RMAppManagerEventType.class, rmAppManager);

    clientRM = createClientRMService();
    addService(clientRM);
    rmContext.setClientRMService(clientRM);

    applicationMasterLauncher = createAMLauncher();
    rmDispatcher.register(AMLauncherEventType.class,
        applicationMasterLauncher);
    addService(applicationMasterLauncher);

    if (UserGroupInformation.isSecurityEnabled()) {
      addService(delegationTokenRenewer);
      delegationTokenRenewer.setRMContext(rmContext);
    }

    if (HAUtil.isFederationEnabled(conf)) {
      String cId = YarnConfiguration.getClusterId(conf);
      if (cId.isEmpty()) {
        String errMsg = "Cannot initialize RM as Federation is enabled"
            + " but cluster id is not configured.";
        LOG.error(errMsg);
        throw new YarnRuntimeException(errMsg);
      }
      federationStateStoreService = createFederationStateStoreService();
      addIfService(federationStateStoreService);
      LOG.info("Initialized Federation membership.");
    }

    proxyCAManager = new ProxyCAManager(new ProxyCA(), rmContext);
    addService(proxyCAManager);
    rmContext.setProxyCAManager(proxyCAManager);

    rmnmInfo = new RMNMInfo(rmContext, scheduler);

    if (conf.getBoolean(YarnConfiguration.YARN_API_SERVICES_ENABLE, false)) {
      SystemServiceManager systemServiceManager = createServiceManager();
      addIfService(systemServiceManager);
    }

    // Add volume manager to RM context when it is necessary
    String[] amsProcessorList = conf.getStrings(
        YarnConfiguration.RM_APPLICATION_MASTER_SERVICE_PROCESSORS);
    if (amsProcessorList != null && Arrays.stream(amsProcessorList)
        .anyMatch(s -> VolumeAMSProcessor.class.getName().equals(s))) {
      VolumeManager volumeManager = new VolumeManagerImpl();
      rmContext.setVolumeManager(volumeManager);
      addIfService(volumeManager);
    }

    // Sample the RM and scheduler event-queue sizes once per second into
    // ClusterMetrics from a daemon thread.
    eventQueueMetricExecutor = new ScheduledThreadPoolExecutor(1,
        new ThreadFactoryBuilder().
        setDaemon(true).setNameFormat("EventQueueSizeMetricThread").
        build());
    eventQueueMetricExecutor.scheduleAtFixedRate(new Runnable() {
      @Override
      public void run() {
        int rmEventQueueSize = ((AsyncDispatcher)getRMContext().
            getDispatcher()).getEventQueueSize();
        ClusterMetrics.getMetrics().setRmEventQueueSize(rmEventQueueSize);
        int schedulerEventQueueSize = ((EventDispatcher)schedulerDispatcher).
            getEventQueueSize();
        ClusterMetrics.getMetrics().
            setSchedulerEventQueueSize(schedulerEventQueueSize);
      }
    }, 1, 1, TimeUnit.SECONDS);

    super.serviceInit(conf);
  }

  /**
   * If opportunistic scheduling is enabled, registers a dedicated event
   * dispatcher for the OpportunisticContainerAllocatorAMService; otherwise
   * does nothing.
   */
  private void createAndRegisterOpportunisticDispatcher(
      ApplicationMasterService service) {
    if (!isOpportunisticSchedulingEnabled(conf)) {
      return;
    }
    EventDispatcher oppContainerAllocEventDispatcher = new EventDispatcher(
        (OpportunisticContainerAllocatorAMService) service,
        OpportunisticContainerAllocatorAMService.class.getName());
    // Add an event dispatcher for the
    // OpportunisticContainerAllocatorAMService to handle node
    // additions, updates and removals. Since the SchedulerEvent is currently
    // a super set of these, we register interest for it.
    addService(oppContainerAllocEventDispatcher);
    rmDispatcher
        .register(SchedulerEventType.class, oppContainerAllocEventDispatcher);
  }

  @Override
  protected void serviceStart() throws Exception {
    RMStateStore rmStore = rmContext.getStateStore();
    // The state store needs to start irrespective of recoveryEnabled as apps
    // need events to move to further states.
    rmStore.start();

    if(recoveryEnabled) {
      try {
        LOG.info("Recovery started");
        rmStore.checkVersion();
        if (rmContext.isWorkPreservingRecoveryEnabled()) {
          // New epoch per activation distinguishes container IDs across restarts.
          rmContext.setEpoch(rmStore.getAndIncrementEpoch());
        }
        RMState state = rmStore.loadState();
        recover(state);
        LOG.info("Recovery ended");
      } catch (Exception e) {
        // the Exception from loadState() needs to be handled for
        // HA and we need to give up master status if we got fenced
        LOG.error("Failed to load/recover state", e);
        throw e;
      }
    } else {
      if (HAUtil.isFederationEnabled(conf)) {
        // Without recovery, take the epoch straight from configuration.
        long epoch = conf.getLong(YarnConfiguration.RM_EPOCH,
            YarnConfiguration.DEFAULT_RM_EPOCH);
        rmContext.setEpoch(epoch);
        LOG.info("Epoch set for Federation: " + epoch);
      }
    }

    super.serviceStart();
  }

  @Override
  protected void serviceStop() throws Exception {
    // Stop child services first, then tear down metrics, JMX, and the store.
    super.serviceStop();

    DefaultMetricsSystem.shutdown();
    // unregister rmnmInfo bean
    if (rmnmInfo != null) {
      rmnmInfo.unregister();
    }
    if (rmContext != null) {
      RMStateStore store = rmContext.getStateStore();
      try {
        if (null != store) {
          store.close();
        }
      } catch (Exception e) {
        // Best-effort close; failing to close the store must not abort stop.
        LOG.error("Error closing store.", e);
      }
    }
    if (eventQueueMetricExecutor != null) {
      eventQueueMetricExecutor.shutdownNow();
    }
  }
}

/**
 * Handles fatal RM events: in HA mode transitions to standby; in
 * stand-alone mode decides per event type whether to terminate the process.
 */
@Private
private class RMFatalEventDispatcher implements EventHandler<RMFatalEvent> {
  @Override
  public void handle(RMFatalEvent event) {
    LOG.error("Received " + event);

    if (HAUtil.isHAEnabled(getConfig())) {
      // If we're in an HA config, the right answer is always to go into
      // standby.
      LOG.warn("Transitioning the resource manager to standby.");
      handleTransitionToStandByInNewThread();
    } else {
      // If we're stand-alone, we probably want to shut down, but the if and
      // how depends on the event.
      switch(event.getType()) {
      case STATE_STORE_FENCED:
        LOG.error(FATAL, "State store fenced even though the resource " +
            "manager is not configured for high availability. Shutting " +
            "down this resource manager to protect the integrity of the " +
            "state store.");
        ExitUtil.terminate(1, event.getExplanation());
        break;
      case STATE_STORE_OP_FAILED:
        if (YarnConfiguration.shouldRMFailFast(getConfig())) {
          LOG.error(FATAL, "Shutting down the resource manager because a " +
              "state store operation failed, and the resource manager is " +
              "configured to fail fast. See the yarn.fail-fast and " +
              "yarn.resourcemanager.fail-fast properties.");
          ExitUtil.terminate(1, event.getExplanation());
        } else {
          LOG.warn("Ignoring state store operation failure because the " +
              "resource manager is not configured to fail fast. See the " +
              "yarn.fail-fast and yarn.resourcemanager.fail-fast " +
              "properties.");
        }
        break;
      default:
        // Any other fatal event type: shut down unconditionally.
        LOG.error(FATAL, "Shutting down the resource manager.");
        ExitUtil.terminate(1, event.getExplanation());
      }
    }
  }
}

/**
 * Dispatches SchedulerEvents to the scheduler and runs a monitor thread
 * that reports the event-processor thread's CPU usage as cluster metrics.
 */
@Private
private class SchedulerEventDispatcher extends
    EventDispatcher<SchedulerEvent> {

  private final Thread eventProcessorMonitor;

  SchedulerEventDispatcher(String name, int samplesPerMin) {
    super(scheduler, name);
    this.eventProcessorMonitor =
        new Thread(new EventProcessorMonitor(getEventProcessorId(),
            samplesPerMin));
    this.eventProcessorMonitor
        .setName("ResourceManager Event Processor Monitor");
  }
  // EventProcessorMonitor keeps track of how much CPU the EventProcessor
  // thread is using. It takes a configurable number of samples per minute,
  // and then reports the Avg and Max of previous 60 seconds as cluster
  // metrics. Units are usecs per second of CPU used.
  // Avg is not accurate until one minute of samples have been received.
  private final class EventProcessorMonitor implements Runnable {
    // Thread id of the event-processor thread being sampled.
    private final long tid;
    // False (monitor disabled) when metrics or per-thread CPU time are unavailable.
    private final boolean run;
    private final ThreadMXBean tmxb;
    private final ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics();
    // Number of samples kept per 60-second window.
    private final int samples;

    EventProcessorMonitor(long id, int samplesPerMin) {
      assert samplesPerMin > 0;
      this.tid = id;
      this.samples = samplesPerMin;
      this.tmxb = ManagementFactory.getThreadMXBean();
      if (clusterMetrics != null && tmxb != null &&
          tmxb.isThreadCpuTimeSupported()) {
        this.run = true;
        clusterMetrics.setRmEventProcMonitorEnable(true);
      } else {
        this.run = false;
      }
    }
    public void run() {
      int index = 0;
      long[] values = new long[samples];
      int sleepMs = (60 * 1000) / samples;

      while (run && !isStopped() && !Thread.currentThread().isInterrupted()) {
        try {
          long cpuBefore = tmxb.getThreadCpuTime(tid);
          long wallClockBefore = Time.monotonicNow();

          Thread.sleep(sleepMs);

          long wallClockDelta = Time.monotonicNow() - wallClockBefore;
          long cpuDelta = tmxb.getThreadCpuTime(tid) - cpuBefore;

          // Nanoseconds / Milliseconds = usec per second
          values[index] = cpuDelta / wallClockDelta;
          index = (index + 1) % samples;

          // Publish the max and average over the ring buffer of samples.
          long max = 0;
          long sum = 0;
          for (int i = 0; i < samples; i++) {
            sum += values[i];
            max = Math.max(max, values[i]);
          }
          clusterMetrics.setRmEventProcCPUAvg(sum / samples);
          clusterMetrics.setRmEventProcCPUMax(max);
        } catch (InterruptedException e) {
          LOG.error("Returning, interrupted : " + e);
          return;
        }
      }
    }
  }

  @Override
  protected void serviceStart() throws Exception {
    super.serviceStart();
    this.eventProcessorMonitor.start();
  }

  @Override
  protected void serviceStop() throws Exception {
    super.serviceStop();
    // Interrupt the sleeping monitor thread and wait for it to exit.
    this.eventProcessorMonitor.interrupt();
    try {
      this.eventProcessorMonitor.join();
    } catch (InterruptedException e) {
      throw new YarnRuntimeException(e);
    }
  }
}

/**
 * Transition to standby state in a new thread. The transition operation is
 * asynchronous to avoid deadlock caused by cyclic dependency.
*/
private void handleTransitionToStandByInNewThread() {
  Thread standByTransitionThread =
      new Thread(activeServices.standByTransitionRunnable);
  standByTransitionThread.setName("StandByTransitionThread");
  standByTransitionThread.start();
}

/**
 * The class to transition RM to standby state. The same
 * {@link StandByTransitionRunnable} object could be used in multiple threads,
 * but runs only once. That's because RM can go back to active state after
 * transition to standby state, the same runnable in the old context can't
 * transition RM to standby state again. A new runnable is created every time
 * RM transitions to active state.
 */
private class StandByTransitionRunnable implements Runnable {
  // The atomic variable to make sure multiple threads with the same runnable
  // run only once.
  private final AtomicBoolean hasAlreadyRun = new AtomicBoolean(false);

  @Override
  public void run() {
    // Run this only once, even if multiple threads end up triggering
    // this simultaneously.
    if (hasAlreadyRun.getAndSet(true)) {
      return;
    }

    if (rmContext.isHAEnabled()) {
      try {
        // Transition to standby and reinit active services
        LOG.info("Transitioning RM to Standby mode");
        transitionToStandby(true);
        EmbeddedElector elector = rmContext.getLeaderElectorService();
        if (elector != null) {
          // Rejoin leader election so this RM can become active again later.
          elector.rejoinElection();
        }
      } catch (Exception e) {
        LOG.error(FATAL, "Failed to transition RM to Standby mode.", e);
        ExitUtil.terminate(1, e);
      }
    }
  }
}

/**
 * Routes RMAppEvents to the matching RMApp from the RM context; events for
 * unknown application ids are silently dropped. Handler errors are logged,
 * never propagated to the dispatcher.
 */
@Private
public static final class ApplicationEventDispatcher implements
    EventHandler<RMAppEvent> {

  private final RMContext rmContext;

  public ApplicationEventDispatcher(RMContext rmContext) {
    this.rmContext = rmContext;
  }

  @Override
  public void handle(RMAppEvent event) {
    ApplicationId appID = event.getApplicationId();
    RMApp rmApp = this.rmContext.getRMApps().get(appID);
    if (rmApp != null) {
      try {
        rmApp.handle(event);
      } catch (Throwable t) {
        LOG.error("Error in handling event type " + event.getType()
            + " for application " + appID, t);
      }
    }
  }
}

@Private
public
static final class ApplicationAttemptEventDispatcher implements EventHandler<RMAppAttemptEvent> { private final RMContext rmContext; public ApplicationAttemptEventDispatcher(RMContext rmContext) { this.rmContext = rmContext; } @Override public void handle(RMAppAttemptEvent event) { ApplicationAttemptId appAttemptId = event.getApplicationAttemptId(); ApplicationId appId = appAttemptId.getApplicationId(); RMApp rmApp = this.rmContext.getRMApps().get(appId); if (rmApp != null) { RMAppAttempt rmAppAttempt = rmApp.getRMAppAttempt(appAttemptId); if (rmAppAttempt != null) { try { rmAppAttempt.handle(event); } catch (Throwable t) { LOG.error("Error in handling event type " + event.getType() + " for applicationAttempt " + appAttemptId, t); } } else if (rmApp.getApplicationSubmissionContext() != null && rmApp.getApplicationSubmissionContext() .getKeepContainersAcrossApplicationAttempts() && event.getType() == RMAppAttemptEventType.CONTAINER_FINISHED) { // For work-preserving AM restart, failed attempts are still // capturing CONTAINER_FINISHED events and record the finished // containers which will be used by current attempt. // We just keep 'yarn.resourcemanager.am.max-attempts' in // RMStateStore. If the finished container's attempt is deleted, we // use the first attempt in app.attempts to deal with these events. 
RMAppAttempt previousFailedAttempt = rmApp.getAppAttempts().values().iterator().next(); if (previousFailedAttempt != null) { try { LOG.debug("Event {} handled by {}", event.getType(), previousFailedAttempt); previousFailedAttempt.handle(event); } catch (Throwable t) { LOG.error("Error in handling event type " + event.getType() + " for applicationAttempt " + appAttemptId + " with " + previousFailedAttempt, t); } } else { LOG.error("Event " + event.getType() + " not handled, because previousFailedAttempt is null"); } } } } } @Private public static final class NodeEventDispatcher implements EventHandler<RMNodeEvent> { private final RMContext rmContext; public NodeEventDispatcher(RMContext rmContext) { this.rmContext = rmContext; } @Override public void handle(RMNodeEvent event) { NodeId nodeId = event.getNodeId(); RMNode node = this.rmContext.getRMNodes().get(nodeId); if (node != null) { try { ((EventHandler<RMNodeEvent>) node).handle(event); } catch (Throwable t) { LOG.error("Error in handling event type " + event.getType() + " for node " + nodeId, t); } } } } /** * Return a HttpServer.Builder that the journalnode / namenode / secondary * namenode can use to initialize their HTTP / HTTPS server. 
*
 * @param conf configuration object
 * @param httpAddr HTTP address
 * @param httpsAddr HTTPS address
 * @param name Name of the server
 * @throws IOException from Builder
 * @return builder object
 */
public static HttpServer2.Builder httpServerTemplateForRM(Configuration conf,
    final InetSocketAddress httpAddr, final InetSocketAddress httpsAddr,
    String name) throws IOException {
  HttpServer2.Builder builder = new HttpServer2.Builder().setName(name)
      .setConf(conf).setSecurityEnabled(false);
  // Port 0 means "bind any free port"; let the server search for one.
  if (httpAddr.getPort() == 0) {
    builder.setFindPort(true);
  }

  URI uri = URI.create("http://" + NetUtils.getHostPortString(httpAddr));
  builder.addEndpoint(uri);
  LOG.info("Starting Web-server for " + name + " at: " + uri);

  return builder;
}

/**
 * Build and start the RM web application, optionally embedding the
 * web-app proxy servlet and the ui2 web application.
 */
protected void startWepApp() {
  Map<String, String> serviceConfig = null;
  Configuration conf = getConfig();

  RMWebAppUtil.setupSecurityAndFilters(conf,
      getClientRMService().rmDTSecretManager);

  Map<String, String> params = new HashMap<String, String>();
  if (getConfig().getBoolean(YarnConfiguration.YARN_API_SERVICES_ENABLE,
      false)) {
    String apiPackages = "org.apache.hadoop.yarn.service.webapp;" +
        "org.apache.hadoop.yarn.webapp";
    params.put("com.sun.jersey.config.property.resourceConfigClass",
        "com.sun.jersey.api.core.PackagesResourceConfig");
    params.put("com.sun.jersey.config.property.packages", apiPackages);
  }

  Builder<ResourceManager> builder =
      WebApps
          .$for("cluster", ResourceManager.class, this, "ws")
          .with(conf)
          .withServlet("API-Service", "/app/*",
              ServletContainer.class, params, false)
          .withHttpSpnegoPrincipalKey(
              YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY)
          .withHttpSpnegoKeytabKey(
              YarnConfiguration.RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY)
          .withCSRFProtection(YarnConfiguration.RM_CSRF_PREFIX)
          .withXFSProtection(YarnConfiguration.RM_XFS_PREFIX)
          .at(webAppAddress);
  String proxyHostAndPort = rmContext.getProxyHostAndPort(conf);
  // When the RM itself hosts the web-app proxy, embed the proxy servlet.
  if(WebAppUtils.getResolvedRMWebAppURLWithoutScheme(conf).
      equals(proxyHostAndPort)) {
    if (HAUtil.isHAEnabled(conf)) {
      fetcher = new AppReportFetcher(conf);
    } else {
      fetcher = new AppReportFetcher(conf, getClientRMService());
    }
    builder.withServlet(ProxyUriUtils.PROXY_SERVLET_NAME,
        ProxyUriUtils.PROXY_PATH_SPEC, WebAppProxyServlet.class);
    builder.withAttribute(WebAppProxy.PROXY_CA,
        rmContext.getProxyCAManager().getProxyCA());
    builder.withAttribute(WebAppProxy.FETCHER_ATTRIBUTE, fetcher);
    String[] proxyParts = proxyHostAndPort.split(":");
    builder.withAttribute(WebAppProxy.PROXY_HOST_ATTRIBUTE, proxyParts[0]);
  }

  WebAppContext uiWebAppContext = null;
  if (getConfig().getBoolean(YarnConfiguration.YARN_WEBAPP_UI2_ENABLE,
      YarnConfiguration.DEFAULT_YARN_WEBAPP_UI2_ENABLE)) {
    String onDiskPath = getConfig()
        .get(YarnConfiguration.YARN_WEBAPP_UI2_WARFILE_PATH);

    uiWebAppContext = new WebAppContext();
    uiWebAppContext.setContextPath(UI2_WEBAPP_NAME);

    // No explicit war path configured: look for the versioned war on the
    // classpath, then fall back to an exploded webapps directory.
    if (null == onDiskPath) {
      String war = "hadoop-yarn-ui-" + VersionInfo.getVersion() + ".war";
      URL url = getClass().getClassLoader().getResource(war);

      if (null == url) {
        onDiskPath = getWebAppsPath("ui2");
      } else {
        onDiskPath = url.getFile();
      }
    }
    if (onDiskPath == null || onDiskPath.isEmpty()) {
      LOG.error("No war file or webapps found for ui2 !");
    } else {
      if (onDiskPath.endsWith(".war")) {
        uiWebAppContext.setWar(onDiskPath);
        LOG.info("Using war file at: " + onDiskPath);
      } else {
        uiWebAppContext.setResourceBase(onDiskPath);
        LOG.info("Using webapps at: " + onDiskPath);
      }
    }
  }

  builder.withAttribute(IsResourceManagerActiveServlet.RM_ATTRIBUTE, this);
  builder.withServlet(IsResourceManagerActiveServlet.SERVLET_NAME,
      IsResourceManagerActiveServlet.PATH_SPEC,
      IsResourceManagerActiveServlet.class);

  webApp = builder.start(new RMWebApp(this), uiWebAppContext);
}

// Resolve the classpath location of an exploded "webapps/<appName>"
// directory; empty string when not found.
private String getWebAppsPath(String appName) {
  URL url = getClass().getClassLoader().getResource("webapps/" + appName);
  if (url == null) {
    return "";
  }
  return url.toString();
}

/**
 * Helper method to create and init {@link #activeServices}.
* This creates an
 * instance of {@link RMActiveServices} and initializes it.
 *
 * @param fromActive Indicates if the call is from the active state transition
 *                   or the RM initialization.
 */
protected void createAndInitActiveServices(boolean fromActive) {
  activeServices = new RMActiveServices(this);
  activeServices.fromActive = fromActive;
  activeServices.init(conf);
}

/**
 * Helper method to start {@link #activeServices}.
 * @throws Exception
 */
void startActiveServices() throws Exception {
  if (activeServices != null) {
    clusterTimeStamp = System.currentTimeMillis();
    activeServices.start();
  }
}

/**
 * Helper method to stop {@link #activeServices}.
 * @throws Exception
 */
void stopActiveServices() {
  if (activeServices != null) {
    activeServices.stop();
    activeServices = null;
  }
}

// Drop all metrics and, when requested, rebuild the RMContext and the
// active services from scratch.
void reinitialize(boolean initialize) {
  ClusterMetrics.destroy();
  QueueMetrics.clearQueueMetrics();
  getResourceScheduler().resetSchedulerMetrics();
  if (initialize) {
    resetRMContext();
    createAndInitActiveServices(true);
  }
}

@VisibleForTesting
protected boolean areActiveServicesRunning() {
  return activeServices != null && activeServices.isInState(STATE.STARTED);
}

synchronized void transitionToActive() throws Exception {
  if (rmContext.getHAServiceState() ==
      HAServiceProtocol.HAServiceState.ACTIVE) {
    LOG.info("Already in active state");
    return;
  }
  LOG.info("Transitioning to active state");

  // Start the active services as the RM login user; on failure roll the
  // context back so a later transition attempt starts clean.
  this.rmLoginUGI.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      try {
        startActiveServices();
        return null;
      } catch (Exception e) {
        reinitialize(true);
        throw e;
      }
    }
  });

  rmContext.setHAServiceState(HAServiceProtocol.HAServiceState.ACTIVE);
  LOG.info("Transitioned to active state");
}

synchronized void transitionToStandby(boolean initialize)
    throws Exception {
  if (rmContext.getHAServiceState() ==
      HAServiceProtocol.HAServiceState.STANDBY) {
    LOG.info("Already in standby state");
    return;
  }
  LOG.info("Transitioning to standby state");
  HAServiceState state = rmContext.getHAServiceState();
  rmContext.setHAServiceState(HAServiceProtocol.HAServiceState.STANDBY);
  // Only tear services down when we were actually active before.
  if (state == HAServiceProtocol.HAServiceState.ACTIVE) {
    stopActiveServices();
    reinitialize(initialize);
  }
  LOG.info("Transitioned to standby state");
}

@Override
protected void serviceStart() throws Exception {
  if (this.rmContext.isHAEnabled()) {
    transitionToStandby(false);
  }

  startWepApp();
  if (getConfig().getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER,
      false)) {
    int port = webApp.port();
    WebAppUtils.setRMWebAppPort(conf, port);
  }
  super.serviceStart();

  // Non HA case, start after RM services are started.
  if (!this.rmContext.isHAEnabled()) {
    transitionToActive();
  }
}

protected void doSecureLogin() throws IOException {
  InetSocketAddress socAddr = getBindAddress(conf);
  SecurityUtil.login(this.conf, YarnConfiguration.RM_KEYTAB,
      YarnConfiguration.RM_PRINCIPAL, socAddr.getHostName());

  // if security is enable, set rmLoginUGI as UGI of loginUser
  if (UserGroupInformation.isSecurityEnabled()) {
    this.rmLoginUGI = UserGroupInformation.getLoginUser();
  }
}

@Override
protected void serviceStop() throws Exception {
  if (webApp != null) {
    webApp.stop();
  }
  if (fetcher != null) {
    fetcher.stop();
  }
  if (configurationProvider != null) {
    configurationProvider.close();
  }
  super.serviceStop();
  if (zkManager != null) {
    zkManager.close();
  }
  transitionToStandby(false);
  rmContext.setHAServiceState(HAServiceState.STOPPING);
  rmStatusInfoBean.unregister();
}

protected ResourceTrackerService createResourceTrackerService() {
  return new ResourceTrackerService(this.rmContext, this.nodesListManager,
      this.nmLivelinessMonitor,
      this.rmContext.getContainerTokenSecretManager(),
      this.rmContext.getNMTokenSecretManager());
}

protected ClientRMService createClientRMService() {
  return new ClientRMService(this.rmContext, scheduler, this.rmAppManager,
      this.applicationACLsManager, this.queueACLsManager,
      this.rmContext.getRMDelegationTokenSecretManager());
}

protected ApplicationMasterService createApplicationMasterService() {
  Configuration config = this.rmContext.getYarnConfiguration();
  // Opportunistic/distributed scheduling gets a specialized AM service.
  if (isOpportunisticSchedulingEnabled(conf)) {
    if (YarnConfiguration.isDistSchedulingEnabled(config)
        && !YarnConfiguration
        .isOpportunisticContainerAllocationEnabled(config)) {
      throw new YarnRuntimeException(
          "Invalid parameters: opportunistic container allocation has to "
              + "be enabled when distributed scheduling is enabled.");
    }
    OpportunisticContainerAllocatorAMService
        oppContainerAllocatingAMService =
        new OpportunisticContainerAllocatorAMService(this.rmContext,
            scheduler);
    this.rmContext.setContainerQueueLimitCalculator(
        oppContainerAllocatingAMService.getNodeManagerQueueLimitCalculator());
    return oppContainerAllocatingAMService;
  }
  return new ApplicationMasterService(this.rmContext, scheduler);
}

protected AdminService createAdminService() {
  return new AdminService(this);
}

protected RMSecretManagerService createRMSecretManagerService() {
  return new RMSecretManagerService(conf, rmContext);
}

private boolean isOpportunisticSchedulingEnabled(Configuration conf) {
  return YarnConfiguration.isOpportunisticContainerAllocationEnabled(conf)
      || YarnConfiguration.isDistSchedulingEnabled(conf);
}

/**
 * Create RMDelegatedNodeLabelsUpdater based on configuration.
 */
protected RMDelegatedNodeLabelsUpdater createRMDelegatedNodeLabelsUpdater() {
  if (conf.getBoolean(YarnConfiguration.NODE_LABELS_ENABLED,
      YarnConfiguration.DEFAULT_NODE_LABELS_ENABLED)
      && YarnConfiguration.isDelegatedCentralizedNodeLabelConfiguration(
      conf)) {
    return new RMDelegatedNodeLabelsUpdater(rmContext);
  } else {
    return null;
  }
}

@Private
public ClientRMService getClientRMService() {
  return this.clientRM;
}

/**
 * return the scheduler.
 * @return the scheduler for the Resource Manager.
 */
@Private
public ResourceScheduler getResourceScheduler() {
  return this.scheduler;
}

/**
 * return the resource tracking component.
 * @return the resource tracking component.
*/
@Private
public ResourceTrackerService getResourceTrackerService() {
  return this.resourceTracker;
}

@Private
public ApplicationMasterService getApplicationMasterService() {
  return this.masterService;
}

@Private
public ApplicationACLsManager getApplicationACLsManager() {
  return this.applicationACLsManager;
}

@Private
public QueueACLsManager getQueueACLsManager() {
  return this.queueACLsManager;
}

@Private
@VisibleForTesting
public FederationStateStoreService getFederationStateStoreService() {
  return this.federationStateStoreService;
}

@Private
WebApp getWebapp() {
  return this.webApp;
}

@Override
public void recover(RMState state) throws Exception {
  // recover RMdelegationTokenSecretManager
  rmContext.getRMDelegationTokenSecretManager().recover(state);

  // recover AMRMTokenSecretManager
  rmContext.getAMRMTokenSecretManager().recover(state);

  // recover reservations
  if (reservationSystem != null) {
    reservationSystem.recover(state);
  }
  // recover applications
  rmAppManager.recover(state);

  // recover ProxyCA
  rmContext.getProxyCAManager().recover(state);

  setSchedulerRecoveryStartAndWaitTime(state, conf);
}

public static void main(String argv[]) {
  Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(ResourceManager.class, argv, LOG);
  try {
    Configuration conf = new YarnConfiguration();
    GenericOptionsParser hParser = new GenericOptionsParser(conf, argv);
    argv = hParser.getRemainingArgs();

    // If -format-state-store, then delete RMStateStore; else startup normally
    if (argv.length >= 1) {
      if (argv[0].equals("-format-state-store")) {
        deleteRMStateStore(conf);
      } else if (argv[0].equals("-format-conf-store")) {
        deleteRMConfStore(conf);
      } else if (argv[0].equals("-remove-application-from-state-store")
          && argv.length == 2) {
        removeApplication(conf, argv[1]);
      } else {
        printUsage(System.err);
      }
    } else {
      ResourceManager resourceManager = new ResourceManager();
      ShutdownHookManager.get().addShutdownHook(
          new CompositeServiceShutdownHook(resourceManager),
          SHUTDOWN_HOOK_PRIORITY);
      resourceManager.init(conf);
      resourceManager.start();
    }
  } catch (Throwable t) {
    LOG.error(FATAL, "Error starting ResourceManager", t);
    System.exit(-1);
  }
}

/**
 * Register the handlers for alwaysOn services
 */
private Dispatcher setupDispatcher() {
  Dispatcher dispatcher = createDispatcher();
  dispatcher.register(RMFatalEventType.class,
      new ResourceManager.RMFatalEventDispatcher());
  return dispatcher;
}

private void resetRMContext() {
  RMContextImpl rmContextImpl = new RMContextImpl();
  // transfer service context to new RM service Context
  rmContextImpl.setServiceContext(rmContext.getServiceContext());

  // reset dispatcher
  Dispatcher dispatcher = setupDispatcher();
  ((Service) dispatcher).init(this.conf);
  ((Service) dispatcher).start();
  removeService((Service) rmDispatcher);
  // Need to stop previous rmDispatcher before assigning new dispatcher
  // otherwise causes "AsyncDispatcher event handler" thread leak
  ((Service) rmDispatcher).stop();
  rmDispatcher = dispatcher;
  addIfService(rmDispatcher);
  rmContextImpl.setDispatcher(dispatcher);

  rmContext = rmContextImpl;
}

private void setSchedulerRecoveryStartAndWaitTime(RMState state,
    Configuration conf) {
  // Only relevant when there are applications to recover.
  if (!state.getApplicationState().isEmpty()) {
    long waitTime =
        conf.getLong(
            YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS,
            YarnConfiguration.DEFAULT_RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS);
    rmContext.setSchedulerRecoveryStartAndWaitTime(waitTime);
  }
}

/**
 * Retrieve RM bind address from configuration
 *
 * @param conf
 * @return InetSocketAddress
 */
public static InetSocketAddress getBindAddress(Configuration conf) {
  return conf.getSocketAddr(YarnConfiguration.RM_ADDRESS,
      YarnConfiguration.DEFAULT_RM_ADDRESS, YarnConfiguration.DEFAULT_RM_PORT);
}

/**
 * Deletes the RMStateStore
 *
 * @param conf
 * @throws Exception
 */
@VisibleForTesting
static void deleteRMStateStore(Configuration conf) throws Exception {
  RMStateStore rmStore = RMStateStoreFactory.getStore(conf);
  rmStore.setResourceManager(new ResourceManager());
  rmStore.init(conf);
  rmStore.start();
  try {
    LOG.info("Deleting ResourceManager state store...");
    rmStore.deleteStore();
    LOG.info("State store deleted");
  } finally {
    rmStore.stop();
  }
}

/**
 * Deletes the YarnConfigurationStore
 *
 * @param conf
 * @throws Exception
 */
@VisibleForTesting
static void deleteRMConfStore(Configuration conf) throws Exception {
  ResourceManager rm = new ResourceManager();
  rm.conf = conf;
  ResourceScheduler scheduler = rm.createScheduler();
  RMContextImpl rmContext = new RMContextImpl();
  rmContext.setResourceManager(rm);

  // Only these store backends hold mutable scheduler configuration.
  boolean isConfigurationMutable = false;
  String confProviderStr = conf.get(
      YarnConfiguration.SCHEDULER_CONFIGURATION_STORE_CLASS,
      YarnConfiguration.DEFAULT_CONFIGURATION_STORE);
  switch (confProviderStr) {
  case YarnConfiguration.MEMORY_CONFIGURATION_STORE:
  case YarnConfiguration.LEVELDB_CONFIGURATION_STORE:
  case YarnConfiguration.ZK_CONFIGURATION_STORE:
  case YarnConfiguration.FS_CONFIGURATION_STORE:
    isConfigurationMutable = true;
    break;
  default:
  }

  if (scheduler instanceof MutableConfScheduler && isConfigurationMutable) {
    YarnConfigurationStore confStore = YarnConfigurationStoreFactory
        .getStore(conf);
    confStore.initialize(conf, conf, rmContext);
    confStore.format();
  } else {
    System.out.println("Scheduler Configuration format only "
        + "supported by MutableConfScheduler.");
  }
}

@VisibleForTesting
static void removeApplication(Configuration conf, String applicationId)
    throws Exception {
  RMStateStore rmStore = RMStateStoreFactory.getStore(conf);
  rmStore.setResourceManager(new ResourceManager());
  rmStore.init(conf);
  rmStore.start();
  try {
    ApplicationId removeAppId = ApplicationId.fromString(applicationId);
    LOG.info("Deleting application " + removeAppId + " from state store");
    rmStore.removeApplication(removeAppId);
    LOG.info("Application is deleted from state store");
  } finally {
    rmStore.stop();
  }
}

private static void printUsage(PrintStream out) {
  out.println("Usage: yarn resourcemanager [-format-state-store]");
  out.println(" " + "[-remove-application-from-state-store <appId>]");
  out.println(" " + "[-format-conf-store]" + "\n");
}

protected RMAppLifetimeMonitor createRMAppLifetimeMonitor() {
  return new RMAppLifetimeMonitor(this.rmContext);
}

/**
 * Register ResourceManagerMXBean.
 */
private void registerMXBean() {
  MBeans.register("ResourceManager", "ResourceManager", this);
}

@Override
public boolean isSecurityEnabled() {
  return UserGroupInformation.isSecurityEnabled();
}
}
JingchengDu/hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
Java
apache-2.0
71,584
# Chocolatey install script for NirSoft WakeMeOnLan.
# Downloads the zip, verifies its checksum, unpacks it into the package
# tools directory, and marks the extracted exe as a GUI application.

$packageName  = 'wakemeonlan'
$url          = 'http://www.nirsoft.net/utils/wakemeonlan.zip'
$checksum     = '7b0de93a56e982c001f1cab1f1b350bd7a3c3e5c'
$checksumType = 'sha1'
$toolsDir     = Split-Path -parent $MyInvocation.MyCommand.Definition
$installFile  = Join-Path $toolsDir "$($packageName).exe"

$zipPackageArgs = @{
    PackageName   = $packageName
    Url           = $url
    UnzipLocation = $toolsDir
    Url64bit      = ''
    Checksum      = $checksum
    ChecksumType  = $checksumType
}
Install-ChocolateyZipPackage @zipPackageArgs

# An empty "<exe>.gui" sidecar tells Chocolatey's shim generator to treat
# the executable as a GUI app (no console window on launch).
Set-Content -Path "$installFile.gui" -Value $null
dtgm/chocolatey-packages
automatic/_output/wakemeonlan/1.68/tools/chocolateyInstall.ps1
PowerShell
apache-2.0
672
CC = gcc

# htslib is an external dependency; fail fast with a clear message when the
# caller has not pointed us at a build of it.
ifndef HTSLIB_DIR
$(error HTSLIB_DIR is undefined, see README.txt for details)
endif

# These targets do not name real files.
.PHONY: all install clean

all: calc_genotypes ld_vcf

# Fix: install also copies ld_vcf, so it must depend on it (the original
# only listed calc_genotypes and could copy a stale or missing ld_vcf).
install: calc_genotypes ld_vcf
	mkdir -p ../bin
	cp calc_genotypes ../bin/
	cp ld_vcf ../bin/

# Fix: link the object file built by the rule below. The original ran
# "$(CC) -o calc_genotypes calc_genotypes.c", recompiling from source and
# leaving the declared calc_genotypes.o prerequisite unused.
calc_genotypes: calc_genotypes.o
	$(CC) -o calc_genotypes calc_genotypes.o

calc_genotypes.o: calc_genotypes.c
	$(CC) -c calc_genotypes.c

# ld_vcf links against htslib; rpath is embedded so the binary finds the
# shared library at run time without LD_LIBRARY_PATH.
ld_vcf: ld_vcf.c
	$(CC) -Wall -O3 ld_vcf.c -I $(HTSLIB_DIR)/htslib -o ld_vcf -L$(HTSLIB_DIR) -Wl,-rpath,$(HTSLIB_DIR) -lhts

clean:
	\rm -f *.o calc_genotypes ld_vcf
Ensembl/ensembl-variation
C_code/Makefile
Makefile
apache-2.0
517
from a10sdk.common.A10BaseClass import A10BaseClass


class SslCertKey(A10BaseClass):

    """SSL certificate/key file information and management commands.

    Supports CRUD operations and inherits from `common/A10BaseClass`;
    this class is the "PARENT" class for this module.

    URL for this object::

        `https://<Hostname|Ip address>//axapi/v3/file/ssl-cert-key`.

    Keyword parameters (all optional):

    :param action: one of 'create', 'import', 'export', 'copy', 'rename',
        'check', 'replace', 'delete'.
    :param dst_file: destination file name for copy and rename actions
        (1-32 characters).
    :param file_handle: full path of the uploaded file (1-255 characters).
    :param size: ssl certificate file size in bytes (0-2147483647).
    :param DeviceProxy: device proxy for REST operations and session
        handling; refer to `common/device_proxy.py`.

    NOTE(review): the local certificate file name attribute is stored as
    ``A10WW_file`` — presumably a generated rename to avoid shadowing the
    ``file`` builtin; confirm against the a10sdk code generator.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = []
        # Fixed A10 metadata the base class uses for URL construction.
        self.b_key = "ssl-cert-key"
        self.a10_url = "/axapi/v3/file/ssl-cert-key"

        # Default (empty) values for every supported attribute.
        defaults = {
            "DeviceProxy": "",
            "action": "",
            "dst_file": "",
            "file_handle": "",
            "A10WW_file": "",
            "size": "",
        }
        for attr_name, default in defaults.items():
            setattr(self, attr_name, default)

        # Caller-supplied keywords override the defaults above.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
amwelch/a10sdk-python
a10sdk/core/A10_file/file_ssl_cert_key.py
Python
apache-2.0
1,981
package com.alex.develop.fragment.service; /** * Created by Administrator on 2015-11-19. */ interface OptionalServiceInterFace<T> { public T LoadData(); }
xhackertxl/StocksAnalyzer
app/src/main/java/com/alex/develop/fragment/service/OptionalServiceInterFace.java
Java
apache-2.0
163
/**
 * JVectormap demo page
 *
 * Renders an interactive world map into every element with the
 * `.world-map` class. Colours come from the global `$.staticApp`
 * object — assumes it exposes `dark` and `primary`; TODO confirm
 * against the app's static configuration.
 */

(function ($) {
  'use strict';

  $('.world-map').vectorMap({
    map: 'world_mill_en',
    backgroundColor: 'transparent',
    zoomOnScroll: false,
    strokeWidth: 1,

    // Country shapes: mostly transparent, slightly more opaque on hover.
    regionStyle: {
      initial: {
        fill: $.staticApp.dark,
        'fill-opacity': 0.2
      },
      hover: {
        'fill-opacity': 0.3
      }
    },

    // Point markers: primary-coloured dots with a soft halo that grow
    // when hovered.
    markerStyle: {
      initial: {
        fill: $.staticApp.primary,
        stroke: $.staticApp.primary,
        'fill-opacity': 1,
        'stroke-width': 8,
        'stroke-opacity': 0.3,
        r: 5
      },
      hover: {
        r: 8,
        stroke: $.staticApp.primary,
        'stroke-width': 10
      }
    },

    // Demo markers as [latitude, longitude] pairs.
    markers: [{
      latLng: [41.90, 12.45],
      name: 'Vatican City'
    }, {
      latLng: [43.73, 7.41],
      name: 'Monaco'
    }, {
      latLng: [-0.52, 166.93],
      name: 'Nauru'
    }, {
      latLng: [-8.51, 179.21],
      name: 'Tuvalu'
    }, {
      latLng: [43.93, 12.46],
      name: 'San Marino'
    }, {
      latLng: [47.14, 9.52],
      name: 'Liechtenstein'
    }, {
      latLng: [35.88, 14.5],
      name: 'Malta'
    }, {
      latLng: [13.16, -61.23],
      name: 'Saint Vincent and the Grenadines'
    }, {
      latLng: [-4.61, 55.45],
      name: 'Seychelles'
    }, {
      latLng: [7.35, 134.46],
      name: 'Palau'
    }, {
      latLng: [42.5, 1.51],
      name: 'Andorra'
    }, {
      latLng: [6.91, 158.18],
      name: 'Federated States of Micronesia'
    }, {
      latLng: [1.3, 103.8],
      name: 'Singapore'
    }, {
      latLng: [1.46, 173.03],
      name: 'Kiribati'
    }, {
      latLng: [-21.13, -175.2],
      name: 'Tonga'
    }, {
      latLng: [-20.2, 57.5],
      name: 'Mauritius'
    }, {
      latLng: [26.02, 50.55],
      name: 'Bahrain'
    }]
  });
})(jQuery);
wiemKh/BotOrient
client/scripts/maps/jvector.js
JavaScript
apache-2.0
1,813
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!-- NewPage --> <html lang="en"> <head> <!-- Generated by javadoc (1.8.0_111) on Mon Oct 31 20:20:43 PDT 2016 --> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> <title>Uses of Class org.apache.guacamole.xml.DocumentHandler (guacamole-ext 0.9.10-incubating API)</title> <meta name="date" content="2016-10-31"> <link rel="stylesheet" type="text/css" href="../../../../../stylesheet.css" title="Style"> <script type="text/javascript" src="../../../../../script.js"></script> </head> <body> <script type="text/javascript"><!-- try { if (location.href.indexOf('is-external=true') == -1) { parent.document.title="Uses of Class org.apache.guacamole.xml.DocumentHandler (guacamole-ext 0.9.10-incubating API)"; } } catch(err) { } //--> </script> <noscript> <div>JavaScript is disabled on your browser.</div> </noscript> <!-- ========= START OF TOP NAVBAR ======= --> <div class="topNav"><a name="navbar.top"> <!-- --> </a> <div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div> <a name="navbar.top.firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../../overview-summary.html">Overview</a></li> <li><a href="../package-summary.html">Package</a></li> <li><a href="../../../../../org/apache/guacamole/xml/DocumentHandler.html" title="class in org.apache.guacamole.xml">Class</a></li> <li class="navBarCell1Rev">Use</li> <li><a href="../package-tree.html">Tree</a></li> <li><a href="../../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../../index-all.html">Index</a></li> <li><a href="../../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li>Prev</li> <li>Next</li> </ul> <ul class="navList"> <li><a href="../../../../../index.html?org/apache/guacamole/xml/class-use/DocumentHandler.html" target="_top">Frames</a></li> 
<li><a href="DocumentHandler.html" target="_top">No&nbsp;Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_top"> <li><a href="../../../../../allclasses-noframe.html">All&nbsp;Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_top"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip.navbar.top"> <!-- --> </a></div> <!-- ========= END OF TOP NAVBAR ========= --> <div class="header"> <h2 title="Uses of Class org.apache.guacamole.xml.DocumentHandler" class="title">Uses of Class<br>org.apache.guacamole.xml.DocumentHandler</h2> </div> <div class="classUseContainer">No usage of org.apache.guacamole.xml.DocumentHandler</div> <!-- ======= START OF BOTTOM NAVBAR ====== --> <div class="bottomNav"><a name="navbar.bottom"> <!-- --> </a> <div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div> <a name="navbar.bottom.firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../../overview-summary.html">Overview</a></li> <li><a href="../package-summary.html">Package</a></li> <li><a href="../../../../../org/apache/guacamole/xml/DocumentHandler.html" title="class in org.apache.guacamole.xml">Class</a></li> <li class="navBarCell1Rev">Use</li> <li><a href="../package-tree.html">Tree</a></li> <li><a href="../../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../../index-all.html">Index</a></li> <li><a href="../../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li>Prev</li> <li>Next</li> </ul> <ul class="navList"> <li><a href="../../../../../index.html?org/apache/guacamole/xml/class-use/DocumentHandler.html" target="_top">Frames</a></li> <li><a href="DocumentHandler.html" target="_top">No&nbsp;Frames</a></li> </ul> <ul class="navList" 
id="allclasses_navbar_bottom"> <li><a href="../../../../../allclasses-noframe.html">All&nbsp;Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_bottom"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip.navbar.bottom"> <!-- --> </a></div> <!-- ======== END OF BOTTOM NAVBAR ======= --> <p class="legalCopy"><small>Copyright &#169; 2016. All rights reserved.</small></p> </body> </html>
mike-jumper/incubator-guacamole-website
doc/0.9.10-incubating/guacamole-ext/org/apache/guacamole/xml/class-use/DocumentHandler.html
HTML
apache-2.0
4,629
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package e2e import ( "bytes" "fmt" "io/ioutil" "net/http" "os/exec" "strconv" "strings" "time" "k8s.io/kubernetes/pkg/api" client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/test/e2e/framework" testutils "k8s.io/kubernetes/test/utils" "github.com/golang/glog" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) const ( defaultTimeout = 3 * time.Minute resizeTimeout = 5 * time.Minute scaleUpTimeout = 5 * time.Minute scaleDownTimeout = 15 * time.Minute gkeEndpoint = "https://test-container.sandbox.googleapis.com" gkeUpdateTimeout = 15 * time.Minute ) var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() { f := framework.NewDefaultFramework("autoscaling") var c *client.Client var nodeCount int var coresPerNode int var memCapacityMb int var originalSizes map[string]int BeforeEach(func() { c = f.Client framework.SkipUnlessProviderIs("gce", "gke") nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) nodeCount = len(nodes.Items) Expect(nodeCount).NotTo(BeZero()) cpu := nodes.Items[0].Status.Capacity[api.ResourceCPU] mem := nodes.Items[0].Status.Capacity[api.ResourceMemory] coresPerNode = int((&cpu).MilliValue() / 1000) memCapacityMb = int((&mem).Value() / 1024 / 1024) originalSizes = make(map[string]int) sum := 0 for _, mig := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") { 
size, err := GroupSize(mig) framework.ExpectNoError(err) By(fmt.Sprintf("Initial size of %s: %d", mig, size)) originalSizes[mig] = size sum += size } Expect(nodeCount).Should(Equal(sum)) if framework.ProviderIs("gke") { val, err := isAutoscalerEnabled(3) framework.ExpectNoError(err) if !val { err = enableAutoscaler("default-pool", 3, 5) framework.ExpectNoError(err) } } }) AfterEach(func() { By(fmt.Sprintf("Restoring initial size of the cluster")) setMigSizes(originalSizes) framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount, scaleDownTimeout)) }) It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() { By("Creating unschedulable pod") ReserveMemory(f, "memory-reservation", 1, memCapacityMb, false) defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "memory-reservation") By("Waiting for scale up hoping it won't happen") // Verfiy, that the appropreate event was generated. eventFound := false EventsLoop: for start := time.Now(); time.Since(start) < scaleUpTimeout; time.Sleep(20 * time.Second) { By("Waiting for NotTriggerScaleUp event") events, err := f.Client.Events(f.Namespace.Name).List(api.ListOptions{}) framework.ExpectNoError(err) for _, e := range events.Items { if e.InvolvedObject.Kind == "Pod" && e.Reason == "NotTriggerScaleUp" && strings.Contains(e.Message, "it wouldn't fit if a new node is added") { By("NotTriggerScaleUp event found") eventFound = true break EventsLoop } } } Expect(eventFound).Should(Equal(true)) // Verify, that cluster size is not changed. 
framework.ExpectNoError(WaitForClusterSizeFunc(f.Client, func(size int) bool { return size <= nodeCount }, time.Second)) }) It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]", func() { ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false) defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "memory-reservation") // Verify, that cluster size is increased framework.ExpectNoError(WaitForClusterSizeFunc(f.Client, func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout)) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) }) It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func() { framework.SkipUnlessProviderIs("gke") By("Creating new node-pool with one n1-standard-4 machine") const extraPoolName = "extra-pool" addNodePool(extraPoolName, "n1-standard-4", 1) defer deleteNodePool(extraPoolName) framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+1, resizeTimeout)) glog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).") ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false) defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "memory-reservation") // Verify, that cluster size is increased framework.ExpectNoError(WaitForClusterSizeFunc(f.Client, func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout)) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) }) It("should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp]", func() { framework.SkipUnlessProviderIs("gke") By("Creating new node-pool with one n1-standard-4 machine") const extraPoolName = "extra-pool" addNodePool(extraPoolName, "n1-standard-4", 1) defer deleteNodePool(extraPoolName) framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+1, resizeTimeout)) 
framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2)) framework.ExpectNoError(disableAutoscaler(extraPoolName, 1, 2)) }) It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func() { CreateHostPortPods(f, "host-port", nodeCount+2, false) defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "host-port") framework.ExpectNoError(WaitForClusterSizeFunc(f.Client, func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout)) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) }) It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func() { labels := map[string]string{"cluster-autoscaling-test.special-node": "true"} By("Finding the smallest MIG") minMig := "" minSize := nodeCount for mig, size := range originalSizes { if size <= minSize { minMig = mig minSize = size } } removeLabels := func(nodesToClean sets.String) { By("Removing labels from nodes") updateNodeLabels(c, nodesToClean, nil, labels) } nodes, err := GetGroupNodes(minMig) ExpectNoError(err) nodesSet := sets.NewString(nodes...) defer removeLabels(nodesSet) By(fmt.Sprintf("Annotating nodes of the smallest MIG(%s): %v", minMig, nodes)) updateNodeLabels(c, nodesSet, labels, nil) CreateNodeSelectorPods(f, "node-selector", minSize+1, labels, false) By("Waiting for new node to appear and annotating it") WaitForGroupSize(minMig, int32(minSize+1)) // Verify, that cluster size is increased framework.ExpectNoError(WaitForClusterSizeFunc(f.Client, func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout)) newNodes, err := GetGroupNodes(minMig) ExpectNoError(err) newNodesSet := sets.NewString(newNodes...) newNodesSet.Delete(nodes...) 
defer removeLabels(newNodesSet) By(fmt.Sprintf("Setting labels for new nodes: %v", newNodesSet.List())) updateNodeLabels(c, newNodesSet, labels, nil) framework.ExpectNoError(WaitForClusterSizeFunc(f.Client, func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout)) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) framework.ExpectNoError(framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "node-selector")) }) It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() { framework.SkipUnlessProviderIs("gke") By("Creating new node-pool with one n1-standard-4 machine") const extraPoolName = "extra-pool" addNodePool(extraPoolName, "n1-standard-4", 1) defer deleteNodePool(extraPoolName) framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+1, resizeTimeout)) framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2)) By("Creating rc with 2 pods too big to fit default-pool but fitting extra-pool") ReserveMemory(f, "memory-reservation", 2, 2*memCapacityMb, false) defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "memory-reservation") // Apparently GKE master is restarted couple minutes after the node pool is added // reseting all the timers in scale down code. Adding 5 extra minutes to workaround // this issue. // TODO: Remove the extra time when GKE restart is fixed. 
framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+2, scaleUpTimeout+5*time.Minute)) }) It("should correctly scale down after a node is not needed [Feature:ClusterSizeAutoscalingScaleDown]", func() { By("Manually increase cluster size") increasedSize := 0 newSizes := make(map[string]int) for key, val := range originalSizes { newSizes[key] = val + 2 increasedSize += val + 2 } setMigSizes(newSizes) framework.ExpectNoError(WaitForClusterSizeFunc(f.Client, func(size int) bool { return size >= increasedSize }, scaleUpTimeout)) By("Some node should be removed") framework.ExpectNoError(WaitForClusterSizeFunc(f.Client, func(size int) bool { return size < increasedSize }, scaleDownTimeout)) }) It("should correctly scale down after a node is not needed when there is non autoscaled pool[Feature:ClusterSizeAutoscalingScaleDown]", func() { framework.SkipUnlessProviderIs("gke") By("Manually increase cluster size") increasedSize := 0 newSizes := make(map[string]int) for key, val := range originalSizes { newSizes[key] = val + 2 increasedSize += val + 2 } setMigSizes(newSizes) framework.ExpectNoError(WaitForClusterSizeFunc(f.Client, func(size int) bool { return size >= increasedSize }, scaleUpTimeout)) const extraPoolName = "extra-pool" addNodePool(extraPoolName, "n1-standard-1", 3) defer deleteNodePool(extraPoolName) framework.ExpectNoError(WaitForClusterSizeFunc(f.Client, func(size int) bool { return size >= increasedSize+3 }, scaleUpTimeout)) By("Some node should be removed") // Apparently GKE master is restarted couple minutes after the node pool is added // reseting all the timers in scale down code. Adding 10 extra minutes to workaround // this issue. // TODO: Remove the extra time when GKE restart is fixed. 
framework.ExpectNoError(WaitForClusterSizeFunc(f.Client, func(size int) bool { return size < increasedSize+3 }, scaleDownTimeout+10*time.Minute)) }) }) func getGKEClusterUrl() string { out, err := exec.Command("gcloud", "auth", "print-access-token").Output() framework.ExpectNoError(err) token := strings.Replace(string(out), "\n", "", -1) return fmt.Sprintf("%s/v1/projects/%s/zones/%s/clusters/%s?access_token=%s", gkeEndpoint, framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone, framework.TestContext.CloudConfig.Cluster, token) } func isAutoscalerEnabled(expectedMinNodeCountInTargetPool int) (bool, error) { resp, err := http.Get(getGKEClusterUrl()) if err != nil { return false, err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return false, err } strBody := string(body) glog.Infof("Cluster config %s", strBody) if strings.Contains(strBody, "\"minNodeCount\": "+strconv.Itoa(expectedMinNodeCountInTargetPool)) { return true, nil } return false, nil } func enableAutoscaler(nodePool string, minCount, maxCount int) error { if nodePool == "default-pool" { glog.Infof("Using gcloud to enable autoscaling for pool %s", nodePool) output, err := exec.Command("gcloud", "alpha", "container", "clusters", "update", framework.TestContext.CloudConfig.Cluster, "--enable-autoscaling", "--min-nodes="+strconv.Itoa(minCount), "--max-nodes="+strconv.Itoa(maxCount), "--node-pool="+nodePool, "--project="+framework.TestContext.CloudConfig.ProjectID, "--zone="+framework.TestContext.CloudConfig.Zone).Output() if err != nil { return fmt.Errorf("Failed to enable autoscaling: %v", err) } glog.Infof("Config update result: %s", output) } else { glog.Infof("Using direct api access to enable autoscaling for pool %s", nodePool) updateRequest := "{" + " \"update\": {" + " \"desiredNodePoolId\": \"" + nodePool + "\"," + " \"desiredNodePoolAutoscaling\": {" + " \"enabled\": \"true\"," + " \"minNodeCount\": \"" + strconv.Itoa(minCount) + 
"\"," + " \"maxNodeCount\": \"" + strconv.Itoa(maxCount) + "\"" + " }" + " }" + "}" url := getGKEClusterUrl() glog.Infof("Using gke api url %s", url) putResult, err := doPut(url, updateRequest) if err != nil { return fmt.Errorf("Failed to put %s: %v", url, err) } glog.Infof("Config update result: %s", putResult) } for startTime := time.Now(); startTime.Add(gkeUpdateTimeout).After(time.Now()); time.Sleep(30 * time.Second) { if val, err := isAutoscalerEnabled(minCount); err == nil && val { return nil } } return fmt.Errorf("autoscaler not enabled") } func disableAutoscaler(nodePool string, minCount, maxCount int) error { if nodePool == "default-pool" { glog.Infof("Using gcloud to disable autoscaling for pool %s", nodePool) output, err := exec.Command("gcloud", "alpha", "container", "clusters", "update", framework.TestContext.CloudConfig.Cluster, "--no-enable-autoscaling", "--node-pool="+nodePool, "--project="+framework.TestContext.CloudConfig.ProjectID, "--zone="+framework.TestContext.CloudConfig.Zone).Output() if err != nil { return fmt.Errorf("Failed to enable autoscaling: %v", err) } glog.Infof("Config update result: %s", output) } else { glog.Infof("Using direct api access to disable autoscaling for pool %s", nodePool) updateRequest := "{" + " \"update\": {" + " \"desiredNodePoolId\": \"" + nodePool + "\"," + " \"desiredNodePoolAutoscaling\": {" + " \"enabled\": \"false\"," + " }" + " }" + "}" url := getGKEClusterUrl() glog.Infof("Using gke api url %s", url) putResult, err := doPut(url, updateRequest) if err != nil { return fmt.Errorf("Failed to put %s: %v", url, err) } glog.Infof("Config update result: %s", putResult) } for startTime := time.Now(); startTime.Add(gkeUpdateTimeout).After(time.Now()); time.Sleep(30 * time.Second) { if val, err := isAutoscalerEnabled(minCount); err == nil && !val { return nil } } return fmt.Errorf("autoscaler still enabled") } func addNodePool(name string, machineType string, numNodes int) { output, err := exec.Command("gcloud", 
"alpha", "container", "node-pools", "create", name, "--quiet", "--machine-type="+machineType, "--num-nodes="+strconv.Itoa(numNodes), "--project="+framework.TestContext.CloudConfig.ProjectID, "--zone="+framework.TestContext.CloudConfig.Zone, "--cluster="+framework.TestContext.CloudConfig.Cluster).CombinedOutput() framework.ExpectNoError(err) glog.Infof("Creating node-pool %s: %s", name, output) } func deleteNodePool(name string) { glog.Infof("Deleting node pool %s", name) output, err := exec.Command("gcloud", "alpha", "container", "node-pools", "delete", name, "--quiet", "--project="+framework.TestContext.CloudConfig.ProjectID, "--zone="+framework.TestContext.CloudConfig.Zone, "--cluster="+framework.TestContext.CloudConfig.Cluster).CombinedOutput() if err != nil { glog.Infof("Error: %v", err) } glog.Infof("Node-pool deletion output: %s", output) } func doPut(url, content string) (string, error) { req, err := http.NewRequest("PUT", url, bytes.NewBuffer([]byte(content))) req.Header.Set("Content-Type", "application/json") client := &http.Client{} resp, err := client.Do(req) if err != nil { return "", err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return "", err } strBody := string(body) return strBody, nil } func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nodeSelector map[string]string, expectRunning bool) { By(fmt.Sprintf("Running RC which reserves host port and defines node selector")) config := &testutils.RCConfig{ Client: f.Client, Name: "node-selector", Namespace: f.Namespace.Name, Timeout: defaultTimeout, Image: framework.GetPauseImageName(f.Client), Replicas: replicas, HostPorts: map[string]int{"port1": 4321}, NodeSelector: map[string]string{"cluster-autoscaling-test.special-node": "true"}, } err := framework.RunRC(*config) if expectRunning { framework.ExpectNoError(err) } } func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) { By(fmt.Sprintf("Running RC 
which reserves host port")) config := &testutils.RCConfig{ Client: f.Client, Name: id, Namespace: f.Namespace.Name, Timeout: defaultTimeout, Image: framework.GetPauseImageName(f.Client), Replicas: replicas, HostPorts: map[string]int{"port1": 4321}, } err := framework.RunRC(*config) if expectRunning { framework.ExpectNoError(err) } } func ReserveCpu(f *framework.Framework, id string, replicas, millicores int) { By(fmt.Sprintf("Running RC which reserves %v millicores", millicores)) request := int64(millicores / replicas) config := &testutils.RCConfig{ Client: f.Client, Name: id, Namespace: f.Namespace.Name, Timeout: defaultTimeout, Image: framework.GetPauseImageName(f.Client), Replicas: replicas, CpuRequest: request, } framework.ExpectNoError(framework.RunRC(*config)) } func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool) { By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes)) request := int64(1024 * 1024 * megabytes / replicas) config := &testutils.RCConfig{ Client: f.Client, Name: id, Namespace: f.Namespace.Name, Timeout: defaultTimeout, Image: framework.GetPauseImageName(f.Client), Replicas: replicas, MemRequest: request, } err := framework.RunRC(*config) if expectRunning { framework.ExpectNoError(err) } } // WaitForClusterSize waits until the cluster size matches the given function. func WaitForClusterSizeFunc(c *client.Client, sizeFunc func(int) bool, timeout time.Duration) error { for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) { nodes, err := c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector()}) if err != nil { glog.Warningf("Failed to list nodes: %v", err) continue } numNodes := len(nodes.Items) // Filter out not-ready nodes. 
framework.FilterNodes(nodes, func(node api.Node) bool { return framework.IsNodeConditionSetAsExpected(&node, api.NodeReady, true) }) numReady := len(nodes.Items) if numNodes == numReady && sizeFunc(numReady) { glog.Infof("Cluster has reached the desired size") return nil } glog.Infof("Waiting for cluster, current size %d, not ready nodes %d", numNodes, numNodes-numReady) } return fmt.Errorf("timeout waiting %v for appropriate cluster size", timeout) } func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c *client.Client) error { var notready []string for start := time.Now(); time.Now().Before(start.Add(scaleUpTimeout)); time.Sleep(20 * time.Second) { pods, err := c.Pods(f.Namespace.Name).List(api.ListOptions{}) if err != nil { return fmt.Errorf("failed to get pods: %v", err) } notready = make([]string, 0) for _, pod := range pods.Items { ready := false for _, c := range pod.Status.Conditions { if c.Type == api.PodReady && c.Status == api.ConditionTrue { ready = true } } if !ready { notready = append(notready, pod.Name) } } if len(notready) == 0 { glog.Infof("All pods ready") return nil } glog.Infof("Some pods are not ready yet: %v", notready) } glog.Info("Timeout on waiting for pods being ready") glog.Info(framework.RunKubectlOrDie("get", "pods", "-o", "json", "--all-namespaces")) glog.Info(framework.RunKubectlOrDie("get", "nodes", "-o", "json")) // Some pods are still not running. return fmt.Errorf("Some pods are still not running: %v", notready) } func setMigSizes(sizes map[string]int) { for mig, desiredSize := range sizes { currentSize, err := GroupSize(mig) framework.ExpectNoError(err) if desiredSize != currentSize { By(fmt.Sprintf("Setting size of %s to %d", mig, desiredSize)) err = ResizeGroup(mig, int32(desiredSize)) framework.ExpectNoError(err) } } }
shakamunyi/kubernetes
test/e2e/cluster_size_autoscaling.go
GO
apache-2.0
21,163
' Copyright 2015, Google Inc. All Rights Reserved. ' ' Licensed under the Apache License, Version 2.0 (the "License"); ' you may not use this file except in compliance with the License. ' You may obtain a copy of the License at ' ' http://www.apache.org/licenses/LICENSE-2.0 ' ' Unless required by applicable law or agreed to in writing, software ' distributed under the License is distributed on an "AS IS" BASIS, ' WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ' See the License for the specific language governing permissions and ' limitations under the License. ' Author: api.anash@gmail.com (Anash P. Oommen) Imports Google.Api.Ads.AdWords.Lib Imports Google.Api.Ads.AdWords.Util.Reports Imports Google.Api.Ads.AdWords.v201502 Imports Google.Api.Ads.Common.Util.Reports Imports System Imports System.Collections.Generic Imports System.IO Namespace Google.Api.Ads.AdWords.Examples.VB.v201502 ''' <summary> ''' This code example gets and downloads a criteria Ad Hoc report from an XML ''' report definition. ''' </summary> Public Class DownloadCriteriaReport Inherits ExampleBase ''' <summary> ''' Main method, to run this code example as a standalone application. ''' </summary> ''' <param name="args">The command line arguments.</param> Public Shared Sub Main(ByVal args As String()) Dim codeExample As New DownloadCriteriaReport Console.WriteLine(codeExample.Description) Try Dim fileName As String = "INSERT_OUTPUT_FILE_NAME" codeExample.Run(New AdWordsUser, fileName) Catch ex As Exception Console.WriteLine("An exception occurred while running this code example. {0}", _ ExampleUtilities.FormatException(ex)) End Try End Sub ''' <summary> ''' Returns a description about the code example. ''' </summary> Public Overrides ReadOnly Property Description() As String Get Return "This code example gets and downloads a criteria Ad Hoc report from an XML report" & _ " definition." End Get End Property ''' <summary> ''' Runs the code example. 
''' </summary> ''' <param name="user">The AdWords user.</param> ''' <param name="fileName">The file to which the report is downloaded. ''' </param> Public Sub Run(ByVal user As AdWordsUser, ByVal fileName As String) Dim definition As New ReportDefinition definition.reportName = "Last 7 days CRITERIA_PERFORMANCE_REPORT" definition.reportType = ReportDefinitionReportType.CRITERIA_PERFORMANCE_REPORT definition.downloadFormat = DownloadFormat.GZIPPED_CSV definition.dateRangeType = ReportDefinitionDateRangeType.LAST_7_DAYS ' Create the selector. Dim selector As New Selector selector.fields = New String() {"CampaignId", "AdGroupId", "Id", "CriteriaType", "Criteria", _ "FinalUrls", "Clicks", "Impressions", "Cost"} Dim predicate As New Predicate predicate.field = "Status" predicate.operator = PredicateOperator.IN predicate.values = New String() {"ENABLED", "PAUSED"} selector.predicates = New Predicate() {predicate} definition.selector = selector definition.includeZeroImpressions = True Dim filePath As String = ExampleUtilities.GetHomeDir() & Path.DirectorySeparatorChar & _ fileName Try Dim utilities As New ReportUtilities(user, "v201502", definition) Using reportResponse As ReportResponse = utilities.GetResponse() reportResponse.Save(filePath) End Using Console.WriteLine("Report was downloaded to '{0}'.", filePath) Catch ex As Exception Throw New System.ApplicationException("Failed to download report.", ex) End Try End Sub End Class End Namespace
stevemanderson/googleads-dotnet-lib
examples/AdXBuyer/Vb/v201502/Reporting/DownloadCriteriaReport.vb
Visual Basic
apache-2.0
3,826
require_relative './shared_examples/schema_validation' describe 'Envelope schema-json' do it_behaves_like 'json-schema validation', :envelope end
learningtapestry/learningregistry
spec/schemas/envelope_spec.rb
Ruby
apache-2.0
149
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). import os import subprocess from typing import Sequence from pants.util.osutil import get_os_name class IdeaNotFoundException(Exception): """Could not find Idea executable.""" class OpenError(Exception): """Indicates an error opening a file in a desktop application.""" def _mac_open_with_idea(file_: str, lookup_paths: list) -> None: ideaPath = next((path for path in lookup_paths if os.path.isdir(path)), None) if ideaPath is not None: subprocess.call(["open", "-a", ideaPath, file_]) else: raise IdeaNotFoundException( "Could not find Idea executable in the following locations:\n{}".format( "\n".join(lookup_paths) ) ) def _mac_open(files: Sequence[str]) -> None: subprocess.call(["open"] + list(files)) def _linux_open_with_idea(file_: str, lookup_paths: list) -> None: cmd = "idea" if not _cmd_exists(cmd): raise OpenError( "The program '{}' isn't in your PATH. Please install and re-run this " "goal.".format(cmd) ) subprocess.Popen(["nohup", cmd, file_]) def _linux_open(files: Sequence[str]) -> None: cmd = "xdg-open" if not _cmd_exists(cmd): raise OpenError( "The program '{}' isn't in your PATH. Please install and re-run this " "goal.".format(cmd) ) for f in list(files): subprocess.call([cmd, f]) # From: http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python def _cmd_exists(cmd: str) -> bool: return ( subprocess.call( ["/usr/bin/which", cmd], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) == 0 ) _OPENER_BY_OS = {"darwin": _mac_open, "linux": _linux_open} _IDEA_BY_OS = {"darwin": _mac_open_with_idea, "linux": _linux_open_with_idea} def idea_open(file_: str, lookup_paths: list) -> None: """Attempts to open the given files using the preferred desktop viewer or editor. :raises :class:`OpenError`: if there is a problem opening any of the files. 
""" if file_: osname = get_os_name() opener = _IDEA_BY_OS.get(osname) if opener: opener(file_, lookup_paths) else: raise OpenError("Open currently not supported for " + osname) def ui_open(*files: str) -> None: """Attempts to open the given files using the preferred desktop viewer or editor. :raises :class:`OpenError`: if there is a problem opening any of the files. """ if files: osname = get_os_name() opener = _OPENER_BY_OS.get(osname) if opener: opener(files) else: raise OpenError("Open currently not supported for " + osname)
tdyas/pants
src/python/pants/util/desktop.py
Python
apache-2.0
2,880
/** * Copyright 2019 The AMP HTML Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS-IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ const { createCtrlcHandler, exitCtrlcHandler, } = require('../common/ctrlcHandler'); const { displayLifecycleDebugging, } = require('../compile/debug-compilation-lifecycle'); const {cleanupBuildDir, closureCompile} = require('../compile/compile'); const {compileCss} = require('./css'); const {extensions, maybeInitializeExtensions} = require('./extension-helpers'); const {log} = require('../common/logging'); const {typecheckNewServer} = require('../server/typescript-compile'); /** * Runs closure compiler's type checker against all AMP code. 
* @return {!Promise<void>} */ async function checkTypes() { const handlerProcess = createCtrlcHandler('check-types'); process.env.NODE_ENV = 'production'; cleanupBuildDir(); maybeInitializeExtensions(); typecheckNewServer(); const srcFiles = [ 'src/amp.js', 'src/amp-shadow.js', 'src/inabox/amp-inabox.js', 'ads/alp/install-alp.js', 'ads/inabox/inabox-host.js', 'src/web-worker/web-worker.js', ]; const extensionValues = Object.keys(extensions).map((key) => extensions[key]); const extensionSrcs = extensionValues .filter((ext) => !ext.noTypeCheck) .map((ext) => `extensions/${ext.name}/${ext.version}/${ext.name}.js`) .sort(); await compileCss(); log('Checking types...'); displayLifecycleDebugging(); await Promise.all([ closureCompile(srcFiles, './dist', 'src-check-types.js', { include3pDirectories: true, includePolyfills: true, extraGlobs: ['src/inabox/*.js', '!node_modules/preact'], typeCheckOnly: true, warningLevel: 'QUIET', // TODO(amphtml): Make this 'DEFAULT' }), closureCompile(extensionSrcs, './dist', 'extensions-check-types.js', { include3pDirectories: true, includePolyfills: true, extraGlobs: ['src/inabox/*.js', '!node_modules/preact'], typeCheckOnly: true, warningLevel: 'QUIET', // TODO(amphtml): Make this 'DEFAULT' }), // Type check 3p/ads code. 
closureCompile( ['3p/integration.js'], './dist', 'integration-check-types.js', { externs: ['ads/ads.extern.js'], include3pDirectories: true, includePolyfills: true, typeCheckOnly: true, warningLevel: 'QUIET', // TODO(amphtml): Make this 'DEFAULT' } ), closureCompile( ['3p/ampcontext-lib.js'], './dist', 'ampcontext-check-types.js', { externs: ['ads/ads.extern.js'], include3pDirectories: true, includePolyfills: true, typeCheckOnly: true, warningLevel: 'QUIET', // TODO(amphtml): Make this 'DEFAULT' } ), closureCompile( ['3p/iframe-transport-client-lib.js'], './dist', 'iframe-transport-client-check-types.js', { externs: ['ads/ads.extern.js'], include3pDirectories: true, includePolyfills: true, typeCheckOnly: true, warningLevel: 'QUIET', // TODO(amphtml): Make this 'DEFAULT' } ), ]); exitCtrlcHandler(handlerProcess); } module.exports = { checkTypes, }; /* eslint "google-camelcase/google-camelcase": 0 */ checkTypes.description = 'Check source code for JS type errors'; checkTypes.flags = { closure_concurrency: 'Sets the number of concurrent invocations of closure', debug: 'Outputs the file contents during compilation lifecycles', warning_level: "Optionally sets closure's warning level to one of [quiet, default, verbose]", };
prateekbh/amphtml
build-system/tasks/check-types.js
JavaScript
apache-2.0
4,088
/*- * * * Copyright 2015 Skymind,Inc. * * * * Licensed under the Apache License, Version 2.0 (the "License"); * * you may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. * */ package org.deeplearning4j.optimize.listeners; import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.optimize.api.IterationListener; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Date; import java.util.concurrent.atomic.AtomicLong; /** * Time Iteration Listener. * This listener displays into INFO logs the remaining time in minutes and the date of the end of the process. 
*/ public class TimeIterationListener implements IterationListener { private static final long serialVersionUID = 1L; private boolean invoked; private long start; private int iterationCount; private AtomicLong iterationCounter = new AtomicLong(0); private static final Logger log = LoggerFactory.getLogger(TimeIterationListener.class); /** * Constructor * @param iterationCount The global number of iteration of the process */ public TimeIterationListener(int iterationCount) { this.iterationCount = iterationCount; start = System.currentTimeMillis(); } @Override public void iterationDone(Model model, int iteration, int epoch) { long currentIteration = iterationCounter.incrementAndGet(); long elapsed = System.currentTimeMillis() - start; long remaining = (iterationCount - currentIteration) * elapsed / currentIteration; long minutes = remaining / (1000 * 60); Date date = new Date(start + elapsed + remaining); log.info("Remaining time : " + minutes + "mn - End expected : " + date.toString()); } }
kinbod/deeplearning4j
deeplearning4j-nn/src/main/java/org/deeplearning4j/optimize/listeners/TimeIterationListener.java
Java
apache-2.0
2,227
/* * tkUnixXId.c -- * * Copyright (c) 1993 The Regents of the University of California. * Copyright (c) 1994-1997 Sun Microsystems, Inc. * * See the file "license.terms" for information on usage and redistribution of * this file, and for a DISCLAIMER OF ALL WARRANTIES. */ #include "tkUnixInt.h" /* *---------------------------------------------------------------------- * * Tk_FreeXId -- * * This function is called to indicate that an X resource identifier is * now free. * * Results: * None. * * Side effects: * The identifier is added to the stack of free identifiers for its * display, so that it can be re-used. * *---------------------------------------------------------------------- */ void Tk_FreeXId( Display *display, /* Display for which xid was allocated. */ XID xid) /* Identifier that is no longer in use. */ { /* * This does nothing, because the XC-MISC extension takes care of * freeing XIDs for us. It has been a standard X11 extension for * about 15 years as of 2008. Keith Packard and another X.org * developer suggested that we remove the previous code that used: * #define XLIB_ILLEGAL_ACCESS. */ } /* *---------------------------------------------------------------------- * * Tk_GetPixmap -- * * Same as the XCreatePixmap function except that it manages resource * identifiers better. * * Results: * Returns a new pixmap. * * Side effects: * None. * *---------------------------------------------------------------------- */ Pixmap Tk_GetPixmap( Display *display, /* Display for new pixmap. */ Drawable d, /* Drawable where pixmap will be used. */ int width, int height, /* Dimensions of pixmap. */ int depth) /* Bits per pixel for pixmap. */ { return XCreatePixmap(display, d, (unsigned) width, (unsigned) height, (unsigned) depth); } /* *---------------------------------------------------------------------- * * Tk_FreePixmap -- * * Same as the XFreePixmap function except that it also marks the * resource identifier as free. * * Results: * None. 
* * Side effects: * The pixmap is freed in the X server and its resource identifier is * saved for re-use. * *---------------------------------------------------------------------- */ void Tk_FreePixmap( Display *display, /* Display for which pixmap was allocated. */ Pixmap pixmap) /* Identifier for pixmap. */ { XFreePixmap(display, pixmap); Tk_FreeXId(display, (XID) pixmap); } /* *---------------------------------------------------------------------- * * TkpScanWindowId -- * * Given a string, produce the corresponding Window Id. * * Results: * The return value is normally TCL_OK; in this case *idPtr will be set * to the Window value equivalent to string. If string is improperly * formed then TCL_ERROR is returned and an error message will be left in * the interp's result. * * Side effects: * None. * *---------------------------------------------------------------------- */ int TkpScanWindowId( Tcl_Interp *interp, const char *string, Window *idPtr) { int value; if (Tcl_GetInt(interp, string, &value) != TCL_OK) { return TCL_ERROR; } *idPtr = (Window) value; return TCL_OK; } /* * Local Variables: * mode: c * c-basic-offset: 4 * fill-column: 78 * End: */
bitkeeper-scm/bitkeeper
src/gui/tcltk/tk/unix/tkUnixXId.c
C
apache-2.0
3,371
/* * Copyright (C) 2013 OTAPlatform * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.beerbong.otaplatform.updater.impl; import java.util.ArrayList; import org.apache.http.client.utils.URLEncodedUtils; import org.apache.http.message.BasicNameValuePair; import org.json.JSONObject; import com.beerbong.otaplatform.updater.OUCPackage; import com.beerbong.otaplatform.updater.Updater; import com.beerbong.otaplatform.util.Constants; import com.beerbong.otaplatform.util.HttpStringReader; public class OUCUpdater extends Updater { public static final String URL = "https://www.otaupdatecenter.pro/pages/romupdate.php"; public static final String PROPERTY_OTA_ID = "otaupdater.otaid"; public static final String PROPERTY_OTA_VER = "otaupdater.otaver"; public static final String PROPERTY_OTA_TIME = "otaupdater.otatime"; private UpdaterListener mListener; private boolean mScanning = false; public OUCUpdater(UpdaterListener listener) { mListener = listener; } @Override public String getName() { return Constants.getProperty(PROPERTY_OTA_ID); } @Override public String getDeveloperId() { return Constants.getProperty(PROPERTY_OTA_ID); } @Override public int getVersion() { String version = Constants.getProperty(PROPERTY_OTA_TIME); if (version != null) { try { version = version.replace("-", ""); return Integer.parseInt(version); } catch (NumberFormatException ex) { } } return -1; } @Override public void onReadEnd(String buffer) { mScanning = false; try { final JSONObject json = new 
JSONObject(buffer); if (json.has("error")) { String error = json.getString("error"); mListener.versionError(error); return; } PackageInfo info = new OUCPackage(json); mListener.versionFound(info); } catch (Exception ex) { ex.printStackTrace(); mListener.versionError(null); } } @Override public void onReadError(Exception ex) { mListener.versionError(null); } @Override public void searchVersion() { mScanning = true; ArrayList<BasicNameValuePair> params = new ArrayList<BasicNameValuePair>(); params.add(new BasicNameValuePair("device", android.os.Build.DEVICE.toLowerCase())); params.add(new BasicNameValuePair("rom", getName())); new HttpStringReader(this).execute(URL + "?" + URLEncodedUtils.format(params, "UTF-8")); } @Override public boolean isScanning() { return mScanning; } }
beerbong/OTAPlatform
src/com/beerbong/otaplatform/updater/impl/OUCUpdater.java
Java
apache-2.0
3,380
#!/usr/bin/python """ Copyright 2015 Ericsson AB Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import numpy import math import datetime import requests import json import re from operator import itemgetter from bson.objectid import ObjectId from pyspark import SparkContext, SparkConf from pymongo import MongoClient from pyspark.mllib.clustering import KMeans, KMeansModel from numpy import array from math import sqrt from geopy.distance import vincenty # Weights W_1 = 1.2 W_2 = .8 DISTANCE_THRESHOLD = 0.3 NUM_OF_IT = 8 MIN_LATITUDE = 59.78 MAX_LATITUDE = 59.92 MIN_LONGITUDE = 17.53 MAX_LONGITUDE = 17.75 MIN_COORDINATE = -13750 MAX_COORDINATE = 13750 CIRCLE_CONVERTER = math.pi / 43200 NUMBER_OF_RECOMMENDATIONS = 5 client2 = MongoClient('130.238.15.114') db2 = client2.monad1 client3 = MongoClient('130.238.15.114') db3 = client3.monad1 start = datetime.datetime.now() dontGoBehind = 0 def time_approximation(lat1, lon1, lat2, lon2): point1 = (lat1, lon1) point2 = (lat2, lon2) distance = vincenty(point1, point2).kilometers return int(round(distance / 10 * 60)) def retrieve_requests(): TravelRequest = db2.TravelRequest return TravelRequest def populate_requests(TravelRequest): results = db2.TravelRequest.find() for res in results: dist = time_approximation(res['startPositionLatitude'], res['startPositionLongitude'], res['endPositionLatitude'], res['endPositionLongitude']) if res['startTime'] == "null": users.append((res['userID'],(res['startPositionLatitude'], res['startPositionLongitude'], 
res['endPositionLatitude'], res['endPositionLongitude'], (res['endTime'] - datetime.timedelta(minutes = dist)).time(), (res['endTime']).time()))) elif res['endTime'] == "null": users.append((res['userID'],(res['startPositionLatitude'], res['startPositionLongitude'], res['endPositionLatitude'], res['endPositionLongitude'], (res['startTime']).time(), (res['startTime'] + datetime.timedelta(minutes = dist)).time()))) else: users.append((res['userID'],(res['startPositionLatitude'], res['startPositionLongitude'], res['endPositionLatitude'], res['endPositionLongitude'], (res['startTime']).time(), (res['endTime']).time()))) def get_today_timetable(): TimeTable = db2.TimeTable first = datetime.datetime.today() first = first.replace(hour = 0, minute = 0, second = 0, microsecond = 0) route = TimeTable.find({'date': {'$gte': first}}) return route def populate_timetable(): route = get_today_timetable() waypoints = [] for res in route: for res1 in res['timetable']: for res2 in db2.BusTrip.find({'_id': res1}): for res3 in res2['trajectory']: for res4 in db2.BusStop.find({'_id':res3['busStop']}): waypoints.append((res3['time'],res4['latitude'], res4['longitude'], res4['name'])) routes.append((res1, waypoints)) waypoints = [] def iterator(waypoints): Waypoints = [] for res in waypoints: Waypoints.append((lat_normalizer(res[1]), lon_normalizer(res[2]), time_normalizer(to_coordinates(to_seconds(res[0]))[0]), time_normalizer(to_coordinates(to_seconds(res[0]))[1]), res[3])) return Waypoints # Converting time object to seconds def to_seconds(dt): total_time = dt.hour * 3600 + dt.minute * 60 + dt.second return total_time # Mapping seconds value to (x, y) coordinates def to_coordinates(secs): angle = float(secs) * CIRCLE_CONVERTER x = 13750 * math.cos(angle) y = 13750 * math.sin(angle) return x, y # Normalization functions def time_normalizer(value): new_value = float((float(value) - MIN_COORDINATE) / (MAX_COORDINATE - MIN_COORDINATE)) return new_value /2 def lat_normalizer(value): 
new_value = float((float(value) - MIN_LATITUDE) / (MAX_LATITUDE - MIN_LATITUDE)) return new_value def lon_normalizer(value): new_value = float((float(value) - MIN_LONGITUDE) / (MAX_LONGITUDE - MIN_LONGITUDE)) return new_value # Function that implements the kmeans algorithm to group users requests def kmeans(iterations, theRdd): def error(point): center = clusters.centers[clusters.predict(point)] return sqrt(sum([x**2 for x in (point - center)])) clusters = KMeans.train(theRdd, iterations, maxIterations=10, runs=10, initializationMode="random") WSSSE = theRdd.map(lambda point: error(point)).reduce(lambda x, y: x + y) return WSSSE, clusters # Function that runs iteratively the kmeans algorithm to find the best number # of clusters to group the user's request def optimalk(theRdd): results = [] for i in range(NUM_OF_IT): results.append(kmeans(i+1, theRdd)[0]) optimal = [] for i in range(NUM_OF_IT-1): optimal.append(results[i] - results[i+1]) optimal1 = [] for i in range(NUM_OF_IT-2): optimal1.append(optimal[i] - optimal[i+1]) return (optimal1.index(max(optimal1)) + 2) def back_to_coordinates(lat, lon): new_lat = (lat * (MAX_LATITUDE - MIN_LATITUDE)) + MIN_LATITUDE new_lon = (lon * (MAX_LONGITUDE - MIN_LONGITUDE)) + MIN_LONGITUDE return new_lat, new_lon def nearest_stops(lat, lon, dist): stops = [] url = "http://130.238.15.114:9998/get_nearest_stops_from_coordinates" data = {'lon': lon, 'lat': lat, 'distance': dist} headers = {'Content-type': 'application/x-www-form-urlencoded'} answer = requests.post(url, data = data, headers = headers) p = re.compile("(u'\w*')") answer = p.findall(answer.text) answer = [x.encode('UTF8') for x in answer] answer = [x[2:-1] for x in answer] answer = list(set(answer)) return answer # The function that calculate the distance from the given tuple to all the # cluster centroids and returns the minimum disstance def calculate_distance_departure(tup1): dist_departure = [] pos_departure = [] cent_num = 0 for i in selected_centroids: position = 
-1 min_value = 1000 min_position = 0 centroid_departure = (i[0]*W_1, i[1]*W_1,i[4]*W_2, i[5]*W_2) centroid_departure = numpy.array(centroid_departure) trajectory = [] for l in range(len(tup1)-1): position = position + 1 if(tup1[l][4] in nearest_stops_dep[cent_num]): current_stop = (numpy.array(tup1[l][:4]) * numpy.array((W_1,W_1,W_2,W_2))) distance = numpy.linalg.norm(centroid_departure - current_stop) if (distance < min_value): min_value = distance min_position = position result = min_value dist_departure.append(result) pos_departure.append(min_position) cent_num += 1 return {"dist_departure":dist_departure,"pos_departure":pos_departure} def calculate_distance_arrival(tup1,pos_departure): dist_arrival = [] pos_arrival = [] counter=-1 cent_num = 0 for i in selected_centroids: min_value = 1000 min_position = 0 centroid_arrival = (i[2]*W_1, i[3]*W_1, i[6]*W_2, i[7]*W_2) centroid_arrival = numpy.array(centroid_arrival) counter = counter + 1 position = pos_departure[counter] for l in range(pos_departure[counter]+1, len(tup1)): position = position + 1 if(tup1[l][4] in nearest_stops_arr[cent_num]): current_stop = (numpy.array(tup1[l][:4]) * numpy.array((W_1,W_1,W_2,W_2))) distance = numpy.linalg.norm(centroid_arrival - current_stop) if (distance < min_value): min_value = distance min_position = position result = min_value dist_arrival.append(result) pos_arrival.append(min_position) cent_num += 1 return {"dist_arrival":dist_arrival,"pos_arrival":pos_arrival} def remove_duplicates(alist): return list(set(map(lambda (w, x, y, z): (w, y, z), alist))) def recommendations_to_return(alist): for rec in alist: trip = db2.BusTrip.find_one({'_id': rec[0]}) traj = trip['trajectory'][rec[2]:rec[3]+1] trajectory = [] names_only = [] for stop in traj: name_and_time = (db2.BusStop.find_one({"_id": stop['busStop']}) ['name']), stop['time'] trajectory.append(name_and_time) names_only.append(name_and_time[0]) busid = 1.0 line = trip['line'] result = (int(line), int(busid), names_only[0], 
names_only[-1], names_only, trajectory[0][1], trajectory[-1][1], rec[0]) to_return.append(result) def recommendations_to_db(user, alist): rec_list = [] for item in to_return: o_id = ObjectId() line = item[0] bus_id = item[1] start_place = item[2] end_place = item[3] start_time = item[5] end_time = item[6] bus_trip_id = item[7] request_time = "null" feedback = -1 request_id = "null" next_trip = "null" booked = False trajectory = item[4] new_user_trip = { "_id":o_id, "userID" : user, "line" : line, "busID" : bus_id, "startBusStop" : start_place, "endBusStop" : end_place, "startTime" : start_time, "busTripID" : bus_trip_id, "endTime" : end_time, "feedback" : feedback, "trajectory" : trajectory, "booked" : booked } new_recommendation = { "userID": user, "userTrip": o_id } db3.UserTrip.insert(new_user_trip) db3.TravelRecommendation.insert(new_recommendation) def empty_past_recommendations(): db3.TravelRecommendation.drop() if __name__ == "__main__": user_ids = [] users = [] routes = [] user_ids = [] sc = SparkContext() populate_timetable() my_routes = sc.parallelize(routes, 8) my_routes = my_routes.map(lambda (x,y): (x, iterator(y))).cache() req = retrieve_requests() populate_requests(req) start = datetime.datetime.now() initial_rdd = sc.parallelize(users, 4).cache() user_ids_rdd = (initial_rdd.map(lambda (x,y): (x,1)) .reduceByKey(lambda a, b: a + b) .collect()) ''' for user in user_ids_rdd: user_ids.append(user[0]) ''' empty_past_recommendations() user_ids = [] user_ids.append(1) for userId in user_ids: userId = 1 recommendations = [] transition = [] final_recommendation = [] selected_centroids = [] routes_distances = [] to_return = [] nearest_stops_dep = [] nearest_stops_arr = [] my_rdd = (initial_rdd.filter(lambda (x,y): x == userId) .map(lambda (x,y): y)).cache() my_rdd = (my_rdd.map(lambda x: (x[0], x[1], x[2], x[3], to_coordinates(to_seconds(x[4])), to_coordinates(to_seconds(x[5])))) .map(lambda (x1, x2, x3, x4, (x5, x6), (x7, x8)): (lat_normalizer(x1), 
lon_normalizer(x2), lat_normalizer(x3), lon_normalizer(x4), time_normalizer(x5), time_normalizer(x6), time_normalizer(x7), time_normalizer(x8)))) selected_centroids = kmeans(4, my_rdd)[1].centers for i in range(len(selected_centroids)): cent_lat, cent_long = back_to_coordinates(selected_centroids[i][0], selected_centroids[i][1]) nearest_stops_dep.append(nearest_stops(cent_lat, cent_long, 200)) cent_lat, cent_long = back_to_coordinates(selected_centroids[i][2], selected_centroids[i][3]) nearest_stops_arr.append(nearest_stops(cent_lat, cent_long, 200)) routes_distances = my_routes.map(lambda x: (x[0], calculate_distance_departure(x[1])['dist_departure'], calculate_distance_arrival(x[1], calculate_distance_departure(x[1])['pos_departure'])['dist_arrival'], calculate_distance_departure(x[1])['pos_departure'], calculate_distance_arrival(x[1], calculate_distance_departure(x[1])['pos_departure'])['pos_arrival'])) for i in range(len(selected_centroids)): sort_route = (routes_distances.map(lambda (v, w, x, y, z): (v, w[i] + x[i], y[i], z[i])) .sortBy(lambda x:x[1])) final_recommendation.append((sort_route .take(NUMBER_OF_RECOMMENDATIONS))) for sug in final_recommendation: for i in range(len(sug)): temp = [] for j in range(len(sug[i])): temp.append(sug[i][j]) recommendations.append(temp) recommendations.sort(key=lambda x: x[1]) recommendations_final = [] for rec in recommendations: if abs(rec[2] - rec[3]) > 1 and rec[1] < DISTANCE_THRESHOLD: recommendations_final.append(rec) recommendations = recommendations_final[:10] recommendations_to_return(recommendations) recommendations_to_db(userId, to_return)
EricssonResearch/monad
TravelRecommendation/TravelRecommendation_faster.py
Python
apache-2.0
14,541
using System.Reflection; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; // Управление общими сведениями о сборке осуществляется посредством следующего // набора атрибутов. Измените значения этих атрибутов, чтобы изменить сведения, // связанные со сборкой. [assembly: AssemblyTitle("Parser.Tests")] [assembly: AssemblyDescription("")] [assembly: AssemblyConfiguration("")] [assembly: AssemblyCompany("Microsoft")] [assembly: AssemblyProduct("Parser.Tests")] [assembly: AssemblyCopyright("Copyright © Microsoft 2016")] [assembly: AssemblyTrademark("")] [assembly: AssemblyCulture("")] // Задание значения false для атрибута ComVisible приведет к тому, что типы из этой сборки станут невидимыми // для COM-компонентов. Если к одному из типов этой сборки необходимо обращаться из // модели COM, задайте для атрибута ComVisible этого типа значение true. [assembly: ComVisible(false)] // Если данный проект доступен для модели COM, следующий GUID используется в качестве идентификатора библиотеки типов [assembly: Guid("d0e39e31-ddb2-4960-84f7-cff70cd23451")] // Сведения о версии сборки состоят из следующих четырех значений: // // Основной номер версии // Дополнительный номер версии // Номер сборки // Редакция // // Можно задать все значения или принять номера сборки и редакции по умолчанию // используя "*", как показано ниже: // [сборка: AssemblyVersion("1.0.*")] [assembly: AssemblyVersion("1.0.0.0")] [assembly: AssemblyFileVersion("1.0.0.0")]
AlbertMukhammadiev/University
Second_semester/Homework_4/Parser/Parser.Tests/Properties/AssemblyInfo.cs
C#
apache-2.0
2,141
# rkt prepare rkt can prepare images to run in a pod. This means it will fetch (if necessary) the images, extract them in its internal tree store, and allocate a pod UUID. If overlay fs is not supported or disabled, it will also copy the tree in the pod rootfs. In this way, the pod is ready to be launched immediately by the [run-prepared](run-prepared.md) command. Running `rkt prepare` + `rkt run-prepared` is semantically equivalent to running [rkt run](run.md). Therefore, the supported arguments are mostly the same as in `run` except runtime arguments like `--interactive` or `--mds-register`. ## Example ``` # rkt prepare coreos.com/etcd:v2.0.10 rkt prepare coreos.com/etcd:v2.0.10 rkt: using image from local store for image name coreos.com/rkt/stage1-coreos:1.0.0 rkt: searching for app image coreos.com/etcd:v2.0.10 rkt: remote fetching from url https://github.com/coreos/etcd/releases/download/v2.0.10/etcd-v2.0.10-linux-amd64.aci prefix: "coreos.com/etcd" key: "https://coreos.com/dist/pubkeys/aci-pubkeys.gpg" gpg key fingerprint is: 8B86 DE38 890D DB72 9186 7B02 5210 BD88 8818 2190 CoreOS ACI Builder <release@coreos.com> Key "https://coreos.com/dist/pubkeys/aci-pubkeys.gpg" already in the keystore Downloading signature from https://github.com/coreos/etcd/releases/download/v2.0.10/etcd-v2.0.10-linux-amd64.aci.asc Downloading signature: [=======================================] 819 B/819 B Downloading ACI: [=============================================] 3.79 MB/3.79 MB rkt: signature verified: CoreOS ACI Builder <release@coreos.com> c9fad0e6-8236-4fc2-ad17-55d0a4c7d742 ``` ## Options | Flag | Default | Options | Description | | --- | --- | --- | --- | | `--exec` | `` | A path | Override the exec command for the preceding image | | `--inherit-env` | `false` | `true` or `false` | Inherit all environment variables not set by apps | | `--mount` | `` | Mount syntax (`volume=NAME,target=PATH`). 
See [Mounting Volumes without Mount Points](run.md#mounting-volumes-without-mount-points) | Mount point binding a volume to a path within an app | | `--no-overlay` | `false` | `true` or `false` | Disable overlay filesystem | | `--no-store` | `false` | `true` or `false` | Fetch images, ignoring the local store. See [image fetching behavior](../image-fetching-behavior.md) | | `--pod-manifest` | `` | A path | The path to the pod manifest. If it's non-empty, then only `--net`, `--no-overlay` and `--interactive` will have effect | | `--port` | `` | A port number | Ports to expose on the host (requires [contained network](https://github.com/coreos/rkt/blob/master/Documentation/networking.md#contained-mode)). Syntax: --port=NAME:HOSTPORT | | `--private-users` | `false` | `true` or `false` | Run within user namespaces (experimental) | | `--quiet` | `false` | `true` or `false` | Supress superfluous output on stdout, print only the UUID on success | | `--set-env` | `` | An environment variable. Syntax `NAME=VALUE` | An environment variable to set for apps | | `--stage1-url` | `` | A URL to a stage1 image. HTTP/HTTPS/File/Docker URLs are supported | Image to use as stage1 | | `--stage1-path` | `` | A path to a stage1 image. Absolute and relative paths are supported | Image to use as stage1 | | `--stage1-name` | `` | A name of a stage1 image. Will perform a discovery if the image is not in the store | Image to use as stage1 | | `--stage1-hash` | `` | A hash of a stage1 image. The image must exist in the store | Image to use as stage1 | | `--stage1-from-dir` | `` | A stage1 image file inside the default stage1 images directory | Image to use as stage1 | | `--store-only` | `false` | `true` or `false` | Use only available images in the store (do not discover or download from remote URLs). See [image fetching behavior](../image-fetching-behavior.md) | | `--volume` | `` | Volume syntax (`NAME,kind=KIND,source=PATH,readOnly=BOOL`). 
See [Mount Volumes into a Pod](run.md#mount-volumes-into-a-pod) | Volumes to make available in the pod | ## Global options | Flag | Default | Options | Description | | --- | --- | --- | --- | | `--debug` | `false` | `true` or `false` | Prints out more debug information to `stderr` | | `--dir` | `/var/lib/rkt` | A directory path | Path to the `rkt` data directory | | `--insecure-options` | none | <ul><li>**none**: All security features are enabled</li><li>**http**: Allow HTTP connections. Be warned that this will send any credentials as clear text.</li><li>**image**: Disables verifying image signatures</li><li>**tls**: Accept any certificate from the server and any host name in that certificate</li><li>**ondisk**: Disables verifying the integrity of the on-disk, rendered image before running. This significantly speeds up start time.</li><li>**all**: Disables all security checks</li></ul> | Comma-separated list of security features to disable | | `--local-config` | `/etc/rkt` | A directory path | Path to the local configuration directory | | `--system-config` | `/usr/lib/rkt` | A directory path | Path to the system configuration directory | | `--trust-keys-from-https` | `false` | `true` or `false` | Automatically trust gpg keys fetched from https | | `--user-config` | `` | A directory path | Path to the user configuration directory |
sinfomicien/rkt
Documentation/subcommands/prepare.md
Markdown
apache-2.0
5,258
<!DOCTYPE html> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <meta name="description" content="Javadoc API documentation for Fresco." /> <link rel="shortcut icon" type="image/x-icon" href="../../../../favicon.ico" /> <title> AspectRatioMeasure - Fresco API | Fresco </title> <link href="../../../../../assets/doclava-developer-docs.css" rel="stylesheet" type="text/css" /> <link href="../../../../../assets/customizations.css" rel="stylesheet" type="text/css" /> <script src="../../../../../assets/search_autocomplete.js" type="text/javascript"></script> <script src="../../../../../assets/jquery-resizable.min.js" type="text/javascript"></script> <script src="../../../../../assets/doclava-developer-docs.js" type="text/javascript"></script> <script src="../../../../../assets/prettify.js" type="text/javascript"></script> <script type="text/javascript"> setToRoot("../../../../", "../../../../../assets/"); </script> <script src="../../../../../assets/doclava-developer-reference.js" type="text/javascript"></script> <script src="../../../../../assets/navtree_data.js" type="text/javascript"></script> <script src="../../../../../assets/customizations.js" type="text/javascript"></script> <noscript> <style type="text/css"> html,body{overflow:auto;} #body-content{position:relative; top:0;} #doc-content{overflow:visible;border-left:3px solid #666;} #side-nav{padding:0;} #side-nav .toggle-list ul {display:block;} #resize-packages-nav{border-bottom:3px solid #666;} </style> </noscript> </head> <body class=""> <div id="header"> <div id="headerLeft"> <span id="masthead-title"><a href="../../../../packages.html">Fresco</a></span> </div> <div id="headerRight"> <div id="search" > <div id="searchForm"> <form accept-charset="utf-8" class="gsc-search-box" onsubmit="return submit_search()"> <table class="gsc-search-box" cellpadding="0" cellspacing="0"><tbody> <tr> <td class="gsc-input"> <input id="search_autocomplete" class="gsc-input" type="text" size="33" 
autocomplete="off" title="search developer docs" name="q" value="search developer docs" onFocus="search_focus_changed(this, true)" onBlur="search_focus_changed(this, false)" onkeydown="return search_changed(event, true, '../../../../')" onkeyup="return search_changed(event, false, '../../../../')" /> <div id="search_filtered_div" class="no-display"> <table id="search_filtered" cellspacing=0> </table> </div> </td> <!-- <td class="gsc-search-button"> <input type="submit" value="Search" title="search" id="search-button" class="gsc-search-button" /> </td> <td class="gsc-clear-button"> <div title="clear results" class="gsc-clear-button">&nbsp;</div> </td> --> </tr></tbody> </table> </form> </div><!-- searchForm --> </div><!-- search --> </div> </div><!-- header --> <div class="g-section g-tpl-240" id="body-content"> <div class="g-unit g-first side-nav-resizable" id="side-nav"> <div id="swapper"> <div id="nav-panels"> <div id="resize-packages-nav"> <div id="packages-nav"> <div id="index-links"> <a href="../../../../packages.html" >Packages</a> | <a href="../../../../classes.html" >Classes</a> </div> <ul> <li class="api apilevel-"> <a href="../../../../com/facebook/animated/gif/package-summary.html">com.facebook.animated.gif</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/animated/webp/package-summary.html">com.facebook.animated.webp</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/binaryresource/package-summary.html">com.facebook.binaryresource</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/cache/common/package-summary.html">com.facebook.cache.common</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/cache/disk/package-summary.html">com.facebook.cache.disk</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/common/activitylistener/package-summary.html">com.facebook.common.activitylistener</a></li> <li class="api apilevel-"> <a 
href="../../../../com/facebook/common/disk/package-summary.html">com.facebook.common.disk</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/common/executors/package-summary.html">com.facebook.common.executors</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/common/file/package-summary.html">com.facebook.common.file</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/common/internal/package-summary.html">com.facebook.common.internal</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/common/lifecycle/package-summary.html">com.facebook.common.lifecycle</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/common/logging/package-summary.html">com.facebook.common.logging</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/common/media/package-summary.html">com.facebook.common.media</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/common/memory/package-summary.html">com.facebook.common.memory</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/common/references/package-summary.html">com.facebook.common.references</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/common/soloader/package-summary.html">com.facebook.common.soloader</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/common/statfs/package-summary.html">com.facebook.common.statfs</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/common/streams/package-summary.html">com.facebook.common.streams</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/common/time/package-summary.html">com.facebook.common.time</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/common/util/package-summary.html">com.facebook.common.util</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/common/webp/package-summary.html">com.facebook.common.webp</a></li> <li class="api 
apilevel-"> <a href="../../../../com/facebook/datasource/package-summary.html">com.facebook.datasource</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/drawable/base/package-summary.html">com.facebook.drawable.base</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/drawee/backends/pipeline/package-summary.html">com.facebook.drawee.backends.pipeline</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/drawee/backends/volley/package-summary.html">com.facebook.drawee.backends.volley</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/drawee/components/package-summary.html">com.facebook.drawee.components</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/drawee/controller/package-summary.html">com.facebook.drawee.controller</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/drawee/debug/package-summary.html">com.facebook.drawee.debug</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/drawee/drawable/package-summary.html">com.facebook.drawee.drawable</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/drawee/generic/package-summary.html">com.facebook.drawee.generic</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/drawee/gestures/package-summary.html">com.facebook.drawee.gestures</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/drawee/interfaces/package-summary.html">com.facebook.drawee.interfaces</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/drawee/span/package-summary.html">com.facebook.drawee.span</a></li> <li class="selected api apilevel-"> <a href="../../../../com/facebook/drawee/view/package-summary.html">com.facebook.drawee.view</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/fresco/animation/backend/package-summary.html">com.facebook.fresco.animation.backend</a></li> <li class="api apilevel-"> <a 
href="../../../../com/facebook/fresco/animation/bitmap/package-summary.html">com.facebook.fresco.animation.bitmap</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/fresco/animation/bitmap/cache/package-summary.html">com.facebook.fresco.animation.bitmap.cache</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/fresco/animation/bitmap/preparation/package-summary.html">com.facebook.fresco.animation.bitmap.preparation</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/fresco/animation/bitmap/wrapper/package-summary.html">com.facebook.fresco.animation.bitmap.wrapper</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/fresco/animation/drawable/package-summary.html">com.facebook.fresco.animation.drawable</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/fresco/animation/drawable/animator/package-summary.html">com.facebook.fresco.animation.drawable.animator</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/fresco/animation/factory/package-summary.html">com.facebook.fresco.animation.factory</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/fresco/animation/frame/package-summary.html">com.facebook.fresco.animation.frame</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/imageformat/package-summary.html">com.facebook.imageformat</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/imagepipeline/animated/base/package-summary.html">com.facebook.imagepipeline.animated.base</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/imagepipeline/animated/factory/package-summary.html">com.facebook.imagepipeline.animated.factory</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/imagepipeline/animated/impl/package-summary.html">com.facebook.imagepipeline.animated.impl</a></li> <li class="api apilevel-"> <a 
href="../../../../com/facebook/imagepipeline/animated/util/package-summary.html">com.facebook.imagepipeline.animated.util</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/imagepipeline/backends/okhttp3/package-summary.html">com.facebook.imagepipeline.backends.okhttp3</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/imagepipeline/backends/volley/package-summary.html">com.facebook.imagepipeline.backends.volley</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/imagepipeline/bitmaps/package-summary.html">com.facebook.imagepipeline.bitmaps</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/imagepipeline/cache/package-summary.html">com.facebook.imagepipeline.cache</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/imagepipeline/common/package-summary.html">com.facebook.imagepipeline.common</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/imagepipeline/core/package-summary.html">com.facebook.imagepipeline.core</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/imagepipeline/datasource/package-summary.html">com.facebook.imagepipeline.datasource</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/imagepipeline/decoder/package-summary.html">com.facebook.imagepipeline.decoder</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/imagepipeline/drawable/package-summary.html">com.facebook.imagepipeline.drawable</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/imagepipeline/image/package-summary.html">com.facebook.imagepipeline.image</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/imagepipeline/listener/package-summary.html">com.facebook.imagepipeline.listener</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/imagepipeline/memory/package-summary.html">com.facebook.imagepipeline.memory</a></li> <li class="api apilevel-"> <a 
href="../../../../com/facebook/imagepipeline/nativecode/package-summary.html">com.facebook.imagepipeline.nativecode</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/imagepipeline/platform/package-summary.html">com.facebook.imagepipeline.platform</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/imagepipeline/postprocessors/package-summary.html">com.facebook.imagepipeline.postprocessors</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/imagepipeline/producers/package-summary.html">com.facebook.imagepipeline.producers</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/imagepipeline/request/package-summary.html">com.facebook.imagepipeline.request</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/imageutils/package-summary.html">com.facebook.imageutils</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/webpsupport/package-summary.html">com.facebook.webpsupport</a></li> <li class="api apilevel-"> <a href="../../../../com/facebook/widget/text/span/package-summary.html">com.facebook.widget.text.span</a></li> </ul><br/> </div> <!-- end packages --> </div> <!-- end resize-packages --> <div id="classes-nav"> <ul> <li><h2>Classes</h2> <ul> <li class="selected api apilevel-"><a href="../../../../com/facebook/drawee/view/AspectRatioMeasure.html">AspectRatioMeasure</a></li> <li class="api apilevel-"><a href="../../../../com/facebook/drawee/view/AspectRatioMeasure.Spec.html">AspectRatioMeasure.Spec</a></li> <li class="api apilevel-"><a href="../../../../com/facebook/drawee/view/DraweeHolder.html">DraweeHolder</a>&lt;DH&nbsp;extends&nbsp;<a href="../../../../com/facebook/drawee/interfaces/DraweeHierarchy.html">DraweeHierarchy</a>&gt;</li> <li class="api apilevel-"><a href="../../../../com/facebook/drawee/view/DraweeTransition.html">DraweeTransition</a></li> <li class="api apilevel-"><a 
href="../../../../com/facebook/drawee/view/DraweeView.html">DraweeView</a>&lt;DH&nbsp;extends&nbsp;<a href="../../../../com/facebook/drawee/interfaces/DraweeHierarchy.html">DraweeHierarchy</a>&gt;</li> <li class="api apilevel-"><a href="../../../../com/facebook/drawee/view/GenericDraweeView.html">GenericDraweeView</a></li> <li class="api apilevel-"><a href="../../../../com/facebook/drawee/view/MultiDraweeHolder.html">MultiDraweeHolder</a>&lt;DH&nbsp;extends&nbsp;<a href="../../../../com/facebook/drawee/interfaces/DraweeHierarchy.html">DraweeHierarchy</a>&gt;</li> <li class="api apilevel-"><a href="../../../../com/facebook/drawee/view/SimpleDraweeView.html">SimpleDraweeView</a></li> </ul> </li> </ul><br/> </div><!-- end classes --> </div><!-- end nav-panels --> <div id="nav-tree" style="display:none"> <div id="index-links"> <a href="../../../../packages.html" >Packages</a> | <a href="../../../../classes.html" >Classes</a> </div> </div><!-- end nav-tree --> </div><!-- end swapper --> </div> <!-- end side-nav --> <script> if (!isMobile) { //$("<a href='#' id='nav-swap' onclick='swapNav();return false;' style='font-size:10px;line-height:9px;margin-left:1em;text-decoration:none;'><span id='tree-link'>Use Tree Navigation</span><span id='panel-link' style='display:none'>Use Panel Navigation</span></a>").appendTo("#side-nav"); chooseDefaultNav(); if ($("#nav-tree").is(':visible')) { init_default_navtree("../../../../"); } else { addLoadEvent(function() { scrollIntoView("packages-nav"); scrollIntoView("classes-nav"); }); } //$("#swapper").css({borderBottom:"2px solid #aaa"}); } else { swapNav(); // tree view should be used on mobile } </script> <div class="g-unit" id="doc-content"> <div id="api-info-block"> <div class="sum-details-links"> Summary: <a href="#nestedclasses">Nested Classes</a> &#124; <a href="#pubctors">Ctors</a> &#124; <a href="#pubmethods">Methods</a> &#124; <a href="#inhmethods">Inherited Methods</a> &#124; <a href="#" onclick="return 
toggleAllClassInherited()" id="toggleAllClassInherited">[Expand All]</a> </div><!-- end sum-details-links --> <div class="api-level"> </div> </div><!-- end api-info-block --> <!-- ======== START OF CLASS DATA ======== --> <div id="jd-header"> public class <h1>AspectRatioMeasure</h1> extends Object<br/> </div><!-- end header --> <div id="naMessage"></div> <div id="jd-content" class="api apilevel-"> <table class="jd-inheritance-table"> <tr> <td colspan="2" class="jd-inheritance-class-cell">java.lang.Object</td> </tr> <tr> <td class="jd-inheritance-space">&nbsp;&nbsp;&nbsp;&#x21b3;</td> <td colspan="1" class="jd-inheritance-class-cell">com.facebook.drawee.view.AspectRatioMeasure</td> </tr> </table> <div class="jd-descr"> <h2>Class Overview</h2> <p>A utility class that performs measuring based on the desired aspect ratio. </p> </div><!-- jd-descr --> <div class="jd-descr"> <h2>Summary</h2> <!-- ======== NESTED CLASS SUMMARY ======== --> <table id="nestedclasses" class="jd-sumtable"><tr><th colspan="12">Nested Classes</th></tr> <tr class="alt-color api apilevel-" > <td class="jd-typecol"> class</td> <td class="jd-linkcol"><a href="../../../../com/facebook/drawee/view/AspectRatioMeasure.Spec.html">AspectRatioMeasure.Spec</a></td> <td class="jd-descrcol" width="100%">Holder for width and height measure specs.&nbsp;</td> </tr> </table> <!-- ======== CONSTRUCTOR SUMMARY ======== --> <table id="pubctors" class="jd-sumtable"><tr><th colspan="12">Public Constructors</th></tr> <tr class="alt-color api apilevel-" > <td class="jd-typecol"> </td> <td class="jd-linkcol" width="100%"> <span class="sympad"><a href="../../../../com/facebook/drawee/view/AspectRatioMeasure.html#AspectRatioMeasure()">AspectRatioMeasure</a></span>() </td></tr> </table> <!-- ========== METHOD SUMMARY =========== --> <table id="pubmethods" class="jd-sumtable"><tr><th colspan="12">Public Methods</th></tr> <tr class="alt-color api apilevel-" > <td class="jd-typecol"> static void </td> <td class="jd-linkcol" 
width="100%"> <span class="sympad"><a href="../../../../com/facebook/drawee/view/AspectRatioMeasure.html#updateMeasureSpec(com.facebook.drawee.view.AspectRatioMeasure.Spec, float, android.view.ViewGroup.LayoutParams, int, int)">updateMeasureSpec</a></span>(<a href="../../../../com/facebook/drawee/view/AspectRatioMeasure.Spec.html">AspectRatioMeasure.Spec</a> spec, float aspectRatio, ViewGroup.LayoutParams layoutParams, int widthPadding, int heightPadding) <div class="jd-descrdiv">Updates the given measure spec with respect to the aspect ratio.</div> </td></tr> </table> <!-- ========== METHOD SUMMARY =========== --> <table id="inhmethods" class="jd-sumtable"><tr><th> <a href="#" class="toggle-all" onclick="return toggleAllInherited(this, null)">[Expand]</a> <div style="clear:left;">Inherited Methods</div></th></tr> <tr class="api apilevel-" > <td colspan="12"> <a href="#" onclick="return toggleInherited(this, null)" id="inherited-methods-java.lang.Object" class="jd-expando-trigger closed" ><img id="inherited-methods-java.lang.Object-trigger" src="../../../../../assets/images/triangle-closed.png" class="jd-expando-trigger-img" /></a> From class java.lang.Object <div id="inherited-methods-java.lang.Object"> <div id="inherited-methods-java.lang.Object-list" class="jd-inheritedlinks"> </div> <div id="inherited-methods-java.lang.Object-summary" style="display: none;"> <table class="jd-sumtable-expando"> <tr class="alt-color api apilevel-" > <td class="jd-typecol"> Object </td> <td class="jd-linkcol" width="100%"> <span class="sympad">clone</span>() </td></tr> <tr class=" api apilevel-" > <td class="jd-typecol"> boolean </td> <td class="jd-linkcol" width="100%"> <span class="sympad">equals</span>(Object arg0) </td></tr> <tr class="alt-color api apilevel-" > <td class="jd-typecol"> void </td> <td class="jd-linkcol" width="100%"> <span class="sympad">finalize</span>() </td></tr> <tr class=" api apilevel-" > <td class="jd-typecol"> final Class&lt;?&gt; </td> <td 
class="jd-linkcol" width="100%"> <span class="sympad">getClass</span>() </td></tr> <tr class="alt-color api apilevel-" > <td class="jd-typecol"> int </td> <td class="jd-linkcol" width="100%"> <span class="sympad">hashCode</span>() </td></tr> <tr class=" api apilevel-" > <td class="jd-typecol"> final void </td> <td class="jd-linkcol" width="100%"> <span class="sympad">notify</span>() </td></tr> <tr class="alt-color api apilevel-" > <td class="jd-typecol"> final void </td> <td class="jd-linkcol" width="100%"> <span class="sympad">notifyAll</span>() </td></tr> <tr class=" api apilevel-" > <td class="jd-typecol"> String </td> <td class="jd-linkcol" width="100%"> <span class="sympad">toString</span>() </td></tr> <tr class="alt-color api apilevel-" > <td class="jd-typecol"> final void </td> <td class="jd-linkcol" width="100%"> <span class="sympad">wait</span>(long arg0, int arg1) </td></tr> <tr class=" api apilevel-" > <td class="jd-typecol"> final void </td> <td class="jd-linkcol" width="100%"> <span class="sympad">wait</span>(long arg0) </td></tr> <tr class="alt-color api apilevel-" > <td class="jd-typecol"> final void </td> <td class="jd-linkcol" width="100%"> <span class="sympad">wait</span>() </td></tr> </table> </div> </div> </td></tr> </table> </div><!-- jd-descr (summary) --> <!-- Details --> <!-- XML Attributes --> <!-- Enum Values --> <!-- Constants --> <!-- Fields --> <!-- Public ctors --> <!-- ========= CONSTRUCTOR DETAIL ======== --> <h2>Public Constructors</h2> <a id="AspectRatioMeasure()"></a> <div class="jd-details api apilevel-"> <h4 class="jd-details-title"> <span class="normal"> public </span> <span class="sympad">AspectRatioMeasure</span> <span class="normal">()</span> </h4> <div class="api-level"> <div> </div> </div> <div class="jd-details-descr"> <div class="jd-tagdata jd-tagdescr"><p></p></div> </div> </div> <!-- ========= CONSTRUCTOR DETAIL ======== --> <!-- Protected ctors --> <!-- ========= METHOD DETAIL ======== --> <!-- Public methdos --> 
<h2>Public Methods</h2> <a id="updateMeasureSpec(com.facebook.drawee.view.AspectRatioMeasure.Spec, float, android.view.ViewGroup.LayoutParams, int, int)"></a> <div class="jd-details api apilevel-"> <h4 class="jd-details-title"> <span class="normal"> public static void </span> <span class="sympad">updateMeasureSpec</span> <span class="normal">(<a href="../../../../com/facebook/drawee/view/AspectRatioMeasure.Spec.html">AspectRatioMeasure.Spec</a> spec, float aspectRatio, ViewGroup.LayoutParams layoutParams, int widthPadding, int heightPadding)</span> </h4> <div class="api-level"> <div> </div> </div> <div class="jd-details-descr"> <div class="jd-tagdata jd-tagdescr"><p>Updates the given measure spec with respect to the aspect ratio. <p>Note: Measure spec is not changed if the aspect ratio is not greater than zero or if layoutParams is null. <p>Measure spec of the layout dimension (width or height) specified as "0dp" is updated to match the measure spec of the other dimension adjusted by the aspect ratio. Exactly one layout dimension should be specified as "0dp". <p>Padding is taken into account so that the aspect ratio refers to the content without padding: <code>aspectRatio == (viewWidth - widthPadding) / (viewHeight - heightPadding)</code> <p>Updated measure spec respects the parent's constraints. I.e. 
measure spec is not changed if the parent has specified mode <code>EXACTLY</code>, and it doesn't exceed measure size if parent has specified mode <code>AT_MOST</code>.</p></div> <div class="jd-tagdata"> <h5 class="jd-tagtitle">Parameters</h5> <table class="jd-tagtable"> <tr> <th>spec</th> <td>in/out measure spec to be updated</td> </tr> <tr> <th>aspectRatio</th> <td>desired aspect ratio</td> </tr> <tr> <th>layoutParams</th> <td>view's layout params</td> </tr> <tr> <th>widthPadding</th> <td>view's left + right padding</td> </tr> <tr> <th>heightPadding</th> <td>view's top + bottom padding </td> </tr> </table> </div> </div> </div> <!-- ========= METHOD DETAIL ======== --> <!-- ========= END OF CLASS DATA ========= --> <a id="navbar_top"></a> <div id="footer"> +Generated by <a href="http://code.google.com/p/doclava/">Doclava</a>. +</div> <!-- end footer - @generated --> </div> <!-- jd-content --> </div><!-- end doc-content --> </div> <!-- end body-content --> <script type="text/javascript"> init(); /* initialize doclava-developer-docs.js */ </script> </body> </html>
weiwenqiang/GitHub
expert/fresco/docs/javadoc/reference/com/facebook/drawee/view/AspectRatioMeasure.html
HTML
apache-2.0
28,446
package org.zstack.network.service.virtualrouter.lb; import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang.StringUtils; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Qualifier; import org.springframework.transaction.annotation.Transactional; import org.zstack.core.asyncbatch.While; import org.zstack.core.cloudbus.CloudBus; import org.zstack.core.cloudbus.CloudBusCallBack; import org.zstack.core.componentloader.PluginRegistry; import org.zstack.core.db.DatabaseFacade; import org.zstack.core.db.Q; import org.zstack.core.db.SQL; import org.zstack.core.db.SimpleQuery; import org.zstack.core.db.SimpleQuery.Op; import org.zstack.core.workflow.FlowChainBuilder; import org.zstack.core.workflow.ShareFlow; import org.zstack.header.acl.AccessControlListEntryVO; import org.zstack.header.acl.AccessControlListEntryVO_; import org.zstack.header.apimediator.ApiMessageInterceptionException; import org.zstack.header.apimediator.ApiMessageInterceptor; import org.zstack.header.apimediator.GlobalApiMessageInterceptor; import org.zstack.header.core.Completion; import org.zstack.header.core.NoErrorCompletion; import org.zstack.header.core.ReturnValueCompletion; import org.zstack.header.core.WhileDoneCompletion; import org.zstack.header.core.workflow.*; import org.zstack.header.errorcode.ErrorCode; import org.zstack.header.errorcode.ErrorCodeList; import org.zstack.header.errorcode.OperationFailureException; import org.zstack.header.exception.CloudRuntimeException; import org.zstack.header.message.APIMessage; import org.zstack.header.message.MessageReply; import org.zstack.header.network.l3.L3NetworkInventory; import org.zstack.header.network.l3.L3NetworkVO; import org.zstack.header.network.l3.UsedIpVO; import org.zstack.header.network.service.*; import org.zstack.header.tag.SystemTagVO; import org.zstack.header.tag.SystemTagVO_; import org.zstack.header.vm.*; import 
org.zstack.header.vo.ResourceVO; import org.zstack.network.service.NetworkServiceManager; import org.zstack.network.service.lb.*; import org.zstack.network.service.vip.*; import org.zstack.network.service.virtualrouter.*; import org.zstack.network.service.virtualrouter.VirtualRouterCommands.AgentCommand; import org.zstack.network.service.virtualrouter.VirtualRouterCommands.AgentResponse; import org.zstack.network.service.virtualrouter.ha.VirtualRouterHaBackend; import org.zstack.network.service.virtualrouter.vip.VipConfigProxy; import org.zstack.network.service.virtualrouter.vip.VirtualRouterVipBackend; import org.zstack.utils.CollectionUtils; import org.zstack.utils.DebugUtils; import org.zstack.utils.Utils; import org.zstack.utils.VipUseForList; import org.zstack.utils.function.Function; import org.zstack.utils.gson.JSONObjectUtil; import org.zstack.utils.logging.CLogger; import java.util.*; import java.util.stream.Collectors; import static java.util.Arrays.asList; import static org.zstack.core.Platform.argerr; import static org.zstack.core.Platform.operr; import static org.zstack.utils.CollectionDSL.list; /** * Created by frank on 8/9/2015. 
*/ public class VirtualRouterLoadBalancerBackend extends AbstractVirtualRouterBackend implements LoadBalancerBackend, GlobalApiMessageInterceptor, ApiMessageInterceptor, VirtualRouterHaGetCallbackExtensionPoint, VirtualRouterAfterAttachNicExtensionPoint, VirtualRouterBeforeDetachNicExtensionPoint { private static final CLogger logger = Utils.getLogger(VirtualRouterLoadBalancerBackend.class); @Autowired private DatabaseFacade dbf; @Autowired private CloudBus bus; @Autowired @Qualifier("VirtualRouterVipBackend") private VirtualRouterVipBackend vipVrBkd; @Autowired private NetworkServiceManager nwServiceMgr; @Autowired private PluginRegistry pluginRgty; @Autowired private LbConfigProxy proxy; @Autowired private VirtualRouterHaBackend haBackend; @Autowired private LoadBalancerManager lbMgr; @Autowired private VipConfigProxy vipProxy; private static final String REFRESH_CERTIFICATE_TASK = "refreshCertificate"; private static final String DELETE_CERTIFICATE_TASK = "deleteCertificate"; private static final String REFRESH_LB_TASK = "refreshLb"; private static final String DESTROY_LB_TASK = "destroyLb"; @Override public List<Class> getMessageClassToIntercept() { return asList(APIAddVmNicToLoadBalancerMsg.class); } @Override public InterceptorPosition getPosition() { return InterceptorPosition.END; } @Override public APIMessage intercept(APIMessage msg) throws ApiMessageInterceptionException { if (msg instanceof APIAddVmNicToLoadBalancerMsg) { validate((APIAddVmNicToLoadBalancerMsg) msg); } return msg; } protected String getLoadLancerServiceProvider(List<String> l3Uuids) { NetworkServiceProviderType providerType = nwServiceMgr.getTypeOfNetworkServiceProviderForService( l3Uuids.get(0), LoadBalancerConstants.LB_NETWORK_SERVICE_TYPE); return providerType.toString(); } @Transactional(readOnly = true) private void validate(APIAddVmNicToLoadBalancerMsg msg) { LoadBalancerListenerVO listenerVO = dbf.findByUuid(msg.getListenerUuid(), LoadBalancerListenerVO.class); 
LoadBalancerServerGroupVO groupVO = lbMgr.getDefaultServerGroup(listenerVO); List<String> attachedVmNicUuids = new ArrayList<>(); if (groupVO != null) { attachedVmNicUuids = groupVO.getLoadBalancerServerGroupVmNicRefs().stream() .map(LoadBalancerServerGroupVmNicRefVO::getVmNicUuid).collect(Collectors.toList()); } attachedVmNicUuids.addAll(msg.getVmNicUuids()); Set<String> l3NetworkUuids = new HashSet<>( Q.New(VmNicVO.class).select(VmNicVO_.l3NetworkUuid) .in(VmNicVO_.uuid, attachedVmNicUuids) .listValues()); Set<String> vrUuids = new HashSet<>( Q.New(VmNicVO.class).select(VmNicVO_.vmInstanceUuid) .in(VmNicVO_.l3NetworkUuid, l3NetworkUuids) .eq(VmNicVO_.metaData, VirtualRouterNicMetaData.GUEST_NIC_MASK) .listValues()); boolean valid = true; if (vrUuids.size() == 2 ) { if (LoadBalancerSystemTags.SEPARATE_VR.hasTag(msg.getLoadBalancerUuid()) && vrUuids.stream().anyMatch(uuid -> VirtualRouterSystemTags.DEDICATED_ROLE_VR.hasTag(uuid))) { logger.debug(String.format( "there are two virtual routers[uuids:%s] on l3 networks[uuids:%s] which vmnics[uuids:%s]" + "attached", vrUuids, l3NetworkUuids, attachedVmNicUuids)); valid = true; } else if (isVirtualRouterHaPair(new ArrayList<>(vrUuids))){ valid = true; } else { valid = false; } } else if (vrUuids.size() > 1) { valid = false; } if (!valid) { throw new ApiMessageInterceptionException(argerr( "new add vm nics[uuids:%s] and attached vmnics are not on the same vrouter, " + "they are on vrouters[uuids:%s]", msg.getVmNicUuids(), vrUuids)); } List<String> peerL3NetworkUuids = SQL.New("select peer.l3NetworkUuid " + "from LoadBalancerVO lb, VipVO vip, VipPeerL3NetworkRefVO peer " + "where lb.vipUuid = vip.uuid " + "and vip.uuid = peer.vipUuid " + "and lb.uuid = :lbUuid") .param("lbUuid", msg.getLoadBalancerUuid()) .list(); if (peerL3NetworkUuids == null || peerL3NetworkUuids.isEmpty()) { return; } List<String> requestVmNicsL3NetworkUuids = Q.New(VmNicVO.class) .select(VmNicVO_.l3NetworkUuid) .in(VmNicVO_.uuid, msg.getVmNicUuids()) 
.listValues(); requestVmNicsL3NetworkUuids.addAll(peerL3NetworkUuids); vrUuids = new HashSet<>( Q.New(VmNicVO.class).select(VmNicVO_.vmInstanceUuid) .in(VmNicVO_.l3NetworkUuid, requestVmNicsL3NetworkUuids) .eq(VmNicVO_.metaData, VirtualRouterNicMetaData.GUEST_NIC_MASK) .listValues()); if (vrUuids.size() > 1 && !isVirtualRouterHaPair(new ArrayList<>(vrUuids))) { throw new ApiMessageInterceptionException(argerr( "new add vm nics[uuids:%s] and peer l3s[uuids:%s] of loadbalancer[uuid: %s]'s vip are not on the same vrouter, " + "they are on vrouters[uuids:%s]", msg.getVmNicUuids(), peerL3NetworkUuids, msg.getLoadBalancerUuid(), vrUuids)); } } @Transactional(readOnly = true) private VirtualRouterVmInventory findVirtualRouterVm(String lbUuid) { List<VirtualRouterVmVO> vrs = getAllVirtualRouters(lbUuid); if (LoadBalancerSystemTags.SEPARATE_VR.hasTag(lbUuid)) { Optional<VirtualRouterVmInventory> vr = vrs.stream() .filter(v -> VirtualRouterSystemTags.DEDICATED_ROLE_VR.hasTag(v.getUuid())) .map(VirtualRouterVmInventory::valueOf) .findFirst(); return vr.orElse(null); } DebugUtils.Assert(vrs.size() <= 1, String.format("multiple virtual routers[uuids:%s] found", vrs.stream().map(ResourceVO::getUuid).collect(Collectors.toList()))); return vrs.isEmpty() ? 
null : VirtualRouterVmInventory.valueOf(vrs.get(0)); } @Transactional(readOnly = true) private VirtualRouterVmInventory findVirtualRouterVm(String lbUuid, List<String> vmNics) { if (vmNics.isEmpty()) { return null; } List<VirtualRouterVmVO> vrs = getAllVirtualRouters(lbUuid); if (LoadBalancerSystemTags.SEPARATE_VR.hasTag(lbUuid)) { Optional<VirtualRouterVmVO> vr = vrs.stream() .filter(v -> VirtualRouterSystemTags.DEDICATED_ROLE_VR.hasTag(v.getUuid())) .findFirst(); if (!vr.isPresent()) { return null; } List<String> vmNicL3NetworkUuids = Q.New(VmNicVO.class).select(VmNicVO_.l3NetworkUuid).in(VmNicVO_.uuid, vmNics).listValues(); VirtualRouterVmInventory vrInventory = VirtualRouterVmInventory.valueOf(vr.get()); vmNicL3NetworkUuids.removeAll(vrInventory.getGuestL3Networks()); if (!vmNicL3NetworkUuids.isEmpty()) { logger.debug(String.format("found l3 networks[uuids:%s] not attached to separate vr[uuid:%s] for loadbalancer[uuid:%s]", vmNicL3NetworkUuids, vr.get().getUuid(), lbUuid)); throw new CloudRuntimeException("not support separate vr with multiple networks vpc!"); } } DebugUtils.Assert(vrs.size() <= 1, String.format("multiple virtual routers[uuids:%s] found", vrs.stream().map(ResourceVO::getUuid).collect(Collectors.toList()))); return vrs.isEmpty() ? 
null : VirtualRouterVmInventory.valueOf(vrs.get(0)); } public static class LbTO { String lbUuid; String listenerUuid; String vip; String publicNic; List<String> nicIps; int instancePort; int loadBalancerPort; String mode; List<String> parameters; String certificateUuid; String securityPolicyType; List<ServerGroup> serverGroups; List<RedirectRule> redirectRules; static class ServerGroup { private String name; private String serverGroupUuid; private List<BackendServer> backendServers; private boolean isDefault = false; public String getName() { return name; } public void setName(String name) { this.name = name; } public String getServerGroupUuid() { return serverGroupUuid; } public void setServerGroupUuid(String serverGroupUuid) { this.serverGroupUuid = serverGroupUuid; } public List<BackendServer> getBackendServers() { return backendServers; } public void setBackendServers(List<BackendServer> backendServers) { this.backendServers = backendServers; } public boolean isDefault() { return isDefault; } public void setDefault(boolean aDefault) { isDefault = aDefault; } } static class BackendServer { private String ip; private long weight; public BackendServer(String ip, long weight) { this.ip = ip; this.weight = weight; } public String getIp() { return ip; } public void setIp(String ip) { this.ip = ip; } public long getWeight() { return weight; } public void setWeight(long weight) { this.weight = weight; } } static class RedirectRule { private String redirectRuleUuid; private String aclUuid; private String redirectRule; private String serverGroupUuid; public String getRedirectRuleUuid() { return redirectRuleUuid; } public void setRedirectRuleUuid(String redirectRuleUuid) { this.redirectRuleUuid = redirectRuleUuid; } public String getAclUuid() { return aclUuid; } public void setAclUuid(String aclUuid) { this.aclUuid = aclUuid; } public String getRedirectRule() { return redirectRule; } public void setRedirectRule(String redirectRule) { this.redirectRule = redirectRule; } 
public String getServerGroupUuid() { return serverGroupUuid; } public void setServerGroupUuid(String serverGroupUuid) { this.serverGroupUuid = serverGroupUuid; } } public String getListenerUuid() { return listenerUuid; } public void setListenerUuid(String listenerUuid) { this.listenerUuid = listenerUuid; } public List<String> getParameters() { return parameters; } public void setParameters(List<String> parameters) { this.parameters = parameters; } public String getLbUuid() { return lbUuid; } public void setLbUuid(String lbUuid) { this.lbUuid = lbUuid; } public String getVip() { return vip; } public void setVip(String vip) { this.vip = vip; } public List<String> getNicIps() { return nicIps; } public void setNicIps(List<String> nicIps) { this.nicIps = nicIps; } public int getInstancePort() { return instancePort; } public void setInstancePort(int instancePort) { this.instancePort = instancePort; } public int getLoadBalancerPort() { return loadBalancerPort; } public void setLoadBalancerPort(int loadBalancerPort) { this.loadBalancerPort = loadBalancerPort; } public String getMode() { return mode; } public void setMode(String mode) { this.mode = mode; } public String getCertificateUuid() { return certificateUuid; } public void setCertificateUuid(String certificateUuid) { this.certificateUuid = certificateUuid; } public String getPublicNic() { return publicNic; } public void setPublicNic(String publicNic) { this.publicNic = publicNic; } public String getSecurityPolicyType() { return securityPolicyType; } public void setSecurityPolicyType(String securityPolicyType) { this.securityPolicyType = securityPolicyType; } public List<ServerGroup> getServerGroups() { return serverGroups; } public void setServerGroups(List<ServerGroup> serverGroups) { this.serverGroups = serverGroups; } public List<RedirectRule> getRedirectRules() { return redirectRules; } public void setRedirectRules(List<RedirectRule> redirectRules) { this.redirectRules = redirectRules; } } public static class 
RefreshLbCmd extends AgentCommand { List<LbTO> lbs; public List<LbTO> getLbs() { return lbs; } public void setLbs(List<LbTO> lbs) { this.lbs = lbs; } } public static class RefreshLbLogLevelCmd extends AgentCommand { String level; public String getLevel() { return level; } public void setLevel(String level) { this.level = level; } } public static class RefreshLbLogLevelRsp extends AgentResponse { } public static class RefreshLbRsp extends AgentResponse { } public static class CertificateCmd extends AgentCommand { String uuid; String certificate; public String getUuid() { return uuid; } public void setUuid(String uuid) { this.uuid = uuid; } public String getCertificate() { return certificate; } public void setCertificate(String certificate) { this.certificate = certificate; } } public static class CertificateRsp extends AgentResponse { } public static class DeleteLbCmd extends AgentCommand { List<LbTO> lbs; public List<LbTO> getLbs() { return lbs; } public void setLbs(List<LbTO> lbs) { this.lbs = lbs; } } public static class DeleteLbRsp extends AgentResponse { } public static final String REFRESH_LB_PATH = "/lb/refresh"; public static final String DELETE_LB_PATH = "/lb/delete"; public static final String REFRESH_LB_LOG_LEVEL_PATH = "/lb/log/level"; public static final String CREATE_CERTIFICATE_PATH = "/certificate/create"; public static final String DELETE_CERTIFICATE_PATH = "/certificate/delete"; private List<LbTO> makeLbTOs(final LoadBalancerStruct struct, VirtualRouterVmInventory vr) { VipInventory vip = struct.getVip(); Optional<VmNicInventory> publicNic = vr.getVmNics().stream() .filter(n -> n.getL3NetworkUuid().equals(vip.getL3NetworkUuid())) .findFirst(); if (!publicNic.isPresent()) { return new ArrayList<>(); } return CollectionUtils.transformToList(struct.getListeners(), new Function<LbTO, LoadBalancerListenerInventory>() { private List<String> makeAcl(LoadBalancerListenerInventory listenerInv) { String aclEntry = ""; List<String> aclRules = new 
ArrayList<>(); List<LoadBalancerListenerACLRefInventory> refs = listenerInv.getAclRefs(); if (refs.isEmpty()) { aclRules.add(String.format("aclEntry::%s", aclEntry)); return aclRules; } List<LoadBalancerListenerACLRefInventory> aclRefInventories = refs.stream().filter(ref -> !ref.getType().equals(LoadBalancerAclType.redirect.toString())).collect(Collectors.toList()); if (aclRefInventories.isEmpty()) { return aclRules; } aclRules.add(String.format("aclType::%s", aclRefInventories.get(0).getType())); List<String> aclUuids = aclRefInventories.stream().map(LoadBalancerListenerACLRefInventory::getAclUuid).collect(Collectors.toList()); List<String> entry = Q.New(AccessControlListEntryVO.class).select(AccessControlListEntryVO_.ipEntries) .in(AccessControlListEntryVO_.aclUuid, aclUuids).listValues(); if (!entry.isEmpty()) { aclEntry = StringUtils.join(entry.toArray(), ','); } aclRules.add(String.format("aclEntry::%s", aclEntry)); return aclRules; } private List<AccessControlListEntryVO> sortAclEntry(List<AccessControlListEntryVO> entries) { if (entries == null || entries.size() <= 1) { return entries; } Collections.sort(entries, new Comparator<AccessControlListEntryVO>() { @Override public int compare(AccessControlListEntryVO entry1, AccessControlListEntryVO entry2) { int i = entry2.getDomain().length() - entry1.getDomain().length(); if (i != 0) { return i; } int urlLength1 = entry1.getUrl() == null ? 0 : entry1.getUrl().length(); int urlLength2 = entry2.getUrl() == null ? 
0 : entry2.getUrl().length(); return urlLength2 - urlLength1; } }); return entries; } private List<LbTO.RedirectRule> makeRedirectAcl(LoadBalancerListenerInventory listenerInv, LbTO lbTO) { ArrayList<LbTO.RedirectRule> redirectRules = new ArrayList<>(); if (lbTO.getServerGroups() == null || lbTO.getServerGroups().isEmpty()) { return redirectRules; } List<LoadBalancerListenerACLRefInventory> refs = listenerInv.getAclRefs(); if (refs.isEmpty()) { return redirectRules; } List<String> aclUuids = refs.stream().filter(ref -> ref.getType().equals(LoadBalancerAclType.redirect.toString())).map(LoadBalancerListenerACLRefInventory::getAclUuid).collect(Collectors.toList()); if (aclUuids.isEmpty()) { return redirectRules; } List<AccessControlListEntryVO> entries = Q.New(AccessControlListEntryVO.class).in(AccessControlListEntryVO_.aclUuid, aclUuids).list(); List<String> usedSgUuids = new ArrayList<>(); List<String> usedAggSgUuids = new ArrayList<>(); //sort acl entry: The more accurate, the more forward //accurate forward, longer forward List<AccessControlListEntryVO> domainWildWordMatchEntries = new ArrayList<>(); List<AccessControlListEntryVO> domainAccurateMatchEntries = new ArrayList<>(); List<AccessControlListEntryVO> onlyUrlEntries = new ArrayList<>(); List<AccessControlListEntryVO> afterSortEntries = new ArrayList<>(); for (AccessControlListEntryVO entryVO : entries) { if (entryVO.getMatchMethod().equals("Url")) { onlyUrlEntries.add(entryVO); } else if ("AccurateMatch".equals(entryVO.getCriterion())) { domainAccurateMatchEntries.add(entryVO); } else { domainWildWordMatchEntries.add(entryVO); } } sortAclEntry(domainAccurateMatchEntries); sortAclEntry(domainWildWordMatchEntries); afterSortEntries.addAll(domainAccurateMatchEntries); afterSortEntries.addAll(domainWildWordMatchEntries); afterSortEntries.addAll(onlyUrlEntries); List<String> sgOwnBackenServerUuids = lbTO.getServerGroups().stream().map(LbTO.ServerGroup::getServerGroupUuid).collect(Collectors.toList()); for 
(AccessControlListEntryVO entry : afterSortEntries) { List<String> serverGroupUuids = refs.stream() .filter(ref -> ref.getListenerUuid().equals(listenerInv.getUuid()) && ref.getAclUuid().equals(entry.getAclUuid()) && ref.getServerGroupUuid() != null && sgOwnBackenServerUuids.contains(ref.getServerGroupUuid())) .map(LoadBalancerListenerACLRefInventory::getServerGroupUuid).sorted().collect(Collectors.toList()); if (serverGroupUuids.isEmpty()) { continue; } LbTO.RedirectRule redirectRule = new LbTO.RedirectRule(); if (serverGroupUuids.size() == 1) { usedSgUuids.addAll(serverGroupUuids); redirectRule.setAclUuid(entry.getAclUuid()); redirectRule.setRedirectRuleUuid(entry.getUuid()); redirectRule.setServerGroupUuid(serverGroupUuids.get(0)); redirectRule.setRedirectRule(entry.getRedirectRule()); } else { usedAggSgUuids.addAll(serverGroupUuids); redirectRule.setAclUuid(entry.getAclUuid()); redirectRule.setRedirectRuleUuid(entry.getUuid()); redirectRule.setRedirectRule(entry.getRedirectRule()); StringBuilder stringBuilder = new StringBuilder(); for (String sgUuid : serverGroupUuids) { stringBuilder.append(sgUuid); } String polymerizedUuid = DigestUtils.md5Hex(stringBuilder.toString()); boolean needAddServerGroup = true; if (!lbTO.getServerGroups().isEmpty()) { needAddServerGroup = lbTO.getServerGroups().stream().noneMatch(sg -> sg.getServerGroupUuid().equals(polymerizedUuid)); } if (needAddServerGroup) { LbTO.ServerGroup serverGroup = new LbTO.ServerGroup(); serverGroup.setBackendServers(new ArrayList<>()); for (String sgUuid : serverGroupUuids) { for (LbTO.ServerGroup serverGroup1 : lbTO.serverGroups) { if (sgUuid.equals(serverGroup1.serverGroupUuid)) { serverGroup.getBackendServers().addAll(serverGroup1.getBackendServers()); } } } serverGroup.setName(polymerizedUuid); serverGroup.setServerGroupUuid(polymerizedUuid); lbTO.getServerGroups().add(serverGroup); } redirectRule.setServerGroupUuid(polymerizedUuid); usedSgUuids.add(polymerizedUuid); } 
redirectRules.add(redirectRule); } ArrayList<LbTO.BackendServer> backendServers = new ArrayList<>(); Iterator<LbTO.ServerGroup> iterator = lbTO.getServerGroups().iterator(); if (!usedAggSgUuids.isEmpty()) { if (!usedSgUuids.isEmpty()) { usedAggSgUuids = usedAggSgUuids.stream().filter(sgUuid -> !usedSgUuids.contains(sgUuid)).collect(Collectors.toList()); } } while (iterator.hasNext()) { LbTO.ServerGroup sg = iterator.next(); if (!usedSgUuids.contains(sg.getServerGroupUuid()) && !usedAggSgUuids.contains(sg.getServerGroupUuid())) { backendServers.addAll(sg.getBackendServers()); iterator.remove(); } if (usedAggSgUuids.contains(sg.getServerGroupUuid())) { iterator.remove(); } } if (!backendServers.isEmpty()) { if (!redirectRules.isEmpty()) { LbTO.ServerGroup serverGroup = new LbTO.ServerGroup(); serverGroup.setBackendServers(backendServers); serverGroup.setName("default-server-group"); StringBuilder stringBuilder = new StringBuilder(); serverGroup.setDefault(true); serverGroup.setServerGroupUuid("defaultServerGroup"); lbTO.getServerGroups().add(serverGroup); } } boolean isDefaultPort = false; // http 80;https 443 ArrayList<LbTO.RedirectRule> formatRedirectRules = new ArrayList<>(); if ( (lbTO.getMode().equals(LoadBalancerConstants.LB_PROTOCOL_HTTP) && lbTO.getLoadBalancerPort() == LoadBalancerConstants.PROTOCOL_HTTP_DEFAULT_PORT ) || (lbTO.getMode().equals(LoadBalancerConstants.LB_PROTOCOL_HTTPS) && lbTO.getLoadBalancerPort() == LoadBalancerConstants.PROTOCOL_HTTPS_DEFAULT_PORT) ){ formatRedirectRules.addAll(redirectRules); isDefaultPort = true; } for(LbTO.RedirectRule rule:redirectRules){ LbTO.RedirectRule formatRule = new LbTO.RedirectRule(); formatRule.setRedirectRule(rule.getRedirectRule()); formatRule.setRedirectRuleUuid(rule.getRedirectRuleUuid()); formatRule.setAclUuid(rule.getAclUuid()); formatRule.setServerGroupUuid(rule.getServerGroupUuid()); String matchMethod = 
Q.New(AccessControlListEntryVO.class).select(AccessControlListEntryVO_.matchMethod).eq(AccessControlListEntryVO_.uuid, rule.getRedirectRuleUuid()).findValue(); boolean isSkipInsertPort = ( LoadBalancerConstants.MatchMethod.Domain.toString().equals(matchMethod) || LoadBalancerConstants.MatchMethod.Url.toString().equals(matchMethod) ); if ( isSkipInsertPort && isDefaultPort ){ continue; } else if( isSkipInsertPort){ formatRedirectRules.add(formatRule); } else{ insertPortToRedirectRule(formatRule, lbTO); formatRedirectRules.add(formatRule); } } return formatRedirectRules; } private void insertPortToRedirectRule(LbTO.RedirectRule redirectRule,LbTO lbTO){ //add lbport after domain name StringBuffer rule = new StringBuffer(redirectRule.getRedirectRule()); String insertRule = ":" + lbTO.getLoadBalancerPort(); int index = rule.indexOf("/"); if (index != -1) { rule.insert(index,insertRule); } redirectRule.setRedirectRule(rule.toString()); } @Override public LbTO call(LoadBalancerListenerInventory l) { LbTO to = new LbTO(); to.setInstancePort(l.getInstancePort()); to.setLoadBalancerPort(l.getLoadBalancerPort()); to.setLbUuid(l.getLoadBalancerUuid()); to.setListenerUuid(l.getUuid()); to.setMode(l.getProtocol()); to.setVip(vip.getIp()); to.setSecurityPolicyType(l.getSecurityPolicyType()); if (l.getCertificateRefs() != null && !l.getCertificateRefs().isEmpty()) { to.setCertificateUuid(l.getCertificateRefs().get(0).getCertificateUuid()); } List<LoadBalancerServerGroupInventory> groupInvs = struct.getListenerServerGroupMap().get(l.getUuid()); List<String> params = new ArrayList<>(); List<String> ips = new ArrayList<>(); List<LbTO.ServerGroup> serverGroups = new ArrayList<>(); if (groupInvs != null) { for (LoadBalancerServerGroupInventory groupInv : groupInvs) { LbTO.ServerGroup serverGroup = new LbTO.ServerGroup(); serverGroup.setName(groupInv.getName()); serverGroup.setServerGroupUuid(groupInv.getUuid()); List<LbTO.BackendServer> backendServers = new ArrayList<>(); 
serverGroup.setBackendServers(backendServers); List<LoadBalancerServerGroupVmNicRefInventory> nicRefInventories = Optional.ofNullable(groupInv.getVmNicRefs()).orElse(new ArrayList<>()).stream() .sorted(new Comparator<LoadBalancerServerGroupVmNicRefInventory>() { @Override public int compare(LoadBalancerServerGroupVmNicRefInventory r1, LoadBalancerServerGroupVmNicRefInventory r2) { return (int) (r1.getId() - r2.getId()); } }).collect(Collectors.toList()); List<LoadBalancerServerGroupServerIpInventory> ipRefInventories = Optional.ofNullable(groupInv.getServerIps()).orElse(new ArrayList<>()).stream() .sorted(new Comparator<LoadBalancerServerGroupServerIpInventory>() { @Override public int compare(LoadBalancerServerGroupServerIpInventory r1, LoadBalancerServerGroupServerIpInventory r2) { return (int) (r1.getId() - r2.getId()); } }).collect(Collectors.toList()); for (LoadBalancerServerGroupVmNicRefInventory nicRef : nicRefInventories) { if (nicRef.getStatus().equals(LoadBalancerVmNicStatus.Inactive.toString())) { continue; } VmNicInventory nic = struct.getVmNics().get(nicRef.getVmNicUuid()); if (nic == null) { throw new CloudRuntimeException(String.format("cannot find nic[uuid:%s]", nicRef.getVmNicUuid())); } if(nic.getIp() == null || nic.getIp().isEmpty()){ continue; } ips.add(nic.getIp()); params.add(String.format("balancerWeight::%s::%s", nic.getIp(), nicRef.getWeight())); backendServers.add(new LbTO.BackendServer(nic.getIp(), nicRef.getWeight())); } for (LoadBalancerServerGroupServerIpInventory ipRef : ipRefInventories) { if (ipRef.getStatus().equals(LoadBalancerBackendServerStatus.Inactive.toString())) { continue; } if(ipRef.getIpAddress() == null || ipRef.getIpAddress().isEmpty()){ continue; } ips.add(ipRef.getIpAddress()); params.add(String.format("balancerWeight::%s::%s",ipRef.getIpAddress(), ipRef.getWeight())); backendServers.add(new LbTO.BackendServer(ipRef.getIpAddress(), ipRef.getWeight())); } if (!backendServers.isEmpty()) { serverGroups.add(serverGroup); 
} } } to.setNicIps(ips.stream().sorted().collect(Collectors.toList())); to.setPublicNic(publicNic.get().getMac()); to.setServerGroups(serverGroups); params.addAll(CollectionUtils.transformToList(struct.getTags().get(l.getUuid()), new Function<String, String>() { // vnicUuid::weight @Override public String call(String arg) { if(LoadBalancerSystemTags.BALANCER_WEIGHT.isMatch(arg)) { /* 4.0 lb server ip weight configuration from nicRefVO and serverIpVO,not systemTag */ return null; } return arg; } })); to.setRedirectRules(makeRedirectAcl(l, to)); params.addAll(makeAcl(l)); to.setParameters(params); return to; } }); } private List<String> getCertificates(List<LoadBalancerStruct> structs) { List<String> certificateUuids = new ArrayList<>(); for (LoadBalancerStruct struct : structs) { for (LoadBalancerListenerInventory listenerInv : struct.getListeners()) { if (listenerInv.getCertificateRefs() == null || listenerInv.getCertificateRefs().isEmpty()) { continue; } List<LoadBalancerServerGroupInventory> serverGroups = struct.getListenerServerGroupMap().get(listenerInv.getUuid()); if (serverGroups == null) { continue; } List<String> nics = new ArrayList<>(); List<String> serverIps = new ArrayList<>(); for (LoadBalancerServerGroupInventory group : serverGroups) { nics.addAll(group.getVmNicRefs().stream().map(LoadBalancerServerGroupVmNicRefInventory::getVmNicUuid).collect(Collectors.toList())); serverIps.addAll(group.getServerIps().stream().map(LoadBalancerServerGroupServerIpInventory::getIpAddress).collect(Collectors.toList())); } if (nics.isEmpty() && serverIps.isEmpty()) { continue; } if (!certificateUuids.contains(listenerInv.getCertificateRefs().get(0).getCertificateUuid())) { certificateUuids.add(listenerInv.getCertificateRefs().get(0).getCertificateUuid()); } } } return certificateUuids; } private void refreshCertificate(VirtualRouterVmInventory vr, boolean checkVrState, List<LoadBalancerStruct> struct, final Completion completion){ List<String> certificateUuids = 
getCertificates(struct); List<ErrorCode> errors = new ArrayList<>(); new While<>(certificateUuids).each((uuid, wcmpl) -> { VirtualRouterAsyncHttpCallMsg msg = new VirtualRouterAsyncHttpCallMsg(); msg.setVmInstanceUuid(vr.getUuid()); msg.setPath(CREATE_CERTIFICATE_PATH); msg.setCheckStatus(checkVrState); CertificateCmd cmd = new CertificateCmd(); CertificateVO vo = dbf.findByUuid(uuid, CertificateVO.class); cmd.setUuid(uuid); cmd.setCertificate(vo.getCertificate()); msg.setCommand(cmd); bus.makeTargetServiceIdByResourceUuid(msg, VmInstanceConstant.SERVICE_ID, vr.getUuid()); bus.send(msg, new CloudBusCallBack(wcmpl) { @Override public void run(MessageReply reply) { if (reply.isSuccess()) { CertificateRsp rsp = ((VirtualRouterAsyncHttpCallReply) reply).toResponse(CertificateRsp.class); if (rsp.isSuccess()) { wcmpl.done(); } else { errors.add(operr("operation error, because:%s", rsp.getError())); wcmpl.allDone(); } } else { errors.add(reply.getError()); wcmpl.allDone(); } } }); }).run(new WhileDoneCompletion(completion) { @Override public void done(ErrorCodeList errorCodeList) { if (errors.isEmpty()) { completion.success(); } else { completion.fail(errors.get(0)); } } }); } private void rollbackCertificate(VirtualRouterVmInventory vr, boolean checkVrState, List<LoadBalancerStruct> struct, final NoErrorCompletion completion){ List<String> certificateUuids = getCertificates(struct); new While<>(certificateUuids).each((uuid, wcmpl) -> { VirtualRouterAsyncHttpCallMsg msg = new VirtualRouterAsyncHttpCallMsg(); msg.setVmInstanceUuid(vr.getUuid()); msg.setPath(DELETE_CERTIFICATE_PATH); msg.setCheckStatus(checkVrState); CertificateCmd cmd = new CertificateCmd(); cmd.setUuid(uuid); msg.setCommand(cmd); bus.makeTargetServiceIdByResourceUuid(msg, VmInstanceConstant.SERVICE_ID, vr.getUuid()); bus.send(msg, new CloudBusCallBack(wcmpl) { @Override public void run(MessageReply reply) { wcmpl.done(); } }); }).run(new WhileDoneCompletion(completion) { @Override public void 
done(ErrorCodeList errorCodeList) { completion.done(); } }); } private void refreshLbToVirtualRouter(VirtualRouterVmInventory vr, LoadBalancerStruct struct, Completion completion) { VirtualRouterAsyncHttpCallMsg msg = new VirtualRouterAsyncHttpCallMsg(); msg.setVmInstanceUuid(vr.getUuid()); msg.setPath(REFRESH_LB_PATH); msg.setCheckStatus(true); RefreshLbCmd cmd = new RefreshLbCmd(); cmd.lbs = makeLbTOs(struct, vr); if (cmd.lbs.isEmpty()) { completion.success(); return; } msg.setCommand(cmd); bus.makeTargetServiceIdByResourceUuid(msg, VmInstanceConstant.SERVICE_ID, vr.getUuid()); bus.send(msg, new CloudBusCallBack(completion) { @Override public void run(MessageReply reply) { if (reply.isSuccess()) { RefreshLbRsp rsp = ((VirtualRouterAsyncHttpCallReply) reply).toResponse(RefreshLbRsp.class); if (rsp.isSuccess()) { new VirtualRouterRoleManager().makeLoadBalancerRole(vr.getUuid()); completion.success(); } else { completion.fail(operr("operation error, because:%s", rsp.getError())); } } else { completion.fail(reply.getError()); } } }); } public void refresh(VirtualRouterVmInventory vr, LoadBalancerStruct struct, final Completion completion) { FlowChain chain = FlowChainBuilder.newShareFlowChain(); chain.setName("refresh-lb-to-virtualRouter"); chain.then(new ShareFlow() { @Override public void setup() { flow(new Flow() { String __name__ = "refresh-lb-ceriticae-to-virtualRouter"; @Override public void run(FlowTrigger trigger, Map data) { refreshCertificate(vr, true, Collections.singletonList(struct), new Completion(trigger) { @Override public void success() { refreshCertificateOnHaRouter(vr.getUuid(), Collections.singletonList(struct), new Completion(trigger) { @Override public void success() { trigger.next(); } @Override public void fail(ErrorCode errorCode) { trigger.fail(errorCode); } }); } @Override public void fail(ErrorCode errorCode) { trigger.fail(errorCode); } }); } @Override public void rollback(FlowRollback trigger, Map data) { rollbackCertificate(vr, true, 
Collections.singletonList(struct), new NoErrorCompletion(trigger) { @Override public void done() { rollbackCertificateOnHaRouter(vr.getUuid(), Collections.singletonList(struct), new Completion(trigger) { @Override public void success() { trigger.rollback(); } @Override public void fail(ErrorCode errorCode) { trigger.rollback(); } }); } }); } }); flow(new NoRollbackFlow() { String __name__ = "refresh-lb-listener-to-virtualRouter"; @Override public void run(FlowTrigger trigger, Map data) { refreshLbToVirtualRouter(vr, struct, new Completion(trigger) { @Override public void success() { refreshLbToVirtualRouterHa(vr, struct, new Completion(trigger) { @Override public void success() { trigger.next(); } @Override public void fail(ErrorCode errorCode) { trigger.fail(errorCode); } }); } @Override public void fail(ErrorCode errorCode) { trigger.fail(errorCode); } }); } }); done(new FlowDoneHandler(completion) { @Override public void handle(Map data) { completion.success(); } }); error(new FlowErrorHandler(completion) { @Override public void handle(ErrorCode errCode, Map data) { completion.fail(errCode); } }); } }).start(); } private void stopVip(final LoadBalancerStruct struct, final List<VmNicInventory> nics, final Completion completion) { LoadBalancerFactory f = lbMgr.getLoadBalancerFactory(struct.getLb().getType()); ModifyVipAttributesStruct vipStruct = new ModifyVipAttributesStruct(); vipStruct.setUseFor(f.getNetworkServiceType()); vipStruct.setServiceUuid(struct.getLb().getUuid()); Set<String> guestL3NetworkUuids = nics.stream() .map(VmNicInventory::getL3NetworkUuid) .collect(Collectors.toSet()); /*remove the l3networks still attached*/ Set<String> vnicUuidsAttached = new HashSet<>(); for (LoadBalancerListenerInventory listener : struct.getListeners()) { for (LoadBalancerServerGroupInventory group : struct.getListenerServerGroupMap().get(listener.getUuid())) { 
vnicUuidsAttached.addAll(group.getVmNicRefs().stream().map(LoadBalancerServerGroupVmNicRefInventory::getVmNicUuid).collect(Collectors.toList())); } } if (!vnicUuidsAttached.isEmpty()) { List<String> l3Uuids = Q.New(VmNicVO.class).select(VmNicVO_.l3NetworkUuid).in(VmNicVO_.uuid, vnicUuidsAttached).listValues(); if (l3Uuids != null && !l3Uuids.isEmpty()) { guestL3NetworkUuids.removeAll(l3Uuids); } } if (guestL3NetworkUuids.isEmpty()) { completion.success(); return; } vipStruct.setPeerL3NetworkUuids(new ArrayList<>(guestL3NetworkUuids)); vipStruct.setServiceProvider(getLoadLancerServiceProvider(vipStruct.getPeerL3NetworkUuids())); Vip v = new Vip(struct.getLb().getVipUuid()); v.setStruct(vipStruct); v.stop(completion); } private void acquireVip(final VirtualRouterVmInventory vr, final LoadBalancerStruct struct, final List<VmNicInventory> nics, final Completion completion) { LoadBalancerVO loadBalancerVO = dbf.findByUuid(struct.getLb().getUuid(), LoadBalancerVO.class); LoadBalancerFactory f = lbMgr.getLoadBalancerFactory(loadBalancerVO.getType().toString()); ModifyVipAttributesStruct vipStruct = new ModifyVipAttributesStruct(); vipStruct.setUseFor(f.getNetworkServiceType()); vipStruct.setServiceUuid(struct.getLb().getUuid()); Set<String> guestL3NetworkUuids = nics.stream() .map(VmNicInventory::getL3NetworkUuid) .collect(Collectors.toSet()); vipStruct.setPeerL3NetworkUuids(new ArrayList<>(guestL3NetworkUuids)); vipStruct.setServiceProvider(getLoadLancerServiceProvider(vipStruct.getPeerL3NetworkUuids())); Vip v = new Vip(struct.getLb().getVipUuid()); v.setStruct(vipStruct); v.acquire(new Completion(completion) { @Override public void success() { completion.success(); } @Override public void fail(ErrorCode errorCode) { completion.fail(errorCode); } }); } private void startVrIfNeededAndRefresh(final VirtualRouterVmInventory vr, final LoadBalancerStruct struct, List<VmNicInventory> nics, final Completion completion) { acquireVip(vr, struct, nics, new Completion(completion) 
{ @Override public void success() { startVrIfNeededAndRefresh(vr, struct, completion); } @Override public void fail(ErrorCode errorCode) { completion.fail(errorCode); } }); } private void startVrIfNeededAndRefresh(final VirtualRouterVmInventory vr, final LoadBalancerStruct struct, final Completion completion) { if (!VmInstanceState.Stopped.toString().equals(vr.getState())) { refresh(vr, struct, completion); return; } final VipInventory vip = VipInventory.valueOf(dbf.findByUuid(struct.getLb().getVipUuid(), VipVO.class)); LoadBalancerVO loadBalancerVO = dbf.findByUuid(struct.getLb().getUuid(), LoadBalancerVO.class); LoadBalancerFactory f = lbMgr.getLoadBalancerFactory(loadBalancerVO.getType().toString()); final FlowChain chain = FlowChainBuilder.newShareFlowChain(); chain.setName(String.format("start-vr-%s-and-refresh-lb-%s", vr.getUuid(), struct.getLb().getUuid())); chain.then(new ShareFlow() { @Override public void setup() { flow(new NoRollbackFlow() { String __name__ = "start-vr"; @Override public void run(final FlowTrigger trigger, Map data) { StartVmInstanceMsg msg = new StartVmInstanceMsg(); msg.setVmInstanceUuid(vr.getUuid()); bus.makeTargetServiceIdByResourceUuid(msg, VmInstanceConstant.SERVICE_ID, vr.getUuid()); bus.send(msg, new CloudBusCallBack(trigger) { @Override public void run(MessageReply reply) { if (!reply.isSuccess()) { trigger.fail(reply.getError()); } else { trigger.next(); } } }); } }); flow(new Flow() { String __name__ = "create-vip-on-vr"; boolean success = false; @Override public void run(final FlowTrigger trigger, Map data) { ModifyVipAttributesStruct vipStruct = new ModifyVipAttributesStruct(); vipStruct.setUseFor(f.getNetworkServiceType()); vipStruct.setServiceUuid(struct.getLb().getUuid()); vipStruct.setServiceProvider(getLoadLancerServiceProvider(vr.getGuestL3Networks())); vipStruct.setPeerL3NetworkUuids(vr.getGuestL3Networks()); Vip v = new Vip(struct.getLb().getVipUuid()); v.setStruct(vipStruct); v.acquire(new Completion(trigger) { 
@Override public void success() { success = true; trigger.next(); } @Override public void fail(ErrorCode errorCode) { trigger.fail(errorCode); } }); } @Override public void rollback(final FlowRollback trigger, Map data) { if (!success) { trigger.rollback(); return; } ModifyVipAttributesStruct vipStruct = new ModifyVipAttributesStruct(); vipStruct.setUseFor(f.getNetworkServiceType()); vipStruct.setServiceUuid(struct.getLb().getUuid()); vipStruct.setPeerL3NetworkUuids(vr.getGuestL3Networks()); vipStruct.setServiceProvider(getLoadLancerServiceProvider(vr.getGuestL3Networks())); Vip v = new Vip(vip.getUuid()); v.setStruct(vipStruct); v.stop(new Completion(trigger) { @Override public void success() { trigger.rollback(); } @Override public void fail(ErrorCode errorCode) { trigger.rollback(); } }); } }); flow(new NoRollbackFlow() { String __name__ = "refresh-lb"; @Override public void run(final FlowTrigger trigger, Map data) { refresh(vr, struct, new Completion(trigger) { @Override public void success() { trigger.next(); } @Override public void fail(ErrorCode errorCode) { trigger.fail(errorCode); } }); } }); done(new FlowDoneHandler(completion) { @Override public void handle(Map data) { completion.success(); } }); error(new FlowErrorHandler(completion) { @Override public void handle(ErrorCode errCode, Map data) { completion.fail(errCode); } }); } }).start(); } @Override public void addVmNics(final LoadBalancerStruct struct, List<VmNicInventory> nics, final Completion completion) { if (struct.getLb().getType().equals(LoadBalancerType.Shared.toString()) && nics.isEmpty()) { completion.fail(operr("vmnic must be specified for share loadbalancer")); return; } VirtualRouterVmInventory vr = findVirtualRouterVm(struct.getLb().getUuid(), nics.stream().map(VmNicInventory::getUuid).collect(Collectors.toList())); if (vr != null) { startVrIfNeededAndRefresh(vr, struct, nics, completion); return; } L3NetworkInventory nicL3 = null; if (!nics.isEmpty()) { nicL3 = 
L3NetworkInventory.valueOf(dbf.findByUuid(nics.get(0).getL3NetworkUuid(), L3NetworkVO.class)); } final L3NetworkInventory l3 = nicL3; final VipInventory vip = VipInventory.valueOf(dbf.findByUuid(struct.getLb().getVipUuid(), VipVO.class)); List<String> useFor = Q.New(VipNetworkServicesRefVO.class).select(VipNetworkServicesRefVO_.serviceType).eq(VipNetworkServicesRefVO_.vipUuid, struct.getLb().getVipUuid()).listValues(); VipUseForList useForList = new VipUseForList(useFor); if (!useForList.isIncluded(LoadBalancerConstants.LB_NETWORK_SERVICE_TYPE_STRING)) { logger.warn(String.format("the vip[uuid:%s, name:%s, ip:%s, useFor: %s] is not for load balancer", vip.getUuid(), vip.getName(), vip.getIp(), vip.getUseFor())); } final boolean separateVr = LoadBalancerSystemTags.SEPARATE_VR.hasTag(struct.getLb().getUuid()); LoadBalancerVO loadBalancerVO = dbf.findByUuid(struct.getLb().getUuid(), LoadBalancerVO.class); LoadBalancerFactory f = lbMgr.getLoadBalancerFactory(loadBalancerVO.getType().toString()); FlowChain chain = FlowChainBuilder.newShareFlowChain(); chain.setName(String.format("add-nic-to-vr-lb-%s", struct.getLb().getUuid())); chain.then(new ShareFlow() { VirtualRouterVmInventory vr; @Override public void setup() { if (separateVr) { flow(new Flow() { String __name__ = "lock-vip"; @Override public boolean skip(Map data) { return nics.isEmpty(); } /* now the vip support multi services and it doesn't need to lock vip that will be locked by itself in vip module via to VipNetworkServicesRefVO * */ @Override public void run(FlowTrigger trigger, Map data) { ModifyVipAttributesStruct vipStruct = new ModifyVipAttributesStruct(); vipStruct.setUseFor(f.getNetworkServiceType()); vipStruct.setServiceUuid(struct.getLb().getUuid()); Set<String> guestL3NetworkUuids = nics.stream() .map(VmNicInventory::getL3NetworkUuid) .collect(Collectors.toSet()); vipStruct.setPeerL3NetworkUuids(new ArrayList<>(guestL3NetworkUuids)); Vip v = new Vip(vip.getUuid()); v.setStruct(vipStruct); 
v.acquire(new Completion(trigger) {
    @Override
    public void success() {
        trigger.next();
    }

    @Override
    public void fail(ErrorCode errorCode) {
        trigger.fail(errorCode);
    }
});
}

@Override
public void rollback(FlowRollback trigger, Map data) {
    // Roll back the vip acquisition: release the vip from the guest L3 networks of the
    // nics being added, unless those networks are still referenced by attached nics.
    List<String> attachedVmNicUuids = struct.getActiveVmNics();
    Set<String> guestL3NetworkUuids = nics.stream()
            .map(VmNicInventory::getL3NetworkUuid)
            .collect(Collectors.toSet());
    // NOTE(review): removeAll() is fed VM NIC uuids, but the set holds L3 NETWORK uuids;
    // the two uuid spaces never intersect, so this very likely removes nothing. It looks
    // like the attached nic uuids should first be mapped to their L3 network uuids
    // (compare the lookup done in stopVip()). TODO confirm intended semantics.
    guestL3NetworkUuids.removeAll(attachedVmNicUuids);
    if (guestL3NetworkUuids.isEmpty()) {
        logger.debug(String.format("there are vmnics[uuids:%s] attached on loadbalancer[uuid:%s], " +
                "wont release vip[uuid: %s]", attachedVmNicUuids, struct.getLb().getUuid(), vip.getUuid()));
        trigger.rollback();
        return;
    }
    ModifyVipAttributesStruct vipStruct = new ModifyVipAttributesStruct();
    vipStruct.setUseFor(f.getNetworkServiceType());
    vipStruct.setServiceUuid(struct.getLb().getUuid());
    vipStruct.setPeerL3NetworkUuids(new ArrayList<>(guestL3NetworkUuids));
    vipStruct.setServiceProvider(getLoadLancerServiceProvider(vipStruct.getPeerL3NetworkUuids()));
    Vip v = new Vip(vip.getUuid());
    v.setStruct(vipStruct);
    v.stop(new Completion(trigger) {
        @Override
        public void success() {
            trigger.rollback();
        }

        @Override
        public void fail(ErrorCode errorCode) {
            // best-effort release: the rollback continues even if the vip cannot be released
            logger.warn(String.format("failed to release vip[uuid:%s, ip:%s] on vr[uuid:%s], continue to rollback",
                    vip.getUuid(), vip.getIp(), vr.getUuid()));
            trigger.rollback();
        }
    });
}
});

flow(new Flow() {
    String __name__ = "create-separate-vr";

    @Override
    public void run(final FlowTrigger trigger, Map data) {
        // Create a brand-new virtual router dedicated to the load balancer role.
        VirtualRouterStruct s = new VirtualRouterStruct();
        s.setInherentSystemTags(list(VirtualRouterSystemTags.DEDICATED_ROLE_VR.getTagFormat(), VirtualRouterSystemTags.VR_LB_ROLE.getTagFormat()));
        s.setVirtualRouterVmSelector(new VirtualRouterVmSelector() {
            @Override
            public VirtualRouterVmVO select(List<VirtualRouterVmVO> vrs) {
                // returning null forces creation of a new vr instead of reusing one
                return null;
            }
        });
        s.setL3Network(l3);
        s.setNotGatewayForGuestL3Network(true);
        acquireVirtualRouterVm(s, new
ReturnValueCompletion<VirtualRouterVmInventory>(trigger) { @Override public void success(VirtualRouterVmInventory returnValue) { vr = returnValue; new VirtualRouterRoleManager().makeLoadBalancerRole(vr.getUuid()); trigger.next(); } @Override public void fail(ErrorCode errorCode) { trigger.fail(errorCode); } }); } @Override public void rollback(final FlowRollback trigger, Map data) { if (vr == null) { trigger.rollback(); return; } DestroyVmInstanceMsg msg = new DestroyVmInstanceMsg(); msg.setVmInstanceUuid(vr.getUuid()); bus.makeTargetServiceIdByResourceUuid(msg, VmInstanceConstant.SERVICE_ID, vr.getUuid()); bus.send(msg, new CloudBusCallBack(trigger) { @Override public void run(MessageReply reply) { if (!reply.isSuccess()) { //TODO: logger.warn(String.format("failed to destroy vr[uuid:%s], %s. Need a cleanup", vr.getUuid(), reply.getError())); } trigger.rollback(); } }); } }); flow(new Flow() { String __name__ = "create-vip-on-vr"; boolean success = false; @Override public void run(final FlowTrigger trigger, Map data) { vipVrBkd.acquireVipOnVirtualRouterVm(vr, vip, new Completion(trigger) { @Override public void success() { success = true; trigger.next(); } @Override public void fail(ErrorCode errorCode) { trigger.fail(errorCode); } }); } @Override public void rollback(final FlowRollback trigger, Map data) { if (!success) { trigger.rollback(); return; } vipVrBkd.releaseVipOnVirtualRouterVm(vr, vip, new Completion(trigger) { @Override public void success() { trigger.rollback(); } @Override public void fail(ErrorCode errorCode) { logger.warn(String.format("failed to release vip[uuid:%s, ip:%s] on vr[uuid:%s], continue to rollback", vip.getUuid(), vip.getIp(), vr.getUuid())); trigger.rollback(); } }); } }); } else { flow(new NoRollbackFlow() { String __name__ = "acquire-vr"; @Override public void run(final FlowTrigger trigger, Map data) { VirtualRouterStruct s = new VirtualRouterStruct(l3); s.setLoadBalancerUuid(struct.getLb().getUuid()); acquireVirtualRouterVm(s, new 
ReturnValueCompletion<VirtualRouterVmInventory>(trigger) {
    @Override
    public void success(VirtualRouterVmInventory returnValue) {
        vr = returnValue;
        // tag the acquired vr as a load balancer provider
        new VirtualRouterRoleManager().makeLoadBalancerRole(vr.getUuid());
        trigger.next();
    }

    @Override
    public void fail(ErrorCode errorCode) {
        trigger.fail(errorCode);
    }
});
}
});

flow(new Flow() {
    String __name__ = "acquire-vip";

    // set only after the vip is successfully acquired, so rollback can skip otherwise
    boolean success = false;

    @Override
    public boolean skip(Map data) {
        return nics.isEmpty();
    }

    @Override
    public void run(final FlowTrigger trigger, Map data) {
        ModifyVipAttributesStruct vipStruct = new ModifyVipAttributesStruct();
        vipStruct.setUseFor(f.getNetworkServiceType());
        vipStruct.setServiceUuid(struct.getLb().getUuid());
        // peer the vip with every guest L3 network the new nics live on
        Set<String> guestL3NetworkUuids = nics.stream()
                .map(VmNicInventory::getL3NetworkUuid)
                .collect(Collectors.toSet());
        vipStruct.setPeerL3NetworkUuids(new ArrayList<>(guestL3NetworkUuids));
        vipStruct.setServiceProvider(getLoadLancerServiceProvider(vipStruct.getPeerL3NetworkUuids()));
        Vip v = new Vip(vip.getUuid());
        v.setStruct(vipStruct);
        v.acquire(new Completion(trigger) {
            @Override
            public void success() {
                success = true;
                trigger.next();
            }

            @Override
            public void fail(ErrorCode errorCode) {
                trigger.fail(errorCode);
            }
        });
    }

    @Override
    public void rollback(final FlowRollback trigger, Map data) {
        if (!success) {
            trigger.rollback();
            return;
        }
        List<String> attachedVmNicUuids = struct.getActiveVmNics();
        Set<String> guestL3NetworkUuids = nics.stream()
                .map(VmNicInventory::getL3NetworkUuid)
                .collect(Collectors.toSet());
        // NOTE(review): removeAll() receives VM NIC uuids while the set holds L3 NETWORK
        // uuids -- the two never intersect, so nothing is removed here. Most likely the
        // attached nic uuids should be translated to their L3 network uuids first
        // (cf. stopVip()). TODO confirm before changing behavior.
        guestL3NetworkUuids.removeAll(attachedVmNicUuids);
        if (guestL3NetworkUuids.isEmpty()) {
            logger.debug(String.format("there are vmnics[uuids:%s] attached on loadbalancer[uuid:%s], " +
                    "wont release vip[uuid: %s]", attachedVmNicUuids, struct.getLb().getUuid(), vip.getUuid()));
            trigger.rollback();
            return;
        }
        ModifyVipAttributesStruct vipStruct = new ModifyVipAttributesStruct();
        vipStruct.setUseFor(f.getNetworkServiceType());
        vipStruct.setServiceUuid(struct.getLb().getUuid());
vipStruct.setPeerL3NetworkUuids(new ArrayList<>(guestL3NetworkUuids)); vipStruct.setServiceProvider(getLoadLancerServiceProvider(vipStruct.getPeerL3NetworkUuids())); Vip v = new Vip(vip.getUuid()); v.setStruct(vipStruct); v.stop(new Completion(trigger) { @Override public void success() { trigger.rollback(); } @Override public void fail(ErrorCode errorCode) { logger.warn(String.format("failed to release vip[uuid:%s, ip:%s] on vr[uuid:%s], continue to rollback", vip.getUuid(), vip.getIp(), vr.getUuid())); trigger.rollback(); } }); } }); } flow(new NoRollbackFlow() { String __name__ = "refresh-lb-on-vr"; @Override public void run(final FlowTrigger trigger, Map data) { refresh(vr, struct, new Completion(trigger) { @Override public void success() { trigger.next(); } @Override public void fail(ErrorCode errorCode) { trigger.fail(errorCode); } }); } }); done(new FlowDoneHandler(completion) { @Override public void handle(Map data) { proxy.attachNetworkService(vr.getUuid(), LoadBalancerVO.class.getSimpleName(), asList(struct.getLb().getUuid())); completion.success(); } }); error(new FlowErrorHandler(completion) { @Override public void handle(ErrorCode errCode, Map data) { completion.fail(errCode); } }); } }).start(); } @Override public void addVmNic(final LoadBalancerStruct struct, VmNicInventory nic, final Completion completion) { addVmNics(struct, list(nic), completion); } @Override public void removeVmNic(LoadBalancerStruct struct, VmNicInventory nic, Completion completion) { removeVmNics(struct, list(nic), completion); } @Override public void removeVmNics(LoadBalancerStruct struct, List<VmNicInventory> nics, Completion completion) { VirtualRouterVmInventory vr = findVirtualRouterVm(struct.getLb().getUuid()); final boolean separateVr = LoadBalancerSystemTags.SEPARATE_VR.hasTag(struct.getLb().getUuid()); if ( separateVr ) { logger.error("not support the separate vrouter currently."); // no support the case completion.success(); return; } if (vr == null) { // the vr has 
been destroyed, it just need modify the Vip if (!nics.isEmpty()) { stopVip(struct, nics, completion); } return; } FlowChain chain = FlowChainBuilder.newShareFlowChain(); chain.setName(String.format("remove-nic-from-vr-lb-%s", struct.getLb().getUuid())); chain.then(new ShareFlow() { //VirtualRouterVmInventory vr; @Override public void setup() { flow(new NoRollbackFlow() { String __name__ = "refresh-lb-on-vr"; @Override public void run(final FlowTrigger trigger, Map data) { if (VmInstanceState.Stopped.toString().equals(vr.getState())) { // no need to remove as the vr is stopped trigger.next(); return; } refresh(vr, struct, new Completion(trigger) { @Override public void success() { trigger.next(); } @Override public void fail(ErrorCode errorCode) { trigger.fail(errorCode); } }); } }); flow(new NoRollbackFlow() { String __name__ = "remove-l3network-from-vip"; @Override public void run(FlowTrigger trigger, Map data) { if (nics.isEmpty()) { trigger.next(); return; } stopVip(struct, nics, new Completion(trigger) { @Override public void success() { trigger.next(); } @Override public void fail(ErrorCode errorCode) { trigger.fail(errorCode); } }); } }); done(new FlowDoneHandler(completion) { @Override public void handle(Map data) { if (struct.getListeners().isEmpty() || struct.getAllVmNics().isEmpty()) { proxy.detachNetworkService(vr.getUuid(), LoadBalancerVO.class.getSimpleName(), asList(struct.getLb().getUuid())); } completion.success(); } }); error(new FlowErrorHandler(completion) { @Override public void handle(ErrorCode errCode, Map data) { completion.fail(errCode); } }); } }).start(); } @Override public void addListener(LoadBalancerStruct struct, LoadBalancerListenerInventory listener, Completion completion) { VirtualRouterVmInventory vr = findVirtualRouterVm(struct.getLb().getUuid()); if (vr == null) { throw new OperationFailureException(operr("cannot find virtual router for load balancer [uuid:%s]", struct.getLb().getUuid())); } startVrIfNeededAndRefresh(vr, struct, 
completion); } @Override public void removeListener(final LoadBalancerStruct struct, LoadBalancerListenerInventory listener, Completion completion) { VirtualRouterVmInventory vr = findVirtualRouterVm(struct.getLb().getUuid()); if (vr == null) { // the vr has been destroyed completion.success(); return; } final boolean separateVr = LoadBalancerSystemTags.SEPARATE_VR.hasTag(struct.getLb().getUuid()); if ( separateVr ) { logger.error("not support the separate vrouter currently."); completion.success(); return; } LoadBalancerVO loadBalancerVO = dbf.findByUuid(struct.getLb().getUuid(), LoadBalancerVO.class); LoadBalancerFactory f = lbMgr.getLoadBalancerFactory(loadBalancerVO.getType().toString()); FlowChain chain = FlowChainBuilder.newShareFlowChain(); chain.setName(String.format("remove-Listener-from-vr-lb-%s", struct.getLb().getUuid())); chain.then(new ShareFlow() { @Override public void setup() { flow(new NoRollbackFlow() { String __name__ = "refresh-lb-on-vr"; @Override public void run(final FlowTrigger trigger, Map data) { if (VmInstanceState.Stopped.toString().equals(vr.getState())) { trigger.next(); return; } refresh(vr, struct, new Completion(trigger) { @Override public void success() { trigger.next(); } @Override public void fail(ErrorCode errorCode) { trigger.fail(errorCode); } }); } }); flow(new NoRollbackFlow() { String __name__ = "remove-l3networks-from-vip"; @Override public void run(FlowTrigger trigger, Map data) { ModifyVipAttributesStruct vipStruct = new ModifyVipAttributesStruct(); vipStruct.setUseFor(f.getNetworkServiceType()); vipStruct.setServiceUuid(struct.getLb().getUuid()); List<String> nicUuids = struct.getAllVmNicsOfListener(listener); if (nicUuids.isEmpty()) { trigger.next(); return; } List<String> guestL3NetworkUuids = Q.New(VmNicVO.class).select(VmNicVO_.l3NetworkUuid).in(VmNicVO_.uuid, nicUuids).listValues(); /*remove the l3networks still attached*/ List<String> vnicUuidsAttached = struct.getAllVmNics(); if (!vnicUuidsAttached.isEmpty()) { 
List<String> l3Uuids = Q.New(VmNicVO.class).select(VmNicVO_.l3NetworkUuid).in(VmNicVO_.uuid, vnicUuidsAttached).listValues(); if (l3Uuids != null && !l3Uuids.isEmpty()) { guestL3NetworkUuids.removeAll(l3Uuids); } } if (guestL3NetworkUuids.isEmpty()) { trigger.next(); return; } vipStruct.setPeerL3NetworkUuids(new ArrayList<>(guestL3NetworkUuids)); vipStruct.setServiceProvider(getLoadLancerServiceProvider(vipStruct.getPeerL3NetworkUuids())); Vip v = new Vip(struct.getLb().getVipUuid()); v.setStruct(vipStruct); v.stop(new Completion(trigger) { @Override public void success() { trigger.next(); } @Override public void fail(ErrorCode errorCode) { trigger.fail(errorCode); } }); } }); done(new FlowDoneHandler(completion) { @Override public void handle(Map data) { if (struct.getListeners().isEmpty() || struct.getAllVmNics().isEmpty()) { proxy.detachNetworkService(vr.getUuid(), LoadBalancerVO.class.getSimpleName(), asList(struct.getLb().getUuid())); } completion.success(); } }); error(new FlowErrorHandler(completion) { @Override public void handle(ErrorCode errCode, Map data) { completion.fail(errCode); } }); } }).start(); } @Override public void destroyLoadBalancer(final LoadBalancerStruct struct, final Completion completion) { FlowChain chain = FlowChainBuilder.newShareFlowChain(); chain.setName(String.format("delete-lb-%s-from-vr", struct.getLb().getUuid())); chain.then(new ShareFlow() { @Override public void setup() { flow(new NoRollbackFlow() { String __name__ = "delete-from-vr"; @Override public void run(final FlowTrigger trigger, Map data) { VirtualRouterVmInventory vr = findVirtualRouterVm(struct.getLb().getUuid()); if (vr == null) { // the vr has been destroyed trigger.next(); return; } List<String> roles = new VirtualRouterRoleManager().getAllRoles(vr.getUuid()); if (roles.size() == 1 && roles.contains(VirtualRouterSystemTags.VR_LB_ROLE.getTagFormat())) { DestroyVmInstanceMsg msg = new DestroyVmInstanceMsg(); msg.setVmInstanceUuid(vr.getUuid()); 
bus.makeTargetServiceIdByResourceUuid(msg, VmInstanceConstant.SERVICE_ID, vr.getUuid()); bus.send(msg, new CloudBusCallBack(trigger) { @Override public void run(MessageReply reply) { if (reply.isSuccess()) { trigger.next(); } else { trigger.fail(reply.getError()); } } }); } else if (roles.size() > 1 && roles.contains(VirtualRouterSystemTags.VR_LB_ROLE.getTagFormat())) { destroyLoadBalancerOnVirtualRouter(vr, struct, new Completion(trigger) { @Override public void success() { destroyLoadBalancerOnHaRouter(vr.getUuid(), struct, new Completion(trigger) { @Override public void success() { proxy.detachNetworkService(vr.getUuid(), LoadBalancerVO.class.getSimpleName(), asList(struct.getLb().getUuid())); trigger.next(); } @Override public void fail(ErrorCode errorCode) { trigger.fail(errorCode); } }); } @Override public void fail(ErrorCode errorCode) { trigger.fail(errorCode); } }); } else { throw new CloudRuntimeException(String.format("wrong virtual router roles%s. it doesn't have the role[%s]", roles, VirtualRouterSystemTags.VR_LB_ROLE.getTagFormat())); } } }); done(new FlowDoneHandler(completion) { @Override public void handle(Map data) { completion.success(); } }); error(new FlowErrorHandler(completion) { @Override public void handle(ErrorCode errCode, Map data) { completion.fail(errCode); } }); } }).start(); } @Override public void refresh(LoadBalancerStruct struct, Completion completion) { VirtualRouterVmInventory vr = findVirtualRouterVm(struct.getLb().getUuid()); if (vr == null) { // the vr has been destroyed completion.success(); return; } startVrIfNeededAndRefresh(vr, struct, completion); } /* this api is called from VirtualRouterSyncLbOnStartFlow which is specified to a individual router */ public void syncOnStart(VirtualRouterVmInventory vr, boolean checkStatus, List<LoadBalancerStruct> structs, final Completion completion) { FlowChain chain = FlowChainBuilder.newShareFlowChain(); chain.setName("lb-sync-on-Start"); chain.then(new ShareFlow() { @Override public 
void setup() { flow(new Flow() { String __name__ = "lb-sync-certificate-on-start"; @Override public void run(FlowTrigger trigger, Map data) { refreshCertificate(vr, checkStatus, structs, new Completion(trigger) { @Override public void success() { trigger.next(); } @Override public void fail(ErrorCode errorCode) { trigger.fail(errorCode); } }); } @Override public void rollback(FlowRollback trigger, Map data) { rollbackCertificate(vr, false, structs, new NoErrorCompletion(trigger) { @Override public void done() { trigger.rollback(); } }); } }); flow(new NoRollbackFlow() { String __name__ = "lb-sync-listener-on-start"; @Override public void run(FlowTrigger trigger, Map data) { List<LbTO> tos = new ArrayList<LbTO>(); for (LoadBalancerStruct s : structs) { tos.addAll(makeLbTOs(s, vr)); } RefreshLbCmd cmd = new RefreshLbCmd(); cmd.lbs = tos; VirtualRouterAsyncHttpCallMsg msg = new VirtualRouterAsyncHttpCallMsg(); msg.setCommand(cmd); msg.setPath(REFRESH_LB_PATH); msg.setVmInstanceUuid(vr.getUuid()); msg.setCheckStatus(checkStatus); bus.makeTargetServiceIdByResourceUuid(msg, VmInstanceConstant.SERVICE_ID, vr.getUuid()); bus.send(msg, new CloudBusCallBack(trigger) { @Override public void run(MessageReply reply) { if (reply.isSuccess()) { VirtualRouterAsyncHttpCallReply kr = reply.castReply(); RefreshLbRsp rsp = kr.toResponse(RefreshLbRsp.class); if (rsp.isSuccess()) { trigger.next(); } else { trigger.fail(operr("operation error, because:%s", rsp.getError())); } } else { trigger.fail(reply.getError()); } } }); } }); done(new FlowDoneHandler(completion) { @Override public void handle(Map data) { new VirtualRouterRoleManager().makeLoadBalancerRole(vr.getUuid()); completion.success(); } }); error(new FlowErrorHandler(completion) { @Override public void handle(ErrorCode errCode, Map data) { completion.fail(errCode); } }); } }).start(); } @Override public String getNetworkServiceProviderType() { return VirtualRouterConstant.VIRTUAL_ROUTER_PROVIDER_TYPE; } protected 
List<VirtualRouterVmVO> getAllVirtualRouters(String lbUuid) { List<String> vrUuids = proxy.getVrUuidsByNetworkService(LoadBalancerVO.class.getSimpleName(), lbUuid); if (vrUuids == null || vrUuids.isEmpty()) { return new ArrayList<>(); } return Q.New(VirtualRouterVmVO.class).in(VirtualRouterVmVO_.uuid, vrUuids).list(); } public void destroyLoadBalancerOnVirtualRouter(VirtualRouterVmInventory vr, LoadBalancerStruct struct, Completion completion) { DeleteLbCmd cmd = new DeleteLbCmd(); cmd.setLbs(makeLbTOs(struct, vr)); if (cmd.lbs.isEmpty()) { completion.success(); return; } VirtualRouterAsyncHttpCallMsg msg = new VirtualRouterAsyncHttpCallMsg(); msg.setVmInstanceUuid(vr.getUuid()); msg.setPath(DELETE_LB_PATH); msg.setCommand(cmd); bus.makeTargetServiceIdByResourceUuid(msg, VmInstanceConstant.SERVICE_ID, vr.getUuid()); bus.send(msg, new CloudBusCallBack(completion) { @Override public void run(MessageReply reply) { if (reply.isSuccess()) { DeleteLbRsp rsp = ((VirtualRouterAsyncHttpCallReply)reply).toResponse(DeleteLbRsp.class); if (rsp.isSuccess()) { completion.success(); } else { completion.fail(operr("operation error, because:%s", rsp.getError())); } } else { completion.fail(reply.getError()); } } }); } private boolean isVirtualRouterHaPair(List<String> vrUuids) { for (VirtualRouterHaGroupExtensionPoint ext : pluginRgty.getExtensionList(VirtualRouterHaGroupExtensionPoint.class)) { return ext.isVirtualRouterInSameHaPair(vrUuids); } return false; } private void refreshCertificateOnHaRouter(String vrUuid, List<LoadBalancerStruct> structs, Completion completion) { VirtualRouterHaTask task = new VirtualRouterHaTask(); task.setTaskName(REFRESH_CERTIFICATE_TASK); task.setOriginRouterUuid(vrUuid); task.setJsonData(JSONObjectUtil.toJsonString(structs)); haBackend.submitVirtualRouterHaTask(task, completion); } private void rollbackCertificateOnHaRouter(String vrUuid, List<LoadBalancerStruct> structs, Completion completion) { VirtualRouterHaTask task = new 
VirtualRouterHaTask(); task.setTaskName(DELETE_CERTIFICATE_TASK); task.setOriginRouterUuid(vrUuid); task.setJsonData(JSONObjectUtil.toJsonString(structs)); haBackend.submitVirtualRouterHaTask(task, completion); } protected void refreshLbToVirtualRouterHa(VirtualRouterVmInventory vrInv, LoadBalancerStruct struct, Completion completion) { VirtualRouterHaTask task = new VirtualRouterHaTask(); task.setTaskName(REFRESH_LB_TASK); task.setOriginRouterUuid(vrInv.getUuid()); task.setJsonData(JSONObjectUtil.toJsonString(struct)); haBackend.submitVirtualRouterHaTask(task, completion); } protected void destroyLoadBalancerOnHaRouter(String vrUuid, LoadBalancerStruct struct, Completion completion) { VirtualRouterHaTask task = new VirtualRouterHaTask(); task.setTaskName(DESTROY_LB_TASK); task.setOriginRouterUuid(vrUuid); task.setJsonData(JSONObjectUtil.toJsonString(struct)); haBackend.submitVirtualRouterHaTask(task, completion); } private List<LoadBalancerStruct> getLoadBalancersByL3Networks(String l3Uuid, boolean detach) { List<LoadBalancerStruct> ret = new ArrayList<>(); String sql = "select distinct l from LoadBalancerListenerVO l, LoadBalancerServerGroupVO grp, " + " LoadBalancerListenerServerGroupRefVO lgRef, VmNicVO nic, LoadBalancerServerGroupVmNicRefVO nicRef, " + " LoadBalancerVO lb where lb.type = :lbType and lb.uuid = l.loadBalancerUuid " + " and l.uuid = lgRef.listenerUuid and lgRef.serverGroupUuid = grp.uuid " + " and grp.uuid = nicRef.serverGroupUuid and nicRef.status in (:status) " + " and nicRef.vmNicUuid=nic.uuid and nic.l3NetworkUuid=(:l3Uuid)"; List<LoadBalancerListenerVO> listenerVOS = SQL.New(sql, LoadBalancerListenerVO.class).param("l3Uuid", l3Uuid) .param("lbType", LoadBalancerType.Shared) .param("status", asList(LoadBalancerVmNicStatus.Active, LoadBalancerVmNicStatus.Pending)).list(); if (listenerVOS == null || listenerVOS.isEmpty()){ return ret; } HashMap<String, List<LoadBalancerListenerVO>> listenerMap = new HashMap<>(); for (LoadBalancerListenerVO vo : 
listenerVOS) { listenerMap.computeIfAbsent(vo.getLoadBalancerUuid(), k-> new ArrayList<>()).add(vo); } for (Map.Entry<String, List<LoadBalancerListenerVO>> e : listenerMap.entrySet()) { LoadBalancerStruct struct = new LoadBalancerStruct(); LoadBalancerVO lb = dbf.findByUuid(e.getKey(), LoadBalancerVO.class); struct.setLb(LoadBalancerInventory.valueOf(lb)); struct.setVip(VipInventory.valueOf(dbf.findByUuid(lb.getVipUuid(), VipVO.class))); struct.setListenerServerGroupMap(new HashMap<>()); List<String> serverGroupUuids = new ArrayList<>(); for (LoadBalancerListenerVO listenerVO : e.getValue()) { List<String> uuids = listenerVO.getServerGroupRefs().stream().map(LoadBalancerListenerServerGroupRefVO::getServerGroupUuid).sorted().collect(Collectors.toList()); if (!uuids.isEmpty()) { List<LoadBalancerServerGroupVO> groupVOS = Q.New(LoadBalancerServerGroupVO.class) .in(LoadBalancerServerGroupVO_.uuid, uuids).list(); struct.getListenerServerGroupMap().put(listenerVO.getUuid(), LoadBalancerServerGroupInventory.valueOf(groupVOS)); serverGroupUuids.addAll(uuids); } } HashMap<String, VmNicInventory> nicMap = new HashMap<>(); if (!serverGroupUuids.isEmpty()) { sql = "select nic from LoadBalancerServerGroupVmNicRefVO ref, VmNicVO nic " + "where nic.uuid=ref.vmNicUuid and ref.serverGroupUuid in (:serverGroupUuids) and ref.status in (:status)"; List<VmNicVO> nicVOS = SQL.New(sql, VmNicVO.class).param("serverGroupUuids", serverGroupUuids) .param("status", asList(LoadBalancerVmNicStatus.Active, LoadBalancerVmNicStatus.Pending)).list(); if (nicVOS != null && !nicVOS.isEmpty()) { for (VmNicVO nic : nicVOS) { if (!detach) { nicMap.put(nic.getUuid(), VmNicInventory.valueOf(nic)); } else { /* when detach nic, vm nics of same l3 should not be included */ List<String> nicL3Uuids = nic.getUsedIps().stream().map(UsedIpVO::getL3NetworkUuid).collect(Collectors.toList()); if (!nicL3Uuids.contains(l3Uuid)) { nicMap.put(nic.getUuid(), VmNicInventory.valueOf(nic)); } } } } } Map<String, 
List<String>> systemTags = new HashMap<>(); for (LoadBalancerListenerVO l : listenerVOS) { SimpleQuery<SystemTagVO> q = dbf.createQuery(SystemTagVO.class); q.select(SystemTagVO_.tag); q.add(SystemTagVO_.resourceUuid, Op.EQ, l.getUuid()); q.add(SystemTagVO_.resourceType, Op.EQ, LoadBalancerListenerVO.class.getSimpleName()); systemTags.put(l.getUuid(), q.listValue()); } struct.setListeners(LoadBalancerListenerInventory.valueOf(e.getValue())); struct.setVmNics(nicMap); struct.setTags(systemTags); ret.add(struct); } return ret; } @Override public void afterAttachNic(VmNicInventory nic, Completion completion) { if (!VirtualRouterNicMetaData.GUEST_NIC_MASK_STRING_LIST.contains(nic.getMetaData())) { completion.success(); return; } if (VirtualRouterSystemTags.DEDICATED_ROLE_VR.hasTag(nic.getVmInstanceUuid())) { completion.success(); return; } try { nwServiceMgr.getTypeOfNetworkServiceProviderForService(nic.getL3NetworkUuid(), LoadBalancerConstants.LB_NETWORK_SERVICE_TYPE); } catch (OperationFailureException e) { completion.success(); return; } VirtualRouterVmVO vrVO = Q.New(VirtualRouterVmVO.class).eq(VirtualRouterVmVO_.uuid, nic.getVmInstanceUuid()).find(); DebugUtils.Assert(vrVO != null, String.format("can not find virtual router[uuid: %s] for nic[uuid: %s, ip: %s, l3NetworkUuid: %s]", nic.getVmInstanceUuid(), nic.getUuid(), nic.getIp(), nic.getL3NetworkUuid())); VirtualRouterVmInventory vr = VirtualRouterVmInventory.valueOf(vrVO); List<LoadBalancerStruct> lbs = getLoadBalancersByL3Networks(nic.getL3NetworkUuid(), false); if (lbs == null || lbs.isEmpty()) { completion.success(); return; } syncOnStart(vr, true, lbs, new Completion(completion) { @Override public void success() { List<String> lbUuids = lbs.stream().map(s -> s.getLb().getUuid()).collect(Collectors.toList()); proxy.attachNetworkService(vr.getUuid(), LoadBalancerVO.class.getSimpleName(), lbUuids); completion.success(); } @Override public void fail(ErrorCode errorCode) { completion.fail(errorCode); } }); } 
@Override public void afterAttachNicRollback(VmNicInventory nic, NoErrorCompletion completion) { completion.done(); } @Override public void beforeDetachNic(VmNicInventory nic, Completion completion) { /* ZSTAC-24726 for lb, it's not necessary to implement this interface * delete network/detach user vm nic, under these cases, the removeVmNics extend point will be triggered * that will remove the lb reference with nic first, and refresh lb to agent. * */ completion.success(); } @Override public void beforeDetachNicRollback(VmNicInventory nic, NoErrorCompletion completion) { completion.done(); } @Override public List<VirtualRouterHaCallbackStruct> getCallback() { List<VirtualRouterHaCallbackStruct> structs = new ArrayList<>(); VirtualRouterHaCallbackStruct refreshCertificate = new VirtualRouterHaCallbackStruct(); refreshCertificate.type = REFRESH_CERTIFICATE_TASK; refreshCertificate.callback = new VirtualRouterHaCallbackInterface() { @Override public void callBack(String vrUuid, VirtualRouterHaTask task, Completion completion) { VirtualRouterVmVO vrVO = dbf.findByUuid(vrUuid, VirtualRouterVmVO.class); if (vrVO == null) { logger.debug(String.format("VirtualRouter[uuid:%s] is deleted, no need refresh certificate on backend", vrUuid)); completion.success(); return; } LoadBalancerStruct[] s = JSONObjectUtil.toObject(task.getJsonData(), LoadBalancerStruct[].class); refreshCertificate(VirtualRouterVmInventory.valueOf(vrVO), false, Arrays.asList(s), completion); } }; structs.add(refreshCertificate); VirtualRouterHaCallbackStruct deleteCertificate = new VirtualRouterHaCallbackStruct(); deleteCertificate.type = DELETE_CERTIFICATE_TASK; deleteCertificate.callback = new VirtualRouterHaCallbackInterface() { @Override public void callBack(String vrUuid, VirtualRouterHaTask task, Completion completion) { VirtualRouterVmVO vrVO = dbf.findByUuid(vrUuid, VirtualRouterVmVO.class); if (vrVO == null) { logger.debug(String.format("VirtualRouter[uuid:%s] is deleted, no need delete 
certificate on backend", vrUuid)); completion.success(); return; } LoadBalancerStruct[] s = JSONObjectUtil.toObject(task.getJsonData(), LoadBalancerStruct[].class); rollbackCertificate(VirtualRouterVmInventory.valueOf(vrVO), false, Arrays.asList(s), new NoErrorCompletion(completion) { @Override public void done() { completion.success(); } }); } }; structs.add(deleteCertificate); VirtualRouterHaCallbackStruct refreshLb = new VirtualRouterHaCallbackStruct(); refreshLb.type = REFRESH_LB_TASK; refreshLb.callback = new VirtualRouterHaCallbackInterface() { @Override public void callBack(String vrUuid, VirtualRouterHaTask task, Completion completion) { VirtualRouterVmVO vrVO = dbf.findByUuid(vrUuid, VirtualRouterVmVO.class); if (vrVO == null) { logger.debug(String.format("VirtualRouter[uuid:%s] is deleted, no need refresh Lb on backend", vrUuid)); completion.success(); return; } LoadBalancerStruct s = JSONObjectUtil.toObject(task.getJsonData(), LoadBalancerStruct.class); refreshLbToVirtualRouter(VirtualRouterVmInventory.valueOf(vrVO), s, completion); } }; structs.add(refreshLb); VirtualRouterHaCallbackStruct destroyLb = new VirtualRouterHaCallbackStruct(); destroyLb.type = DESTROY_LB_TASK; destroyLb.callback = new VirtualRouterHaCallbackInterface() { @Override public void callBack(String vrUuid, VirtualRouterHaTask task, Completion completion) { VirtualRouterVmVO vrVO = dbf.findByUuid(vrUuid, VirtualRouterVmVO.class); if (vrVO == null) { logger.debug(String.format("VirtualRouter[uuid:%s] is deleted, no need d Lb on backend", vrUuid)); completion.success(); return; } LoadBalancerStruct s = JSONObjectUtil.toObject(task.getJsonData(), LoadBalancerStruct.class); destroyLoadBalancerOnVirtualRouter(VirtualRouterVmInventory.valueOf(vrVO), s, completion); } }; structs.add(destroyLb); return structs; } protected List<String> getAttachableL3UuidsForVirtualRouter(VirtualRouterVmInventory vr, LoadBalancerInventory lb) { return vr.getGuestL3Networks(); } @Override public List<VmNicVO> 
getAttachableVmNicsForServerGroup(LoadBalancerVO lbVO, LoadBalancerServerGroupVO groupVO) { List<String> attachedL3Uuids = new ArrayList<>(); if (groupVO != null) { attachedL3Uuids = LoadBalancerServerGroupInventory.valueOf(groupVO).getAttachedL3Uuids(); } List<String> l3NetworkUuids = new ArrayList<>(); /* get vr of attached l3 */ List<String> vrUuids = new ArrayList<>(); VirtualRouterVmInventory vr = null; if (!attachedL3Uuids.isEmpty()) { vrUuids = Q.New(VmNicVO.class).select(VmNicVO_.vmInstanceUuid) .notNull(VmNicVO_.vmInstanceUuid) .in(VmNicVO_.l3NetworkUuid, attachedL3Uuids) .in(VmNicVO_.metaData, VirtualRouterNicMetaData.GUEST_NIC_MASK_STRING_LIST).listValues(); } if (vrUuids.isEmpty()) { vr = findVirtualRouterVm(lbVO.getUuid()); } else { vr = VirtualRouterVmInventory.valueOf(dbf.findByUuid(vrUuids.get(0), VirtualRouterVmVO.class)); } if (vr != null) { l3NetworkUuids = getAttachableL3UuidsForVirtualRouter(vr, LoadBalancerInventory.valueOf(lbVO)); } else { VipVO vipVO = dbf.findByUuid(lbVO.getVipUuid(), VipVO.class); if (vipVO.isSystem()) { vrUuids = vipProxy.getVrUuidsByNetworkService(VipVO.class.getSimpleName(), vipVO.getUuid()); } else { vrUuids = Q.New(VmNicVO.class).select(VmNicVO_.vmInstanceUuid) .eq(VmNicVO_.l3NetworkUuid, vipVO.getL3NetworkUuid()).notNull(VmNicVO_.metaData).listValues(); } if (!vrUuids.isEmpty()) { List<String> l3Uuids = Q.New(VmNicVO.class).select(VmNicVO_.l3NetworkUuid) .in(VmNicVO_.vmInstanceUuid, vrUuids) .in(VmNicVO_.metaData, VirtualRouterNicMetaData.GUEST_NIC_MASK_STRING_LIST).listValues(); l3NetworkUuids.addAll(l3Uuids); } } if (l3NetworkUuids.isEmpty()) { return new ArrayList<>(); } String sql = "select l3.uuid from L3NetworkVO l3, NetworkServiceL3NetworkRefVO ref" + " where l3.uuid = ref.l3NetworkUuid and l3.uuid in (:l3NetworkUuids) and ref.networkServiceType = :type"; l3NetworkUuids = SQL.New(sql, String.class).param("l3NetworkUuids", l3NetworkUuids) .param("type", 
LoadBalancerConstants.LB_NETWORK_SERVICE_TYPE_STRING).list(); if (l3NetworkUuids.isEmpty()) { return new ArrayList<>(); } sql = "select nic from VmInstanceVO vm, VmNicVO nic " + " where vm.uuid=nic.vmInstanceUuid and vm.type in ('UserVM', 'baremetal2') and vm.state in (:vmStates) " + " and nic.l3NetworkUuid in (:l3NetworkUuids) and nic.metaData is null "; List<VmNicVO> nicVOS = SQL.New(sql, VmNicVO.class) .param("l3NetworkUuids", l3NetworkUuids) .param("vmStates", asList(VmInstanceState.Running, VmInstanceState.Stopped)) .list(); nicVOS = nicVOS.stream().filter(n -> !VmNicInventory.valueOf(n).isIpv6OnlyNic()).collect(Collectors.toList()); if (groupVO != null) { List<String> attachedNicUuids = groupVO.getLoadBalancerServerGroupVmNicRefs().stream() .map(LoadBalancerServerGroupVmNicRefVO::getVmNicUuid).collect(Collectors.toList()); return nicVOS.stream().filter(n -> !attachedNicUuids.contains(n.getUuid())).collect(Collectors.toList()); } else { return nicVOS; } } }
zstackio/zstack
plugin/virtualRouterProvider/src/main/java/org/zstack/network/service/virtualrouter/lb/VirtualRouterLoadBalancerBackend.java
Java
apache-2.0
112,967
# 14. print_log('\n14. Issuer (Trust Anchor) is creating a Credential Offer for Prover\n') cred_offer_json = await anoncreds.issuer_create_credential_offer(issuer_wallet_handle, cred_def_id) print_log('Credential Offer: ') pprint.pprint(json.loads(cred_offer_json)) # 15. print_log('\n15. Prover creates Credential Request for the given credential offer\n') (cred_req_json, cred_req_metadata_json) = \ await anoncreds.prover_create_credential_req(prover_wallet_handle, prover_did, cred_offer_json, cred_def_json, prover_link_secret_name) print_log('Credential Request: ') pprint.pprint(json.loads(cred_req_json)) # 16. print_log('\n16. Issuer (Trust Anchor) creates Credential for Credential Request\n') cred_values_json = json.dumps({ "sex": {"raw": "male", "encoded": "5944657099558967239210949258394887428692050081607692519917050011144233"}, "name": {"raw": "Alex", "encoded": "1139481716457488690172217916278103335"}, "height": {"raw": "175", "encoded": "175"}, "age": {"raw": "28", "encoded": "28"} }) (cred_json, _, _) = \ await anoncreds.issuer_create_credential(issuer_wallet_handle, cred_offer_json, cred_req_json, cred_values_json, None, None) print_log('Credential: ') pprint.pprint(json.loads(cred_json)) # 17. print_log('\n17. Prover processes and stores received Credential\n') await anoncreds.prover_store_credential(prover_wallet_handle, None, cred_req_metadata_json, cred_json, cred_def_json, None) # 18. print_log('\n18. Closing both wallet_handles and pool\n') await wallet.close_wallet(issuer_wallet_handle) await wallet.close_wallet(prover_wallet_handle) await pool.close_pool_ledger(pool_handle) # 19. print_log('\n19. Deleting created wallet_handles\n') await wallet.delete_wallet(issuer_wallet_config, issuer_wallet_credentials) await wallet.delete_wallet(prover_wallet_config, prover_wallet_credentials) # 20. print_log('\n20. Deleting pool ledger config\n') await pool.delete_pool_ledger_config(pool_name)
Artemkaaas/indy-sdk
docs/how-tos/issue-credential/python/step4.py
Python
apache-2.0
2,869
package fix
package v0_7_0

import com.spotify.scio.CoreSysProps
import com.spotify.scio.bigquery.BigQuerySysProps

// NOTE(review): this file lives under scalafix "output" fixtures (expected rewrite
// result) — if the fixture is diffed verbatim against rule output, comment changes
// here may need to be mirrored in the corresponding input fixture. Confirm before merge.
object RewriteSysProp {
  // Writes: BigQuery-related system properties set through the typed SysProps accessors.
  BigQuerySysProps.Project.value = "project-key"
  BigQuerySysProps.CacheEnabled.value = false.toString
  BigQuerySysProps.Priority.value = "INTERACTIVE"
  // Reads: core system properties exposed via CoreSysProps.
  val tmp = CoreSysProps.TmpDir.value
  val username = CoreSysProps.User.value
}
spotify/scio
scalafix/output-0_7/src/main/scala/fix/RewriteSysProp.scala
Scala
apache-2.0
376
/* * Copyright 2015-2017 Red Hat, Inc. and/or its affiliates * and other contributors as indicated by the @author tags. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.hawkular.client.android.fragment; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.Date; import java.util.List; import org.hawkular.client.android.HawkularApplication; import org.hawkular.client.android.R; import org.hawkular.client.android.activity.AlertDetailActivity; import org.hawkular.client.android.adapter.AlertsAdapter; import org.hawkular.client.android.backend.BackendClient; import org.hawkular.client.android.backend.model.Alert; import org.hawkular.client.android.backend.model.Resource; import org.hawkular.client.android.backend.model.Trigger; import org.hawkular.client.android.util.ColorSchemer; import org.hawkular.client.android.util.ErrorUtil; import org.hawkular.client.android.util.Fragments; import org.hawkular.client.android.util.Intents; import org.hawkular.client.android.util.Time; import org.hawkular.client.android.util.ViewDirector; import android.content.Intent; import android.os.Bundle; import android.support.v4.app.Fragment; import android.support.v4.view.MenuItemCompat; import android.support.v4.widget.SwipeRefreshLayout; import android.support.v7.widget.LinearLayoutManager; import android.support.v7.widget.PopupMenu; import android.support.v7.widget.RecyclerView; import android.support.v7.widget.SearchView; import 
android.text.TextUtils; import android.view.LayoutInflater; import android.view.Menu; import android.view.MenuInflater; import android.view.MenuItem; import android.view.View; import android.view.ViewGroup; import com.squareup.leakcanary.RefWatcher; import butterknife.BindView; import butterknife.ButterKnife; import butterknife.OnClick; import butterknife.Unbinder; import icepick.Icepick; import retrofit2.Call; import retrofit2.Callback; import retrofit2.Response; import timber.log.Timber; /** * Alerts fragment. * <p/> * Displays alerts as a list with menus allowing some alert-related actions, such as acknowledgement and resolving. */ public class AlertsFragment extends Fragment implements SwipeRefreshLayout.OnRefreshListener, AlertsAdapter.AlertListener, SearchView.OnQueryTextListener { @BindView(R.id.list) RecyclerView recyclerView; @BindView(R.id.content) SwipeRefreshLayout swipeRefreshLayout; public ArrayList<Trigger> triggers; public ArrayList<Alert> alerts; public ArrayList<Alert> alertsDump; public boolean isActionPlus; public int alertsTimeMenu; public boolean isAlertsFragmentAvailable; public SearchView searchView; public String searchText; public AlertsAdapter alertsAdapter; private Unbinder unbinder; @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle state) { View view = inflater.inflate(R.layout.fragment_list, container, false); unbinder = ButterKnife.bind(this, view); return view; } @Override public void onActivityCreated(Bundle state) { super.onActivityCreated(state); isAlertsFragmentAvailable = true; setUpState(state); setUpBindings(); setUpList(); setUpMenu(); isActionPlus = false; setUpRefreshing(); setUpAlertsUi(); } private void setUpState(Bundle state) { Icepick.restoreInstanceState(this, state); } private void setUpBindings() { ButterKnife.bind(this, getView()); } private void setUpList() { final LinearLayoutManager linearLayoutManager = new LinearLayoutManager(getContext()); 
recyclerView.setLayoutManager(linearLayoutManager); } private void setUpMenu() { setHasOptionsMenu(true); } private void setUpRefreshing() { swipeRefreshLayout.setOnRefreshListener(this); swipeRefreshLayout.setColorSchemeResources(ColorSchemer.getScheme()); } @OnClick(R.id.button_retry) public void setUpAlertsUi() { if (alerts == null) { alertsTimeMenu = R.id.menu_time_hour; setUpAlertsForced(); } else { setUpAlerts(alertsDump); } } private void setUpAlertsRefreshed() { setUpAlerts(); } private void setUpAlertsForced() { showProgress(); setUpAlerts(); } private void setUpAlerts() { if(getResource() == null) { BackendClient.of(this).getAlerts(getAlertsTime(), Time.current(), null, new AlertsCallback(this)); } else if (!areTriggersAvailable()) { setUpTriggers(); } else { BackendClient.of(this).getAlerts(getAlertsTime(), Time.current(), triggers, new AlertsCallback(this)); } } @Override public void onResume() { super.onResume(); setUpAlerts(); } private boolean areTriggersAvailable() { return (triggers != null) && !triggers.isEmpty(); } private void setUpTriggers() { BackendClient.of(this).getTriggers(new TriggersCallback(this)); } private Date getAlertsTime() { switch (alertsTimeMenu) { case R.id.menu_time_hour: return Time.hourAgo(); case R.id.menu_time_day: return Time.dayAgo(); case R.id.menu_time_week: return Time.weekAgo(); case R.id.menu_time_month: return Time.monthAgo(); case R.id.menu_time_year: return Time.yearAgo(); default: return Time.hourAgo(); } } private void showProgress() { ViewDirector.of(this).using(R.id.animator).show(R.id.progress); } private void setUpAlertsTriggers(List<Trigger> triggers) { this.triggers = new ArrayList<>(filterTriggers(triggers)); setUpAlerts(); } private List<Trigger> filterTriggers(List<Trigger> triggers) { // TODO: think about better backend API. // This is mostly a hack, as trigger usage at all, actually. // Caused by a lack of API connecting Inventory and Alerts components. 
Resource resource = getResource(); List<Trigger> filteredTriggers = new ArrayList<>(); for (Trigger trigger : triggers) { if (trigger.getTags() != null && trigger.getTags().get("resourceId").equals(resource.getId())) { filteredTriggers.add(trigger); } } return filteredTriggers; } private Resource getResource() { return getArguments().getParcelable(Fragments.Arguments.RESOURCE); } private void setUpAlerts(final List<Alert> alerts) { this.alertsDump = new ArrayList<>(alerts); sortAlerts(this.alertsDump); if (isActionPlus) { this.alerts = alertsDump; if (this.alerts != null) { alertsAdapter = new AlertsAdapter(getActivity(), this, this.alerts); recyclerView.setAdapter(alertsAdapter); } } else { this.alerts = removeResolved(); if (this.alerts != null) { alertsAdapter = new AlertsAdapter(getActivity(), this, this.alerts); recyclerView.setAdapter(alertsAdapter); } } hideRefreshing(); if(this.alerts.isEmpty()) { showMessage(); } else { showList(); } } private ArrayList<Alert> removeResolved() { this.alerts = new ArrayList<>(); for (Alert alert : alertsDump) { if (!alert.getStatus().equals("RESOLVED")) alerts.add(alert); } return alerts; } private void sortAlerts(List<Alert> alerts) { Collections.sort(alerts, new AlertsComparator()); } @Override public void onAlertBodyClick(View alertView, int alertPosition) { Intent intent = new Intent(getActivity(), AlertDetailActivity.class); Alert alert = getAlertsAdapter().getItem(alertPosition); intent.putExtra(Intents.Extras.ALERT, alert); startActivity(intent); } @Override public void onAlertMenuClick(View alertView, int alertPosition) { showAlertMenu(alertView, alertPosition); } private void showAlertMenu(final View alertView, final int alertPosition) { PopupMenu alertMenu = new PopupMenu(getActivity(), alertView); alertMenu.getMenuInflater().inflate(R.menu.popup_alerts, alertMenu.getMenu()); alertMenu.setOnMenuItemClickListener(new PopupMenu.OnMenuItemClickListener() { @Override public boolean onMenuItemClick(MenuItem menuItem) { 
Alert alert = getAlertsAdapter().getItem(alertPosition); switch (menuItem.getItemId()) { case R.id.menu_resolve: BackendClient.of(AlertsFragment.this).resolveAlert(alert, new AlertActionCallback(AlertsFragment.this)); return true; case R.id.menu_acknowledge: BackendClient.of(AlertsFragment.this).acknowledgeAlert(alert, new AlertActionCallback(AlertsFragment.this)); return true; default: return false; } } }); alertMenu.show(); } private AlertsAdapter getAlertsAdapter() { return (AlertsAdapter) recyclerView.getAdapter(); } @Override public void onCreateOptionsMenu(Menu menu, MenuInflater menuInflater) { super.onCreateOptionsMenu(menu, menuInflater); menuInflater.inflate(R.menu.menu_search, menu); menuInflater.inflate(R.menu.toolbar_alerts, menu); MenuItem item = menu.findItem(R.id.menu_search1); searchView = (SearchView) MenuItemCompat.getActionView(item); searchView.setOnQueryTextListener(this); if (searchText != null) { searchView.setQuery(searchText, false); } } @Override public boolean onQueryTextChange(String query) { if (alerts != null && alerts.size() != 0) { if (!TextUtils.isEmpty(query)) { ArrayList<Alert> filteredAlerts = new ArrayList<>(); filteredAlerts.clear(); for (int i=0;i<alerts.size();i++) { String alertID = alerts.get(i).getTrigger().getId().toLowerCase(); if (alertID.contains(query.toLowerCase())) { filteredAlerts.add(alerts.get(i)); } } alertsAdapter = new AlertsAdapter(getActivity(), this, filteredAlerts); recyclerView.setAdapter(alertsAdapter); searchText = query; } else { alertsAdapter = new AlertsAdapter(getActivity(), this, this.alerts); recyclerView.setAdapter(alertsAdapter); } return false; } return true; } @Override public boolean onQueryTextSubmit(String query) { searchView.clearFocus(); return false; } @Override public void onPrepareOptionsMenu(Menu menu) { super.onPrepareOptionsMenu(menu); // menu.findItem(alertsTimeMenu).setChecked(true); } @Override public boolean onOptionsItemSelected(MenuItem menuItem) { switch 
(menuItem.getItemId()) { case R.id.menu_time_hour: case R.id.menu_time_day: case R.id.menu_time_week: case R.id.menu_time_month: case R.id.menu_time_year: alertsTimeMenu = menuItem.getItemId(); menuItem.setChecked(true); setUpAlertsForced(); return true; case R.id.show_hide_res: isActionPlus = !isActionPlus; setUpAlerts(alertsDump); if(isActionPlus){ menuItem.setTitle(R.string.hide_resolved); } else{ menuItem.setTitle(R.string.show_resolved); } return true; default: return super.onOptionsItemSelected(menuItem); } } private void cleanDump() { this.alertsDump = new ArrayList<>(); } private void hideRefreshing() { swipeRefreshLayout.setRefreshing(false); } private void showList() { ViewDirector.of(this).using(R.id.animator).show(R.id.content); } private void showMessage() { ViewDirector.of(this).using(R.id.animator).show(R.id.message); } @Override public void onSaveInstanceState(Bundle state) { super.onSaveInstanceState(state); tearDownState(state); } private void tearDownState(Bundle state) { Icepick.saveInstanceState(this, state); } @Override public void onDestroyView() { super.onDestroyView(); isAlertsFragmentAvailable = false; tearDownBindings(); detectLeaks(); } private void detectLeaks() { RefWatcher refWatcher = HawkularApplication.getRefWatcher(getActivity()); refWatcher.watch(this); } private void tearDownBindings() { unbinder.unbind(); } @Override public void onRefresh() { } private static final class TriggersCallback implements Callback<List<Trigger>> { AlertsFragment alertsFragment; public TriggersCallback(AlertsFragment alertsFragment) { this.alertsFragment = alertsFragment; } private AlertsFragment getAlertsFragment() { return alertsFragment; } @Override public void onResponse(Call<List<Trigger>> call, Response<List<Trigger>> response) { if(!response.body().isEmpty()){ Timber.d("Triggers list is empty, this should not happen."); ErrorUtil.showError(getAlertsFragment(),R.id.animator,R.id.error); return; } 
getAlertsFragment().setUpAlertsTriggers(response.body()); } @Override public void onFailure(Call<List<Trigger>> call, Throwable t) { Timber.d(t, "Triggers fetching failed."); ErrorUtil.showError(getAlertsFragment(),R.id.animator,R.id.error); } } private static final class AlertsCallback implements Callback<List<Alert>> { AlertsFragment alertsFragment; public AlertsCallback(AlertsFragment alertsFragment) { this.alertsFragment = alertsFragment; } private AlertsFragment getAlertsFragment() { return alertsFragment; } @Override public void onResponse(Call<List<Alert>> call, Response<List<Alert>> response) { if (getAlertsFragment().isAlertsFragmentAvailable) { if (!response.body().isEmpty()) { getAlertsFragment().setUpAlerts(response.body()); } else { getAlertsFragment().showMessage(); getAlertsFragment().cleanDump(); } } } @Override public void onFailure(Call<List<Alert>> call, Throwable t) { Timber.d(t, "Alerts fetching failed."); if (getAlertsFragment().isAlertsFragmentAvailable) { ErrorUtil.showError(getAlertsFragment(),R.id.animator,R.id.error); } } } private static final class AlertActionCallback implements Callback<List<String>> { AlertsFragment alertsFragment; public AlertActionCallback(AlertsFragment alertsFragment) { this.alertsFragment = alertsFragment; } private AlertsFragment getAlertsFragment() { return alertsFragment; } @Override public void onResponse(Call<List<String>> call, Response<List<String>> response) { getAlertsFragment().setUpAlertsRefreshed(); } @Override public void onFailure(Call<List<String>> call, Throwable t) { Timber.d(t, "State uodate failed."); } } private static final class AlertsComparator implements Comparator<Alert> { @Override public int compare(Alert leftAlert, Alert rightAlert) { Date leftAlertTimestamp = new Date(leftAlert.getTimestamp()); Date rightAlertTimestamp = new Date(rightAlert.getTimestamp()); return leftAlertTimestamp.compareTo(rightAlertTimestamp); } } }
hawkular/hawkular-android-client
mobile/src/main/java/org/hawkular/client/android/fragment/AlertsFragment.java
Java
apache-2.0
17,256
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#if !defined(XALAN_ELEMPARAM_HEADER_GUARD)
#define XALAN_ELEMPARAM_HEADER_GUARD

// Base include file.  Must be first.
#include "XSLTDefinitions.hpp"

// Base class header file.
#include "ElemVariable.hpp"

XALAN_CPP_NAMESPACE_BEGIN

// Stylesheet-tree element representing an "xsl:param" instruction.
// Derives almost all behavior from ElemVariable, overriding only the
// element name and the execution hooks.
class ElemParam: public ElemVariable
{
public:

    /**
     * Construct an object corresponding to an "xsl:param" element
     *
     * @param constructionContext context for construction of object
     * @param stylesheetTree stylesheet containing element
     * @param atts list of attributes for element
     * @param lineNumber line number in document
     * @param columnNumber column number in document
     */
    ElemParam(
            StylesheetConstructionContext&  constructionContext,
            Stylesheet&                     stylesheetTree,
            const AttributeListType&        atts,
            XalanFileLoc                    lineNumber,
            XalanFileLoc                    columnNumber);

    // These methods are inherited from ElemVariable ...

    // Returns the element's name (presumably "xsl:param", matching the
    // constructor's documented purpose — defined in the .cpp).
    virtual const XalanDOMString&
    getElementName() const;

// Two execution models: the non-recursive build provides start/end element
// hooks, the recursive build a single execute() entry point.
#if !defined(XALAN_RECURSIVE_STYLESHEET_EXECUTION)
    // Called when execution enters this element; may return a child element
    // to execute next (contract inherited from ElemVariable).
    virtual const ElemTemplateElement*
    startElement(StylesheetExecutionContext&    executionContext) const;

    // Called when execution leaves this element.
    virtual void
    endElement(StylesheetExecutionContext&      executionContext) const;
#else
    // Executes the element in one call (recursive execution model).
    virtual void
    execute(StylesheetExecutionContext&     executionContext) const;
#endif
};

XALAN_CPP_NAMESPACE_END

#endif  // XALAN_ELEMPARAM_HEADER_GUARD
AaronNGray/xalan
src/xalanc/XSLT/ElemParam.hpp
C++
apache-2.0
2,368
<html>
<body>
<font face="verdana" size="-1">Allows you to see syntax errors in the results of Analyze | Inspect Code.</font>
</body>
</html>
jexp/idea2
platform/platform-resources-en/src/inspectionDescriptions/SyntaxError.html
HTML
apache-2.0
130
#
# Author:: Tyler Ball (<tball@chef.io>)
# Copyright:: Copyright 2014-2016, Chef Software, Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

RSpec::Support.require_rspec_core "formatters/base_text_formatter"

class Chef
  class Audit
    # RSpec formatter that forwards audit-run notifications (control group
    # start, per-example pass/fail) to a Chef events sink instead of printing
    # them. Registered for the :stop and :example_group_started notifications.
    class AuditEventProxy < ::RSpec::Core::Formatters::BaseFormatter
      ::RSpec::Core::Formatters.register self, :stop, :example_group_started

      # TODO I don't like this, but I don't see another way to pass this in
      # see rspec files configuration.rb#L671 and formatters.rb#L129
      # Class-level injection point for the events sink; RSpec instantiates
      # formatters itself, so the sink cannot be passed to the constructor.
      def self.events=(events)
        @@events = events
      end

      # Reads the class-level sink set via .events=.
      # NOTE(review): raises NameError if called before .events= — presumably
      # the runner always sets it first; confirm against the caller.
      def events
        @@events
      end

      # Fires events.control_group_started for each top-level `control_group`
      # block (depth is detected via parent_groups.size == 1).
      def example_group_started(notification)
        if notification.group.parent_groups.size == 1
          # top level `control_group` block
          desc = notification.group.description
          Chef::Log.trace("Entered `control_group` block named #{desc}")
          events.control_group_started(desc)
        end
      end

      # End-of-run hook: reports every executed example as a control
      # success or failure, carrying the metadata built by #build_control_from.
      def stop(notification)
        Chef::Log.info("Successfully executed all `control_group` blocks and contained examples")
        notification.examples.each do |example|
          control_group_name, control_data = build_control_from(example)
          e = example.exception
          if e
            events.control_example_failure(control_group_name, control_data, e)
          else
            events.control_example_success(control_group_name, control_data)
          end
        end
      end

      private

      # Extracts [top_level_group_name, control_data_hash] from an RSpec
      # example: resource type/name (when the innermost describe subject is a
      # resource object), the chain of wrapping describe/control descriptions,
      # and source location metadata.
      def build_control_from(example)
        described_class = example.metadata[:described_class]
        if described_class
          resource_type = described_class.class.name.split(":")[-1]
          resource_name = described_class.name
        end

        # The following code builds up the context - the list of wrapping `describe` or `control` blocks
        describe_groups = []
        group = example.metadata[:example_group]
        # If the innermost block has a resource instead of a string, don't include it in context
        describe_groups.unshift(group[:description]) if described_class.nil?
        group = group[:parent_example_group]
        until group.nil?
          describe_groups.unshift(group[:description])
          group = group[:parent_example_group]
        end

        # We know all of our examples each live in a top-level `control_group` block - get this name now
        outermost_group_desc = describe_groups.shift

        [outermost_group_desc, {
          :name => example.description,
          :desc => example.full_description,
          :resource_type => resource_type,
          :resource_name => resource_name,
          :context => describe_groups,
          :line_number => example.metadata[:line_number],
        }]
      end
    end
  end
end
juliandunn/chef
lib/chef/audit/audit_event_proxy.rb
Ruby
apache-2.0
3,344
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.ml.util

import org.apache.spark.sql.types.{DataType, StructField, StructType}

/**
 * Utils for handling schemas.
 */
private[spark] object SchemaUtils {

  // TODO: Move the utility methods to SQL.

  /**
   * Check whether the given schema contains a column of the required data type.
   * @param schema input schema
   * @param colName column name
   * @param dataType required column data type
   * @throws IllegalArgumentException if the column is of a different type
   *                                  (or is absent — schema(colName) throws)
   */
  def checkColumnType(schema: StructType, colName: String, dataType: DataType): Unit = {
    val actualDataType = schema(colName).dataType
    require(actualDataType.equals(dataType),
      s"Column $colName must be of type $dataType but was actually $actualDataType.")
  }

  /**
   * Appends a new column to the input schema. This fails if the given output column already exists.
   * @param schema input schema
   * @param colName new column name. If this column name is an empty string "", this method returns
   *                the input schema unchanged. This allows users to disable output columns.
   * @param dataType new column data type
   * @return new schema with the input column appended
   */
  def appendColumn(
      schema: StructType,
      colName: String,
      dataType: DataType): StructType = {
    if (colName.isEmpty) return schema
    // Delegate to the StructField overload so the duplicate-name check and the
    // append logic live in exactly one place. The produced field is non-nullable,
    // matching the original behavior; the error message text is identical.
    appendColumn(schema, StructField(colName, dataType, nullable = false))
  }

  /**
   * Appends a new column to the input schema. This fails if the given output column already exists.
   * @param schema input schema
   * @param col New column schema
   * @return new schema with the input column appended
   */
  def appendColumn(schema: StructType, col: StructField): StructType = {
    require(!schema.fieldNames.contains(col.name), s"Column ${col.name} already exists.")
    StructType(schema.fields :+ col)
  }
}
andrewor14/iolap
mllib/src/main/scala/org/apache/spark/ml/util/SchemaUtils.scala
Scala
apache-2.0
2,744
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.aries.osgi.functional.internal;

import org.osgi.framework.BundleContext;

/**
 * OSGi program that yields the {@link BundleContext} it is run with: the
 * operation wraps the supplied context in a {@code JustOSGiImpl} and runs
 * that single-value program against the same context.
 *
 * @author Carlos Sierra Andrés
 */
public class BundleContextOSGiImpl extends OSGiImpl<BundleContext> {

	public BundleContextOSGiImpl() {
		// Capture the running bundle context and publish it as the program's value.
		super(bundleContext -> new JustOSGiImpl<>(bundleContext)._operation.run(bundleContext));
	}

}
fwassmer/aries
component-dsl/component-dsl/src/main/java/org/apache/aries/osgi/functional/internal/BundleContextOSGiImpl.java
Java
apache-2.0
1,142
// Copyright 2018 The Bazel Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.devtools.build.lib.query2.aquery; import com.google.devtools.build.lib.actions.CommandLineExpansionException; import com.google.devtools.build.lib.analysis.AspectValue; import com.google.devtools.build.lib.analysis.configuredtargets.RuleConfiguredTarget; import com.google.devtools.build.lib.events.ExtendedEventHandler; import com.google.devtools.build.lib.query2.engine.QueryEnvironment.TargetAccessor; import com.google.devtools.build.lib.skyframe.ConfiguredTargetValue; import com.google.devtools.build.lib.skyframe.SkyframeExecutor; import com.google.devtools.build.lib.skyframe.actiongraph.v2.ActionGraphDump; import com.google.devtools.build.lib.skyframe.actiongraph.v2.AqueryOutputHandler; import com.google.devtools.build.lib.skyframe.actiongraph.v2.AqueryOutputHandler.OutputType; import com.google.devtools.build.lib.skyframe.actiongraph.v2.MonolithicOutputHandler; import com.google.devtools.build.lib.skyframe.actiongraph.v2.StreamedOutputHandler; import com.google.protobuf.CodedOutputStream; import java.io.IOException; import java.io.OutputStream; import java.io.PrintStream; /** Default output callback for aquery, prints proto output. 
*/ public class ActionGraphProtoV2OutputFormatterCallback extends AqueryThreadsafeCallback { private final OutputType outputType; private final ActionGraphDump actionGraphDump; private final AqueryActionFilter actionFilters; private AqueryOutputHandler aqueryOutputHandler; /** * Pseudo-arbitrarily chosen buffer size for output. Chosen to be large enough to fit a handful of * messages without needing to flush to the underlying output, which may not be buffered. */ private static final int OUTPUT_BUFFER_SIZE = 16384; ActionGraphProtoV2OutputFormatterCallback( ExtendedEventHandler eventHandler, AqueryOptions options, OutputStream out, SkyframeExecutor skyframeExecutor, TargetAccessor<ConfiguredTargetValue> accessor, OutputType outputType, AqueryActionFilter actionFilters) { super(eventHandler, options, out, skyframeExecutor, accessor); this.outputType = outputType; this.actionFilters = actionFilters; this.aqueryOutputHandler = constructAqueryOutputHandler(outputType, out, printStream); this.actionGraphDump = new ActionGraphDump( options.includeCommandline, options.includeArtifacts, this.actionFilters, options.includeParamFiles, aqueryOutputHandler); } public static AqueryOutputHandler constructAqueryOutputHandler( OutputType outputType, OutputStream out, PrintStream printStream) { switch (outputType) { case BINARY: case TEXT: return new StreamedOutputHandler( outputType, CodedOutputStream.newInstance(out, OUTPUT_BUFFER_SIZE), printStream); case JSON: return new MonolithicOutputHandler(printStream); } throw new IllegalStateException( "Unsupported output format " + outputType.formatName() + ": --incompatible_proto_output_v2 must be used with" + " --output=(proto|textproto|jsonproto)."); } @Override public String getName() { return outputType.formatName(); } @Override public void processOutput(Iterable<ConfiguredTargetValue> partialResult) throws IOException, InterruptedException { try { // Enabling includeParamFiles should enable includeCommandline by default. 
options.includeCommandline |= options.includeParamFiles; for (ConfiguredTargetValue configuredTargetValue : partialResult) { actionGraphDump.dumpConfiguredTarget(configuredTargetValue); if (options.useAspects) { if (configuredTargetValue.getConfiguredTarget() instanceof RuleConfiguredTarget) { for (AspectValue aspectValue : accessor.getAspectValues(configuredTargetValue)) { actionGraphDump.dumpAspect(aspectValue, configuredTargetValue); } } } } } catch (CommandLineExpansionException e) { throw new IOException(e.getMessage()); } } @Override public void close(boolean failFast) throws IOException { if (!failFast) { aqueryOutputHandler.close(); } } }
akira-baruah/bazel
src/main/java/com/google/devtools/build/lib/query2/aquery/ActionGraphProtoV2OutputFormatterCallback.java
Java
apache-2.0
4,875
# Generated by Django 2.1.7 on 2019-04-07 21:43 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('data_log', '0008_auto_20190402_2035'), ] operations = [ migrations.AlterModelOptions( name='craftrunelog', options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)}, ), migrations.AlterModelOptions( name='dungeonlog', options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)}, ), migrations.AlterModelOptions( name='fulllog', options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)}, ), migrations.AlterModelOptions( name='magicboxcraft', options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)}, ), migrations.AlterModelOptions( name='riftdungeonlog', options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)}, ), migrations.AlterModelOptions( name='riftraidlog', options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)}, ), migrations.AlterModelOptions( name='shoprefreshlog', options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)}, ), migrations.AlterModelOptions( name='summonlog', options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)}, ), migrations.AlterModelOptions( name='wishlog', options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)}, ), migrations.AlterModelOptions( name='worldbosslog', options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)}, ), migrations.AlterField( model_name='riftraidrunecraftdrop', name='log', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rune_crafts', to='data_log.RiftRaidLog'), ), ]
PeteAndersen/swarfarm
data_log/migrations/0009_auto_20190407_1443.py
Python
apache-2.0
2,137
var view = Ti.UI.createView({     backgroundColor:'#000',     top:0,     left:0,     width:'100%',     height:'100%',     layout:'vertical' }); // create labels, buttons, text fields var helpLabel = Titanium.UI.createLabel({ color:'#abcdef', highlightedColor:'#0f0', backgroundColor:'transparent', width:'auto', height:'auto', text:'Pick your selection and touch submit' }); var submitButton = Titanium.UI.createButton({ color:'#abcdef', top: 20, width:200, height:40, font:{fontSize:20,fontWeight:'bold',fontFamily:'Helvetica Neue'}, title:'Submit' }); submitButton.addEventListener('click', function() { // do nothing right now, next lab will submit via xhr }); var radioGroup = []; var radioGroupIndex = 0; var radioSelected; function createRadioGroupButton(itemName) { var buttonView1 = Titanium.UI.createView({ top: 10, left: 0, height: 50, width: 200, borderRadius: 10, backgroundColor: '#fff', index: radioGroupIndex }); var selection1 = Titanium.UI.createLabel({ text : itemName, color : '#f79e18', font : {fontSize : 40}, textAlign: 'center' }); buttonView1.add(selection1); buttonView1.addEventListener('click', function() { buttonView1.backgroundColor = '#cef7ff'; radioSelected = selection1.text; Ti.API.log("DEBUG",'selection1.text'+selection1.text); for (var i=0; i < radioGroup.length; i++) { deselect(i,index); }; }); var index = radioGroupIndex; deselect = function (i,j) { //Ti.API.log("DEBUG",'deselect:'+radioGroup[i].index); if (radioGroup[i].index != j) { radioGroup[i].radioButton.backgroundColor = '#fff'; } } radioGroupIndex = radioGroupIndex +1; var exportWidget = { radioButton: buttonView1, deselect: deselect, index: index}; radioGroup.push(exportWidget); return exportWidget; } view.add(helpLabel); view.add(createRadioGroupButton('Fish').radioButton); view.add(createRadioGroupButton('Vegetarian').radioButton); view.add(createRadioGroupButton('Chicken').radioButton); view.add(submitButton); Ti.UI.currentWindow.add(view);
prpatel/VoterLab
Resources/lab5-b.js
JavaScript
apache-2.0
2,072
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012, Intel, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Client side of the volume RPC API. """ from oslo.config import cfg from cinder.openstack.common import rpc import cinder.openstack.common.rpc.proxy CONF = cfg.CONF class VolumeAPI(cinder.openstack.common.rpc.proxy.RpcProxy): '''Client side of the volume rpc API. API version history: 1.0 - Initial version. 1.1 - Adds clone volume option to create_volume. 1.2 - Add publish_service_capabilities() method. 1.3 - Pass all image metadata (not just ID) in copy_volume_to_image. 1.4 - Add request_spec, filter_properties and allow_reschedule arguments to create_volume(). 1.5 - Add accept_transfer. 1.6 - Add extend_volume. 1.7 - Adds host_name parameter to attach_volume() to allow attaching to host rather than instance. 1.8 - Add migrate_volume, rename_volume. 
''' BASE_RPC_API_VERSION = '1.0' def __init__(self, topic=None): super(VolumeAPI, self).__init__( topic=topic or CONF.volume_topic, default_version=self.BASE_RPC_API_VERSION) def create_volume(self, ctxt, volume, host, request_spec, filter_properties, allow_reschedule=True, snapshot_id=None, image_id=None, source_volid=None): self.cast(ctxt, self.make_msg('create_volume', volume_id=volume['id'], request_spec=request_spec, filter_properties=filter_properties, allow_reschedule=allow_reschedule, snapshot_id=snapshot_id, image_id=image_id, source_volid=source_volid), topic=rpc.queue_get_for(ctxt, self.topic, host), version='1.4') def delete_volume(self, ctxt, volume): self.cast(ctxt, self.make_msg('delete_volume', volume_id=volume['id']), topic=rpc.queue_get_for(ctxt, self.topic, volume['host'])) def create_snapshot(self, ctxt, volume, snapshot): self.cast(ctxt, self.make_msg('create_snapshot', volume_id=volume['id'], snapshot_id=snapshot['id']), topic=rpc.queue_get_for(ctxt, self.topic, volume['host'])) def delete_snapshot(self, ctxt, snapshot, host): self.cast(ctxt, self.make_msg('delete_snapshot', snapshot_id=snapshot['id']), topic=rpc.queue_get_for(ctxt, self.topic, host)) def attach_volume(self, ctxt, volume, instance_uuid, host_name, mountpoint): return self.call(ctxt, self.make_msg('attach_volume', volume_id=volume['id'], instance_uuid=instance_uuid, host_name=host_name, mountpoint=mountpoint), topic=rpc.queue_get_for(ctxt, self.topic, volume['host']), version='1.7') def detach_volume(self, ctxt, volume): return self.call(ctxt, self.make_msg('detach_volume', volume_id=volume['id']), topic=rpc.queue_get_for(ctxt, self.topic, volume['host'])) def copy_volume_to_image(self, ctxt, volume, image_meta): self.cast(ctxt, self.make_msg('copy_volume_to_image', volume_id=volume['id'], image_meta=image_meta), topic=rpc.queue_get_for(ctxt, self.topic, volume['host']), version='1.3') def initialize_connection(self, ctxt, volume, connector): return self.call(ctxt, 
self.make_msg('initialize_connection', volume_id=volume['id'], connector=connector), topic=rpc.queue_get_for(ctxt, self.topic, volume['host'])) def terminate_connection(self, ctxt, volume, connector, force=False): return self.call(ctxt, self.make_msg('terminate_connection', volume_id=volume['id'], connector=connector, force=force), topic=rpc.queue_get_for(ctxt, self.topic, volume['host'])) def publish_service_capabilities(self, ctxt): self.fanout_cast(ctxt, self.make_msg('publish_service_capabilities'), version='1.2') def accept_transfer(self, ctxt, volume): self.cast(ctxt, self.make_msg('accept_transfer', volume_id=volume['id']), topic=rpc.queue_get_for(ctxt, self.topic, volume['host']), version='1.5') def extend_volume(self, ctxt, volume, new_size): self.cast(ctxt, self.make_msg('extend_volume', volume_id=volume['id'], new_size=new_size), topic=rpc.queue_get_for(ctxt, self.topic, volume['host']), version='1.6') def migrate_volume(self, ctxt, volume, dest_host, force_host_copy): host_p = {'host': dest_host.host, 'capabilities': dest_host.capabilities} self.cast(ctxt, self.make_msg('migrate_volume', volume_id=volume['id'], host=host_p, force_host_copy=force_host_copy), topic=rpc.queue_get_for(ctxt, self.topic, volume['host']), version='1.8') def rename_volume(self, ctxt, volume, new_name_id): self.call(ctxt, self.make_msg('rename_volume', volume_id=volume['id'], new_name_id=new_name_id), topic=rpc.queue_get_for(ctxt, self.topic, volume['host']), version='1.8')
inkerra/cinder
cinder/volume/rpcapi.py
Python
apache-2.0
7,605
#include "./Processors/DenoiseStrategies/DoublehistEqual.h" #include "math.h" #include "string.h" //Àྲ̬±äÁ¿±ØÐëÔÚ.cppÎļþÀïÉùÃ÷ DoublehistEqual* DoublehistEqual::mSingleton = NULL; void DoublehistEqual::denoise(const Mat& srcImg, Mat& desImg) { unsigned char* cvtImg = mat2GrayImgPointer(srcImg, desImg); histEqual2( cvtImg, desImg.rows, desImg.cols); Mat outimage(desImg.rows, desImg.cols, CV_8UC1, cvtImg); desImg = outimage; } void DoublehistEqual::histEqual2( unsigned char *image, int height, int width ) { //³õʼ»¯¸÷²ÎÊý double lamba = 0.25; int num = 6; //Ñ­»·´ÎÊý£¬Ô½¸ß¼ì²âЧ¹ûÔ½ºÃ£¬µ«ÊǸüºÄʱ¡£ int option = 1; //ÈÈ´«µ¼·½³ÌµÄÑ¡Ôñ int kappa = 30 * 30; //ÌݶÈãÐÖµÉÏµÄÆ½·½ int lLBytes = ((width * 8) + 31) / 32 * 4; //±£Ö¤Í¼ÏñµÄÁÐΪ4µÄ±¶Êý //Ìí¼Ó°Ë¸ö·½ÏòµÄÂ˲¨Ä£°å int hN[9] = { 0, 1, 0, 0, -1, 0, 0, 0, 0 }; int hS[9] = { 0, 0, 0, 0, -1, 0, 0, 1, 0 }; int hE[9] = { 0, 0, 0, 0, -1, 1, 0, 0, 0 }; int hW[9] = { 0, 0, 0, 1, -1, 0, 0, 0, 0 }; int hNE[9] = { 0, 0, 1, 0, -1, 0, 0, 0, 0 }; int hSE[9] = { 0, 0, 0, 0, -1, 0, 0, 0, 1 }; int hSW[9] = { 0, 0, 0, 0, -1, 0, 1, 0, 0 }; int hNW[9] = { 1, 0, 0, 0, -1, 0, 0, 0, 0 }; //ÉêÇëÄÚ´æ´æ´¢¸÷¸ö·½ÏòÂ˲¨ºóµÄ½á¹û float *deltaN = new float[lLBytes*height]; memset(deltaN, 0, lLBytes*height*sizeof(float)); float *deltaS = new float[lLBytes*height]; memset(deltaS, 0, lLBytes*height*sizeof(float)); float *deltaW = new float[lLBytes*height]; memset(deltaW, 0, lLBytes*height*sizeof(float)); float *deltaE = new float[lLBytes*height]; memset(deltaE, 0, lLBytes*height*sizeof(float)); float *deltaNE = new float[lLBytes*height]; memset(deltaNE, 0, lLBytes*height*sizeof(float)); float *deltaSE = new float[lLBytes*height]; memset(deltaSE, 0, lLBytes*height*sizeof(float)); float *deltaSW = new float[lLBytes*height]; memset(deltaSW, 0, lLBytes*height*sizeof(float)); float *deltaNW = new float[lLBytes*height]; memset(deltaNW, 0, lLBytes*height*sizeof(float)); //ÉêÇëÄÚ´æ´æ´¢ÖмäͼÏñ½á¹û float *pimgsorce = new float[lLBytes*height]; memset(pimgsorce, 0, 
lLBytes*height*sizeof(float)); int x, y; //Ñ­»·±äÁ¿ //¶Ôpimgsorce¸³³õÖµ for (y = 0; y<height; y++) { for (x = 0; x<width; x++) { pimgsorce[lLBytes*y + x] = image[lLBytes*y + x]; } } //Ñ­»·¿ªÊ¼ for (int i = 1; i<num; i++) { imfilter((float *)pimgsorce, (float *)deltaN, width, height, hN); imfilter((float *)pimgsorce, (float *)deltaS, width, height, hS); imfilter((float *)pimgsorce, (float *)deltaW, width, height, hW); imfilter((float *)pimgsorce, (float *)deltaE, width, height, hE); imfilter((float *)pimgsorce, (float *)deltaNE, width, height, hNE); imfilter((float *)pimgsorce, (float *)deltaSE, width, height, hSE); imfilter((float *)pimgsorce, (float *)deltaSW, width, height, hSW); imfilter((float *)pimgsorce, (float *)deltaNW, width, height, hNW); //Ñ¡Ôñ¹«Ê½Ò» // 1 if (option == 1) // g(delta(*))=------------------------------- { // 1 + (delta(*)/kappa)^2 for (y = 0; y<height; y++) { for (x = 0; x<width; x++) { int k = lLBytes*y + x; float CN, CS, CE, CW, CNE, CSE, CSW, CNW; CN = (float)(exp(-(deltaN[k] * deltaN[k] / kappa))); CS = (float)(exp(-(deltaS[k] * deltaS[k] / kappa))); CE = (float)(exp(-(deltaE[k] * deltaE[k] / kappa))); CW = (float)(exp(-(deltaW[k] * deltaW[k] / kappa))); CNE = (float)(exp(-(deltaNE[k] * deltaNE[k] / kappa))); CSE = (float)(exp(-(deltaSE[k] * deltaSE[k] / kappa))); CSW = (float)(exp(-(deltaSW[k] * deltaSW[k] / kappa))); CNW = (float)(exp(-(deltaNW[k] * deltaNW[k] / kappa))); pimgsorce[k] = (float)(pimgsorce[k] + lamba*(CN*deltaN[k] + CS*deltaS[k] + CW*deltaW[k] + CE*deltaE[k] + (CNE*deltaNE[k] + CSE*deltaSE[k] + CSW*deltaSW[k] + CNW*deltaNW[k]) / 2)); } } } //Ñ¡Ôñ¹«Ê½¶þ // (delta(*))^2 if (option == 2) // g(delta(*))=exp (- ----------------------- ) { // (kappa)^2 for (y = 0; y<height; y++) { for (x = 0; x<width; x++) { int k = lLBytes*y + x; float CN, CS, CE, CW, CNE, CSE, CSW, CNW; CN = (float)(1 / (1 + exp((deltaN[k] * deltaN[k] / kappa)))); CS = (float)(1 / (1 + exp((deltaS[k] * deltaS[k] / kappa)))); CE = (float)(1 / (1 + 
exp((deltaE[k] * deltaE[k] / kappa)))); CW = (float)(1 / (1 + exp((deltaW[k] * deltaW[k] / kappa)))); CNE = (float)(1 / (1 + exp((deltaNE[k] * deltaNE[k] / kappa)))); CSE = (float)(1 / (1 + exp((deltaSE[k] * deltaSE[k] / kappa)))); CSW = (float)(1 / (1 + exp((deltaSW[k] * deltaSW[k] / kappa)))); CNW = (float)(1 / (1 + exp((deltaNW[k] * deltaNW[k] / kappa)))); pimgsorce[k] = (float)(pimgsorce[k] + lamba*(CN*deltaN[k] + CS*deltaS[k] + CW*deltaW[k] + CE*deltaE[k] + (CNE*deltaNE[k] + CSE*deltaSE[k] + CSW*deltaSW[k] + CNW*deltaNW[k]) / 2)); } } } } //ÒÔÉϵõ½µÄÊDZ³¾°½¨Ä££¨¾­¹ý¸÷ÏòÒìÐÔÀ©É¢¹ýÂË£©ºóµÄͼÏñ£» for (y = 0; y<height; y++) { for (x = 0; x<width; x++) { int k = lLBytes*y + x; if (pimgsorce[k]>255) pimgsorce[k] = 255; else pimgsorce[k] = (unsigned char)(pimgsorce[k] + 0.5); } } //ԭͼÓë±³¾°×ö²î£¬²¢¹éÒ»»¯ for (y = 0; y<height; y++) { for (x = 0; x<width; x++) { float temp1; //²îͼ temp1 = float(image[lLBytes*y + x] - pimgsorce[lLBytes*y + x]); //ÓÃԭʼͼÏñ¼õÈ¥±³¾°Í¼Ïñ if (temp1<0.0) temp1 = 0; if (temp1>255.0) temp1 = 255.0; pimgsorce[lLBytes*y + x] = (unsigned char)temp1; } } float maxT = 1.0; //¶Ô½á¹ûͼÏñ½øÐйéÒ»»¯£¬À©´óÏÔʾµÄ¶Ô±È¶È for (y = 0; y<height; y++) { for (x = 0; x<width; x++) { if (pimgsorce[lLBytes*y + x]>maxT) maxT = pimgsorce[lLBytes*y + x]; // ¼ÆËãͼÏñµÄ×î¸ßÏñËØÖµ } } for (y = 0; y<height; y++) { for (x = 0; x<width; x++) { image[lLBytes*y + x] = (unsigned char)(pimgsorce[lLBytes*y + x] * 255 / maxT);//¶Ô²îֵͼÏñ½øÐйéÒ»»¯ } } delete[]pimgsorce; delete[]deltaE; delete[]deltaN; delete[]deltaS; delete[]deltaW; delete[]deltaNE; delete[]deltaSE; delete[]deltaNW; delete[]deltaSW; } void DoublehistEqual::imfilter( float *pimgsorce, float *image1, int width, int height, int *moban ) { int lLBytes = ((width * 8) + 31) / 32 * 4; //±£Ö¤Í¼ÏñµÄÁпíÄܹ»±»4Õû³ö //Ê×ÏȶÔͼÏñÀ©³ä±ßÔµ£¨±ßÔµÀ©³ä0£©£¬±ãÓÚ¾í»ýÔËËã float *buffer = new float[(height + 2)*(lLBytes + 2)]; memset(buffer, 0, (height + 2)*(lLBytes + 2)*sizeof(float)); int x, y; //Ñ­»·±äÁ¿ for (y = 1; y <= height; y++) { for (x 
= 1; x <= width; x++) { buffer[(lLBytes + 2)*y + x] = pimgsorce[lLBytes*(y - 1) + x - 1]; //½«Í¼ÏñÌîÈë¾ØÕóµÄÖÐÐÄ } } //¾í»ýÔËËã for (y = 1; y<height + 1; y++) { for (x = 1; x<width + 1; x++) { float temp; int k1 = (lLBytes + 2)*(y - 1) + x - 1; int k2 = (lLBytes + 2)*y + x - 1; int k3 = (lLBytes + 2)*(y + 1) + x - 1; temp = buffer[k1] * moban[6] + buffer[k1 + 1] * moban[7] + buffer[k1 + 2] * moban[8] + buffer[k2] * moban[3] + buffer[k2 + 1] * moban[4] + buffer[k2 + 2] * moban[5] + buffer[k3] * moban[0] + buffer[k3 + 1] * moban[1] + buffer[k3 + 2] * moban[2]; image1[lLBytes*(y - 1) + x - 1] = temp; } } delete[] buffer; }
sxxlearn2rock/SissorActionTest
HODTest/HODTest/Processors/DenoiseStrategies/DoublehistEqual.cpp
C++
apache-2.0
7,482
/* Copyright (C) Relevance Lab Private Limited- All Rights Reserved * Unauthorized copying of this file, via any medium is strictly prohibited * Proprietary and confidential * Written by Relevance UI Team, * Aug 2015 */ //This is a global service and will be cache angular.module('factory.appPermission', []).factory('uac', ['$http', '$log', '$q', 'session', function ($http, $log, $q, session) { 'use strict'; function getpermissionforcategory (category, permissionto, permissionset) { var perms = []; if (permissionset) { for (var i = 0; i < permissionset.length; i++) { var obj = permissionset[i].permissions; for (var j = 0; j < obj.length; j++) { if (obj[j].category === category) { var acc = obj[j].access.toString().split(','); for (var ac in acc) { if (perms.indexOf(acc[ac]) < 0){ perms.push(acc[ac]); } } } } } if (perms.indexOf(permissionto) >= 0) { return (true); } else{ return (false); } } else { return (false); } } var permissionService = { hasPermission: function(category, permissionto){ var retVal = ''; if (!session.getUser()) { return false; // return permission denied }else { retVal = getpermissionforcategory(category, permissionto, session.getUser().permissionset); return retVal; } } }; return { hasPermission: permissionService.hasPermission }; }]);
RLOpenCatalyst/core
client/cat3/src/factory/access_permission_service.js
JavaScript
apache-2.0
1,465
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!--NewPage--> <HTML> <HEAD> <!-- Generated by javadoc (build 1.6.0_20) on Tue Jun 01 14:31:55 BST 2010 --> <META http-equiv="Content-Type" content="text/html; charset=UTF-8"> <TITLE> Uses of Class com.hp.hpl.jena.graph.query.PatternStageCompiler (Jena Framework) </TITLE> <META NAME="date" CONTENT="2010-06-01"> <LINK REL ="stylesheet" TYPE="text/css" HREF="../../../../../../../stylesheet.css" TITLE="Style"> <SCRIPT type="text/javascript"> function windowTitle() { if (location.href.indexOf('is-external=true') == -1) { parent.document.title="Uses of Class com.hp.hpl.jena.graph.query.PatternStageCompiler (Jena Framework)"; } } </SCRIPT> <NOSCRIPT> </NOSCRIPT> </HEAD> <BODY BGCOLOR="white" onload="windowTitle();"> <HR> <!-- ========= START OF TOP NAVBAR ======= --> <A NAME="navbar_top"><!-- --></A> <A HREF="#skip-navbar_top" title="Skip navigation links"></A> <TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY=""> <TR> <TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A NAME="navbar_top_firstrow"><!-- --></A> <TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY=""> <TR ALIGN="center" VALIGN="top"> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../com/hp/hpl/jena/graph/query/PatternStageCompiler.html" title="class in com.hp.hpl.jena.graph.query"><FONT CLASS="NavBarFont1"><B>Class</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> &nbsp;<FONT CLASS="NavBarFont1Rev"><B>Use</B></FONT>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-tree.html"><FONT 
CLASS="NavBarFont1"><B>Tree</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A>&nbsp;</TD> </TR> </TABLE> </TD> <TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM> </EM> </TD> </TR> <TR> <TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2"> &nbsp;PREV&nbsp; &nbsp;NEXT</FONT></TD> <TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2"> <A HREF="../../../../../../../index.html?com/hp/hpl/jena/graph/query/\class-usePatternStageCompiler.html" target="_top"><B>FRAMES</B></A> &nbsp; &nbsp;<A HREF="PatternStageCompiler.html" target="_top"><B>NO FRAMES</B></A> &nbsp; &nbsp;<SCRIPT type="text/javascript"> <!-- if(window==top) { document.writeln('<A HREF="../../../../../../../allclasses-noframe.html"><B>All Classes</B></A>'); } //--> </SCRIPT> <NOSCRIPT> <A HREF="../../../../../../../allclasses-noframe.html"><B>All Classes</B></A> </NOSCRIPT> </FONT></TD> </TR> </TABLE> <A NAME="skip-navbar_top"></A> <!-- ========= END OF TOP NAVBAR ========= --> <HR> <CENTER> <H2> <B>Uses of Class<br>com.hp.hpl.jena.graph.query.PatternStageCompiler</B></H2> </CENTER> No usage of com.hp.hpl.jena.graph.query.PatternStageCompiler <P> <HR> <!-- ======= START OF BOTTOM NAVBAR ====== --> <A NAME="navbar_bottom"><!-- --></A> <A HREF="#skip-navbar_bottom" title="Skip navigation links"></A> <TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY=""> <TR> <TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A NAME="navbar_bottom_firstrow"><!-- --></A> <TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY=""> <TR ALIGN="center" VALIGN="top"> <TD BGCOLOR="#EEEEFF" 
CLASS="NavBarCell1"> <A HREF="../../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../com/hp/hpl/jena/graph/query/PatternStageCompiler.html" title="class in com.hp.hpl.jena.graph.query"><FONT CLASS="NavBarFont1"><B>Class</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> &nbsp;<FONT CLASS="NavBarFont1Rev"><B>Use</B></FONT>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A>&nbsp;</TD> </TR> </TABLE> </TD> <TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM> </EM> </TD> </TR> <TR> <TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2"> &nbsp;PREV&nbsp; &nbsp;NEXT</FONT></TD> <TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2"> <A HREF="../../../../../../../index.html?com/hp/hpl/jena/graph/query/\class-usePatternStageCompiler.html" target="_top"><B>FRAMES</B></A> &nbsp; &nbsp;<A HREF="PatternStageCompiler.html" target="_top"><B>NO FRAMES</B></A> &nbsp; &nbsp;<SCRIPT type="text/javascript"> <!-- if(window==top) { document.writeln('<A HREF="../../../../../../../allclasses-noframe.html"><B>All Classes</B></A>'); } //--> </SCRIPT> <NOSCRIPT> <A HREF="../../../../../../../allclasses-noframe.html"><B>All Classes</B></A> </NOSCRIPT> </FONT></TD> </TR> </TABLE> <A 
NAME="skip-navbar_bottom"></A> <!-- ======== END OF BOTTOM NAVBAR ======= --> <HR> Copyright © 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Hewlett-Packard Development Company, LP </BODY> </HTML>
jianglili007/pretty-printer
Jenna-2.6.3/doc/javadoc/com/hp/hpl/jena/graph/query/class-use/PatternStageCompiler.html
HTML
apache-2.0
6,464
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!-- NewPage --> <html lang="en"> <head> <!-- Generated by javadoc (version 1.7.0_111) on Thu Aug 18 01:51:18 UTC 2016 --> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> <title>Uses of Package org.apache.hadoop.lib.lang (Apache Hadoop Main 2.7.3 API)</title> <meta name="date" content="2016-08-18"> <link rel="stylesheet" type="text/css" href="../../../../../stylesheet.css" title="Style"> </head> <body> <script type="text/javascript"><!-- if (location.href.indexOf('is-external=true') == -1) { parent.document.title="Uses of Package org.apache.hadoop.lib.lang (Apache Hadoop Main 2.7.3 API)"; } //--> </script> <noscript> <div>JavaScript is disabled on your browser.</div> </noscript> <!-- ========= START OF TOP NAVBAR ======= --> <div class="topNav"><a name="navbar_top"> <!-- --> </a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../../overview-summary.html">Overview</a></li> <li><a href="package-summary.html">Package</a></li> <li>Class</li> <li class="navBarCell1Rev">Use</li> <li><a href="package-tree.html">Tree</a></li> <li><a href="../../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../../index-all.html">Index</a></li> <li><a href="../../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li>Prev</li> <li>Next</li> </ul> <ul class="navList"> <li><a href="../../../../../index.html?org/apache/hadoop/lib/lang/package-use.html" target="_top">Frames</a></li> <li><a href="package-use.html" target="_top">No Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_top"> <li><a href="../../../../../allclasses-noframe.html">All Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_top"); 
if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip-navbar_top"> <!-- --> </a></div> <!-- ========= END OF TOP NAVBAR ========= --> <div class="header"> <h1 title="Uses of Package org.apache.hadoop.lib.lang" class="title">Uses of Package<br>org.apache.hadoop.lib.lang</h1> </div> <div class="contentContainer">No usage of org.apache.hadoop.lib.lang</div> <!-- ======= START OF BOTTOM NAVBAR ====== --> <div class="bottomNav"><a name="navbar_bottom"> <!-- --> </a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../../overview-summary.html">Overview</a></li> <li><a href="package-summary.html">Package</a></li> <li>Class</li> <li class="navBarCell1Rev">Use</li> <li><a href="package-tree.html">Tree</a></li> <li><a href="../../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../../index-all.html">Index</a></li> <li><a href="../../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li>Prev</li> <li>Next</li> </ul> <ul class="navList"> <li><a href="../../../../../index.html?org/apache/hadoop/lib/lang/package-use.html" target="_top">Frames</a></li> <li><a href="package-use.html" target="_top">No Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_bottom"> <li><a href="../../../../../allclasses-noframe.html">All Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_bottom"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip-navbar_bottom"> <!-- --> </a></div> <!-- ======== END OF BOTTOM NAVBAR ======= --> <p class="legalCopy"><small>Copyright &#169; 2016 <a href="http://www.apache.org">Apache Software Foundation</a>. 
All rights reserved.</small></p> </body> </html>
TK-TarunW/ecosystem
hadoop-2.7.3/share/doc/hadoop/api/org/apache/hadoop/lib/lang/package-use.html
HTML
apache-2.0
4,102
package org.wtrader.loader.utils.interfaces;

import java.io.IOException;
import java.util.List;

/**
 * Loads stock data from a file on disk, restricted to an accepted set of stocks.
 */
public interface IStockDataLoader {

    /**
     * Reads the file at the given path and processes its stock entries.
     *
     * @param fullnamePath   full path of the data file to load
     * @param acceptedStocks stock identifiers to accept; entries for other stocks are
     *                       presumably skipped by implementations — TODO confirm
     * @throws IOException if the file cannot be opened or read
     */
    public void loadFile(String fullnamePath, List<String> acceptedStocks) throws IOException;
}
bbranquinho/wpattern-wtrader
wtrader-loader/wtrader-loader-utils/src/main/java/org/wtrader/loader/utils/interfaces/IStockDataLoader.java
Java
apache-2.0
240
--- layout: default title: CAS - Database Authentication category: Authentication --- {% include variables.html %} # Database Authentication Database authentication is enabled by including the following dependencies in the WAR overlay: {% include casmodule.html group="org.apereo.cas" module="cas-server-support-jdbc" %} To learn how to configure database drivers, [please see this guide](../installation/JDBC-Drivers.html). ## Configuration ### Query Database Authentication Authenticates a user by comparing the user password (which can be encoded with a password encoder) against the password on record determined by a configurable database query. {% include casproperties.html properties="cas.authn.jdbc.query" %} ### Search Database Authentication Searches for a user record by querying against a username and password; the user is authenticated if at least one result is found. {% include casproperties.html properties="cas.authn.jdbc.search" %} ### Bind Database Authentication Authenticates a user by attempting to create a database connection using the username and (hashed) password. {% include casproperties.html properties="cas.authn.jdbc.bind" %} ### Encode Database Authentication A JDBC querying handler that will pull back the password and the private salt value for a user and validate the encoded password using the public salt value. Assumes everything is inside the same database table. Supports settings for number of iterations as well as private salt. This password encoding method combines the private Salt and the public salt which it prepends to the password before hashing. If multiple iterations are used, the bytecode hash of the first iteration is rehashed without the salt values. The final hash is converted to hex before comparing it to the database value. 
{% include casproperties.html properties="cas.authn.jdbc.encode" %} ## Password Policy Enforcement A certain number of database authentication schemes have limited support for detecting locked/disabled/etc accounts via column names that are defined in the CAS settings. To learn how to enforce a password policy, please [review this guide](../installation/Password-Policy-Enforcement.html).
fogbeam/cas_mirror
docs/cas-server-documentation/authentication/Database-Authentication.md
Markdown
apache-2.0
2,203
using System;

namespace SimpleWAWS.Authentication
{
    /// <summary>
    /// Constant values shared across the authentication layer.
    /// </summary>
    public static class AuthConstants
    {
        //public const string EncryptionReason = "ProtectCookie";

        /// <summary>Name of the cookie carrying the login session.</summary>
        public const string LoginSessionCookie = "loginsession";

        /// <summary>Lifetime of the session cookie (just under one hour).</summary>
        public static readonly TimeSpan SessionCookieValidTimeSpan = TimeSpan.FromMinutes(59);

        /// <summary>Prefix of the Authorization header value for bearer tokens
        /// (note the trailing space).</summary>
        public const string BearerHeader = "Bearer ";

        /// <summary>Authentication provider used when none is specified.</summary>
        public const string DefaultAuthProvider = "AAD";

        /// <summary>Marker value identifying an anonymous user.</summary>
        public const string AnonymousUser = "aus";

        // NOTE(review): presumably the App Service "testing in production"
        // routing cookie ("x-ms-routing-name") — confirm against callers.
        public const string TiPCookie = "x-ms-routing-name";
    }
}
projectkudu/TryAppService
SimpleWAWS/Authentication/AuthConstants.cs
C#
apache-2.0
556
Ext.namespace("Inubit.WebModeler");

/**
 * Loads the nodes of a process model from the server and instantiates them in
 * chunks of `chunkSize` URIs, so large models are not created all at once.
 * Fires "load" once every chunk has finished and "error" when the initial
 * node-list request fails.
 */
Inubit.WebModeler.ModelNodesLoader = Ext.extend(Inubit.WebModeler.Loader, {
    constructor : function (config) {
        // Number of node URIs instantiated per chunk.
        this.chunkSize = 25;
        // Nodes finished in the chunk currently being loaded.
        this.loaded = 0;
        // Total number of chunks to load.
        this.toLoad = 0;
        // Index of the chunk currently being loaded.
        this.currentChunk = 0;
        this.chunks = new Array();
        this.nodes = new Array();
        this.mode = config.mode;
        this.rootComponent = config.rootComponent;
        Inubit.WebModeler.ModelNodesLoader.superclass.constructor.call(this, config);
    },

    /**
     * Fetches the node URI list from <baseUri>/nodes, partitions it into
     * chunks of `chunkSize` and starts loading the first chunk.
     */
    load : function(baseUri, canvas) {
        // Capture the loader: Ext.Ajax.request invokes the callbacks with a
        // different `this` because no explicit scope is configured.
        var handler = this;
        Ext.Ajax.request({
            method : 'GET',
            url : baseUri + "/nodes",
            disableCaching : false,
            success : function(response) {
                if (response.status == 200) {
                    var xmlDoc = response.responseXML;
                    var uris = xmlDoc.getElementsByTagName("uri");

                    handler.toLoad = Math.ceil(uris.length / handler.chunkSize);
                    // An empty model is already fully loaded.
                    if (handler.toLoad == 0)
                        handler.fireEvent("load");

                    for (var i = 0; i < handler.toLoad; i++) {
                        handler.chunks[i] = new Array();
                    }

                    for (i = 0; i < uris.length; i++) {
                        var ci = Math.floor(i / handler.chunkSize);
                        handler.chunks[ci].push(uris[i].firstChild.nodeValue);
                    }

                    handler.loadNextChunk(canvas);
                } else if (response.status == 403) {
                    // forbidden..... do something
                } else {
                    // do something on error
                }
            },
            failure : function() {
                // BUGFIX: was `this.fireEvent("error")`. Inside this callback
                // `this` is not the loader (no scope option is passed), so the
                // "error" event was never fired on the loader. Use the captured
                // reference, exactly as the success handler does.
                handler.fireEvent("error");
                // do something on error
            }
        });
    },

    /**
     * Instantiates every node of the current chunk. Each node reports back via
     * its "load" listener (see nodeLoaded), which advances to the next chunk.
     */
    loadNextChunk : function( canvas ) {
        var chunk = this.chunks[this.currentChunk];
        if (!Ext.isDefined(chunk)) {
            return;
        }

        for (var i = 0; i < chunk.length; i++) {
            var newNode = null;
            var config = {
                uri: Util.getPath(chunk[i]),
                model: null,
                rootComponent: this.rootComponent,
                listeners: {
                    load: function() {
                        this.nodeLoaded(canvas);
                    },
                    scope: this
                }
            };

            // Pick the node implementation matching the loader mode.
            if (this.mode == Inubit.WebModeler.ModelLoader.MODE_EDIT) {
                newNode = new Inubit.WebModeler.EditableProcessNode(config);
            } else if (this.mode == Inubit.WebModeler.ModelLoader.MODE_VIEW) {
                newNode = new Inubit.WebModeler.model.viewer.AnnotatedProcessNode(config);
            } else {
                newNode = new Inubit.WebModeler.model.viewer.ReferenceNode(config);
            }

            newNode.load(canvas, true);
            this.nodes.push(newNode);
        }
    },

    /**
     * Called when a single node finished loading; once the current chunk is
     * complete, advances to the next chunk and fires "load" after the last one.
     */
    nodeLoaded : function(canvas) {
        this.loaded++;
        if (this.loaded == this.chunks[this.currentChunk].length) {
            this.currentChunk++;
            if (this.currentChunk == this.chunks.length)
                this.fireEvent("load");

            this.loaded = 0;
            this.loadNextChunk(canvas);
        }
    },

    /** @return the node objects created so far */
    getNodes : function() {
        return this.nodes;
    }
});
frapu78/processeditor
www/js/loader/ModelNodesLoader.js
JavaScript
apache-2.0
3,202
# -*- coding:utf-8 -*-

"""
Description:
    Issue Transaction
Usage:
    from AntShares.Core.IssueTransaction import IssueTransaction
"""

from AntShares.Core.AssetType import AssetType
from AntShares.Helper import *
from AntShares.Core.Transaction import Transaction
from AntShares.Core.TransactionType import TransactionType

from random import randint


class IssueTransaction(Transaction):
    """Transaction that issues units of an asset.

    Sets the transaction type byte (0x40) and a random 32-bit nonce.
    """

    def __init__(self, inputs, outputs):
        super(IssueTransaction, self).__init__(inputs, outputs)
        self.TransactionType = TransactionType.IssueTransaction  # 0x40
        self.Nonce = self.genNonce()

    def genNonce(self):
        # BUGFIX: the original called `random.randint(...)` although only
        # `randint` was imported (`from random import randint`), raising
        # NameError on every construction. Range is [0x10000000, 0xFFFFFFFF].
        return randint(268435456, 4294967295)

    def getScriptHashesForVerifying(self):
        """Get ScriptHash From SignatureContract"""
        pass

    def serializeExclusiveData(self, writer):
        # Only the nonce is specific to an issue transaction.
        writer.writeUInt32(self.Nonce)
AntSharesSDK/antshares-python
sdk/AntShares/Core/IssueTransaction.py
Python
apache-2.0
922
/*

   Copyright 2019 Esri

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

*/
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;

// Assembly metadata for the DivideLines add-in.
// General Information about an assembly is controlled through the following
// set of attributes. Change these attribute values to modify the information
// associated with an assembly.
[assembly: AssemblyTitle("DivideLines")]
[assembly: AssemblyDescription("")]
[assembly: AssemblyConfiguration("")]
[assembly: AssemblyCompany("Acme")]
[assembly: AssemblyProduct("DivideLines")]
[assembly: AssemblyCopyright("Copyright © Acme 2015")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]

// Setting ComVisible to false makes the types in this assembly not visible
// to COM components.  If you need to access a type in this assembly from
// COM, set the ComVisible attribute to true on that type.
[assembly: ComVisible(false)]

// The following GUID is for the ID of the typelib if this project is exposed to COM
[assembly: Guid("3d5af139-7a66-4c5a-8a33-9b61a6ddccd4")]

// Version information for an assembly consists of the following four values:
//
//      Major Version
//      Minor Version
//      Build Number
//      Revision
//
// You can specify all the values or you can default the Build and Revision Numbers
// by using the '*' as shown below:
// [assembly: AssemblyVersion("1.0.*")]
[assembly: AssemblyVersion("1.0.0.0")]
[assembly: AssemblyFileVersion("1.0.0.0")]
SoJourned/arcgis-pro-sdk-community-samples-1
Editing/DivideLines/Properties/AssemblyInfo.cs
C#
apache-2.0
1,987
/*
 * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights
 * Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.sns.model;

import java.io.Serializable;

/**
 * Result type for the SetPlatformApplicationAttributes operation. The
 * operation returns no data, so this class carries no fields; it exists to
 * keep the SDK's uniform request/result pattern.
 */
public class SetPlatformApplicationAttributesResult implements Serializable,
        Cloneable {

    /**
     * Returns a string representation of this object; useful for testing and
     * debugging. Always "{}" since there are no fields.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        sb.append("}");
        return sb.toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        // No state: any two instances of this type are equal, so equality
        // reduces to a type check. (Removed the unused cast local.)
        if (obj instanceof SetPlatformApplicationAttributesResult == false)
            return false;
        return true;
    }

    @Override
    public int hashCode() {
        // No state to hash; a constant is consistent with equals().
        // (Removed the unused `prime` local from the generated code.)
        return 1;
    }

    @Override
    public SetPlatformApplicationAttributesResult clone() {
        try {
            return (SetPlatformApplicationAttributesResult) super.clone();
        } catch (CloneNotSupportedException e) {
            // Unreachable: the class declares Cloneable.
            throw new IllegalStateException(
                    "Got a CloneNotSupportedException from Object.clone() "
                            + "even though we're Cloneable!", e);
        }
    }
}
nterry/aws-sdk-java
aws-java-sdk-sns/src/main/java/com/amazonaws/services/sns/model/SetPlatformApplicationAttributesResult.java
Java
apache-2.0
2,106
package costmanagement // Copyright (c) Microsoft and contributors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // // See the License for the specific language governing permissions and // limitations under the License. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "context" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" "net/http" ) // OperationsClient is the client for the Operations methods of the Costmanagement service. type OperationsClient struct { BaseClient } // NewOperationsClient creates an instance of the OperationsClient client. func NewOperationsClient(subscriptionID string) OperationsClient { return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) } // NewOperationsClientWithBaseURI creates an instance of the OperationsClient client. func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} } // List lists all of the available consumption REST API operations. 
func (client OperationsClient) List(ctx context.Context) (result OperationListResultPage, err error) {
	// Wire up lazy fetching of subsequent pages; the page type calls
	// listNextResults when the caller advances past the first page.
	result.fn = client.listNextResults
	req, err := client.ListPreparer(ctx)
	if err != nil {
		err = autorest.NewErrorWithError(err, "costmanagement.OperationsClient", "List", nil, "Failure preparing request")
		return
	}

	resp, err := client.ListSender(req)
	if err != nil {
		result.olr.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "costmanagement.OperationsClient", "List", resp, "Failure sending request")
		return
	}

	result.olr, err = client.ListResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "costmanagement.OperationsClient", "List", resp, "Failure responding to request")
	}

	return
}

// ListPreparer prepares the List request. The operations endpoint is
// provider-wide: no subscription id appears in the path, only the api-version
// query parameter.
func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
	const APIVersion = "2018-05-31"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPath("/providers/Microsoft.CostManagement/operations"),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error. Retries use the client's
// configured RetryAttempts/RetryDuration for retryable status codes.
func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}

// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) {
	// Only HTTP 200 is accepted; the JSON body is unmarshalled into result
	// and the response body is closed in all cases.
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// listNextResults retrieves the next set of results, if any.
func (client OperationsClient) listNextResults(lastResults OperationListResult) (result OperationListResult, err error) {
	req, err := lastResults.operationListResultPreparer()
	if err != nil {
		return result, autorest.NewErrorWithError(err, "costmanagement.OperationsClient", "listNextResults", nil, "Failure preparing next results request")
	}
	// A nil request with nil error means there is no further page
	// (presumably no nextLink in the last result) — return the zero result.
	if req == nil {
		return
	}
	resp, err := client.ListSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "costmanagement.OperationsClient", "listNextResults", resp, "Failure sending next results request")
	}
	result, err = client.ListResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "costmanagement.OperationsClient", "listNextResults", resp, "Failure responding to next results request")
	}
	return
}

// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client OperationsClient) ListComplete(ctx context.Context) (result OperationListResultIterator, err error) {
	result.page, err = client.List(ctx)
	return
}
anpingli/origin
vendor/github.com/Azure/azure-sdk-for-go/services/costmanagement/mgmt/2018-05-31/costmanagement/operations.go
GO
apache-2.0
4,861
package org.openqa.selenium.support; // Basic colour keywords as defined by the W3C HTML4 spec // See http://www.w3.org/TR/css3-color/#html4 import org.openqa.selenium.support.Color; public enum Colors { BLACK(new Color(0, 0, 0, 1d)), SILVER(new Color(192, 192, 192, 1d)), GRAY(new Color(128, 128, 128, 1d)), WHITE(new Color(255, 255, 255, 1d)), MAROON(new Color(128, 0, 0, 1d)), RED(new Color(255, 0, 0, 1d)), PURPLE(new Color(128, 0, 128, 1d)), FUCHSIA(new Color(255, 0, 255, 1d)), GREEN(new Color(0, 128, 0, 1d)), LIME(new Color(0, 255, 0, 1d)), OLIVE(new Color(128, 128, 0, 1d)), YELLOW(new Color(255, 255, 0, 1d)), NAVY(new Color(0, 0, 128, 1d)), BLUE(new Color(0, 0, 255, 1d)), TEAL(new Color(0, 128, 128, 1d)), AQUA(new Color(0, 255, 255, 1d)); private final Color colorValue; private Colors(Color colorValue) { this.colorValue = colorValue; } public Color getColorValue() { return this.colorValue; } }
winhamwr/selenium
java/client/src/org/openqa/selenium/support/Colors.java
Java
apache-2.0
1,017
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. fn main() { let a: ~[int] = ~[]; vec::each(a, |_| -> bool { //~^ ERROR mismatched types }); }
jeltz/rust-debian-package
src/test/compile-fail/liveness-issue-2163.rs
Rust
apache-2.0
581
/*
 * Copyright 2012 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package com.google.android.apps.mytracks.io.file.importer;

import com.google.android.apps.mytracks.content.MyTracksProviderUtils;
import com.google.android.apps.mytracks.content.Track;
import com.google.android.apps.mytracks.io.file.TrackFileFormat;
import com.google.android.apps.mytracks.io.file.exporter.KmzTrackExporter;
import com.google.android.apps.mytracks.util.FileUtils;
import com.google.android.apps.mytracks.util.PreferencesUtils;
import com.google.android.apps.mytracks.util.SystemUtils;
import com.google.android.maps.mytracks.R;

import android.content.Context;
import android.net.Uri;
import android.os.AsyncTask;
import android.os.PowerManager.WakeLock;
import android.util.Log;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

/**
 * AsyncTask to import files from the external storage. Progress is reported
 * back to the attached {@link ImportActivity}, which may be swapped out (e.g.
 * across configuration changes) via {@link #setActivity}.
 *
 * @author Jimmy Shih
 */
public class ImportAsyncTask extends AsyncTask<Void, Integer, Boolean> {

  private static final String TAG = ImportAsyncTask.class.getSimpleName();

  // Current activity; may be null while detached (see setActivity).
  private ImportActivity importActivity;
  // true to import every matching file under `path`, false to import just `path`.
  private final boolean importAll;
  private final TrackFileFormat trackFileFormat;
  private final String path;
  private final Context context;
  // Held during the import unless a recording is active (see doInBackground).
  private WakeLock wakeLock;

  // true if the AsyncTask has completed
  private boolean completed;

  // the number of files successfully imported
  private int successCount;

  // the number of files to import
  private int totalCount;

  // the last successfully imported track id
  private long trackId;

  /**
   * Creates an AsyncTask.
   *
   * @param importActivity the activity currently associated with this AsyncTask
   * @param importAll true to import all GPX files
   * @param trackFileFormat the track file format
   * @param path path to import GPX files
   */
  public ImportAsyncTask(ImportActivity importActivity, boolean importAll,
      TrackFileFormat trackFileFormat, String path) {
    this.importActivity = importActivity;
    this.importAll = importAll;
    this.trackFileFormat = trackFileFormat;
    this.path = path;
    // Use the application context so the task does not leak the activity.
    context = importActivity.getApplicationContext();
    completed = false;
    successCount = 0;
    totalCount = 0;
    trackId = -1L;
  }

  /**
   * Sets the current {@link ImportActivity} associated with this AyncTask.
   *
   * @param importActivity the current {@link ImportActivity}, can be null
   */
  public void setActivity(ImportActivity importActivity) {
    this.importActivity = importActivity;
    // If the task finished while detached, deliver the result immediately.
    if (completed && importActivity != null) {
      importActivity.onAsyncTaskCompleted(successCount, totalCount, trackId);
    }
  }

  @Override
  protected void onPreExecute() {
    if (importActivity != null) {
      importActivity.showProgressDialog();
    }
  }

  @Override
  protected Boolean doInBackground(Void... params) {
    try {
      Thread.currentThread().setPriority(Thread.MAX_PRIORITY);

      // Get the wake lock if not recording or paused
      boolean isRecording = PreferencesUtils.getLong(importActivity, R.string.recording_track_id_key)
          != PreferencesUtils.RECORDING_TRACK_ID_DEFAULT;
      boolean isPaused = PreferencesUtils.getBoolean(importActivity,
          R.string.recording_track_paused_key, PreferencesUtils.RECORDING_TRACK_PAUSED_DEFAULT);
      if (!isRecording || isPaused) {
        wakeLock = SystemUtils.acquireWakeLock(importActivity, wakeLock);
      }

      List<File> files = getFiles();
      totalCount = files.size();
      if (totalCount == 0) {
        return true;
      }

      for (int i = 0; i < totalCount; i++) {
        if (isCancelled()) {
          // If cancelled, return true to show the number of files imported
          return true;
        }
        if (importFile(files.get(i))) {
          successCount++;
        }
        publishProgress(i + 1, totalCount);
      }
      return true;
    } finally {
      // Always release the wake lock, even on cancellation or failure.
      if (wakeLock != null && wakeLock.isHeld()) {
        wakeLock.release();
      }
    }
  }

  @Override
  protected void onProgressUpdate(Integer... values) {
    if (importActivity != null) {
      importActivity.setProgressDialogValue(values[0], values[1]);
    }
  }

  @Override
  protected void onPostExecute(Boolean result) {
    completed = true;
    if (importActivity != null) {
      importActivity.onAsyncTaskCompleted(successCount, totalCount, trackId);
    }
  }

  @Override
  protected void onCancelled() {
    completed = true;
    if (importActivity != null) {
      importActivity.onAsyncTaskCompleted(successCount, totalCount, trackId);
    }
  }

  /**
   * Imports a file. Chooses the importer by configured format and file
   * extension: .kml files use the KML importer, other files in KML mode are
   * treated as KMZ, and everything else goes through the GPX importer.
   *
   * @param file the file
   */
  private boolean importFile(final File file) {
    FileInputStream fileInputStream = null;
    try {
      TrackImporter trackImporter;
      if (trackFileFormat == TrackFileFormat.KML) {
        String extension = FileUtils.getExtension(file.getName());
        if (TrackFileFormat.KML.getExtension().equals(extension)) {
          trackImporter = new KmlFileTrackImporter(context, -1L);
        } else {
          // KMZ import needs a pre-existing track id: insert a placeholder
          // track first. NOTE(review): if the import then fails, the
          // placeholder is presumably left behind — confirm cleanup elsewhere.
          MyTracksProviderUtils myTracksProviderUtils = MyTracksProviderUtils.Factory.get(context);
          Uri uri = myTracksProviderUtils.insertTrack(new Track());
          long newId = Long.parseLong(uri.getLastPathSegment());
          trackImporter = new KmzTrackImporter(context, newId);
        }
      } else {
        trackImporter = new GpxFileTrackImporter(context);
      }
      fileInputStream = new FileInputStream(file);
      trackId = trackImporter.importFile(fileInputStream);
      return trackId != -1L;
    } catch (FileNotFoundException e) {
      Log.e(TAG, "Unable to import file", e);
      return false;
    } finally {
      if (fileInputStream != null) {
        try {
          fileInputStream.close();
        } catch (IOException e) {
          Log.e(TAG, "Unable to close file input stream", e);
        }
      }
    }
  }

  /**
   * Gets a list of files. If importAll is true, returns a list of the files
   * under the path directory. If importAll is false, returns a list containing
   * just the path file. In importAll mode only files whose extension matches
   * the configured format (.kml/.kmz for KML mode, .gpx for GPX mode) are kept.
   */
  private List<File> getFiles() {
    List<File> files = new ArrayList<File>();
    File file = new File(path);
    if (importAll) {
      File[] candidates = file.listFiles();
      if (candidates != null) {
        for (File candidate : candidates) {
          if (!FileUtils.isDirectory(candidate)) {
            String extension = FileUtils.getExtension(candidate.getName());
            if (trackFileFormat == TrackFileFormat.KML && (
                TrackFileFormat.KML.getExtension().equals(extension)
                || KmzTrackExporter.KMZ_EXTENSION.equals(extension))) {
              files.add(candidate);
            } else if (trackFileFormat == TrackFileFormat.GPX
                && TrackFileFormat.GPX.getExtension().equals(extension)) {
              files.add(candidate);
            }
          }
        }
      }
    } else {
      files.add(file);
    }
    return files;
  }
}
Plonk42/mytracks
myTracks/src/main/java/com/google/android/apps/mytracks/io/file/importer/ImportAsyncTask.java
Java
apache-2.0
7,667
package com.s4game.server.message.manager;

import javax.annotation.Resource;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;

import com.s4game.core.message.Message;
import com.s4game.core.message.Message.DestType;
import com.s4game.core.message.Message.FromType;
import com.s4game.server.message.IMsgDispatcher;
import com.s4game.server.share.moduleinit.CommandRegister;
import com.s4game.server.share.moduleinit.Group;

/**
 * Routes {@link Message}s between the client-facing IO layer and the server
 * components (bus/GS, stage, public/login), based on where a message came
 * from ({@link FromType}) and where it should go ({@link DestType}).
 *
 * @author zeusgooogle@gmail.com
 * @since May 10, 2015 8:41:22 PM
 */
@Component
public class SwapManager {

    private Logger LOG = LoggerFactory.getLogger(getClass());

    @Resource
    private IMsgDispatcher ioDispatcher;

    @Resource
    private IMsgDispatcher publicDispatcher;

    @Resource
    private IMsgDispatcher stageDispatcher;

    @Resource
    private IMsgDispatcher gsDispatcher;

    /**
     * Entry point: dispatches a message according to its origin.
     * Messages from internal components all share the same dest-based routing.
     */
    public void swap(Message message) {
        FromType from = message.getFrom();

        switch (from) {
        case CLIENT:
            swapClientMsg(message);
            break;
        case BUS:
            componentMsgSwap(message);
            break;
        case STAGE:
            componentMsgSwap(message);
            break;
        case STAGE_CONTROL:
            componentMsgSwap(message);
            break;

        default:
            break;
        }
    }

    /**
     * Routes a client message to the server component that owns its command.
     */
    public void swapClientMsg(Message message) {
        String command = message.getCommand();

        /**
         * Look up the group this command belongs to.
         */
        Group group = CommandRegister.getCmdGroup(command);

        // Message routing: the meaning of the route value differs per
        // routeHelper; interpret it according to the concrete context.
        message.setRoute(group.value);

        LOG.debug("swap msg command: {}, dest group: {}", command, group);

        switch (group) {
        case BUS:
        case STAGE:
        case STAGE_CONTROL:
            gsDispatcher.in(message);
            break;

        case LOGIN:
        case PUBLIC:
            toPublic(message);
            break;
        default:
            LOG.error("message cmd: {} not find group.", command);
            break;
        }
    }

    /** Routes a message coming from the public component by its destination. */
    public void swapPublicMsg(Message message) {
        componentMsgSwap(message);
    }

    // Common dest-based routing for messages originating from server components.
    private void componentMsgSwap(Message message) {
        DestType dest = message.getDest();

        switch (dest) {
        case CLIENT:
            toClient(message);
            break;
        case BUS:
            this.gsDispatcher.in(message);
            break;
        case STAGE_CONTROL:
            this.gsDispatcher.in(message);
            break;
        case PUBLIC:
            toPublic(message);
            break;
        case STAGE:
            toStage(message);
            break;

        case INOUT:
        case INNER_SYSTEM:
            // Not routed here — presumably handled elsewhere; confirm.
            break;
        default:
            break;
        }
    }

    private void toClient(Message message) {
        ioDispatcher.in(message);
    }

    private void toPublic(Message message) {
        publicDispatcher.in(message);
    }

    private void toStage(Message message) {
        stageDispatcher.in(message);
    }
}
zuesgooogle/HappyMj
mj-game/src/main/java/com/s4game/server/message/manager/SwapManager.java
Java
apache-2.0
3,175
<?php

/*
 * This file is part of the symfony package.
 * (c) Fabien Potencier <fabien.potencier@symfony-project.com>
 *
 * For the full copyright and license information, please view the LICENSE
 * file that was distributed with this source code.
 */

/**
 * Finds deprecated plugins usage.
 *
 * @package    symfony
 * @subpackage task
 * @author     Fabien Potencier <fabien.potencier@symfony-project.com>
 * @version    SVN: $Id: sfDeprecatedPluginsValidation.class.php 25410 2009-12-15 15:19:07Z fabien $
 */
class sfDeprecatedPluginsValidation extends sfValidation
{
  public function getHeader()
  {
    return 'Checking usage of deprecated plugins';
  }

  public function getExplanation()
  {
    return array(
      '',
      '  The files above use deprecated plugins',
      '  that have been removed in symfony 1.4.',
      '',
      '  You can probably remove those references safely.',
      '',
    );
  }

  public function validate()
  {
    // Plugins removed in symfony 1.4; checked in this order so the
    // reported list is stable.
    $deprecatedPlugins = array('sfCompat10Plugin', 'sfProtoculousPlugin');

    $found = array();
    $files = sfFinder::type('file')->name('*Configuration.class.php')->in($this->getProjectConfigDirectories());
    foreach ($files as $file)
    {
      // Ignore occurrences inside comments.
      $content = sfToolkit::stripComments(file_get_contents($file));

      $matches = array();
      foreach ($deprecatedPlugins as $plugin)
      {
        if (false !== strpos($content, $plugin))
        {
          $matches[] = $plugin;
        }
      }

      if ($matches)
      {
        $found[$file] = implode(', ', $matches);
      }
    }

    return $found;
  }
}
avpreserve/MediaSCORE
lib/vendor/lib/task/project/validation/sfDeprecatedPluginsValidation.class.php
PHP
apache-2.0
1,700
package org.keycloak.services.messages;

import java.io.IOException;
import java.net.URL;
import java.text.MessageFormat;
import java.util.Locale;
import java.util.Properties;

import org.jboss.logging.Logger;

import org.keycloak.models.KeycloakSession;
import org.keycloak.messages.MessagesProvider;

/**
 * Resolves admin console messages from the base theme's per-locale
 * properties bundle. Unknown keys resolve to the key itself.
 *
 * @author <a href="mailto:leonardo.zanivan@gmail.com">Leonardo Zanivan</a>
 */
public class AdminMessagesProvider implements MessagesProvider {

    private static final Logger logger = Logger.getLogger(AdminMessagesProvider.class);

    private KeycloakSession session;
    private Locale locale;
    private Properties messagesBundle;

    public AdminMessagesProvider(KeycloakSession session, Locale locale) {
        this.session = session;
        this.locale = locale;
        this.messagesBundle = getMessagesBundle(locale);
    }

    @Override
    public String getMessage(String messageKey, Object... parameters) {
        // Fall back to the key itself when no translation exists.
        String pattern = messagesBundle.getProperty(messageKey, messageKey);
        MessageFormat formatter = new MessageFormat(pattern, locale);
        return formatter.format(parameters);
    }

    @Override
    public void close() {
        // Nothing to release.
    }

    // Loads the locale's bundle from the classpath; returns an empty bundle
    // when the locale is null, the resource is missing, or loading fails.
    private Properties getMessagesBundle(Locale locale) {
        Properties bundle = new Properties();
        if (locale == null) {
            return bundle;
        }

        String resourcePath = "theme/base/admin/messages/messages_" + locale.toString() + ".properties";
        URL resource = getClass().getClassLoader().getResource(resourcePath);
        if (resource == null) {
            return bundle;
        }

        try {
            bundle.load(resource.openStream());
        } catch (IOException ex) {
            logger.warn("Failed to load messages", ex);
        }
        return bundle;
    }
}
matzew/keycloak
services/src/main/java/org/keycloak/services/messages/AdminMessagesProvider.java
Java
apache-2.0
1,720
/*
 * Copyright 2015 RONDHUIT Co.,LTD.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.nlp4l.core.analysis

import org.apache.lucene.util.Attribute
import org.apache.lucene.util.AttributeReflector

object Token {
  /** Factory for an empty token. */
  def apply() = new Token
}

/**
 * A token's attributes as a String-to-String map, populated by Lucene's
 * attribute reflection ([[AttributeReflector]]).
 *
 * NOTE: although this exposes the immutable `Map` interface, it is backed by
 * a private mutable map — `+` and `-` mutate the buffer in place and return
 * an immutable snapshot of it (not a structurally-shared copy).
 */
class Token extends Map[String, String] with AttributeReflector {

  // Mutable backing store filled in by reflect() / + / -.
  val attributes = scala.collection.mutable.Map[String, String]()

  /** Callback from Lucene's reflectWith: records one attribute key/value.
    * Null values are silently dropped. */
  def reflect(attClass: Class[_ <: Attribute], key: String, value: Any): Unit = {
    if(value != null){
      attributes += key -> value.toString
    }
  }

  // Adds the pair to the backing map (nulls ignored, value via toString)
  // and returns an immutable snapshot.
  override def +[B1 >: String](kv: (String, B1)): Map[String, B1] = {
    if(kv._2 != null){
      attributes += kv._1 -> kv._2.toString
    }
    Map() ++ attributes
  }

  override def get(key: String): Option[String] = {
    attributes.get(key)
  }

  override def iterator: Iterator[(String, String)] = {
    attributes.iterator
  }

  // Removes the key from the backing map and returns an immutable snapshot.
  override def -(key: String): Map[String, String] = {
    attributes -= key
    Map() ++ attributes
  }
}
gazimahmud/nlp4l
src/main/scala/org/nlp4l/core/analysis/Token.scala
Scala
apache-2.0
1,506
from plenum.common.event_bus import InternalBus
from plenum.common.startable import Mode
from plenum.common.timer import QueueTimer
from plenum.common.util import get_utc_epoch
from plenum.server.consensus.primary_selector import RoundRobinConstantNodesPrimariesSelector
from plenum.server.database_manager import DatabaseManager
from plenum.server.quorums import Quorums
from plenum.server.replica import Replica
from plenum.test.testing_utils import FakeSomething


def _make_fake_node(view_no):
    """Build the minimal FakeSomething node stub both tests need.

    Extracted because the two tests previously duplicated this 15-line
    construction verbatim.
    """
    return FakeSomething(
        name="fake node",
        ledger_ids=[0],
        viewNo=view_no,
        utc_epoch=get_utc_epoch,
        get_validators=lambda: [],
        db_manager=DatabaseManager(),
        requests=[],
        mode=Mode.participating,
        timer=QueueTimer(),
        quorums=Quorums(4),
        write_manager=None,
        poolManager=FakeSomething(node_names_ordered_by_rank=lambda: []),
        primaries_selector=RoundRobinConstantNodesPrimariesSelector(
            ["Alpha", "Beta", "Gamma", "Delta"])
    )


def _make_replica(tconf, node):
    """Create a Replica (instance 0) for `node` with a no-op BLS component."""
    bls_bft_replica = FakeSomething(
        gc=lambda *args: None,
    )
    return Replica(node, instId=0, config=tconf, bls_bft_replica=bls_bft_replica)


def test_ordered_cleaning(tconf):
    """gc() must drop ordered entries from views older than the previous one."""
    global_view_no = 2

    node = _make_fake_node(global_view_no)
    replica = _make_replica(tconf, node)
    replica._consensus_data.view_no = global_view_no
    total = []

    num_requests_per_view = 3
    for viewNo in range(global_view_no + 1):
        for seqNo in range(num_requests_per_view):
            reqId = viewNo, seqNo
            replica._ordering_service._add_to_ordered(*reqId)
            total.append(reqId)

    # gc is called after stable checkpoint, since no request executed
    # in this test starting it manually
    replica._ordering_service.gc(100)

    # Requests with view lower then previous view
    # should not be in ordered
    assert len(replica._ordering_service.ordered) == len(total[num_requests_per_view:])


def test_primary_names_cleaning(tconf):
    """primaryNames must retain only the current and previous view's primary."""
    node = _make_fake_node(0)
    replica = _make_replica(tconf, node)

    replica.primaryName = "Node1:0"
    assert list(replica.primaryNames.items()) == \
           [(0, "Node1:0")]

    node.viewNo += 1
    replica._consensus_data.view_no = node.viewNo
    replica.primaryName = "Node2:0"
    assert list(replica.primaryNames.items()) == \
           [(0, "Node1:0"), (1, "Node2:0")]

    node.viewNo += 1
    replica._consensus_data.view_no = node.viewNo
    replica.primaryName = "Node3:0"
    assert list(replica.primaryNames.items()) == \
           [(1, "Node2:0"), (2, "Node3:0")]

    node.viewNo += 1
    replica._consensus_data.view_no = node.viewNo
    replica.primaryName = "Node4:0"
    assert list(replica.primaryNames.items()) == \
           [(2, "Node3:0"), (3, "Node4:0")]
evernym/zeno
plenum/test/replica/test_buffers_cleaning.py
Python
apache-2.0
3,313
from distutils.core import setup, Extension

# Native extension module wrapping the hamsterdb C library.
hamsterdb_extension = Extension(
    'hamsterdb',
    libraries=['hamsterdb'],
    include_dirs=['../include'],
    library_dirs=['../src/.libs'],
    sources=['src/python.cc'],
)

setup(
    name='hamsterdb-python',
    version='2.1.8',
    author='Christoph Rupp',
    author_email='chris@crupp.de',
    url='http://hamsterdb.com',
    description='This is the hamsterdb wrapper for Python',
    license='Apache Public License 2',
    ext_modules=[hamsterdb_extension],
)
cloudrain21/hamsterdb
python/setup.py
Python
apache-2.0
505
// Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.

using System.Linq;
using Microsoft.CodeAnalysis.CSharp.Symbols;
using Microsoft.CodeAnalysis.CSharp.Syntax;
using Microsoft.CodeAnalysis.CSharp.Test.Utilities;
using Microsoft.CodeAnalysis.Test.Utilities;
using Microsoft.CodeAnalysis.Text;
using Roslyn.Test.Utilities;
using Xunit;

namespace Microsoft.CodeAnalysis.CSharp.UnitTests
{
    // Tests for source field symbols: declaration diagnostics, modifiers,
    // generic/nullable field types, escaped identifiers and reserved names.
    public class FieldTests : CSharpTestBase
    {
        // Instance field initializers are not allowed in structs (CS0573).
        [Fact]
        public void InitializerInStruct()
        {
            var text = @"struct S
{
    public int I = 9;

    public S(int i) {}
}";

            CreateCompilationWithMscorlib(text).VerifyDiagnostics(
                // (3,16): error CS0573: 'S': cannot have instance property or field initializers in structs
                //     public int I = 9;
                Diagnostic(ErrorCode.ERR_FieldInitializerInStruct, "I").WithArguments("S").WithLocation(3, 16)
                );
        }

        // As above; chaining ": this()" does not make the initializer legal.
        [Fact]
        public void InitializerInStruct2()
        {
            var text = @"struct S
{
    public int I = 9;

    public S(int i) : this() {}
}";

            var comp = CreateCompilationWithMscorlib(text);
            comp.VerifyDiagnostics(
                // (3,16): error CS0573: 'S': cannot have instance property or field initializers in structs
                //     public int I = 9;
                Diagnostic(ErrorCode.ERR_FieldInitializerInStruct, "I").WithArguments("S").WithLocation(3, 16),
                // (3,16): warning CS0649: Field 'S.I' is never assigned to, and will always have its default value 0
                //     public int I = 9;
                Diagnostic(ErrorCode.WRN_UnassignedInternalField, "I").WithArguments("S.I", "0").WithLocation(3, 16)
                );
        }

        // A private instance field symbol exposes the expected type,
        // accessibility and default (non-static, non-virtual, ...) flags.
        [Fact]
        public void Simple1()
        {
            var text =
@"
class A { 
  A F; 
}
";
            var comp = CreateCompilation(text);
            var global = comp.GlobalNamespace;
            var a = global.GetTypeMembers("A", 0).Single();
            var sym = a.GetMembers("F").Single() as FieldSymbol;
            Assert.Equal(TypeKind.Class, sym.Type.TypeKind);
            Assert.Equal<TypeSymbol>(a, sym.Type);
            Assert.Equal(Accessibility.Private, sym.DeclaredAccessibility);
            Assert.Equal(SymbolKind.Field, sym.Kind);
            Assert.False(sym.IsStatic);
            Assert.False(sym.IsAbstract);
            Assert.False(sym.IsSealed);
            Assert.False(sym.IsVirtual);
            Assert.False(sym.IsOverride);

            // Assert.Equal(0, sym.GetAttributes().Count());
        }

        // Duplicate field name: both symbols are created and exactly one
        // CS0102 duplicate-name diagnostic is reported.
        [Fact]
        public void Simple2()
        {
            var text =
@"
class A { 
  A F, G; 
  A G;
}
";
            var comp = CreateCompilationWithMscorlib(text);
            var global = comp.GlobalNamespace;
            var a = global.GetTypeMembers("A", 0).Single();
            var f = a.GetMembers("F").Single() as FieldSymbol;
            Assert.Equal(TypeKind.Class, f.Type.TypeKind);
            Assert.Equal<TypeSymbol>(a, f.Type);
            Assert.Equal(Accessibility.Private, f.DeclaredAccessibility);
            var gs = a.GetMembers("G");
            Assert.Equal(2, gs.Length);
            foreach (var g in gs)
            {
                Assert.Equal(a, (g as FieldSymbol).Type); // duplicate, but all the same.
            }

            var errors = comp.GetDeclarationDiagnostics();
            var one = errors.Single();
            Assert.Equal(ErrorCode.ERR_DuplicateNameInClass, (ErrorCode)one.Code);
        }

        // Two identically-declared fields: GetMembers returns both symbols.
        [Fact]
        public void Ambig1()
        {
            var text =
@"
class A { 
  A F; 
  A F; 
}
";
            var comp = CreateCompilation(text);
            var global = comp.GlobalNamespace;
            var a = global.GetTypeMembers("A", 0).Single();
            var fs = a.GetMembers("F");
            Assert.Equal(2, fs.Length);
            foreach (var f in fs)
            {
                Assert.Equal(a, (f as FieldSymbol).Type);
            }
        }

        // const/volatile/static are reflected on the symbols; volatile adds
        // the IsVolatile required custom modifier.
        [WorkItem(537237, "http://vstfdevdiv:8080/DevDiv2/DevDiv/_workitems/edit/537237")]
        [Fact]
        public void FieldModifiers()
        {
            var text =
@"
class A
{
    internal protected const long N1 = 0;
    public volatile byte N2 = 0;
    private static char N3 = ' ';
}
";
            var comp = CreateCompilation(text);
            var global = comp.GlobalNamespace;
            var a = global.GetTypeMembers("A", 0).Single();

            var n1 = a.GetMembers("N1").Single() as FieldSymbol;
            Assert.True(n1.IsConst);
            Assert.False(n1.IsVolatile);
            Assert.True(n1.IsStatic); // const implies static
            Assert.Equal(0, n1.CustomModifiers.Length);

            var n2 = a.GetMembers("N2").Single() as FieldSymbol;
            Assert.False(n2.IsConst);
            Assert.True(n2.IsVolatile);
            Assert.False(n2.IsStatic);
            Assert.Equal(1, n2.CustomModifiers.Length);
            CustomModifier mod = n2.CustomModifiers[0];
            Assert.False(mod.IsOptional);
            // "[missing]" because this compilation has no core library reference.
            Assert.Equal("System.Runtime.CompilerServices.IsVolatile[missing]", mod.Modifier.ToTestDisplayString());

            var n3 = a.GetMembers("N3").Single() as FieldSymbol;
            Assert.False(n3.IsConst);
            Assert.False(n3.IsVolatile);
            Assert.True(n3.IsStatic);
            Assert.Equal(0, n3.CustomModifiers.Length);
        }

        // A nullable field displays as "System.Int32?" and its type is the
        // Nullable<T> struct.
        [Fact]
        public void Nullable()
        {
            var text = @"
class A
{
    int? F = null;
}
";
            var comp = CreateCompilationWithMscorlib(text);
            var global = comp.GlobalNamespace;
            var a = global.GetTypeMembers("A", 0).Single();
            var sym = a.GetMembers("F").Single() as FieldSymbol;
            Assert.Equal("System.Int32? A.F", sym.ToTestDisplayString());
            Assert.Equal(TypeKind.Struct, sym.Type.TypeKind);
            Assert.Equal("System.Int32?", sym.Type.ToTestDisplayString());
        }

        // Fields of a generic nested struct: type parameters of both the
        // outer class and the struct flow into the display strings.
        [Fact]
        public void Generic01()
        {
            var text =
@"public class C<T>
{
    internal struct S<V>
    {
        public System.Collections.Generic.List<T> M<V>(V p) { return null; }
        private System.Collections.Generic.List<T> field1;
        internal System.Collections.Generic.IList<V> field2;
        public S<string> field3;
    }
}
";
            var comp = CreateCompilationWithMscorlib(text);
            var type1 = comp.GlobalNamespace.GetTypeMembers("C", 1).Single();
            var type2 = type1.GetTypeMembers("S").Single();
            var s = type2.GetMembers("M").Single() as MethodSymbol;
            Assert.Equal("M", s.Name);
            Assert.Equal("System.Collections.Generic.List<T> C<T>.S<V>.M<V>(V p)", s.ToTestDisplayString());

            var sym = type2.GetMembers("field1").Single() as FieldSymbol;
            Assert.Equal("System.Collections.Generic.List<T> C<T>.S<V>.field1", sym.ToTestDisplayString());
            Assert.Equal(TypeKind.Class, sym.Type.TypeKind);
            Assert.Equal("System.Collections.Generic.List<T>", sym.Type.ToTestDisplayString());

            sym = type2.GetMembers("field2").Single() as FieldSymbol;
            Assert.Equal("System.Collections.Generic.IList<V> C<T>.S<V>.field2", sym.ToTestDisplayString());
            Assert.Equal(TypeKind.Interface, sym.Type.TypeKind);
            Assert.Equal("System.Collections.Generic.IList<V>", sym.Type.ToTestDisplayString());

            sym = type2.GetMembers("field3").Single() as FieldSymbol;
            Assert.Equal("C<T>.S<System.String> C<T>.S<V>.field3", sym.ToTestDisplayString());
            Assert.Equal(TypeKind.Struct, sym.Type.TypeKind);
            Assert.Equal("C<T>.S<System.String>", sym.Type.ToTestDisplayString());
        }

        // Escaped identifiers (@in/@out): Name is unescaped, ToString keeps
        // the escape when the name is a keyword.
        [WorkItem(537401, "http://vstfdevdiv:8080/DevDiv2/DevDiv/_workitems/edit/537401")]
        [Fact]
        public void EventEscapedIdentifier()
        {
            var text = @"
delegate void @out();
class C1
{
    @out @in;
}
";
            var comp = CreateCompilationWithMscorlib(Parse(text));
            NamedTypeSymbol c1 = (NamedTypeSymbol)comp.SourceModule.GlobalNamespace.GetMembers("C1").Single();
            FieldSymbol ein = (FieldSymbol)c1.GetMembers("in").Single();
            Assert.Equal("in", ein.Name);
            Assert.Equal("C1.@in", ein.ToString());
            NamedTypeSymbol dout = (NamedTypeSymbol)ein.Type;
            Assert.Equal("out", dout.Name);
            Assert.Equal("@out", dout.ToString());
        }

        // A const field with no initializer still produces a const symbol,
        // but one with no constant value.
        [WorkItem(539653, "http://vstfdevdiv:8080/DevDiv2/DevDiv/_workitems/edit/539653")]
        [Fact]
        public void ConstFieldWithoutValueErr()
        {
            var text = @"
class C
{
    const int x;
}
";
            var comp = CreateCompilationWithMscorlib(Parse(text));
            NamedTypeSymbol type1 = (NamedTypeSymbol)comp.SourceModule.GlobalNamespace.GetMembers("C").Single();
            FieldSymbol mem = (FieldSymbol)type1.GetMembers("x").Single();
            Assert.Equal("x", mem.Name);
            Assert.True(mem.IsConst);
            Assert.False(mem.HasConstantValue);
            Assert.Equal(null, mem.ConstantValue);
        }

        // Cascading parse/semantic diagnostics for misplaced 'const'
        // modifiers under the current language version.
        [WorkItem(543538, "http://vstfdevdiv:8080/DevDiv2/DevDiv/_workitems/edit/543538")]
        [Fact]
        public void Error_InvalidConst()
        {
            var source = @"
class A
{
    const delegate void D();
    protected virtual void Finalize const () { }
}
";

            // CONSIDER: Roslyn's cascading errors are much uglier than Dev10's.
            CreateCompilationWithMscorlib(source, parseOptions: TestOptions.Regular).VerifyDiagnostics(
                // (4,11): error CS1031: Type expected
                //     const delegate void D();
                Diagnostic(ErrorCode.ERR_TypeExpected, "delegate").WithLocation(4, 11),
                // (4,11): error CS1001: Identifier expected
                //     const delegate void D();
                Diagnostic(ErrorCode.ERR_IdentifierExpected, "delegate").WithLocation(4, 11),
                // (4,11): error CS0145: A const field requires a value to be provided
                //     const delegate void D();
                Diagnostic(ErrorCode.ERR_ConstValueRequired, "delegate").WithLocation(4, 11),
                // (4,11): error CS1002: ; expected
                //     const delegate void D();
                Diagnostic(ErrorCode.ERR_SemicolonExpected, "delegate").WithLocation(4, 11),
                // (5,37): error CS1002: ; expected
                //     protected virtual void Finalize const () { }
                Diagnostic(ErrorCode.ERR_SemicolonExpected, "const").WithLocation(5, 37),
                // (5,44): error CS8124: Tuple must contain at least two elements.
                //     protected virtual void Finalize const () { }
                Diagnostic(ErrorCode.ERR_TupleTooFewElements, ")").WithLocation(5, 44),
                // (5,46): error CS1001: Identifier expected
                //     protected virtual void Finalize const () { }
                Diagnostic(ErrorCode.ERR_IdentifierExpected, "{").WithLocation(5, 46),
                // (5,46): error CS0145: A const field requires a value to be provided
                //     protected virtual void Finalize const () { }
                Diagnostic(ErrorCode.ERR_ConstValueRequired, "{").WithLocation(5, 46),
                // (5,46): error CS1003: Syntax error, ',' expected
                //     protected virtual void Finalize const () { }
                Diagnostic(ErrorCode.ERR_SyntaxError, "{").WithArguments(",", "{").WithLocation(5, 46),
                // (5,48): error CS1002: ; expected
                //     protected virtual void Finalize const () { }
                Diagnostic(ErrorCode.ERR_SemicolonExpected, "}").WithLocation(5, 48),
                // (6,1): error CS1022: Type or namespace definition, or end-of-file expected
                // }
                Diagnostic(ErrorCode.ERR_EOFExpected, "}").WithLocation(6, 1),
                // (5,28): error CS0106: The modifier 'virtual' is not valid for this item
                //     protected virtual void Finalize const () { }
                Diagnostic(ErrorCode.ERR_BadMemberFlag, "Finalize").WithArguments("virtual").WithLocation(5, 28),
                // (5,46): error CS0102: The type 'A' already contains a definition for ''
                //     protected virtual void Finalize const () { }
                Diagnostic(ErrorCode.ERR_DuplicateNameInClass, "").WithArguments("A", "").WithLocation(5, 46),
                // (5,37): error CS0283: The type '(?, ?)' cannot be declared const
                //     protected virtual void Finalize const () { }
                Diagnostic(ErrorCode.ERR_BadConstType, "const").WithArguments("(?, ?)").WithLocation(5, 37),
                // (5,43): error CS8179: Predefined type 'System.ValueTuple`2' is not defined or imported
                //     protected virtual void Finalize const () { }
                Diagnostic(ErrorCode.ERR_PredefinedValueTupleTypeNotFound, "()").WithArguments("System.ValueTuple`2").WithLocation(5, 43),
                // (5,23): error CS0670: Field cannot have void type
                //     protected virtual void Finalize const () { }
                Diagnostic(ErrorCode.ERR_FieldCantHaveVoidType, "void").WithLocation(5, 23),
                // (5,28): warning CS0649: Field 'A.Finalize' is never assigned to, and will always have its default value 
                //     protected virtual void Finalize const () { }
                Diagnostic(ErrorCode.WRN_UnassignedInternalField, "Finalize").WithArguments("A.Finalize", "").WithLocation(5, 28)
                );
        }

        // Same source under C# 6: identical cascade plus the "tuples not
        // available in C# 6" feature diagnostic.
        [WorkItem(543538, "http://vstfdevdiv:8080/DevDiv2/DevDiv/_workitems/edit/543538")]
        [Fact]
        public void Error_InvalidConstWithCSharp6()
        {
            var source = @"
class A
{
    const delegate void D();
    protected virtual void Finalize const () { }
}
";

            // CONSIDER: Roslyn's cascading errors are much uglier than Dev10's.
            CreateCompilationWithMscorlib(Parse(source, options: TestOptions.Regular.WithLanguageVersion(LanguageVersion.CSharp6))).VerifyDiagnostics(
                // (4,11): error CS1031: Type expected
                //     const delegate void D();
                Diagnostic(ErrorCode.ERR_TypeExpected, "delegate").WithLocation(4, 11),
                // (4,11): error CS1001: Identifier expected
                //     const delegate void D();
                Diagnostic(ErrorCode.ERR_IdentifierExpected, "delegate").WithLocation(4, 11),
                // (4,11): error CS0145: A const field requires a value to be provided
                //     const delegate void D();
                Diagnostic(ErrorCode.ERR_ConstValueRequired, "delegate").WithLocation(4, 11),
                // (4,11): error CS1002: ; expected
                //     const delegate void D();
                Diagnostic(ErrorCode.ERR_SemicolonExpected, "delegate").WithLocation(4, 11),
                // (5,37): error CS1002: ; expected
                //     protected virtual void Finalize const () { }
                Diagnostic(ErrorCode.ERR_SemicolonExpected, "const").WithLocation(5, 37),
                // (5,43): error CS8059: Feature 'tuples' is not available in C# 6. Please use language version 7 or greater.
                //     protected virtual void Finalize const () { }
                Diagnostic(ErrorCode.ERR_FeatureNotAvailableInVersion6, "()").WithArguments("tuples", "7").WithLocation(5, 43),
                // (5,44): error CS8124: Tuple must contain at least two elements.
                //     protected virtual void Finalize const () { }
                Diagnostic(ErrorCode.ERR_TupleTooFewElements, ")").WithLocation(5, 44),
                // (5,46): error CS1001: Identifier expected
                //     protected virtual void Finalize const () { }
                Diagnostic(ErrorCode.ERR_IdentifierExpected, "{").WithLocation(5, 46),
                // (5,46): error CS0145: A const field requires a value to be provided
                //     protected virtual void Finalize const () { }
                Diagnostic(ErrorCode.ERR_ConstValueRequired, "{").WithLocation(5, 46),
                // (5,46): error CS1003: Syntax error, ',' expected
                //     protected virtual void Finalize const () { }
                Diagnostic(ErrorCode.ERR_SyntaxError, "{").WithArguments(",", "{").WithLocation(5, 46),
                // (5,48): error CS1002: ; expected
                //     protected virtual void Finalize const () { }
                Diagnostic(ErrorCode.ERR_SemicolonExpected, "}").WithLocation(5, 48),
                // (6,1): error CS1022: Type or namespace definition, or end-of-file expected
                // }
                Diagnostic(ErrorCode.ERR_EOFExpected, "}").WithLocation(6, 1),
                // (5,28): error CS0106: The modifier 'virtual' is not valid for this item
                //     protected virtual void Finalize const () { }
                Diagnostic(ErrorCode.ERR_BadMemberFlag, "Finalize").WithArguments("virtual").WithLocation(5, 28),
                // (5,46): error CS0102: The type 'A' already contains a definition for ''
                //     protected virtual void Finalize const () { }
                Diagnostic(ErrorCode.ERR_DuplicateNameInClass, "").WithArguments("A", "").WithLocation(5, 46),
                // (5,37): error CS0283: The type '(?, ?)' cannot be declared const
                //     protected virtual void Finalize const () { }
                Diagnostic(ErrorCode.ERR_BadConstType, "const").WithArguments("(?, ?)").WithLocation(5, 37),
                // (5,43): error CS8179: Predefined type 'System.ValueTuple`2' is not defined or imported
                //     protected virtual void Finalize const () { }
                Diagnostic(ErrorCode.ERR_PredefinedValueTupleTypeNotFound, "()").WithArguments("System.ValueTuple`2").WithLocation(5, 43),
                // (5,23): error CS0670: Field cannot have void type
                //     protected virtual void Finalize const () { }
                Diagnostic(ErrorCode.ERR_FieldCantHaveVoidType, "void").WithLocation(5, 23),
                // (5,28): warning CS0649: Field 'A.Finalize' is never assigned to, and will always have its default value 
                //     protected virtual void Finalize const () { }
                Diagnostic(ErrorCode.WRN_UnassignedInternalField, "Finalize").WithArguments("A.Finalize", "").WithLocation(5, 28)
                );
        }

        // One unknown-type error is reported once even when the declaration
        // has multiple declarators.
        [WorkItem(543791, "http://vstfdevdiv:8080/DevDiv2/DevDiv/_workitems/edit/543791")]
        [Fact]
        public void MultipleDeclaratorsOneError()
        {
            var source = @"
class A
{
    Unknown a, b;
}
";
            CreateCompilationWithMscorlib(source).VerifyDiagnostics(
                // (4,5): error CS0246: The type or namespace name 'Unknown' could not be found (are you missing a using directive or an assembly reference?)
                Diagnostic(ErrorCode.ERR_SingleTypeNameNotFound, "Unknown").WithArguments("Unknown"),
                // (4,13): warning CS0169: The field 'A.a' is never used
                Diagnostic(ErrorCode.WRN_UnreferencedField, "a").WithArguments("A.a"),
                // (4,16): warning CS0169: The field 'A.b' is never used
                Diagnostic(ErrorCode.WRN_UnreferencedField, "b").WithArguments("A.b"));
        }

        /// <summary>
        /// Fields named "value__" should be marked rtspecialname.
        /// </summary>
        [WorkItem(546185, "http://vstfdevdiv:8080/DevDiv2/DevDiv/_workitems/edit/546185")]
        [ClrOnlyFact(ClrOnlyReason.Unknown, Skip = "https://github.com/dotnet/roslyn/issues/6190")]
        public void RTSpecialName()
        {
            var source =
@"class A
{
    object value__ = null;
}
class B
{
    object VALUE__ = null;
}
class C
{
    void value__() { }
}
class D
{
    object value__ { get; set; }
}
class E
{
    event System.Action value__;
}
class F
{
    event System.Action value__ { add { } remove { } }
}
class G
{
    interface value__ { }
}
class H
{
    class value__ { }
}
class K
{
    static System.Action<object> F()
    {
        object value__;
        return v => { value__ = v; };
    }
}";
            var compilation = CreateCompilationWithMscorlib(source);
            compilation.VerifyDiagnostics(
                // (19,25): warning CS0067: The event 'E.value__' is never used
                //     event System.Action value__;
                Diagnostic(ErrorCode.WRN_UnreferencedEvent, "value__").WithArguments("E.value__"),
                // (7,12): warning CS0414: The field 'B.VALUE__' is assigned but its value is never used
                //     object VALUE__ = null;
                Diagnostic(ErrorCode.WRN_UnreferencedFieldAssg, "VALUE__").WithArguments("B.VALUE__"),
                // (3,12): warning CS0414: The field 'A.value__' is assigned but its value is never used
                //     object value__ = null;
                Diagnostic(ErrorCode.WRN_UnreferencedFieldAssg, "value__").WithArguments("A.value__"));

            // PEVerify should not report "Field value__ ... is not marked RTSpecialName".
            var verifier = new CompilationVerifier(this, compilation);
            verifier.EmitAndVerify(
                "Error: Field name value__ is reserved for Enums only.",
                "Error: Field name value__ is reserved for Enums only.",
                "Error: Field name value__ is reserved for Enums only.");
        }
    }
}
weltkante/roslyn
src/Compilers/CSharp/Test/Symbol/Symbols/Source/FieldTests.cs
C#
apache-2.0
21,652
# # Copyright:: Copyright (c) 2014 Chef Software Inc. # License:: Apache License, Version 2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # require 'spec_helper' require 'chef-dk/policyfile_lock' describe ChefDK::PolicyfileLock, "when reading a Policyfile.lock" do let(:valid_lock_data) do { "name" => "example", "run_list" => [ "recipe[cookbook::recipe_name]" ], "named_run_lists" => { "fast-deploy" => [ "recipe[cookbook::deployit]" ] }, "cookbook_locks" => { # TODO: add some valid locks }, "default_attributes" => { "foo" => "bar" }, "override_attributes" => { "override_foo" => "override_bar" }, "solution_dependencies" => { "Policyfile" => [], "dependencies" => {} } } end let(:storage_config) { ChefDK::Policyfile::StorageConfig.new } let(:lockfile) { ChefDK::PolicyfileLock.new(storage_config) } describe "populating the deserialized lock" do before do lockfile.build_from_lock_data(valid_lock_data) end it "includes the run list" do expect(lockfile.run_list).to eq(["recipe[cookbook::recipe_name]"]) end it "includes the named run lists" do expect(lockfile.named_run_lists).to eq({ "fast-deploy" => [ "recipe[cookbook::deployit]" ] }) end it "includes the cookbook locks" do expect(lockfile.cookbook_locks).to eq({}) end it "includes the attributes" do expect(lockfile.default_attributes).to eq({"foo" => "bar"}) expect(lockfile.override_attributes).to eq({"override_foo" => "override_bar"}) end end describe "validating required fields" do it "does not raise an error when all fields are 
valid" do expect { lockfile.build_from_lock_data(valid_lock_data) }.to_not raise_error end it "requires the name to be present" do missing_name = valid_lock_data.dup missing_name.delete("name") expect { lockfile.build_from_lock_data(missing_name) }.to raise_error(ChefDK::InvalidLockfile) blank_name = valid_lock_data.dup blank_name["name"] = "" expect { lockfile.build_from_lock_data(blank_name) }.to raise_error(ChefDK::InvalidLockfile) invalid_name = valid_lock_data.dup invalid_name["name"] = {} expect { lockfile.build_from_lock_data(invalid_name) }.to raise_error(ChefDK::InvalidLockfile) end it "requires the run_list to be present" do no_run_list = valid_lock_data.dup no_run_list.delete("run_list") expect { lockfile.build_from_lock_data(no_run_list) }.to raise_error(ChefDK::InvalidLockfile) bad_run_list = valid_lock_data.dup bad_run_list["run_list"] = "bad data" expect { lockfile.build_from_lock_data(bad_run_list) }.to raise_error(ChefDK::InvalidLockfile) end it "validates the format of run_list items" do bad_run_list = valid_lock_data.dup bad_run_list["run_list"] = [ "bad data" ] expect { lockfile.build_from_lock_data(bad_run_list) }.to raise_error(ChefDK::InvalidLockfile) end it "allows the named_run_lists field to be absent" do missing_named_run_lists = valid_lock_data.dup missing_named_run_lists.delete("named_run_lists") expect { lockfile.build_from_lock_data(missing_named_run_lists) }.to_not raise_error end it "requires the named_run_lists field to be a Hash if present" do bad_named_run_lists = valid_lock_data.dup bad_named_run_lists["named_run_lists"] = false expect { lockfile.build_from_lock_data(bad_named_run_lists) }.to raise_error(ChefDK::InvalidLockfile) end it "requires the keys in named_run_lists to be strings" do bad_named_run_lists = valid_lock_data.dup bad_named_run_lists["named_run_lists"] = { 42 => [] } expect { lockfile.build_from_lock_data(bad_named_run_lists) }.to raise_error(ChefDK::InvalidLockfile) end it "requires the values in 
named_run_lists to be arrays" do bad_named_run_lists = valid_lock_data.dup bad_named_run_lists["named_run_lists"] = { "bad" => 42 } expect { lockfile.build_from_lock_data(bad_named_run_lists) }.to raise_error(ChefDK::InvalidLockfile) end it "requires the values in named_run_lists to be valid run lists" do bad_named_run_lists = valid_lock_data.dup bad_named_run_lists["named_run_lists"] = { "bad" => [ 42 ] } expect { lockfile.build_from_lock_data(bad_named_run_lists) }.to raise_error(ChefDK::InvalidLockfile) end it "requires the `cookbook_locks` section be present and its value is a Hash" do missing_locks = valid_lock_data.dup missing_locks.delete("cookbook_locks") expect { lockfile.build_from_lock_data(missing_locks) }.to raise_error(ChefDK::InvalidLockfile) invalid_locks = valid_lock_data.dup invalid_locks["cookbook_locks"] = [] expect { lockfile.build_from_lock_data(invalid_locks) }.to raise_error(ChefDK::InvalidLockfile) end it "requires the `default_attributes` section be present and its value is a Hash" do missing_attrs = valid_lock_data.dup missing_attrs.delete("default_attributes") expect { lockfile.build_from_lock_data(missing_attrs) }.to raise_error(ChefDK::InvalidLockfile) invalid_attrs = valid_lock_data.dup invalid_attrs["default_attributes"] = [] expect { lockfile.build_from_lock_data(invalid_attrs) }.to raise_error(ChefDK::InvalidLockfile) end it "requires the `override_attributes` section be present and its value is a Hash" do missing_attrs = valid_lock_data.dup missing_attrs.delete("override_attributes") expect { lockfile.build_from_lock_data(missing_attrs) }.to raise_error(ChefDK::InvalidLockfile) invalid_attrs = valid_lock_data.dup invalid_attrs["override_attributes"] = [] expect { lockfile.build_from_lock_data(invalid_attrs) }.to raise_error(ChefDK::InvalidLockfile) end describe "validating solution_dependencies" do it "requires the `solution_dependencies' section be present" do missing_soln_deps = valid_lock_data.dup 
missing_soln_deps.delete("solution_dependencies") expect { lockfile.build_from_lock_data(missing_soln_deps) }.to raise_error(ChefDK::InvalidLockfile) end it "requires the solution_dependencies object be a Hash" do invalid_soln_deps = valid_lock_data.dup invalid_soln_deps["solution_dependencies"] = [] expect { lockfile.build_from_lock_data(invalid_soln_deps) }.to raise_error(ChefDK::InvalidLockfile) end it "requires the solution_dependencies object have a 'Policyfile' and 'dependencies' key" do missing_keys_soln_deps = valid_lock_data.dup missing_keys_soln_deps["solution_dependencies"] = {} expect { lockfile.build_from_lock_data(missing_keys_soln_deps) }.to raise_error(ChefDK::InvalidLockfile) missing_policyfile_key = valid_lock_data.dup missing_policyfile_key["solution_dependencies"] = {"dependencies" => {} } expect { lockfile.build_from_lock_data(missing_policyfile_key) }.to raise_error(ChefDK::InvalidLockfile) missing_dependencies_key = valid_lock_data.dup missing_dependencies_key["solution_dependencies"] = { "Policyfile" => [] } expect { lockfile.build_from_lock_data(missing_dependencies_key) }.to raise_error(ChefDK::InvalidLockfile) end it "requires the Policyfile dependencies be an Array" do invalid_policyfile_deps = valid_lock_data.dup invalid_policyfile_deps["solution_dependencies"] = {"Policyfile" => 42, "dependencies" => {} } expect { lockfile.build_from_lock_data(invalid_policyfile_deps) }.to raise_error(ChefDK::InvalidLockfile) end it %q(requires the Policyfile dependencies be formatted like [ "COOKBOOK_NAME", "CONSTRAINT" ]) do invalid_policyfile_deps_content = valid_lock_data.dup invalid_policyfile_deps_content["solution_dependencies"] = { "Policyfile" => [ "bad" ], "dependencies" => {} } expect { lockfile.build_from_lock_data(invalid_policyfile_deps_content) }.to raise_error(ChefDK::InvalidLockfile) invalid_policyfile_deps_content2 = valid_lock_data.dup invalid_policyfile_deps_content2["solution_dependencies"] = { "Policyfile" => [ [42, "~> 2.0"] ], 
"dependencies" => {} } expect { lockfile.build_from_lock_data(invalid_policyfile_deps_content2) }.to raise_error(ChefDK::InvalidLockfile) invalid_policyfile_deps_content3 = valid_lock_data.dup invalid_policyfile_deps_content3["solution_dependencies"] = { "Policyfile" => [ ["cookbook_name", "bad"] ], "dependencies" => {} } expect { lockfile.build_from_lock_data(invalid_policyfile_deps_content3) }.to raise_error(ChefDK::InvalidLockfile) end it "requires the cookbook dependencies be a Hash" do invalid_cookbook_deps = valid_lock_data.dup invalid_cookbook_deps["solution_dependencies"] = { "Policyfile" => [], "dependencies" => 42 } expect { lockfile.build_from_lock_data(invalid_cookbook_deps) }.to raise_error(ChefDK::InvalidLockfile) end it "requires the cookbook dependencies entries be in the correct format" do invalid_cookbook_deps = valid_lock_data.dup bad_deps = { 42 => 42 } invalid_cookbook_deps["solution_dependencies"] = { "Policyfile" => [], "dependencies" => bad_deps } expect { lockfile.build_from_lock_data(invalid_cookbook_deps) }.to raise_error(ChefDK::InvalidLockfile) invalid_cookbook_deps2 = valid_lock_data.dup bad_deps2 = { "bad-format" => [] } invalid_cookbook_deps2["solution_dependencies"] = { "Policyfile" => [], "dependencies" => bad_deps2 } expect { lockfile.build_from_lock_data(invalid_cookbook_deps2) }.to raise_error(ChefDK::InvalidLockfile) invalid_cookbook_deps3 = valid_lock_data.dup bad_deps3 = { "cookbook (1.0.0)" => 42 } invalid_cookbook_deps3["solution_dependencies"] = { "Policyfile" => [], "dependencies" => bad_deps3 } expect { lockfile.build_from_lock_data(invalid_cookbook_deps3) }.to raise_error(ChefDK::InvalidLockfile) invalid_cookbook_deps4 = valid_lock_data.dup bad_deps4 = { "cookbook (1.0.0)" => [ 42 ] } invalid_cookbook_deps4["solution_dependencies"] = { "Policyfile" => [], "dependencies" => bad_deps4 } expect { lockfile.build_from_lock_data(invalid_cookbook_deps4) }.to raise_error(ChefDK::InvalidLockfile) end end describe "validating 
cookbook_locks entries" do # TODO: also check non-cached cookbook let(:valid_cookbook_lock) do { "version" => "1.0.0", "identifier" => "68c13b136a49b4e66cfe9d8aa2b5a85167b5bf9b", "dotted_decimal_identifier" => "111.222.333", "cache_key" => "foo-1.0.0", "source_options" => {} } end it "requires that each cookbook lock be a Hash" do invalid_cookbook_lock = valid_lock_data.dup invalid_cookbook_lock["cookbook_locks"] = { "foo" => 42 } expect { lockfile.build_from_lock_data(invalid_cookbook_lock) }.to raise_error(ChefDK::InvalidLockfile) end it "requires that cookbook locks not be empty" do invalid_cookbook_lock = valid_lock_data.dup invalid_cookbook_lock["cookbook_locks"] = { "foo" => {} } expect { lockfile.build_from_lock_data(invalid_cookbook_lock) }.to raise_error(ChefDK::InvalidLockfile) end it "requires that each cookbook lock have a version" do invalid_lockfile = valid_lock_data.dup invalid_cookbook_lock = valid_cookbook_lock.dup invalid_cookbook_lock.delete("version") invalid_lockfile["cookbook_locks"] = { "foo" => invalid_cookbook_lock } expect { lockfile.build_from_lock_data(invalid_lockfile) }.to raise_error(ChefDK::InvalidLockfile) end it "requires that the version be a string" do invalid_lockfile = valid_lock_data.dup invalid_cookbook_lock = valid_cookbook_lock.dup invalid_cookbook_lock["version"] = 42 invalid_lockfile["cookbook_locks"] = { "foo" => invalid_cookbook_lock } expect { lockfile.build_from_lock_data(invalid_lockfile) }.to raise_error(ChefDK::InvalidLockfile) end it "requires that each cookbook lock have an identifier" do invalid_lockfile = valid_lock_data.dup invalid_cookbook_lock = valid_cookbook_lock.dup invalid_cookbook_lock.delete("identifier") invalid_lockfile["cookbook_locks"] = { "foo" => invalid_cookbook_lock } expect { lockfile.build_from_lock_data(invalid_lockfile) }.to raise_error(ChefDK::InvalidLockfile) end it "requires that the identifier be a string" do invalid_lockfile = valid_lock_data.dup invalid_cookbook_lock = 
valid_cookbook_lock.dup invalid_cookbook_lock["identifier"] = 42 invalid_lockfile["cookbook_locks"] = { "foo" => invalid_cookbook_lock } expect { lockfile.build_from_lock_data(invalid_lockfile) }.to raise_error(ChefDK::InvalidLockfile) end it "requires that a cookbook lock have a key named `cache_key'" do invalid_lockfile = valid_lock_data.dup invalid_cookbook_lock = valid_cookbook_lock.dup invalid_cookbook_lock.delete("cache_key") invalid_lockfile["cookbook_locks"] = { "foo" => invalid_cookbook_lock } expect { lockfile.build_from_lock_data(invalid_lockfile) }.to raise_error(ChefDK::InvalidLockfile) end it "requires that the cache_key be a string or null" do invalid_lockfile = valid_lock_data.dup invalid_cookbook_lock = valid_cookbook_lock.dup invalid_cookbook_lock["cache_key"] = 42 invalid_lockfile["cookbook_locks"] = { "foo" => invalid_cookbook_lock } expect { lockfile.build_from_lock_data(invalid_lockfile) }.to raise_error(ChefDK::InvalidLockfile) end it "requires that a cookbook lock have a source_options attribute" do invalid_lockfile = valid_lock_data.dup invalid_cookbook_lock = valid_cookbook_lock.dup invalid_cookbook_lock.delete("source_options") invalid_lockfile["cookbook_locks"] = { "foo" => invalid_cookbook_lock } expect { lockfile.build_from_lock_data(invalid_lockfile) }.to raise_error(ChefDK::InvalidLockfile) end it "requires that source options be a Hash" do invalid_lockfile = valid_lock_data.dup invalid_cookbook_lock = valid_cookbook_lock.dup invalid_cookbook_lock["source_options"] = 42 invalid_lockfile["cookbook_locks"] = { "foo" => invalid_cookbook_lock } expect { lockfile.build_from_lock_data(invalid_lockfile) }.to raise_error(ChefDK::InvalidLockfile) end it "requires that a cookbook lock be a valid local cookbook if `cache_key' is null/nil" do valid_lock_with_local_cookbook = valid_lock_data.dup valid_local_cookbook = valid_cookbook_lock.dup valid_local_cookbook["cache_key"] = nil valid_local_cookbook["source"] = "path/to/foo" 
valid_lock_with_local_cookbook["cookbook_locks"] = { "foo" => valid_local_cookbook } expect { lockfile.build_from_lock_data(valid_lock_with_local_cookbook) }.to_not raise_error invalid_lock_with_local_cookbook = valid_lock_data.dup invalid_local_cookbook = valid_cookbook_lock.dup invalid_local_cookbook["cache_key"] = nil invalid_local_cookbook["source"] = 42 invalid_lock_with_local_cookbook["cookbook_locks"] = { "foo" => invalid_local_cookbook } expect { lockfile.build_from_lock_data(invalid_lock_with_local_cookbook) }.to raise_error(ChefDK::InvalidLockfile) end it "requires that a cookbook lock w/ a key named `cache_key' be a valid cached cookbook structure" do valid_lock_with_cached_cookbook = valid_lock_data.dup valid_cached_cookbook = valid_cookbook_lock.dup valid_cached_cookbook["cache_key"] = nil valid_cached_cookbook["source"] = "path/to/foo" valid_lock_with_cached_cookbook["cookbook_locks"] = { "foo" => valid_cached_cookbook } expect { lockfile.build_from_lock_data(valid_lock_with_cached_cookbook) }.to_not raise_error invalid_lock_with_cached_cookbook = valid_lock_data.dup invalid_cached_cookbook = valid_cookbook_lock.dup invalid_cached_cookbook["cache_key"] = 42 invalid_lock_with_cached_cookbook["cookbook_locks"] = { "foo" => invalid_cached_cookbook } expect { lockfile.build_from_lock_data(invalid_lock_with_cached_cookbook) }.to raise_error(ChefDK::InvalidLockfile) end end end describe "populating lock data from an archive" do let(:valid_cookbook_lock) do { "version" => "1.0.0", "identifier" => "68c13b136a49b4e66cfe9d8aa2b5a85167b5bf9b", "dotted_decimal_identifier" => "111.222.333", "cache_key" => nil, "source" => "path/to/foo", "source_options" => { path: "path/to/foo"}, "scm_info" => nil } end let(:lock_data) do valid_lock_with_cached_cookbook = valid_lock_data.dup valid_cached_cookbook = valid_cookbook_lock.dup valid_cached_cookbook["cache_key"] = nil valid_cached_cookbook["source"] = "path/to/foo" valid_lock_with_cached_cookbook["cookbook_locks"] = { 
"foo" => valid_cached_cookbook } valid_lock_with_cached_cookbook end before do lockfile.build_from_archive(lock_data) end it "creates cookbook locks as archived cookbooks" do locks = lockfile.cookbook_locks expect(locks).to have_key("foo") cb_foo = locks["foo"] expect(cb_foo).to be_a(ChefDK::Policyfile::ArchivedCookbook) expected_path = File.join(storage_config.relative_paths_root, "cookbook_artifacts", "foo-68c13b136a49b4e66cfe9d8aa2b5a85167b5bf9b") expect(cb_foo.cookbook_path).to eq(expected_path) expect(cb_foo.dotted_decimal_identifier).to eq("111.222.333") expect(locks["foo"].to_lock).to eq(valid_cookbook_lock) end end end
vinyar/chef-dk
spec/unit/policyfile_lock_serialization_spec.rb
Ruby
apache-2.0
18,676
Setting up your development environment ======================================= To begin with, please follow the steps outlined in: [Apache Apex Development Environment Setup](../apex_development_setup.md) to setup your development environment; you can skip the sandbox download and installation if you already have a Hadoop cluster with Datatorrent RTS installed where you can deploy applications. Sample input files ------------------ For this tutorial, you need some sample text files to use as input to the application. Binary files such as PDF or DOCX files are not suitable since they contain a lot of meaningless strings that look like words (for example, &ldquo;Wqgi&rdquo;). Similarly, files using markup languages such as XML or HTML files are also not suitable since the tag names such as `div`, `td` and `p` dominate the word counts. The RFC (Request for Comment) files that are used as de-facto specifications for internet standards are good candidates since they contain pure text; download a few of them as follows: Open a terminal and run the following commands to create a directory named `data` under your home directory and download 3 files there: cd; mkdir data; cd data wget http://tools.ietf.org/rfc/rfc1945.txt wget https://www.ietf.org/rfc/rfc2616.txt wget https://tools.ietf.org/rfc/rfc4844.txt
sanjaypujare/docs
docs/tutorials/topnwords-c1.md
Markdown
apache-2.0
1,348
/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.action.admin.indices.template.post; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.AliasValidator; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; import 
static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findConflictingV1Templates; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findConflictingV2Templates; /** * Handles simulating an index template either by name (looking it up in the * cluster state), or by a provided template configuration */ public class TransportSimulateTemplateAction extends TransportMasterNodeReadAction<SimulateTemplateAction.Request, SimulateIndexTemplateResponse> { private final MetadataIndexTemplateService indexTemplateService; private final NamedXContentRegistry xContentRegistry; private final IndicesService indicesService; private AliasValidator aliasValidator; @Inject public TransportSimulateTemplateAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, MetadataIndexTemplateService indexTemplateService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, NamedXContentRegistry xContentRegistry, IndicesService indicesService) { super(SimulateTemplateAction.NAME, transportService, clusterService, threadPool, actionFilters, SimulateTemplateAction.Request::new, indexNameExpressionResolver, SimulateIndexTemplateResponse::new, ThreadPool.Names.SAME); this.indexTemplateService = indexTemplateService; this.xContentRegistry = xContentRegistry; this.indicesService = indicesService; this.aliasValidator = new AliasValidator(); } @Override protected void masterOperation(Task task, SimulateTemplateAction.Request request, ClusterState state, ActionListener<SimulateIndexTemplateResponse> listener) throws Exception { String uuid = UUIDs.randomBase64UUID().toLowerCase(Locale.ROOT); final String temporaryIndexName = "simulate_template_index_" + uuid; final ClusterState stateWithTemplate; final String simulateTemplateToAdd; // First, if a template body was requested, we need to "fake add" that template to the // cluster state, so it can be used when we resolved settings/etc if 
(request.getIndexTemplateRequest() != null) { // we'll "locally" add the template defined by the user in the cluster state (as if it // existed in the system), either with a temporary name, or with the given name if // specified, to simulate replacing the existing template simulateTemplateToAdd = request.getTemplateName() == null ? "simulate_template_" + uuid : request.getTemplateName(); // Perform validation for things like typos in component template names MetadataIndexTemplateService.validateV2TemplateRequest(state.metadata(), simulateTemplateToAdd, request.getIndexTemplateRequest().indexTemplate()); stateWithTemplate = indexTemplateService.addIndexTemplateV2(state, request.getIndexTemplateRequest().create(), simulateTemplateToAdd, request.getIndexTemplateRequest().indexTemplate()); } else { simulateTemplateToAdd = null; stateWithTemplate = state; } // We also need the name of the template we're going to resolve, so if they specified a // name, use that, otherwise use the name of the template that was "fake added" in the previous block final String matchingTemplate; if (request.getTemplateName() == null) { // Automatically match the template that was added matchingTemplate = simulateTemplateToAdd; } else { matchingTemplate = request.getTemplateName(); } // If they didn't either specify a name that existed or a template body, we cannot simulate anything! 
if (matchingTemplate == null) { // They should have specified either a template name or the body of a template, but neither were specified listener.onFailure(new IllegalArgumentException("a template name to match or a new template body must be specified")); return; } else if (stateWithTemplate.metadata().templatesV2().containsKey(matchingTemplate) == false) { // They specified a template, but it didn't exist listener.onFailure(new IllegalArgumentException("unable to simulate template [" + matchingTemplate + "] that does not exist")); return; } final ClusterState tempClusterState = TransportSimulateIndexTemplateAction.resolveTemporaryState(matchingTemplate, temporaryIndexName, stateWithTemplate); ComposableIndexTemplate templateV2 = tempClusterState.metadata().templatesV2().get(matchingTemplate); assert templateV2 != null : "the matched template must exist"; Map<String, List<String>> overlapping = new HashMap<>(); overlapping.putAll(findConflictingV1Templates(tempClusterState, matchingTemplate, templateV2.indexPatterns())); overlapping.putAll(findConflictingV2Templates(tempClusterState, matchingTemplate, templateV2.indexPatterns())); Template template = TransportSimulateIndexTemplateAction.resolveTemplate(matchingTemplate, temporaryIndexName, stateWithTemplate, xContentRegistry, indicesService, aliasValidator); listener.onResponse(new SimulateIndexTemplateResponse(template, overlapping)); } @Override protected ClusterBlockException checkBlock(SimulateTemplateAction.Request request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); } }
nknize/elasticsearch
server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java
Java
apache-2.0
7,785
/** * Licensed to the Sakai Foundation (SF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The SF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ /* General*/ .me #entity_container { padding: 20px 0 0; } #entity_container { padding: 20px 15px 10px 20px; } #entity_container .entity_number { display: inline-block; color: #424242; } #entity_container #entity_name { word-wrap: break-word; font-size: 18px; margin-bottom: 0; padding: 0 0 2px !important; color: #454A4F; line-height: 10px; display: inline; } #entity_container #entity_name.entity_group { margin-top: 10px; } #entity_container #entity_name.entity_name_noneditable { margin-top: 8px; overflow: hidden; display: inline; } #entity_container #entity_owns { font-size: 11px; margin-top: 5px; } #entity_container #entity_owns.entity_owns_viewer { margin-top: 0; } #entity_container .entity_permissions_label { margin-left: 10px; } #entity_container .entity_comments { float: left; margin-top: 11px; } #entity_container #entity_owns.entity_owns_viewer .entity_comments { margin-top: 5px; } #entity_container #entity_actions .s3d-button { float: right; margin-left: 5px; color: #666; line-height: 17px; } #entity_container #entity_actions.entity_group .s3d-button { margin-top: 14px; } #entity_container #entity_owns.entity_owns_viewer .s3d-margin-top-5 { margin-top: 0 !important; } #entity_container 
#entity_comments_link { text-transform: lowercase; } #entity_container .entity_plaintitle { font-size: 18px; margin-top: 8px; margin-left: 10px; line-height: 12px; } #entity_container .entity_owns_actions_container { background: #fff; border: 1px solid #eee; border-radius: 3px; -webkit-border-radius: 3px; -moz-border-radius: 3px; display: inline-block; padding: 4px 0 4px 6px; line-height: 23px; height: 23px; float: left; margin-right: 7px; } #entity_container .entity_owns_actions_container .entity_owns_actions_header { float: left; } #entity_container .entity_owns_actions_container .entity_owns_actions { float: right; margin-left: 7px; padding: 0; height: 21px; margin-right: 10px; } #entity_container .entity_owns_actions_container .entity_owns_actions:hover { background: none repeat scroll 0 0 #fff; border: 1px solid #E0E3E5; box-shadow: 0 0 4px -2px #A8A8A8 inset; } #entity_container .entity_owns_actions_container .entity_owns_actions > span { color: #bbb; float: left; font-size: 13px; line-height: 20px; } #entity_container .entity_owns_actions_container .entity_owns_actions .s3d-link-button { margin: 0 9px 0 7px; } #entity_container .entity_owns_actions_container .s3d-actions-addtolibrary { margin: 3px 5px 0 0; } #entity_container .entity_owns_actions_container .entity_owns_actions > div .s3d-dropdown-list, #entity_container .entity_owns_actions_container .s3d-actions-addtolibrary .s3d-dropdown-list { color: #333; font-size: 11px; font-weight: normal; line-height: 13px; padding: 8px; text-align: left; cursor: auto; display: none; z-index: 10000; } #entity_container .entity_owns_actions_container .entity_owns_actions > div .s3d-dropdown-list .s3d-dropdown-list-arrow-up, #entity_container .entity_owns_actions_container .s3d-actions-addtolibrary .s3d-dropdown-list .s3d-dropdown-list-arrow-up { margin: -34px 0 0 50px; cursor: pointer; } #entity_container .entity_owns_actions_container .s3d-actions-addtolibrary .s3d-dropdown-list { margin-left: -63px; } 
#entity_container .entity_owns_actions_container .entity_owns_actions > div .s3d-dropdown-list .s3d-link-button { margin: 0; display: inline; } #entity_container .entity_owns_actions_container .entity_owns_actions > div .s3d-dropdown-list .s3d-link-button.ew_permissions, #entity_container .entity_owns_actions_container .s3d-actions-addtolibrary .s3d-dropdown-list .s3d-link-button.ew_permissions { display: block; margin-top: 7px; } #entity_container .entity_owns_actions_container .entity_owns_actions > div:hover .s3d-dropdown-list, #entity_container .entity_owns_actions_container .s3d-actions-addtolibrary:hover .s3d-dropdown-list { display: inline; } #entity_container .entity_owns_actions_container .entity_owns_actions > div { width: 30px; height: 21px; float: left; } #entity_container .entity_owns_actions_container .entity_owns_actions > div.selected { background-color: #eee !important; } #entity_container .entity_owns_actions_container .entity_owns_actions > div.has_counts { width: auto; } #entity_container .entity_owns_actions_container .entity_owns_actions > div.has_counts > span { margin: 0 7px 0 30px; } #entity_container .entity_owns_actions_container .entity_owns_actions > div.has_counts, #entity_container .entity_owns_actions_container .entity_owns_actions > div.has_counts:hover { background-position: 5px center; } #entity_container .entity_owns_actions_container .entity_owns_actions .entity_owns_actions_private { background: url("/devwidgets/entity/images/lock_icon_18x18.png") no-repeat center center transparent; border-radius: 3px 0 0 3px; -moz-border-radius: 3px 0 0 3px; -webkit-border-radius: 3px 0 0 3px; } #entity_container .entity_owns_actions_container .entity_owns_actions .entity_owns_actions_private:hover, #entity_container .entity_owns_actions_container .entity_owns_actions .entity_owns_actions_private.selected { background: url("/devwidgets/entity/images/lock_icon_18x18_hover.png") no-repeat center center transparent; } #entity_container 
.entity_owns_actions_container .entity_owns_actions .entity_owns_actions_institution { background: url("/devwidgets/entity/images/institution_icon_18x18.png") no-repeat center center transparent; } #entity_container .entity_owns_actions_container .entity_owns_actions .entity_owns_actions_institution:hover, #entity_container .entity_owns_actions_container .entity_owns_actions .entity_owns_actions_institution.selected { background: url("/devwidgets/entity/images/institution_icon_18x18_hover.png") no-repeat center center transparent; } #entity_container .entity_owns_actions_container .entity_owns_actions .entity_owns_actions_public { background: url("/devwidgets/entity/images/globe_icon_18x18.png") no-repeat center center transparent; border-radius: 0 3px 3px 0; -moz-border-radius: 0 3px 3px 0; -webkit-border-radius: 0 3px 3px 0; } #entity_container .entity_owns_actions_container .entity_owns_actions .entity_owns_actions_public:hover, #entity_container .entity_owns_actions_container .entity_owns_actions .entity_owns_actions_public.selected { background: url("/devwidgets/entity/images/globe_icon_18x18_hover.png") no-repeat center center transparent; } #entity_container .entity_owns_actions_container .entity_owns_actions .entity_owns_actions_collaborator { background: url("/devwidgets/entity/images/collaborator_icon_20x16.png") no-repeat center center transparent; border-radius: 0 3px 3px 0; -moz-border-radius: 0 3px 3px 0; -webkit-border-radius: 0 3px 3px 0; } #entity_container .entity_owns_actions_container .entity_owns_actions .entity_owns_actions_collaborator:hover, #entity_container .entity_owns_actions_container .entity_owns_actions .entity_owns_actions_collaborator.selected { background: url("/devwidgets/entity/images/collaborator_icon_20x16_hover.png") no-repeat center center transparent; } #entity_container .entity_owns_actions_container .entity_owns_actions .entity_owns_actions_share { background: url("/devwidgets/entity/images/share_icon_20x16.png") no-repeat 
center center transparent; border-radius: 0 3px 3px 0; -moz-border-radius: 0 3px 3px 0; -webkit-border-radius: 0 3px 3px 0; } #entity_container .entity_owns_actions_container .entity_owns_actions .entity_owns_actions_share:hover, #entity_container .entity_owns_actions_container .entity_owns_actions .entity_owns_actions_share.selected { background: url("/devwidgets/entity/images/share_icon_20x16_hover.png") no-repeat center center transparent; } #entity_container .s3d-button .entity_share_icon { background: url("/dev/images/share_icon_default_16x11.png") no-repeat scroll left top transparent; float: right; margin-left: 8px; margin-top: 3px; } #entity_container .s3d-button:hover .entity_share_icon { background: url("/dev/images/share_icon_hover_16x11.png") no-repeat scroll left top transparent; } #entity_container .entity_owns_actions_container .s3d-dropdown-list-arrow-up { background-position: center center; margin-left: 37px !important; width: 46px; height: 32px !important; } #contentpreview_download_button { line-height: 23px !important; } #entity_container .entity_content_usedby { margin-right: 10px; } #entity_container .s3d-dropdown-list { position: absolute; width: 120px; } /* Directory */ #entity_container #entity_directory_image { width: 45px; float: left; margin-right: 15px; margin-top: -14px; } #entity_container #entity_directory_image img { width: 45px; height: 45px; } /* Avatar */ #entity_container #entity_profile_picture { width: 50px; height: 50px; } #entity_container .entity_change_avatar { display: inline-block; cursor: pointer; padding: 3px 0 3px 3px; margin-right: 10px; float: left; background: 0; border: 0; } #entity_container button.entity_change_avatar::-moz-focus-inner { padding: 0; border: 0 } #entity_container .entity_change_avatar .s3d-dropdown-list { cursor: default; } #entity_container .entity_change_avatar:hover, #entity_container .entity_change_avatar.clicked { background-color: #32b3ec; } #entity_container .entity_change_avatar:hover 
.entity_profilepic_internal, #entity_container .entity_change_avatar.clicked .entity_profilepic_internal { box-shadow: none; -webkit-box-shadow: none; -moz-box-shadow: none; } #entity_container .entity_change_avatar .s3d-dropdown-menu-arrow { background: url("/devwidgets/entity/images/entity_changepic_arrow.png") no-repeat top left transparent; height: 6px; margin: 45px 1px 0 4px; width: 12px; float: right; } #entity_container .entity_change_avatar:hover .s3d-dropdown-menu-arrow, #entity_container .entity_change_avatar.clicked .s3d-dropdown-menu-arrow { background: url("/devwidgets/entity/images/entity_changepic_hover_arrow.png") no-repeat top left transparent; width: 12px; height: 6px; } #entity_container #entity_user_image.entity_change_avatar { margin-left: 18px; } #entity_container .entity_user_avatar_menu.s3d-dropdown-list { margin-left: 16px; } #entity_container .entity_user_avatar_menu.s3d-dropdown-list .s3d-dropdown-list-arrow-up { margin-left: 52px; right: auto; } #entity_container .entity_group_avatar_menu.s3d-dropdown-list .s3d-dropdown-list-arrow-up { margin-left: 50px; right: auto; } #entity_container .entity_group_image { width: 63px; height: 46px; background: url("/dev/images/group_icon.png") no-repeat transparent; float: left; margin-right: 10px; margin-top: 5px; margin-left: 5px; } #entity_container #entity_group_image { margin-top: -11px; } #entity_container #entity_group_image.s3d-dropdown-menu { float: left; padding: 5px 6px; margin: -10px 15px 0px 5px; } #entity_container #entity_group_image.s3d-dropdown-menu .s3d-dropdown-menu-arrow { background: url("/devwidgets/entity/images/entity_changepic_arrow.png") no-repeat top left transparent; height: 8px; margin-left: 7px; margin-top: 40px; width: 12px; float: right; } #entity_container #entity_group_image.s3d-dropdown-menu:hover .s3d-dropdown-menu-arrow { background: url("/devwidgets/entity/images/entity_changepic_hover_arrow.png") no-repeat top left transparent; width: 12px; height: 8px; } 
#entity_container .entity_user_image_pointer { cursor: pointer; width: 93px; margin-left: 15px !important; } #entity_container .entity_user_create_add_dropdown { position: fixed; z-index: 999; width: 150px; margin-top: 34px; } #entity_container .entity_other_profilepic { float: left; margin: -10px 15px 0px 5px; width: 52px; height: 54px; background-color: #fff; padding: 2px 2px 2px 1px; } #entity_container .entity_profilepic_internal { background-color: #FFFFFF; box-shadow: 0 0 1px 2px #AAAAAA; -webkit-box-shadow: 0 0 1px 2px #AAAAAA; float: left; height: 50px; padding: 1px; width: 50px; } /* User */ #entity_container .entity_user_action_icon { margin-left: 5px; padding-right: 20px; } #entity_container .entity_message_icon { background: url("/dev/images/message_icon.png") no-repeat scroll left center transparent; } #entity_container .entity_add_to_contacts_icon { background: url("/dev/images/opposite_arrows_icon.png") no-repeat scroll left center transparent; } #entity_container .entity_add_to_contacts_requested_icon { background: url("/dev/images/opposite_arrows_requested_icon.png") no-repeat scroll left center transparent; } /* Me */ #entity_container .entity_name_me { clear: both; font-size: 14px; padding: 5px 5px 0px 20px; color: #666; word-wrap: break-word; } #entity_container .entity_my_profilepic { width: 70px; height: 54px; background-color: #fff; margin-left: 20px; padding: 2px 2px 2px 1px; } /* Group */ .entity_pseudogroup { font-size: 11px; color: #666; } /* Content */ #entity_container #entity_content_image { width: 45px; height: 62px; float: left; margin-right: 15px; margin-top: -4px; } #entity_content_image img { height: 50px; margin-top: 5px; } #entity_name form { display: inline-block !important } #entity_name input { font-size: 18px; font-weight: bold; border: 1px solid #DDDDDD; background: none; color: #454A4F; width: 607px !important; height: 24px !important; padding: 3px 0; position: relative; top: -9px; left: -2px; } 
#entity_name.entity_name_editable { padding: 5px; } #entity_name.entity_name_editable:hover { background-color: #fcecb2; cursor: pointer; } #entity_name.entity_name_editing:hover { background-color: transparent !important; } #entity_container .entity_add_to_library { background: url("/devwidgets/savecontent/images/savecontent_button_icon_16x16.png") no-repeat center center transparent; display: block; float: right; height: 17px; margin-left: 10px; padding-left: 20px; } #entity_container .s3d-button:hover .entity_add_to_library { background: url("/devwidgets/savecontent/images/savecontent_button_icon_16x16_hover.png") no-repeat center center transparent; } #entity_container .entity_download_content { background: url("/dev/images/download_button_image.png") no-repeat; display: block; float: right; height: 12px; margin-left: 10px; padding-left: 12px; margin-top: 6px; } #entity_container .s3d-button:hover .entity_download_content { background: url("/dev/images/download_button_image_hover.png") no-repeat; } #entity_container #entity_managed_content { background: url("/devwidgets/entity/images/entity_manager_icon.png") no-repeat scroll left top transparent; height: 18px; margin-left: -2px; margin-top: 38px; position: absolute; width: 53px; } #entity_container .entity_content_details_icon { display: inline-block; height: 10px; width: 15px; } #entity_container #entity_used_by_icon { background: url("/devwidgets/entity/images/entity_used_by_icon.png") no-repeat scroll left top transparent; } #entity_container #entity_public_privacy_icon { background: url("/devwidgets/entity/images/entity_chain_icon.png") no-repeat scroll left top transparent; } #entity_container #entity_everyone_privacy_icon { background: url("/devwidgets/entity/images/entity_anyone_lock_icon.png") no-repeat scroll left top transparent; } #entity_container #entity_private_privacy_icon { background: url("/devwidgets/entity/images/entity_private_lock_icon.png") no-repeat scroll left top transparent; } 
#entity_container #entity_type { color: #999; text-transform: uppercase; margin-left: 7px; } /* Search title */ .entity_search { float: left; margin-top: 4px !important; } /* Dialogs */ #entity_content_users_dialog { z-index: 40000 !important; } .entity_content_users_dialog_list { margin: 12px 0 0; padding: 0; height: 310px; overflow: auto; border: 1px solid #d4dade; } .entity_content_users_dialog_list li { display: block; list-style: none; clear: both; padding: 5px 0 5px 10px; border-bottom: 1px solid #FFF; border-top: 1px solid #FFF; overflow: hidden; } .entity_content_users_dialog_list .entity_content_picture { float: left; display: block; height: 32px; width: 32px; padding-right: 10px; } .entity_content_users_dialog_list .entity_content_name { line-height: 30px; } .entity_content_users_dialog_list .entity_content_even { background: #f1f1f1; border-bottom: 1px solid #f1f1f1; } .entity_content_users_dialog_list .entity_content_list_empty, .entity_content_activity_dialog_list .entity_content_list_empty { text-align: center; line-height: 300px; } .entity_content_activity_dialog_list { margin: 12px 0 0; padding: 0; height: 310px; overflow: auto; border: 1px solid #d4dade; position: relative; } .entity_content_activity_dialog_list li { display: block; list-style: none; clear: both; padding: 10px 0 10px 10px; border-bottom: 9px solid #FFF; border-top: 1px solid #FFF; overflow: hidden; background: #f1f1f1; } .entity_content_activity_dialog_list .entity_content_picture { float: left; display: block; height: 32px; width: 32px; padding-right: 10px; } .entity_content_activity_dialog_list .entity_content_activity { position: relative; top: 1px; } /* Widget Popup */ #entity_container .entity_permissions_icon { background: url("/dev/images/settings_icon_16x16.png") no-repeat left center transparent; margin-left: 5px; padding: 2px 0 0 20px; margin-right: 4px; display: block; height: 19px; font-size: 10px; } #entity_container .s3d-button:hover .entity_permissions_icon { 
background: url("/dev/images/settings_icon_16x16_hover.png") no-repeat left center transparent; } #entity_contentsettings_dropdown, #entity_groupsettings_dropdown { color: #333333; position: absolute; display: none; width: 170px; } .entity_dropdown_outer { position: relative; clear: right; z-index: 1; background-color: #e4e3e3; border: 1px solid #EAEBEC; border-top: none; border-radius: 5px; -moz-border-radius: 5px; -webkit-border-radius: 5px; box-shadow: 0px 0px 5px #A9A9A9; -moz-box-shadow: 0px 0px 5px #A9A9A9; -webkit-box-shadow: 0px 0px 5px #A9A9A9; } .entity_dropdown_inner { margin: 3px; padding: 0; background-color: #ffffff; border-radius: 5px; -moz-border-radius: 5px; -webkit-border-radius: 5px; } #entity_dropdown_title { font-size: 13px; margin: 10px 10px 5px; } #entity_dropdown_select { margin-top: 10px; width: 220px; } #entity_dropdown button { margin: 10px 10px 10px 0px; } #entity_contentsettings_dropdown .jqmClose, #entity_groupsettings_dropdown .jqmClose { margin-left: 10px; margin-top: 15px; } #entity_contentsettings_dropdown .entity_dropdown_header_arrow, #entity_groupsettings_dropdown .entity_dropdown_header_arrow { position: relative; float: right; z-index: 100; margin-right: 10px; font-size: 0px; line-height: 0%; width: 0px; border-top: 0; border-bottom: 15px solid #e4e3e3; border-left: 15px solid transparent; border-right: 15px solid transparent; } #entity_dropdown_sharelist_container { margin-bottom: 15px; } #entity_dropdown_container > div { padding: 6px; } #entity_dropdown_container ul { margin: 0; padding: 0 10px; } #entity_dropdown_container ul li { list-style: none; padding: 0px 0; border-bottom: 1px solid #ccc; } #entity_dropdown_container ul li a { padding-left: 6px; color: #369; font-size: 13px; } #entity_dropdown_container ul li a:hover { color: #999; } #entity_dropdown_container ul li#ew_content_preview_delete { border: 0; }
jonmhays/3akai-ux
devwidgets/entity/css/entity.css
CSS
apache-2.0
21,419
<%# Copyright 2013-2017 the original author or authors. This file is part of the JHipster project, see https://jhipster.github.io/ for more information. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -%> (function() { 'use strict'; // DO NOT EDIT THIS FILE, EDIT THE GULP TASK NGCONSTANT SETTINGS INSTEAD WHICH GENERATES THIS FILE angular .module('<%=angularAppName%>') .constant('VERSION', '0.0.1-SNAPSHOT') .constant('DEBUG_INFO_ENABLED', true); })();
fjuriolli/scribble
node_modules/generator-jhipster/generators/client/templates/angularjs/src/main/webapp/app/_app.constants.js
JavaScript
apache-2.0
982
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from google.net.proto import ProtocolBuffer import array import dummy_thread as thread __pychecker__ = """maxreturns=0 maxbranches=0 no-callinit unusednames=printElemNumber,debug_strs no-special""" if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'): _extension_runtime = True _ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage else: _extension_runtime = False _ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage from google.appengine.datastore.entity_pb import * import google.appengine.datastore.entity_pb class AggregateRpcStatsProto(ProtocolBuffer.ProtocolMessage): has_service_call_name_ = 0 service_call_name_ = "" has_total_amount_of_calls_ = 0 total_amount_of_calls_ = 0 has_total_cost_of_calls_microdollars_ = 0 total_cost_of_calls_microdollars_ = 0 def __init__(self, contents=None): self.total_billed_ops_ = [] if contents is not None: self.MergeFromString(contents) def service_call_name(self): return self.service_call_name_ def set_service_call_name(self, x): self.has_service_call_name_ = 1 self.service_call_name_ = x def clear_service_call_name(self): if self.has_service_call_name_: self.has_service_call_name_ = 0 self.service_call_name_ = "" def has_service_call_name(self): return self.has_service_call_name_ def total_amount_of_calls(self): return self.total_amount_of_calls_ def set_total_amount_of_calls(self, x): self.has_total_amount_of_calls_ = 
1 self.total_amount_of_calls_ = x def clear_total_amount_of_calls(self): if self.has_total_amount_of_calls_: self.has_total_amount_of_calls_ = 0 self.total_amount_of_calls_ = 0 def has_total_amount_of_calls(self): return self.has_total_amount_of_calls_ def total_cost_of_calls_microdollars(self): return self.total_cost_of_calls_microdollars_ def set_total_cost_of_calls_microdollars(self, x): self.has_total_cost_of_calls_microdollars_ = 1 self.total_cost_of_calls_microdollars_ = x def clear_total_cost_of_calls_microdollars(self): if self.has_total_cost_of_calls_microdollars_: self.has_total_cost_of_calls_microdollars_ = 0 self.total_cost_of_calls_microdollars_ = 0 def has_total_cost_of_calls_microdollars(self): return self.has_total_cost_of_calls_microdollars_ def total_billed_ops_size(self): return len(self.total_billed_ops_) def total_billed_ops_list(self): return self.total_billed_ops_ def total_billed_ops(self, i): return self.total_billed_ops_[i] def mutable_total_billed_ops(self, i): return self.total_billed_ops_[i] def add_total_billed_ops(self): x = BilledOpProto() self.total_billed_ops_.append(x) return x def clear_total_billed_ops(self): self.total_billed_ops_ = [] def MergeFrom(self, x): assert x is not self if (x.has_service_call_name()): self.set_service_call_name(x.service_call_name()) if (x.has_total_amount_of_calls()): self.set_total_amount_of_calls(x.total_amount_of_calls()) if (x.has_total_cost_of_calls_microdollars()): self.set_total_cost_of_calls_microdollars(x.total_cost_of_calls_microdollars()) for i in xrange(x.total_billed_ops_size()): self.add_total_billed_ops().CopyFrom(x.total_billed_ops(i)) def Equals(self, x): if x is self: return 1 if self.has_service_call_name_ != x.has_service_call_name_: return 0 if self.has_service_call_name_ and self.service_call_name_ != x.service_call_name_: return 0 if self.has_total_amount_of_calls_ != x.has_total_amount_of_calls_: return 0 if self.has_total_amount_of_calls_ and self.total_amount_of_calls_ != 
x.total_amount_of_calls_: return 0 if self.has_total_cost_of_calls_microdollars_ != x.has_total_cost_of_calls_microdollars_: return 0 if self.has_total_cost_of_calls_microdollars_ and self.total_cost_of_calls_microdollars_ != x.total_cost_of_calls_microdollars_: return 0 if len(self.total_billed_ops_) != len(x.total_billed_ops_): return 0 for e1, e2 in zip(self.total_billed_ops_, x.total_billed_ops_): if e1 != e2: return 0 return 1 def IsInitialized(self, debug_strs=None): initialized = 1 if (not self.has_service_call_name_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: service_call_name not set.') if (not self.has_total_amount_of_calls_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: total_amount_of_calls not set.') for p in self.total_billed_ops_: if not p.IsInitialized(debug_strs): initialized=0 return initialized def ByteSize(self): n = 0 n += self.lengthString(len(self.service_call_name_)) n += self.lengthVarInt64(self.total_amount_of_calls_) if (self.has_total_cost_of_calls_microdollars_): n += 1 + self.lengthVarInt64(self.total_cost_of_calls_microdollars_) n += 1 * len(self.total_billed_ops_) for i in xrange(len(self.total_billed_ops_)): n += self.lengthString(self.total_billed_ops_[i].ByteSize()) return n + 2 def ByteSizePartial(self): n = 0 if (self.has_service_call_name_): n += 1 n += self.lengthString(len(self.service_call_name_)) if (self.has_total_amount_of_calls_): n += 1 n += self.lengthVarInt64(self.total_amount_of_calls_) if (self.has_total_cost_of_calls_microdollars_): n += 1 + self.lengthVarInt64(self.total_cost_of_calls_microdollars_) n += 1 * len(self.total_billed_ops_) for i in xrange(len(self.total_billed_ops_)): n += self.lengthString(self.total_billed_ops_[i].ByteSizePartial()) return n def Clear(self): self.clear_service_call_name() self.clear_total_amount_of_calls() self.clear_total_cost_of_calls_microdollars() self.clear_total_billed_ops() def OutputUnchecked(self, out): 
out.putVarInt32(10) out.putPrefixedString(self.service_call_name_) out.putVarInt32(24) out.putVarInt64(self.total_amount_of_calls_) if (self.has_total_cost_of_calls_microdollars_): out.putVarInt32(32) out.putVarInt64(self.total_cost_of_calls_microdollars_) for i in xrange(len(self.total_billed_ops_)): out.putVarInt32(42) out.putVarInt32(self.total_billed_ops_[i].ByteSize()) self.total_billed_ops_[i].OutputUnchecked(out) def OutputPartial(self, out): if (self.has_service_call_name_): out.putVarInt32(10) out.putPrefixedString(self.service_call_name_) if (self.has_total_amount_of_calls_): out.putVarInt32(24) out.putVarInt64(self.total_amount_of_calls_) if (self.has_total_cost_of_calls_microdollars_): out.putVarInt32(32) out.putVarInt64(self.total_cost_of_calls_microdollars_) for i in xrange(len(self.total_billed_ops_)): out.putVarInt32(42) out.putVarInt32(self.total_billed_ops_[i].ByteSizePartial()) self.total_billed_ops_[i].OutputPartial(out) def TryMerge(self, d): while d.avail() > 0: tt = d.getVarInt32() if tt == 10: self.set_service_call_name(d.getPrefixedString()) continue if tt == 24: self.set_total_amount_of_calls(d.getVarInt64()) continue if tt == 32: self.set_total_cost_of_calls_microdollars(d.getVarInt64()) continue if tt == 42: length = d.getVarInt32() tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length) d.skip(length) self.add_total_billed_ops().TryMerge(tmp) continue if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError d.skipData(tt) def __str__(self, prefix="", printElemNumber=0): res="" if self.has_service_call_name_: res+=prefix+("service_call_name: %s\n" % self.DebugFormatString(self.service_call_name_)) if self.has_total_amount_of_calls_: res+=prefix+("total_amount_of_calls: %s\n" % self.DebugFormatInt64(self.total_amount_of_calls_)) if self.has_total_cost_of_calls_microdollars_: res+=prefix+("total_cost_of_calls_microdollars: %s\n" % self.DebugFormatInt64(self.total_cost_of_calls_microdollars_)) cnt=0 for e in 
self.total_billed_ops_: elm="" if printElemNumber: elm="(%d)" % cnt res+=prefix+("total_billed_ops%s <\n" % elm) res+=e.__str__(prefix + " ", printElemNumber) res+=prefix+">\n" cnt+=1 return res def _BuildTagLookupTable(sparse, maxtag, default=None): return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)]) kservice_call_name = 1 ktotal_amount_of_calls = 3 ktotal_cost_of_calls_microdollars = 4 ktotal_billed_ops = 5 _TEXT = _BuildTagLookupTable({ 0: "ErrorCode", 1: "service_call_name", 3: "total_amount_of_calls", 4: "total_cost_of_calls_microdollars", 5: "total_billed_ops", }, 5) _TYPES = _BuildTagLookupTable({ 0: ProtocolBuffer.Encoder.NUMERIC, 1: ProtocolBuffer.Encoder.STRING, 3: ProtocolBuffer.Encoder.NUMERIC, 4: ProtocolBuffer.Encoder.NUMERIC, 5: ProtocolBuffer.Encoder.STRING, }, 5, ProtocolBuffer.Encoder.MAX_TYPE) _STYLE = """""" _STYLE_CONTENT_TYPE = """""" _PROTO_DESCRIPTOR_NAME = 'apphosting.AggregateRpcStatsProto' class KeyValProto(ProtocolBuffer.ProtocolMessage): has_key_ = 0 key_ = "" has_value_ = 0 value_ = "" def __init__(self, contents=None): if contents is not None: self.MergeFromString(contents) def key(self): return self.key_ def set_key(self, x): self.has_key_ = 1 self.key_ = x def clear_key(self): if self.has_key_: self.has_key_ = 0 self.key_ = "" def has_key(self): return self.has_key_ def value(self): return self.value_ def set_value(self, x): self.has_value_ = 1 self.value_ = x def clear_value(self): if self.has_value_: self.has_value_ = 0 self.value_ = "" def has_value(self): return self.has_value_ def MergeFrom(self, x): assert x is not self if (x.has_key()): self.set_key(x.key()) if (x.has_value()): self.set_value(x.value()) def Equals(self, x): if x is self: return 1 if self.has_key_ != x.has_key_: return 0 if self.has_key_ and self.key_ != x.key_: return 0 if self.has_value_ != x.has_value_: return 0 if self.has_value_ and self.value_ != x.value_: return 0 return 1 def IsInitialized(self, debug_strs=None): initialized = 1 if (not 
self.has_key_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: key not set.') if (not self.has_value_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: value not set.') return initialized def ByteSize(self): n = 0 n += self.lengthString(len(self.key_)) n += self.lengthString(len(self.value_)) return n + 2 def ByteSizePartial(self): n = 0 if (self.has_key_): n += 1 n += self.lengthString(len(self.key_)) if (self.has_value_): n += 1 n += self.lengthString(len(self.value_)) return n def Clear(self): self.clear_key() self.clear_value() def OutputUnchecked(self, out): out.putVarInt32(10) out.putPrefixedString(self.key_) out.putVarInt32(18) out.putPrefixedString(self.value_) def OutputPartial(self, out): if (self.has_key_): out.putVarInt32(10) out.putPrefixedString(self.key_) if (self.has_value_): out.putVarInt32(18) out.putPrefixedString(self.value_) def TryMerge(self, d): while d.avail() > 0: tt = d.getVarInt32() if tt == 10: self.set_key(d.getPrefixedString()) continue if tt == 18: self.set_value(d.getPrefixedString()) continue if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError d.skipData(tt) def __str__(self, prefix="", printElemNumber=0): res="" if self.has_key_: res+=prefix+("key: %s\n" % self.DebugFormatString(self.key_)) if self.has_value_: res+=prefix+("value: %s\n" % self.DebugFormatString(self.value_)) return res def _BuildTagLookupTable(sparse, maxtag, default=None): return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)]) kkey = 1 kvalue = 2 _TEXT = _BuildTagLookupTable({ 0: "ErrorCode", 1: "key", 2: "value", }, 2) _TYPES = _BuildTagLookupTable({ 0: ProtocolBuffer.Encoder.NUMERIC, 1: ProtocolBuffer.Encoder.STRING, 2: ProtocolBuffer.Encoder.STRING, }, 2, ProtocolBuffer.Encoder.MAX_TYPE) _STYLE = """""" _STYLE_CONTENT_TYPE = """""" _PROTO_DESCRIPTOR_NAME = 'apphosting.KeyValProto' class StackFrameProto(ProtocolBuffer.ProtocolMessage): has_class_or_file_name_ = 0 
class_or_file_name_ = "" has_line_number_ = 0 line_number_ = 0 has_function_name_ = 0 function_name_ = "" def __init__(self, contents=None): self.variables_ = [] if contents is not None: self.MergeFromString(contents) def class_or_file_name(self): return self.class_or_file_name_ def set_class_or_file_name(self, x): self.has_class_or_file_name_ = 1 self.class_or_file_name_ = x def clear_class_or_file_name(self): if self.has_class_or_file_name_: self.has_class_or_file_name_ = 0 self.class_or_file_name_ = "" def has_class_or_file_name(self): return self.has_class_or_file_name_ def line_number(self): return self.line_number_ def set_line_number(self, x): self.has_line_number_ = 1 self.line_number_ = x def clear_line_number(self): if self.has_line_number_: self.has_line_number_ = 0 self.line_number_ = 0 def has_line_number(self): return self.has_line_number_ def function_name(self): return self.function_name_ def set_function_name(self, x): self.has_function_name_ = 1 self.function_name_ = x def clear_function_name(self): if self.has_function_name_: self.has_function_name_ = 0 self.function_name_ = "" def has_function_name(self): return self.has_function_name_ def variables_size(self): return len(self.variables_) def variables_list(self): return self.variables_ def variables(self, i): return self.variables_[i] def mutable_variables(self, i): return self.variables_[i] def add_variables(self): x = KeyValProto() self.variables_.append(x) return x def clear_variables(self): self.variables_ = [] def MergeFrom(self, x): assert x is not self if (x.has_class_or_file_name()): self.set_class_or_file_name(x.class_or_file_name()) if (x.has_line_number()): self.set_line_number(x.line_number()) if (x.has_function_name()): self.set_function_name(x.function_name()) for i in xrange(x.variables_size()): self.add_variables().CopyFrom(x.variables(i)) def Equals(self, x): if x is self: return 1 if self.has_class_or_file_name_ != x.has_class_or_file_name_: return 0 if 
self.has_class_or_file_name_ and self.class_or_file_name_ != x.class_or_file_name_: return 0 if self.has_line_number_ != x.has_line_number_: return 0 if self.has_line_number_ and self.line_number_ != x.line_number_: return 0 if self.has_function_name_ != x.has_function_name_: return 0 if self.has_function_name_ and self.function_name_ != x.function_name_: return 0 if len(self.variables_) != len(x.variables_): return 0 for e1, e2 in zip(self.variables_, x.variables_): if e1 != e2: return 0 return 1 def IsInitialized(self, debug_strs=None): initialized = 1 if (not self.has_class_or_file_name_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: class_or_file_name not set.') if (not self.has_function_name_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: function_name not set.') for p in self.variables_: if not p.IsInitialized(debug_strs): initialized=0 return initialized def ByteSize(self): n = 0 n += self.lengthString(len(self.class_or_file_name_)) if (self.has_line_number_): n += 1 + self.lengthVarInt64(self.line_number_) n += self.lengthString(len(self.function_name_)) n += 1 * len(self.variables_) for i in xrange(len(self.variables_)): n += self.lengthString(self.variables_[i].ByteSize()) return n + 2 def ByteSizePartial(self): n = 0 if (self.has_class_or_file_name_): n += 1 n += self.lengthString(len(self.class_or_file_name_)) if (self.has_line_number_): n += 1 + self.lengthVarInt64(self.line_number_) if (self.has_function_name_): n += 1 n += self.lengthString(len(self.function_name_)) n += 1 * len(self.variables_) for i in xrange(len(self.variables_)): n += self.lengthString(self.variables_[i].ByteSizePartial()) return n def Clear(self): self.clear_class_or_file_name() self.clear_line_number() self.clear_function_name() self.clear_variables() def OutputUnchecked(self, out): out.putVarInt32(10) out.putPrefixedString(self.class_or_file_name_) if (self.has_line_number_): out.putVarInt32(16) 
out.putVarInt32(self.line_number_) out.putVarInt32(26) out.putPrefixedString(self.function_name_) for i in xrange(len(self.variables_)): out.putVarInt32(34) out.putVarInt32(self.variables_[i].ByteSize()) self.variables_[i].OutputUnchecked(out) def OutputPartial(self, out): if (self.has_class_or_file_name_): out.putVarInt32(10) out.putPrefixedString(self.class_or_file_name_) if (self.has_line_number_): out.putVarInt32(16) out.putVarInt32(self.line_number_) if (self.has_function_name_): out.putVarInt32(26) out.putPrefixedString(self.function_name_) for i in xrange(len(self.variables_)): out.putVarInt32(34) out.putVarInt32(self.variables_[i].ByteSizePartial()) self.variables_[i].OutputPartial(out) def TryMerge(self, d): while d.avail() > 0: tt = d.getVarInt32() if tt == 10: self.set_class_or_file_name(d.getPrefixedString()) continue if tt == 16: self.set_line_number(d.getVarInt32()) continue if tt == 26: self.set_function_name(d.getPrefixedString()) continue if tt == 34: length = d.getVarInt32() tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length) d.skip(length) self.add_variables().TryMerge(tmp) continue if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError d.skipData(tt) def __str__(self, prefix="", printElemNumber=0): res="" if self.has_class_or_file_name_: res+=prefix+("class_or_file_name: %s\n" % self.DebugFormatString(self.class_or_file_name_)) if self.has_line_number_: res+=prefix+("line_number: %s\n" % self.DebugFormatInt32(self.line_number_)) if self.has_function_name_: res+=prefix+("function_name: %s\n" % self.DebugFormatString(self.function_name_)) cnt=0 for e in self.variables_: elm="" if printElemNumber: elm="(%d)" % cnt res+=prefix+("variables%s <\n" % elm) res+=e.__str__(prefix + " ", printElemNumber) res+=prefix+">\n" cnt+=1 return res def _BuildTagLookupTable(sparse, maxtag, default=None): return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)]) kclass_or_file_name = 1 kline_number = 2 kfunction_name = 3 kvariables = 4 
_TEXT = _BuildTagLookupTable({ 0: "ErrorCode", 1: "class_or_file_name", 2: "line_number", 3: "function_name", 4: "variables", }, 4) _TYPES = _BuildTagLookupTable({ 0: ProtocolBuffer.Encoder.NUMERIC, 1: ProtocolBuffer.Encoder.STRING, 2: ProtocolBuffer.Encoder.NUMERIC, 3: ProtocolBuffer.Encoder.STRING, 4: ProtocolBuffer.Encoder.STRING, }, 4, ProtocolBuffer.Encoder.MAX_TYPE) _STYLE = """""" _STYLE_CONTENT_TYPE = """""" _PROTO_DESCRIPTOR_NAME = 'apphosting.StackFrameProto' class BilledOpProto(ProtocolBuffer.ProtocolMessage): DATASTORE_READ = 0 DATASTORE_WRITE = 1 DATASTORE_SMALL = 2 MAIL_RECIPIENT = 3 CHANNEL_OPEN = 4 XMPP_STANZA = 5 _BilledOp_NAMES = { 0: "DATASTORE_READ", 1: "DATASTORE_WRITE", 2: "DATASTORE_SMALL", 3: "MAIL_RECIPIENT", 4: "CHANNEL_OPEN", 5: "XMPP_STANZA", } def BilledOp_Name(cls, x): return cls._BilledOp_NAMES.get(x, "") BilledOp_Name = classmethod(BilledOp_Name) has_op_ = 0 op_ = 0 has_num_ops_ = 0 num_ops_ = 0 def __init__(self, contents=None): if contents is not None: self.MergeFromString(contents) def op(self): return self.op_ def set_op(self, x): self.has_op_ = 1 self.op_ = x def clear_op(self): if self.has_op_: self.has_op_ = 0 self.op_ = 0 def has_op(self): return self.has_op_ def num_ops(self): return self.num_ops_ def set_num_ops(self, x): self.has_num_ops_ = 1 self.num_ops_ = x def clear_num_ops(self): if self.has_num_ops_: self.has_num_ops_ = 0 self.num_ops_ = 0 def has_num_ops(self): return self.has_num_ops_ def MergeFrom(self, x): assert x is not self if (x.has_op()): self.set_op(x.op()) if (x.has_num_ops()): self.set_num_ops(x.num_ops()) def Equals(self, x): if x is self: return 1 if self.has_op_ != x.has_op_: return 0 if self.has_op_ and self.op_ != x.op_: return 0 if self.has_num_ops_ != x.has_num_ops_: return 0 if self.has_num_ops_ and self.num_ops_ != x.num_ops_: return 0 return 1 def IsInitialized(self, debug_strs=None): initialized = 1 if (not self.has_op_): initialized = 0 if debug_strs is not None: debug_strs.append('Required 
field: op not set.') if (not self.has_num_ops_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: num_ops not set.') return initialized def ByteSize(self): n = 0 n += self.lengthVarInt64(self.op_) n += self.lengthVarInt64(self.num_ops_) return n + 2 def ByteSizePartial(self): n = 0 if (self.has_op_): n += 1 n += self.lengthVarInt64(self.op_) if (self.has_num_ops_): n += 1 n += self.lengthVarInt64(self.num_ops_) return n def Clear(self): self.clear_op() self.clear_num_ops() def OutputUnchecked(self, out): out.putVarInt32(8) out.putVarInt32(self.op_) out.putVarInt32(16) out.putVarInt32(self.num_ops_) def OutputPartial(self, out): if (self.has_op_): out.putVarInt32(8) out.putVarInt32(self.op_) if (self.has_num_ops_): out.putVarInt32(16) out.putVarInt32(self.num_ops_) def TryMerge(self, d): while d.avail() > 0: tt = d.getVarInt32() if tt == 8: self.set_op(d.getVarInt32()) continue if tt == 16: self.set_num_ops(d.getVarInt32()) continue if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError d.skipData(tt) def __str__(self, prefix="", printElemNumber=0): res="" if self.has_op_: res+=prefix+("op: %s\n" % self.DebugFormatInt32(self.op_)) if self.has_num_ops_: res+=prefix+("num_ops: %s\n" % self.DebugFormatInt32(self.num_ops_)) return res def _BuildTagLookupTable(sparse, maxtag, default=None): return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)]) kop = 1 knum_ops = 2 _TEXT = _BuildTagLookupTable({ 0: "ErrorCode", 1: "op", 2: "num_ops", }, 2) _TYPES = _BuildTagLookupTable({ 0: ProtocolBuffer.Encoder.NUMERIC, 1: ProtocolBuffer.Encoder.NUMERIC, 2: ProtocolBuffer.Encoder.NUMERIC, }, 2, ProtocolBuffer.Encoder.MAX_TYPE) _STYLE = """""" _STYLE_CONTENT_TYPE = """""" _PROTO_DESCRIPTOR_NAME = 'apphosting.BilledOpProto' class DatastoreCallDetailsProto(ProtocolBuffer.ProtocolMessage): has_query_kind_ = 0 query_kind_ = "" has_query_ancestor_ = 0 query_ancestor_ = None has_query_thiscursor_ = 0 query_thiscursor_ = 0 has_query_nextcursor_ = 
0 query_nextcursor_ = 0 def __init__(self, contents=None): self.get_successful_fetch_ = [] self.keys_read_ = [] self.keys_written_ = [] self.lazy_init_lock_ = thread.allocate_lock() if contents is not None: self.MergeFromString(contents) def query_kind(self): return self.query_kind_ def set_query_kind(self, x): self.has_query_kind_ = 1 self.query_kind_ = x def clear_query_kind(self): if self.has_query_kind_: self.has_query_kind_ = 0 self.query_kind_ = "" def has_query_kind(self): return self.has_query_kind_ def query_ancestor(self): if self.query_ancestor_ is None: self.lazy_init_lock_.acquire() try: if self.query_ancestor_ is None: self.query_ancestor_ = Reference() finally: self.lazy_init_lock_.release() return self.query_ancestor_ def mutable_query_ancestor(self): self.has_query_ancestor_ = 1; return self.query_ancestor() def clear_query_ancestor(self): if self.has_query_ancestor_: self.has_query_ancestor_ = 0; if self.query_ancestor_ is not None: self.query_ancestor_.Clear() def has_query_ancestor(self): return self.has_query_ancestor_ def query_thiscursor(self): return self.query_thiscursor_ def set_query_thiscursor(self, x): self.has_query_thiscursor_ = 1 self.query_thiscursor_ = x def clear_query_thiscursor(self): if self.has_query_thiscursor_: self.has_query_thiscursor_ = 0 self.query_thiscursor_ = 0 def has_query_thiscursor(self): return self.has_query_thiscursor_ def query_nextcursor(self): return self.query_nextcursor_ def set_query_nextcursor(self, x): self.has_query_nextcursor_ = 1 self.query_nextcursor_ = x def clear_query_nextcursor(self): if self.has_query_nextcursor_: self.has_query_nextcursor_ = 0 self.query_nextcursor_ = 0 def has_query_nextcursor(self): return self.has_query_nextcursor_ def get_successful_fetch_size(self): return len(self.get_successful_fetch_) def get_successful_fetch_list(self): return self.get_successful_fetch_ def get_successful_fetch(self, i): return self.get_successful_fetch_[i] def set_get_successful_fetch(self, i, x): 
self.get_successful_fetch_[i] = x def add_get_successful_fetch(self, x): self.get_successful_fetch_.append(x) def clear_get_successful_fetch(self): self.get_successful_fetch_ = [] def keys_read_size(self): return len(self.keys_read_) def keys_read_list(self): return self.keys_read_ def keys_read(self, i): return self.keys_read_[i] def mutable_keys_read(self, i): return self.keys_read_[i] def add_keys_read(self): x = Reference() self.keys_read_.append(x) return x def clear_keys_read(self): self.keys_read_ = [] def keys_written_size(self): return len(self.keys_written_) def keys_written_list(self): return self.keys_written_ def keys_written(self, i): return self.keys_written_[i] def mutable_keys_written(self, i): return self.keys_written_[i] def add_keys_written(self): x = Reference() self.keys_written_.append(x) return x def clear_keys_written(self): self.keys_written_ = [] def MergeFrom(self, x): assert x is not self if (x.has_query_kind()): self.set_query_kind(x.query_kind()) if (x.has_query_ancestor()): self.mutable_query_ancestor().MergeFrom(x.query_ancestor()) if (x.has_query_thiscursor()): self.set_query_thiscursor(x.query_thiscursor()) if (x.has_query_nextcursor()): self.set_query_nextcursor(x.query_nextcursor()) for i in xrange(x.get_successful_fetch_size()): self.add_get_successful_fetch(x.get_successful_fetch(i)) for i in xrange(x.keys_read_size()): self.add_keys_read().CopyFrom(x.keys_read(i)) for i in xrange(x.keys_written_size()): self.add_keys_written().CopyFrom(x.keys_written(i)) def Equals(self, x): if x is self: return 1 if self.has_query_kind_ != x.has_query_kind_: return 0 if self.has_query_kind_ and self.query_kind_ != x.query_kind_: return 0 if self.has_query_ancestor_ != x.has_query_ancestor_: return 0 if self.has_query_ancestor_ and self.query_ancestor_ != x.query_ancestor_: return 0 if self.has_query_thiscursor_ != x.has_query_thiscursor_: return 0 if self.has_query_thiscursor_ and self.query_thiscursor_ != x.query_thiscursor_: return 0 if 
self.has_query_nextcursor_ != x.has_query_nextcursor_: return 0 if self.has_query_nextcursor_ and self.query_nextcursor_ != x.query_nextcursor_: return 0 if len(self.get_successful_fetch_) != len(x.get_successful_fetch_): return 0 for e1, e2 in zip(self.get_successful_fetch_, x.get_successful_fetch_): if e1 != e2: return 0 if len(self.keys_read_) != len(x.keys_read_): return 0 for e1, e2 in zip(self.keys_read_, x.keys_read_): if e1 != e2: return 0 if len(self.keys_written_) != len(x.keys_written_): return 0 for e1, e2 in zip(self.keys_written_, x.keys_written_): if e1 != e2: return 0 return 1 def IsInitialized(self, debug_strs=None): initialized = 1 if (self.has_query_ancestor_ and not self.query_ancestor_.IsInitialized(debug_strs)): initialized = 0 for p in self.keys_read_: if not p.IsInitialized(debug_strs): initialized=0 for p in self.keys_written_: if not p.IsInitialized(debug_strs): initialized=0 return initialized def ByteSize(self): n = 0 if (self.has_query_kind_): n += 1 + self.lengthString(len(self.query_kind_)) if (self.has_query_ancestor_): n += 1 + self.lengthString(self.query_ancestor_.ByteSize()) if (self.has_query_thiscursor_): n += 9 if (self.has_query_nextcursor_): n += 9 n += 2 * len(self.get_successful_fetch_) n += 1 * len(self.keys_read_) for i in xrange(len(self.keys_read_)): n += self.lengthString(self.keys_read_[i].ByteSize()) n += 1 * len(self.keys_written_) for i in xrange(len(self.keys_written_)): n += self.lengthString(self.keys_written_[i].ByteSize()) return n def ByteSizePartial(self): n = 0 if (self.has_query_kind_): n += 1 + self.lengthString(len(self.query_kind_)) if (self.has_query_ancestor_): n += 1 + self.lengthString(self.query_ancestor_.ByteSizePartial()) if (self.has_query_thiscursor_): n += 9 if (self.has_query_nextcursor_): n += 9 n += 2 * len(self.get_successful_fetch_) n += 1 * len(self.keys_read_) for i in xrange(len(self.keys_read_)): n += self.lengthString(self.keys_read_[i].ByteSizePartial()) n += 1 * 
len(self.keys_written_) for i in xrange(len(self.keys_written_)): n += self.lengthString(self.keys_written_[i].ByteSizePartial()) return n def Clear(self): self.clear_query_kind() self.clear_query_ancestor() self.clear_query_thiscursor() self.clear_query_nextcursor() self.clear_get_successful_fetch() self.clear_keys_read() self.clear_keys_written() def OutputUnchecked(self, out): if (self.has_query_kind_): out.putVarInt32(10) out.putPrefixedString(self.query_kind_) if (self.has_query_ancestor_): out.putVarInt32(18) out.putVarInt32(self.query_ancestor_.ByteSize()) self.query_ancestor_.OutputUnchecked(out) if (self.has_query_thiscursor_): out.putVarInt32(25) out.put64(self.query_thiscursor_) if (self.has_query_nextcursor_): out.putVarInt32(33) out.put64(self.query_nextcursor_) for i in xrange(len(self.get_successful_fetch_)): out.putVarInt32(40) out.putBoolean(self.get_successful_fetch_[i]) for i in xrange(len(self.keys_read_)): out.putVarInt32(50) out.putVarInt32(self.keys_read_[i].ByteSize()) self.keys_read_[i].OutputUnchecked(out) for i in xrange(len(self.keys_written_)): out.putVarInt32(58) out.putVarInt32(self.keys_written_[i].ByteSize()) self.keys_written_[i].OutputUnchecked(out) def OutputPartial(self, out): if (self.has_query_kind_): out.putVarInt32(10) out.putPrefixedString(self.query_kind_) if (self.has_query_ancestor_): out.putVarInt32(18) out.putVarInt32(self.query_ancestor_.ByteSizePartial()) self.query_ancestor_.OutputPartial(out) if (self.has_query_thiscursor_): out.putVarInt32(25) out.put64(self.query_thiscursor_) if (self.has_query_nextcursor_): out.putVarInt32(33) out.put64(self.query_nextcursor_) for i in xrange(len(self.get_successful_fetch_)): out.putVarInt32(40) out.putBoolean(self.get_successful_fetch_[i]) for i in xrange(len(self.keys_read_)): out.putVarInt32(50) out.putVarInt32(self.keys_read_[i].ByteSizePartial()) self.keys_read_[i].OutputPartial(out) for i in xrange(len(self.keys_written_)): out.putVarInt32(58) 
      out.putVarInt32(self.keys_written_[i].ByteSizePartial())
      self.keys_written_[i].OutputPartial(out)

  def TryMerge(self, d):
    """Merges wire-format data from decoder `d` into this message.

    Tag values are protobuf varint tags (field_number << 3 | wire_type);
    the field numbers correspond to the k* constants declared on this class
    (kquery_kind = 1 ... kkeys_written = 7).
    """
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        # field 1 (query_kind), length-prefixed string
        self.set_query_kind(d.getPrefixedString())
        continue
      if tt == 18:
        # field 2 (query_ancestor), embedded message: decode the
        # length-prefixed bytes with a sub-decoder, then merge them
        # into the lazily-created submessage
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_query_ancestor().TryMerge(tmp)
        continue
      if tt == 25:
        # field 3 (query_thiscursor), fixed 64-bit
        self.set_query_thiscursor(d.get64())
        continue
      if tt == 33:
        # field 4 (query_nextcursor), fixed 64-bit
        self.set_query_nextcursor(d.get64())
        continue
      if tt == 40:
        # field 5 (get_successful_fetch), repeated bool
        self.add_get_successful_fetch(d.getBoolean())
        continue
      if tt == 50:
        # field 6 (keys_read), repeated embedded message
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_keys_read().TryMerge(tmp)
        continue
      if tt == 58:
        # field 7 (keys_written), repeated embedded message
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_keys_written().TryMerge(tmp)
        continue


      # tag 0 is malformed input; any other unknown tag is skipped
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    # Debug text rendering; nested messages are indented by widening `prefix`,
    # and repeated elements get an "(index)" suffix when printElemNumber is set.
    res=""
    if self.has_query_kind_: res+=prefix+("query_kind: %s\n" % self.DebugFormatString(self.query_kind_))
    if self.has_query_ancestor_:
      res+=prefix+"query_ancestor <\n"
      res+=self.query_ancestor_.__str__(prefix + " ", printElemNumber)
      res+=prefix+">\n"
    if self.has_query_thiscursor_: res+=prefix+("query_thiscursor: %s\n" % self.DebugFormatFixed64(self.query_thiscursor_))
    if self.has_query_nextcursor_: res+=prefix+("query_nextcursor: %s\n" % self.DebugFormatFixed64(self.query_nextcursor_))
    cnt=0
    for e in self.get_successful_fetch_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("get_successful_fetch%s: %s\n" % (elm, self.DebugFormatBool(e)))
      cnt+=1
    cnt=0
    for e in self.keys_read_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("keys_read%s <\n" % elm)
      res+=e.__str__(prefix + " ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    cnt=0
    for e in self.keys_written_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("keys_written%s <\n" % elm)
      res+=e.__str__(prefix + " ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    return res


  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Expands a sparse {tag: value} dict into a dense tuple indexed 0..maxtag,
    # filling holes with `default`.
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Field numbers as declared in the .proto definition.
  kquery_kind = 1
  kquery_ancestor = 2
  kquery_thiscursor = 3
  kquery_nextcursor = 4
  kget_successful_fetch = 5
  kkeys_read = 6
  kkeys_written = 7

  # Field-number -> field-name lookup (index 0 is the error sentinel).
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "query_kind",
    2: "query_ancestor",
    3: "query_thiscursor",
    4: "query_nextcursor",
    5: "get_successful_fetch",
    6: "keys_read",
    7: "keys_written",
  }, 7)

  # Field-number -> encoder wire-type lookup for the same fields.
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.DOUBLE,
    4: ProtocolBuffer.Encoder.DOUBLE,
    5: ProtocolBuffer.Encoder.NUMERIC,
    6: ProtocolBuffer.Encoder.STRING,
    7: ProtocolBuffer.Encoder.STRING,
  }, 7, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.DatastoreCallDetailsProto'


class IndividualRpcStatsProto(ProtocolBuffer.ProtocolMessage):
  """Generated message (apphosting.IndividualRpcStatsProto).

  Statistics for a single RPC issued during a request: service call name,
  request/response summaries, timing, cost, call stack, and (per the
  datastore_details field) datastore-call details.  Each scalar field `foo`
  is backed by class attribute `foo_` plus a `has_foo_` presence flag;
  repeated fields are plain lists created in __init__.
  """
  has_service_call_name_ = 0
  service_call_name_ = ""
  has_request_data_summary_ = 0
  request_data_summary_ = ""
  has_response_data_summary_ = 0
  response_data_summary_ = ""
  has_api_mcycles_ = 0
  api_mcycles_ = 0
  has_api_milliseconds_ = 0
  api_milliseconds_ = 0
  has_start_offset_milliseconds_ = 0
  start_offset_milliseconds_ = 0
  has_duration_milliseconds_ = 0
  duration_milliseconds_ = 0
  has_namespace_ = 0
  namespace_ = ""
  has_was_successful_ = 0
  was_successful_ = 1
  has_datastore_details_ = 0
  datastore_details_ = None
  has_call_cost_microdollars_ = 0
  call_cost_microdollars_ = 0

  def __init__(self, contents=None):
    self.call_stack_ = []
    self.billed_ops_ = []
    # Guards lazy construction of datastore_details_ (see datastore_details()).
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  def service_call_name(self): return self.service_call_name_

  def set_service_call_name(self, x):
    self.has_service_call_name_ = 1
self.service_call_name_ = x def clear_service_call_name(self): if self.has_service_call_name_: self.has_service_call_name_ = 0 self.service_call_name_ = "" def has_service_call_name(self): return self.has_service_call_name_ def request_data_summary(self): return self.request_data_summary_ def set_request_data_summary(self, x): self.has_request_data_summary_ = 1 self.request_data_summary_ = x def clear_request_data_summary(self): if self.has_request_data_summary_: self.has_request_data_summary_ = 0 self.request_data_summary_ = "" def has_request_data_summary(self): return self.has_request_data_summary_ def response_data_summary(self): return self.response_data_summary_ def set_response_data_summary(self, x): self.has_response_data_summary_ = 1 self.response_data_summary_ = x def clear_response_data_summary(self): if self.has_response_data_summary_: self.has_response_data_summary_ = 0 self.response_data_summary_ = "" def has_response_data_summary(self): return self.has_response_data_summary_ def api_mcycles(self): return self.api_mcycles_ def set_api_mcycles(self, x): self.has_api_mcycles_ = 1 self.api_mcycles_ = x def clear_api_mcycles(self): if self.has_api_mcycles_: self.has_api_mcycles_ = 0 self.api_mcycles_ = 0 def has_api_mcycles(self): return self.has_api_mcycles_ def api_milliseconds(self): return self.api_milliseconds_ def set_api_milliseconds(self, x): self.has_api_milliseconds_ = 1 self.api_milliseconds_ = x def clear_api_milliseconds(self): if self.has_api_milliseconds_: self.has_api_milliseconds_ = 0 self.api_milliseconds_ = 0 def has_api_milliseconds(self): return self.has_api_milliseconds_ def start_offset_milliseconds(self): return self.start_offset_milliseconds_ def set_start_offset_milliseconds(self, x): self.has_start_offset_milliseconds_ = 1 self.start_offset_milliseconds_ = x def clear_start_offset_milliseconds(self): if self.has_start_offset_milliseconds_: self.has_start_offset_milliseconds_ = 0 self.start_offset_milliseconds_ = 0 def 
has_start_offset_milliseconds(self): return self.has_start_offset_milliseconds_ def duration_milliseconds(self): return self.duration_milliseconds_ def set_duration_milliseconds(self, x): self.has_duration_milliseconds_ = 1 self.duration_milliseconds_ = x def clear_duration_milliseconds(self): if self.has_duration_milliseconds_: self.has_duration_milliseconds_ = 0 self.duration_milliseconds_ = 0 def has_duration_milliseconds(self): return self.has_duration_milliseconds_ def namespace(self): return self.namespace_ def set_namespace(self, x): self.has_namespace_ = 1 self.namespace_ = x def clear_namespace(self): if self.has_namespace_: self.has_namespace_ = 0 self.namespace_ = "" def has_namespace(self): return self.has_namespace_ def was_successful(self): return self.was_successful_ def set_was_successful(self, x): self.has_was_successful_ = 1 self.was_successful_ = x def clear_was_successful(self): if self.has_was_successful_: self.has_was_successful_ = 0 self.was_successful_ = 1 def has_was_successful(self): return self.has_was_successful_ def call_stack_size(self): return len(self.call_stack_) def call_stack_list(self): return self.call_stack_ def call_stack(self, i): return self.call_stack_[i] def mutable_call_stack(self, i): return self.call_stack_[i] def add_call_stack(self): x = StackFrameProto() self.call_stack_.append(x) return x def clear_call_stack(self): self.call_stack_ = [] def datastore_details(self): if self.datastore_details_ is None: self.lazy_init_lock_.acquire() try: if self.datastore_details_ is None: self.datastore_details_ = DatastoreCallDetailsProto() finally: self.lazy_init_lock_.release() return self.datastore_details_ def mutable_datastore_details(self): self.has_datastore_details_ = 1; return self.datastore_details() def clear_datastore_details(self): if self.has_datastore_details_: self.has_datastore_details_ = 0; if self.datastore_details_ is not None: self.datastore_details_.Clear() def has_datastore_details(self): return 
self.has_datastore_details_ def call_cost_microdollars(self): return self.call_cost_microdollars_ def set_call_cost_microdollars(self, x): self.has_call_cost_microdollars_ = 1 self.call_cost_microdollars_ = x def clear_call_cost_microdollars(self): if self.has_call_cost_microdollars_: self.has_call_cost_microdollars_ = 0 self.call_cost_microdollars_ = 0 def has_call_cost_microdollars(self): return self.has_call_cost_microdollars_ def billed_ops_size(self): return len(self.billed_ops_) def billed_ops_list(self): return self.billed_ops_ def billed_ops(self, i): return self.billed_ops_[i] def mutable_billed_ops(self, i): return self.billed_ops_[i] def add_billed_ops(self): x = BilledOpProto() self.billed_ops_.append(x) return x def clear_billed_ops(self): self.billed_ops_ = [] def MergeFrom(self, x): assert x is not self if (x.has_service_call_name()): self.set_service_call_name(x.service_call_name()) if (x.has_request_data_summary()): self.set_request_data_summary(x.request_data_summary()) if (x.has_response_data_summary()): self.set_response_data_summary(x.response_data_summary()) if (x.has_api_mcycles()): self.set_api_mcycles(x.api_mcycles()) if (x.has_api_milliseconds()): self.set_api_milliseconds(x.api_milliseconds()) if (x.has_start_offset_milliseconds()): self.set_start_offset_milliseconds(x.start_offset_milliseconds()) if (x.has_duration_milliseconds()): self.set_duration_milliseconds(x.duration_milliseconds()) if (x.has_namespace()): self.set_namespace(x.namespace()) if (x.has_was_successful()): self.set_was_successful(x.was_successful()) for i in xrange(x.call_stack_size()): self.add_call_stack().CopyFrom(x.call_stack(i)) if (x.has_datastore_details()): self.mutable_datastore_details().MergeFrom(x.datastore_details()) if (x.has_call_cost_microdollars()): self.set_call_cost_microdollars(x.call_cost_microdollars()) for i in xrange(x.billed_ops_size()): self.add_billed_ops().CopyFrom(x.billed_ops(i)) def Equals(self, x): if x is self: return 1 if 
self.has_service_call_name_ != x.has_service_call_name_: return 0 if self.has_service_call_name_ and self.service_call_name_ != x.service_call_name_: return 0 if self.has_request_data_summary_ != x.has_request_data_summary_: return 0 if self.has_request_data_summary_ and self.request_data_summary_ != x.request_data_summary_: return 0 if self.has_response_data_summary_ != x.has_response_data_summary_: return 0 if self.has_response_data_summary_ and self.response_data_summary_ != x.response_data_summary_: return 0 if self.has_api_mcycles_ != x.has_api_mcycles_: return 0 if self.has_api_mcycles_ and self.api_mcycles_ != x.api_mcycles_: return 0 if self.has_api_milliseconds_ != x.has_api_milliseconds_: return 0 if self.has_api_milliseconds_ and self.api_milliseconds_ != x.api_milliseconds_: return 0 if self.has_start_offset_milliseconds_ != x.has_start_offset_milliseconds_: return 0 if self.has_start_offset_milliseconds_ and self.start_offset_milliseconds_ != x.start_offset_milliseconds_: return 0 if self.has_duration_milliseconds_ != x.has_duration_milliseconds_: return 0 if self.has_duration_milliseconds_ and self.duration_milliseconds_ != x.duration_milliseconds_: return 0 if self.has_namespace_ != x.has_namespace_: return 0 if self.has_namespace_ and self.namespace_ != x.namespace_: return 0 if self.has_was_successful_ != x.has_was_successful_: return 0 if self.has_was_successful_ and self.was_successful_ != x.was_successful_: return 0 if len(self.call_stack_) != len(x.call_stack_): return 0 for e1, e2 in zip(self.call_stack_, x.call_stack_): if e1 != e2: return 0 if self.has_datastore_details_ != x.has_datastore_details_: return 0 if self.has_datastore_details_ and self.datastore_details_ != x.datastore_details_: return 0 if self.has_call_cost_microdollars_ != x.has_call_cost_microdollars_: return 0 if self.has_call_cost_microdollars_ and self.call_cost_microdollars_ != x.call_cost_microdollars_: return 0 if len(self.billed_ops_) != len(x.billed_ops_): return 0 
for e1, e2 in zip(self.billed_ops_, x.billed_ops_): if e1 != e2: return 0 return 1 def IsInitialized(self, debug_strs=None): initialized = 1 if (not self.has_service_call_name_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: service_call_name not set.') if (not self.has_start_offset_milliseconds_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: start_offset_milliseconds not set.') for p in self.call_stack_: if not p.IsInitialized(debug_strs): initialized=0 if (self.has_datastore_details_ and not self.datastore_details_.IsInitialized(debug_strs)): initialized = 0 for p in self.billed_ops_: if not p.IsInitialized(debug_strs): initialized=0 return initialized def ByteSize(self): n = 0 n += self.lengthString(len(self.service_call_name_)) if (self.has_request_data_summary_): n += 1 + self.lengthString(len(self.request_data_summary_)) if (self.has_response_data_summary_): n += 1 + self.lengthString(len(self.response_data_summary_)) if (self.has_api_mcycles_): n += 1 + self.lengthVarInt64(self.api_mcycles_) if (self.has_api_milliseconds_): n += 1 + self.lengthVarInt64(self.api_milliseconds_) n += self.lengthVarInt64(self.start_offset_milliseconds_) if (self.has_duration_milliseconds_): n += 1 + self.lengthVarInt64(self.duration_milliseconds_) if (self.has_namespace_): n += 1 + self.lengthString(len(self.namespace_)) if (self.has_was_successful_): n += 2 n += 1 * len(self.call_stack_) for i in xrange(len(self.call_stack_)): n += self.lengthString(self.call_stack_[i].ByteSize()) if (self.has_datastore_details_): n += 1 + self.lengthString(self.datastore_details_.ByteSize()) if (self.has_call_cost_microdollars_): n += 1 + self.lengthVarInt64(self.call_cost_microdollars_) n += 1 * len(self.billed_ops_) for i in xrange(len(self.billed_ops_)): n += self.lengthString(self.billed_ops_[i].ByteSize()) return n + 2 def ByteSizePartial(self): n = 0 if (self.has_service_call_name_): n += 1 n += 
self.lengthString(len(self.service_call_name_)) if (self.has_request_data_summary_): n += 1 + self.lengthString(len(self.request_data_summary_)) if (self.has_response_data_summary_): n += 1 + self.lengthString(len(self.response_data_summary_)) if (self.has_api_mcycles_): n += 1 + self.lengthVarInt64(self.api_mcycles_) if (self.has_api_milliseconds_): n += 1 + self.lengthVarInt64(self.api_milliseconds_) if (self.has_start_offset_milliseconds_): n += 1 n += self.lengthVarInt64(self.start_offset_milliseconds_) if (self.has_duration_milliseconds_): n += 1 + self.lengthVarInt64(self.duration_milliseconds_) if (self.has_namespace_): n += 1 + self.lengthString(len(self.namespace_)) if (self.has_was_successful_): n += 2 n += 1 * len(self.call_stack_) for i in xrange(len(self.call_stack_)): n += self.lengthString(self.call_stack_[i].ByteSizePartial()) if (self.has_datastore_details_): n += 1 + self.lengthString(self.datastore_details_.ByteSizePartial()) if (self.has_call_cost_microdollars_): n += 1 + self.lengthVarInt64(self.call_cost_microdollars_) n += 1 * len(self.billed_ops_) for i in xrange(len(self.billed_ops_)): n += self.lengthString(self.billed_ops_[i].ByteSizePartial()) return n def Clear(self): self.clear_service_call_name() self.clear_request_data_summary() self.clear_response_data_summary() self.clear_api_mcycles() self.clear_api_milliseconds() self.clear_start_offset_milliseconds() self.clear_duration_milliseconds() self.clear_namespace() self.clear_was_successful() self.clear_call_stack() self.clear_datastore_details() self.clear_call_cost_microdollars() self.clear_billed_ops() def OutputUnchecked(self, out): out.putVarInt32(10) out.putPrefixedString(self.service_call_name_) if (self.has_request_data_summary_): out.putVarInt32(26) out.putPrefixedString(self.request_data_summary_) if (self.has_response_data_summary_): out.putVarInt32(34) out.putPrefixedString(self.response_data_summary_) if (self.has_api_mcycles_): out.putVarInt32(40) 
      out.putVarInt64(self.api_mcycles_)
    # Field 6 (start_offset_milliseconds) is required (see IsInitialized),
    # so it is emitted unconditionally.
    out.putVarInt32(48)
    out.putVarInt64(self.start_offset_milliseconds_)
    if (self.has_duration_milliseconds_):
      out.putVarInt32(56)
      out.putVarInt64(self.duration_milliseconds_)
    if (self.has_namespace_):
      out.putVarInt32(66)
      out.putPrefixedString(self.namespace_)
    if (self.has_was_successful_):
      out.putVarInt32(72)
      out.putBoolean(self.was_successful_)
    for i in xrange(len(self.call_stack_)):
      # field 10 (call_stack), repeated embedded message: length prefix, then body
      out.putVarInt32(82)
      out.putVarInt32(self.call_stack_[i].ByteSize())
      self.call_stack_[i].OutputUnchecked(out)
    if (self.has_api_milliseconds_):
      out.putVarInt32(88)
      out.putVarInt64(self.api_milliseconds_)
    if (self.has_datastore_details_):
      out.putVarInt32(98)
      out.putVarInt32(self.datastore_details_.ByteSize())
      self.datastore_details_.OutputUnchecked(out)
    if (self.has_call_cost_microdollars_):
      out.putVarInt32(104)
      out.putVarInt64(self.call_cost_microdollars_)
    for i in xrange(len(self.billed_ops_)):
      out.putVarInt32(114)
      out.putVarInt32(self.billed_ops_[i].ByteSize())
      self.billed_ops_[i].OutputUnchecked(out)

  def OutputPartial(self, out):
    # Like OutputUnchecked, but tolerates unset required fields: every field,
    # including the required service_call_name and start_offset_milliseconds,
    # is guarded by its has_* flag, and nested messages are sized and written
    # with their *Partial variants.
    if (self.has_service_call_name_):
      out.putVarInt32(10)
      out.putPrefixedString(self.service_call_name_)
    if (self.has_request_data_summary_):
      out.putVarInt32(26)
      out.putPrefixedString(self.request_data_summary_)
    if (self.has_response_data_summary_):
      out.putVarInt32(34)
      out.putPrefixedString(self.response_data_summary_)
    if (self.has_api_mcycles_):
      out.putVarInt32(40)
      out.putVarInt64(self.api_mcycles_)
    if (self.has_start_offset_milliseconds_):
      out.putVarInt32(48)
      out.putVarInt64(self.start_offset_milliseconds_)
    if (self.has_duration_milliseconds_):
      out.putVarInt32(56)
      out.putVarInt64(self.duration_milliseconds_)
    if (self.has_namespace_):
      out.putVarInt32(66)
      out.putPrefixedString(self.namespace_)
    if (self.has_was_successful_):
      out.putVarInt32(72)
      out.putBoolean(self.was_successful_)
    for i in xrange(len(self.call_stack_)):
      out.putVarInt32(82)
      out.putVarInt32(self.call_stack_[i].ByteSizePartial())
      self.call_stack_[i].OutputPartial(out)
if (self.has_api_milliseconds_): out.putVarInt32(88) out.putVarInt64(self.api_milliseconds_) if (self.has_datastore_details_): out.putVarInt32(98) out.putVarInt32(self.datastore_details_.ByteSizePartial()) self.datastore_details_.OutputPartial(out) if (self.has_call_cost_microdollars_): out.putVarInt32(104) out.putVarInt64(self.call_cost_microdollars_) for i in xrange(len(self.billed_ops_)): out.putVarInt32(114) out.putVarInt32(self.billed_ops_[i].ByteSizePartial()) self.billed_ops_[i].OutputPartial(out) def TryMerge(self, d): while d.avail() > 0: tt = d.getVarInt32() if tt == 10: self.set_service_call_name(d.getPrefixedString()) continue if tt == 26: self.set_request_data_summary(d.getPrefixedString()) continue if tt == 34: self.set_response_data_summary(d.getPrefixedString()) continue if tt == 40: self.set_api_mcycles(d.getVarInt64()) continue if tt == 48: self.set_start_offset_milliseconds(d.getVarInt64()) continue if tt == 56: self.set_duration_milliseconds(d.getVarInt64()) continue if tt == 66: self.set_namespace(d.getPrefixedString()) continue if tt == 72: self.set_was_successful(d.getBoolean()) continue if tt == 82: length = d.getVarInt32() tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length) d.skip(length) self.add_call_stack().TryMerge(tmp) continue if tt == 88: self.set_api_milliseconds(d.getVarInt64()) continue if tt == 98: length = d.getVarInt32() tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length) d.skip(length) self.mutable_datastore_details().TryMerge(tmp) continue if tt == 104: self.set_call_cost_microdollars(d.getVarInt64()) continue if tt == 114: length = d.getVarInt32() tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length) d.skip(length) self.add_billed_ops().TryMerge(tmp) continue if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError d.skipData(tt) def __str__(self, prefix="", printElemNumber=0): res="" if self.has_service_call_name_: res+=prefix+("service_call_name: %s\n" % 
self.DebugFormatString(self.service_call_name_)) if self.has_request_data_summary_: res+=prefix+("request_data_summary: %s\n" % self.DebugFormatString(self.request_data_summary_)) if self.has_response_data_summary_: res+=prefix+("response_data_summary: %s\n" % self.DebugFormatString(self.response_data_summary_)) if self.has_api_mcycles_: res+=prefix+("api_mcycles: %s\n" % self.DebugFormatInt64(self.api_mcycles_)) if self.has_api_milliseconds_: res+=prefix+("api_milliseconds: %s\n" % self.DebugFormatInt64(self.api_milliseconds_)) if self.has_start_offset_milliseconds_: res+=prefix+("start_offset_milliseconds: %s\n" % self.DebugFormatInt64(self.start_offset_milliseconds_)) if self.has_duration_milliseconds_: res+=prefix+("duration_milliseconds: %s\n" % self.DebugFormatInt64(self.duration_milliseconds_)) if self.has_namespace_: res+=prefix+("namespace: %s\n" % self.DebugFormatString(self.namespace_)) if self.has_was_successful_: res+=prefix+("was_successful: %s\n" % self.DebugFormatBool(self.was_successful_)) cnt=0 for e in self.call_stack_: elm="" if printElemNumber: elm="(%d)" % cnt res+=prefix+("call_stack%s <\n" % elm) res+=e.__str__(prefix + " ", printElemNumber) res+=prefix+">\n" cnt+=1 if self.has_datastore_details_: res+=prefix+"datastore_details <\n" res+=self.datastore_details_.__str__(prefix + " ", printElemNumber) res+=prefix+">\n" if self.has_call_cost_microdollars_: res+=prefix+("call_cost_microdollars: %s\n" % self.DebugFormatInt64(self.call_cost_microdollars_)) cnt=0 for e in self.billed_ops_: elm="" if printElemNumber: elm="(%d)" % cnt res+=prefix+("billed_ops%s <\n" % elm) res+=e.__str__(prefix + " ", printElemNumber) res+=prefix+">\n" cnt+=1 return res def _BuildTagLookupTable(sparse, maxtag, default=None): return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)]) kservice_call_name = 1 krequest_data_summary = 3 kresponse_data_summary = 4 kapi_mcycles = 5 kapi_milliseconds = 11 kstart_offset_milliseconds = 6 kduration_milliseconds = 7 
knamespace = 8 kwas_successful = 9 kcall_stack = 10 kdatastore_details = 12 kcall_cost_microdollars = 13 kbilled_ops = 14 _TEXT = _BuildTagLookupTable({ 0: "ErrorCode", 1: "service_call_name", 3: "request_data_summary", 4: "response_data_summary", 5: "api_mcycles", 6: "start_offset_milliseconds", 7: "duration_milliseconds", 8: "namespace", 9: "was_successful", 10: "call_stack", 11: "api_milliseconds", 12: "datastore_details", 13: "call_cost_microdollars", 14: "billed_ops", }, 14) _TYPES = _BuildTagLookupTable({ 0: ProtocolBuffer.Encoder.NUMERIC, 1: ProtocolBuffer.Encoder.STRING, 3: ProtocolBuffer.Encoder.STRING, 4: ProtocolBuffer.Encoder.STRING, 5: ProtocolBuffer.Encoder.NUMERIC, 6: ProtocolBuffer.Encoder.NUMERIC, 7: ProtocolBuffer.Encoder.NUMERIC, 8: ProtocolBuffer.Encoder.STRING, 9: ProtocolBuffer.Encoder.NUMERIC, 10: ProtocolBuffer.Encoder.STRING, 11: ProtocolBuffer.Encoder.NUMERIC, 12: ProtocolBuffer.Encoder.STRING, 13: ProtocolBuffer.Encoder.NUMERIC, 14: ProtocolBuffer.Encoder.STRING, }, 14, ProtocolBuffer.Encoder.MAX_TYPE) _STYLE = """""" _STYLE_CONTENT_TYPE = """""" _PROTO_DESCRIPTOR_NAME = 'apphosting.IndividualRpcStatsProto' class RequestStatProto(ProtocolBuffer.ProtocolMessage): has_start_timestamp_milliseconds_ = 0 start_timestamp_milliseconds_ = 0 has_http_method_ = 0 http_method_ = "GET" has_http_path_ = 0 http_path_ = "/" has_http_query_ = 0 http_query_ = "" has_http_status_ = 0 http_status_ = 200 has_duration_milliseconds_ = 0 duration_milliseconds_ = 0 has_api_mcycles_ = 0 api_mcycles_ = 0 has_processor_mcycles_ = 0 processor_mcycles_ = 0 has_overhead_walltime_milliseconds_ = 0 overhead_walltime_milliseconds_ = 0 has_user_email_ = 0 user_email_ = "" has_is_admin_ = 0 is_admin_ = 0 def __init__(self, contents=None): self.rpc_stats_ = [] self.cgi_env_ = [] self.individual_stats_ = [] if contents is not None: self.MergeFromString(contents) def start_timestamp_milliseconds(self): return self.start_timestamp_milliseconds_ def 
set_start_timestamp_milliseconds(self, x): self.has_start_timestamp_milliseconds_ = 1 self.start_timestamp_milliseconds_ = x def clear_start_timestamp_milliseconds(self): if self.has_start_timestamp_milliseconds_: self.has_start_timestamp_milliseconds_ = 0 self.start_timestamp_milliseconds_ = 0 def has_start_timestamp_milliseconds(self): return self.has_start_timestamp_milliseconds_ def http_method(self): return self.http_method_ def set_http_method(self, x): self.has_http_method_ = 1 self.http_method_ = x def clear_http_method(self): if self.has_http_method_: self.has_http_method_ = 0 self.http_method_ = "GET" def has_http_method(self): return self.has_http_method_ def http_path(self): return self.http_path_ def set_http_path(self, x): self.has_http_path_ = 1 self.http_path_ = x def clear_http_path(self): if self.has_http_path_: self.has_http_path_ = 0 self.http_path_ = "/" def has_http_path(self): return self.has_http_path_ def http_query(self): return self.http_query_ def set_http_query(self, x): self.has_http_query_ = 1 self.http_query_ = x def clear_http_query(self): if self.has_http_query_: self.has_http_query_ = 0 self.http_query_ = "" def has_http_query(self): return self.has_http_query_ def http_status(self): return self.http_status_ def set_http_status(self, x): self.has_http_status_ = 1 self.http_status_ = x def clear_http_status(self): if self.has_http_status_: self.has_http_status_ = 0 self.http_status_ = 200 def has_http_status(self): return self.has_http_status_ def duration_milliseconds(self): return self.duration_milliseconds_ def set_duration_milliseconds(self, x): self.has_duration_milliseconds_ = 1 self.duration_milliseconds_ = x def clear_duration_milliseconds(self): if self.has_duration_milliseconds_: self.has_duration_milliseconds_ = 0 self.duration_milliseconds_ = 0 def has_duration_milliseconds(self): return self.has_duration_milliseconds_ def api_mcycles(self): return self.api_mcycles_ def set_api_mcycles(self, x): self.has_api_mcycles_ = 
1 self.api_mcycles_ = x def clear_api_mcycles(self): if self.has_api_mcycles_: self.has_api_mcycles_ = 0 self.api_mcycles_ = 0 def has_api_mcycles(self): return self.has_api_mcycles_ def processor_mcycles(self): return self.processor_mcycles_ def set_processor_mcycles(self, x): self.has_processor_mcycles_ = 1 self.processor_mcycles_ = x def clear_processor_mcycles(self): if self.has_processor_mcycles_: self.has_processor_mcycles_ = 0 self.processor_mcycles_ = 0 def has_processor_mcycles(self): return self.has_processor_mcycles_ def rpc_stats_size(self): return len(self.rpc_stats_) def rpc_stats_list(self): return self.rpc_stats_ def rpc_stats(self, i): return self.rpc_stats_[i] def mutable_rpc_stats(self, i): return self.rpc_stats_[i] def add_rpc_stats(self): x = AggregateRpcStatsProto() self.rpc_stats_.append(x) return x def clear_rpc_stats(self): self.rpc_stats_ = [] def cgi_env_size(self): return len(self.cgi_env_) def cgi_env_list(self): return self.cgi_env_ def cgi_env(self, i): return self.cgi_env_[i] def mutable_cgi_env(self, i): return self.cgi_env_[i] def add_cgi_env(self): x = KeyValProto() self.cgi_env_.append(x) return x def clear_cgi_env(self): self.cgi_env_ = [] def overhead_walltime_milliseconds(self): return self.overhead_walltime_milliseconds_ def set_overhead_walltime_milliseconds(self, x): self.has_overhead_walltime_milliseconds_ = 1 self.overhead_walltime_milliseconds_ = x def clear_overhead_walltime_milliseconds(self): if self.has_overhead_walltime_milliseconds_: self.has_overhead_walltime_milliseconds_ = 0 self.overhead_walltime_milliseconds_ = 0 def has_overhead_walltime_milliseconds(self): return self.has_overhead_walltime_milliseconds_ def user_email(self): return self.user_email_ def set_user_email(self, x): self.has_user_email_ = 1 self.user_email_ = x def clear_user_email(self): if self.has_user_email_: self.has_user_email_ = 0 self.user_email_ = "" def has_user_email(self): return self.has_user_email_ def is_admin(self): return 
self.is_admin_ def set_is_admin(self, x): self.has_is_admin_ = 1 self.is_admin_ = x def clear_is_admin(self): if self.has_is_admin_: self.has_is_admin_ = 0 self.is_admin_ = 0 def has_is_admin(self): return self.has_is_admin_ def individual_stats_size(self): return len(self.individual_stats_) def individual_stats_list(self): return self.individual_stats_ def individual_stats(self, i): return self.individual_stats_[i] def mutable_individual_stats(self, i): return self.individual_stats_[i] def add_individual_stats(self): x = IndividualRpcStatsProto() self.individual_stats_.append(x) return x def clear_individual_stats(self): self.individual_stats_ = [] def MergeFrom(self, x): assert x is not self if (x.has_start_timestamp_milliseconds()): self.set_start_timestamp_milliseconds(x.start_timestamp_milliseconds()) if (x.has_http_method()): self.set_http_method(x.http_method()) if (x.has_http_path()): self.set_http_path(x.http_path()) if (x.has_http_query()): self.set_http_query(x.http_query()) if (x.has_http_status()): self.set_http_status(x.http_status()) if (x.has_duration_milliseconds()): self.set_duration_milliseconds(x.duration_milliseconds()) if (x.has_api_mcycles()): self.set_api_mcycles(x.api_mcycles()) if (x.has_processor_mcycles()): self.set_processor_mcycles(x.processor_mcycles()) for i in xrange(x.rpc_stats_size()): self.add_rpc_stats().CopyFrom(x.rpc_stats(i)) for i in xrange(x.cgi_env_size()): self.add_cgi_env().CopyFrom(x.cgi_env(i)) if (x.has_overhead_walltime_milliseconds()): self.set_overhead_walltime_milliseconds(x.overhead_walltime_milliseconds()) if (x.has_user_email()): self.set_user_email(x.user_email()) if (x.has_is_admin()): self.set_is_admin(x.is_admin()) for i in xrange(x.individual_stats_size()): self.add_individual_stats().CopyFrom(x.individual_stats(i)) def Equals(self, x): if x is self: return 1 if self.has_start_timestamp_milliseconds_ != x.has_start_timestamp_milliseconds_: return 0 if self.has_start_timestamp_milliseconds_ and 
self.start_timestamp_milliseconds_ != x.start_timestamp_milliseconds_: return 0 if self.has_http_method_ != x.has_http_method_: return 0 if self.has_http_method_ and self.http_method_ != x.http_method_: return 0 if self.has_http_path_ != x.has_http_path_: return 0 if self.has_http_path_ and self.http_path_ != x.http_path_: return 0 if self.has_http_query_ != x.has_http_query_: return 0 if self.has_http_query_ and self.http_query_ != x.http_query_: return 0 if self.has_http_status_ != x.has_http_status_: return 0 if self.has_http_status_ and self.http_status_ != x.http_status_: return 0 if self.has_duration_milliseconds_ != x.has_duration_milliseconds_: return 0 if self.has_duration_milliseconds_ and self.duration_milliseconds_ != x.duration_milliseconds_: return 0 if self.has_api_mcycles_ != x.has_api_mcycles_: return 0 if self.has_api_mcycles_ and self.api_mcycles_ != x.api_mcycles_: return 0 if self.has_processor_mcycles_ != x.has_processor_mcycles_: return 0 if self.has_processor_mcycles_ and self.processor_mcycles_ != x.processor_mcycles_: return 0 if len(self.rpc_stats_) != len(x.rpc_stats_): return 0 for e1, e2 in zip(self.rpc_stats_, x.rpc_stats_): if e1 != e2: return 0 if len(self.cgi_env_) != len(x.cgi_env_): return 0 for e1, e2 in zip(self.cgi_env_, x.cgi_env_): if e1 != e2: return 0 if self.has_overhead_walltime_milliseconds_ != x.has_overhead_walltime_milliseconds_: return 0 if self.has_overhead_walltime_milliseconds_ and self.overhead_walltime_milliseconds_ != x.overhead_walltime_milliseconds_: return 0 if self.has_user_email_ != x.has_user_email_: return 0 if self.has_user_email_ and self.user_email_ != x.user_email_: return 0 if self.has_is_admin_ != x.has_is_admin_: return 0 if self.has_is_admin_ and self.is_admin_ != x.is_admin_: return 0 if len(self.individual_stats_) != len(x.individual_stats_): return 0 for e1, e2 in zip(self.individual_stats_, x.individual_stats_): if e1 != e2: return 0 return 1 def IsInitialized(self, debug_strs=None): 
initialized = 1 if (not self.has_start_timestamp_milliseconds_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: start_timestamp_milliseconds not set.') if (not self.has_duration_milliseconds_): initialized = 0 if debug_strs is not None: debug_strs.append('Required field: duration_milliseconds not set.') for p in self.rpc_stats_: if not p.IsInitialized(debug_strs): initialized=0 for p in self.cgi_env_: if not p.IsInitialized(debug_strs): initialized=0 for p in self.individual_stats_: if not p.IsInitialized(debug_strs): initialized=0 return initialized def ByteSize(self): n = 0 n += self.lengthVarInt64(self.start_timestamp_milliseconds_) if (self.has_http_method_): n += 1 + self.lengthString(len(self.http_method_)) if (self.has_http_path_): n += 1 + self.lengthString(len(self.http_path_)) if (self.has_http_query_): n += 1 + self.lengthString(len(self.http_query_)) if (self.has_http_status_): n += 1 + self.lengthVarInt64(self.http_status_) n += self.lengthVarInt64(self.duration_milliseconds_) if (self.has_api_mcycles_): n += 1 + self.lengthVarInt64(self.api_mcycles_) if (self.has_processor_mcycles_): n += 1 + self.lengthVarInt64(self.processor_mcycles_) n += 1 * len(self.rpc_stats_) for i in xrange(len(self.rpc_stats_)): n += self.lengthString(self.rpc_stats_[i].ByteSize()) n += 2 * len(self.cgi_env_) for i in xrange(len(self.cgi_env_)): n += self.lengthString(self.cgi_env_[i].ByteSize()) if (self.has_overhead_walltime_milliseconds_): n += 2 + self.lengthVarInt64(self.overhead_walltime_milliseconds_) if (self.has_user_email_): n += 2 + self.lengthString(len(self.user_email_)) if (self.has_is_admin_): n += 3 n += 2 * len(self.individual_stats_) for i in xrange(len(self.individual_stats_)): n += self.lengthString(self.individual_stats_[i].ByteSize()) return n + 2 def ByteSizePartial(self): n = 0 if (self.has_start_timestamp_milliseconds_): n += 1 n += self.lengthVarInt64(self.start_timestamp_milliseconds_) if (self.has_http_method_): n += 
1 + self.lengthString(len(self.http_method_)) if (self.has_http_path_): n += 1 + self.lengthString(len(self.http_path_)) if (self.has_http_query_): n += 1 + self.lengthString(len(self.http_query_)) if (self.has_http_status_): n += 1 + self.lengthVarInt64(self.http_status_) if (self.has_duration_milliseconds_): n += 1 n += self.lengthVarInt64(self.duration_milliseconds_) if (self.has_api_mcycles_): n += 1 + self.lengthVarInt64(self.api_mcycles_) if (self.has_processor_mcycles_): n += 1 + self.lengthVarInt64(self.processor_mcycles_) n += 1 * len(self.rpc_stats_) for i in xrange(len(self.rpc_stats_)): n += self.lengthString(self.rpc_stats_[i].ByteSizePartial()) n += 2 * len(self.cgi_env_) for i in xrange(len(self.cgi_env_)): n += self.lengthString(self.cgi_env_[i].ByteSizePartial()) if (self.has_overhead_walltime_milliseconds_): n += 2 + self.lengthVarInt64(self.overhead_walltime_milliseconds_) if (self.has_user_email_): n += 2 + self.lengthString(len(self.user_email_)) if (self.has_is_admin_): n += 3 n += 2 * len(self.individual_stats_) for i in xrange(len(self.individual_stats_)): n += self.lengthString(self.individual_stats_[i].ByteSizePartial()) return n def Clear(self): self.clear_start_timestamp_milliseconds() self.clear_http_method() self.clear_http_path() self.clear_http_query() self.clear_http_status() self.clear_duration_milliseconds() self.clear_api_mcycles() self.clear_processor_mcycles() self.clear_rpc_stats() self.clear_cgi_env() self.clear_overhead_walltime_milliseconds() self.clear_user_email() self.clear_is_admin() self.clear_individual_stats() def OutputUnchecked(self, out): out.putVarInt32(8) out.putVarInt64(self.start_timestamp_milliseconds_) if (self.has_http_method_): out.putVarInt32(18) out.putPrefixedString(self.http_method_) if (self.has_http_path_): out.putVarInt32(26) out.putPrefixedString(self.http_path_) if (self.has_http_query_): out.putVarInt32(34) out.putPrefixedString(self.http_query_) if (self.has_http_status_): out.putVarInt32(40) 
out.putVarInt32(self.http_status_) out.putVarInt32(48) out.putVarInt64(self.duration_milliseconds_) if (self.has_api_mcycles_): out.putVarInt32(56) out.putVarInt64(self.api_mcycles_) if (self.has_processor_mcycles_): out.putVarInt32(64) out.putVarInt64(self.processor_mcycles_) for i in xrange(len(self.rpc_stats_)): out.putVarInt32(74) out.putVarInt32(self.rpc_stats_[i].ByteSize()) self.rpc_stats_[i].OutputUnchecked(out) for i in xrange(len(self.cgi_env_)): out.putVarInt32(810) out.putVarInt32(self.cgi_env_[i].ByteSize()) self.cgi_env_[i].OutputUnchecked(out) if (self.has_overhead_walltime_milliseconds_): out.putVarInt32(816) out.putVarInt64(self.overhead_walltime_milliseconds_) if (self.has_user_email_): out.putVarInt32(826) out.putPrefixedString(self.user_email_) if (self.has_is_admin_): out.putVarInt32(832) out.putBoolean(self.is_admin_) for i in xrange(len(self.individual_stats_)): out.putVarInt32(858) out.putVarInt32(self.individual_stats_[i].ByteSize()) self.individual_stats_[i].OutputUnchecked(out) def OutputPartial(self, out): if (self.has_start_timestamp_milliseconds_): out.putVarInt32(8) out.putVarInt64(self.start_timestamp_milliseconds_) if (self.has_http_method_): out.putVarInt32(18) out.putPrefixedString(self.http_method_) if (self.has_http_path_): out.putVarInt32(26) out.putPrefixedString(self.http_path_) if (self.has_http_query_): out.putVarInt32(34) out.putPrefixedString(self.http_query_) if (self.has_http_status_): out.putVarInt32(40) out.putVarInt32(self.http_status_) if (self.has_duration_milliseconds_): out.putVarInt32(48) out.putVarInt64(self.duration_milliseconds_) if (self.has_api_mcycles_): out.putVarInt32(56) out.putVarInt64(self.api_mcycles_) if (self.has_processor_mcycles_): out.putVarInt32(64) out.putVarInt64(self.processor_mcycles_) for i in xrange(len(self.rpc_stats_)): out.putVarInt32(74) out.putVarInt32(self.rpc_stats_[i].ByteSizePartial()) self.rpc_stats_[i].OutputPartial(out) for i in xrange(len(self.cgi_env_)): out.putVarInt32(810) 
out.putVarInt32(self.cgi_env_[i].ByteSizePartial()) self.cgi_env_[i].OutputPartial(out) if (self.has_overhead_walltime_milliseconds_): out.putVarInt32(816) out.putVarInt64(self.overhead_walltime_milliseconds_) if (self.has_user_email_): out.putVarInt32(826) out.putPrefixedString(self.user_email_) if (self.has_is_admin_): out.putVarInt32(832) out.putBoolean(self.is_admin_) for i in xrange(len(self.individual_stats_)): out.putVarInt32(858) out.putVarInt32(self.individual_stats_[i].ByteSizePartial()) self.individual_stats_[i].OutputPartial(out) def TryMerge(self, d): while d.avail() > 0: tt = d.getVarInt32() if tt == 8: self.set_start_timestamp_milliseconds(d.getVarInt64()) continue if tt == 18: self.set_http_method(d.getPrefixedString()) continue if tt == 26: self.set_http_path(d.getPrefixedString()) continue if tt == 34: self.set_http_query(d.getPrefixedString()) continue if tt == 40: self.set_http_status(d.getVarInt32()) continue if tt == 48: self.set_duration_milliseconds(d.getVarInt64()) continue if tt == 56: self.set_api_mcycles(d.getVarInt64()) continue if tt == 64: self.set_processor_mcycles(d.getVarInt64()) continue if tt == 74: length = d.getVarInt32() tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length) d.skip(length) self.add_rpc_stats().TryMerge(tmp) continue if tt == 810: length = d.getVarInt32() tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length) d.skip(length) self.add_cgi_env().TryMerge(tmp) continue if tt == 816: self.set_overhead_walltime_milliseconds(d.getVarInt64()) continue if tt == 826: self.set_user_email(d.getPrefixedString()) continue if tt == 832: self.set_is_admin(d.getBoolean()) continue if tt == 858: length = d.getVarInt32() tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length) d.skip(length) self.add_individual_stats().TryMerge(tmp) continue if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError d.skipData(tt) def __str__(self, prefix="", printElemNumber=0): res="" if 
self.has_start_timestamp_milliseconds_: res+=prefix+("start_timestamp_milliseconds: %s\n" % self.DebugFormatInt64(self.start_timestamp_milliseconds_)) if self.has_http_method_: res+=prefix+("http_method: %s\n" % self.DebugFormatString(self.http_method_)) if self.has_http_path_: res+=prefix+("http_path: %s\n" % self.DebugFormatString(self.http_path_)) if self.has_http_query_: res+=prefix+("http_query: %s\n" % self.DebugFormatString(self.http_query_)) if self.has_http_status_: res+=prefix+("http_status: %s\n" % self.DebugFormatInt32(self.http_status_)) if self.has_duration_milliseconds_: res+=prefix+("duration_milliseconds: %s\n" % self.DebugFormatInt64(self.duration_milliseconds_)) if self.has_api_mcycles_: res+=prefix+("api_mcycles: %s\n" % self.DebugFormatInt64(self.api_mcycles_)) if self.has_processor_mcycles_: res+=prefix+("processor_mcycles: %s\n" % self.DebugFormatInt64(self.processor_mcycles_)) cnt=0 for e in self.rpc_stats_: elm="" if printElemNumber: elm="(%d)" % cnt res+=prefix+("rpc_stats%s <\n" % elm) res+=e.__str__(prefix + " ", printElemNumber) res+=prefix+">\n" cnt+=1 cnt=0 for e in self.cgi_env_: elm="" if printElemNumber: elm="(%d)" % cnt res+=prefix+("cgi_env%s <\n" % elm) res+=e.__str__(prefix + " ", printElemNumber) res+=prefix+">\n" cnt+=1 if self.has_overhead_walltime_milliseconds_: res+=prefix+("overhead_walltime_milliseconds: %s\n" % self.DebugFormatInt64(self.overhead_walltime_milliseconds_)) if self.has_user_email_: res+=prefix+("user_email: %s\n" % self.DebugFormatString(self.user_email_)) if self.has_is_admin_: res+=prefix+("is_admin: %s\n" % self.DebugFormatBool(self.is_admin_)) cnt=0 for e in self.individual_stats_: elm="" if printElemNumber: elm="(%d)" % cnt res+=prefix+("individual_stats%s <\n" % elm) res+=e.__str__(prefix + " ", printElemNumber) res+=prefix+">\n" cnt+=1 return res def _BuildTagLookupTable(sparse, maxtag, default=None): return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)]) kstart_timestamp_milliseconds = 
1 khttp_method = 2 khttp_path = 3 khttp_query = 4 khttp_status = 5 kduration_milliseconds = 6 kapi_mcycles = 7 kprocessor_mcycles = 8 krpc_stats = 9 kcgi_env = 101 koverhead_walltime_milliseconds = 102 kuser_email = 103 kis_admin = 104 kindividual_stats = 107 _TEXT = _BuildTagLookupTable({ 0: "ErrorCode", 1: "start_timestamp_milliseconds", 2: "http_method", 3: "http_path", 4: "http_query", 5: "http_status", 6: "duration_milliseconds", 7: "api_mcycles", 8: "processor_mcycles", 9: "rpc_stats", 101: "cgi_env", 102: "overhead_walltime_milliseconds", 103: "user_email", 104: "is_admin", 107: "individual_stats", }, 107) _TYPES = _BuildTagLookupTable({ 0: ProtocolBuffer.Encoder.NUMERIC, 1: ProtocolBuffer.Encoder.NUMERIC, 2: ProtocolBuffer.Encoder.STRING, 3: ProtocolBuffer.Encoder.STRING, 4: ProtocolBuffer.Encoder.STRING, 5: ProtocolBuffer.Encoder.NUMERIC, 6: ProtocolBuffer.Encoder.NUMERIC, 7: ProtocolBuffer.Encoder.NUMERIC, 8: ProtocolBuffer.Encoder.NUMERIC, 9: ProtocolBuffer.Encoder.STRING, 101: ProtocolBuffer.Encoder.STRING, 102: ProtocolBuffer.Encoder.NUMERIC, 103: ProtocolBuffer.Encoder.STRING, 104: ProtocolBuffer.Encoder.NUMERIC, 107: ProtocolBuffer.Encoder.STRING, }, 107, ProtocolBuffer.Encoder.MAX_TYPE) _STYLE = """""" _STYLE_CONTENT_TYPE = """""" _PROTO_DESCRIPTOR_NAME = 'apphosting.RequestStatProto' if _extension_runtime: pass __all__ = ['AggregateRpcStatsProto','KeyValProto','StackFrameProto','BilledOpProto','DatastoreCallDetailsProto','IndividualRpcStatsProto','RequestStatProto']
illicitonion/givabit
lib/sdks/google_appengine_1.7.1/google_appengine/google/appengine/ext/appstats/datamodel_pb.py
Python
apache-2.0
81,798
// Copyright 2013 The GLFW-RS Developers. For a full listing of the authors, // refer to the AUTHORS file at the top-level directory of this distribution. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. extern crate glfw; use glfw::{Action, Context, Key}; fn main() { let mut glfw = glfw::init(glfw::FAIL_ON_ERRORS).unwrap(); glfw.window_hint(glfw::WindowHint::Resizable(true)); let (mut window, events) = glfw .create_window( 800, 600, "Hello, I am a window.", glfw::WindowMode::Windowed, ) .expect("Failed to create GLFW window."); window.set_sticky_keys(true); // Polling of events can be turned on and off by the specific event type window.set_pos_polling(true); window.set_all_polling(true); window.set_size_polling(true); window.set_close_polling(true); window.set_refresh_polling(true); window.set_focus_polling(true); window.set_iconify_polling(true); window.set_framebuffer_size_polling(true); window.set_key_polling(true); window.set_char_polling(true); window.set_char_mods_polling(true); window.set_mouse_button_polling(true); window.set_cursor_pos_polling(true); window.set_cursor_enter_polling(true); window.set_scroll_polling(true); window.set_maximize_polling(true); window.set_content_scale_polling(true); // Alternatively, all event types may be set to poll at once. Note that // in this example, this call is redundant as all events have been set // to poll in the above code. 
window.set_all_polling(true); window.make_current(); while !window.should_close() { glfw.poll_events(); for event in glfw::flush_messages(&events) { handle_window_event(&mut window, event); } } } fn handle_window_event(window: &mut glfw::Window, (time, event): (f64, glfw::WindowEvent)) { match event { glfw::WindowEvent::Pos(x, y) => { window.set_title(&format!("Time: {:?}, Window pos: ({:?}, {:?})", time, x, y)) } glfw::WindowEvent::Size(w, h) => window.set_title(&format!( "Time: {:?}, Window size: ({:?}, {:?})", time, w, h )), glfw::WindowEvent::Close => println!("Time: {:?}, Window close requested.", time), glfw::WindowEvent::Refresh => { println!("Time: {:?}, Window refresh callback triggered.", time) } glfw::WindowEvent::Focus(true) => println!("Time: {:?}, Window focus gained.", time), glfw::WindowEvent::Focus(false) => println!("Time: {:?}, Window focus lost.", time), glfw::WindowEvent::Iconify(true) => println!("Time: {:?}, Window was minimised", time), glfw::WindowEvent::Iconify(false) => println!("Time: {:?}, Window was maximised.", time), glfw::WindowEvent::FramebufferSize(w, h) => { println!("Time: {:?}, Framebuffer size: ({:?}, {:?})", time, w, h) } glfw::WindowEvent::Char(character) => { println!("Time: {:?}, Character: {:?}", time, character) } glfw::WindowEvent::CharModifiers(character, mods) => println!( "Time: {:?}, Character: {:?}, Modifiers: [{:?}]", time, character, mods ), glfw::WindowEvent::MouseButton(btn, action, mods) => println!( "Time: {:?}, Button: {:?}, Action: {:?}, Modifiers: [{:?}]", time, glfw::DebugAliases(btn), action, mods ), glfw::WindowEvent::CursorPos(xpos, ypos) => window.set_title(&format!( "Time: {:?}, Cursor position: ({:?}, {:?})", time, xpos, ypos )), glfw::WindowEvent::CursorEnter(true) => { println!("Time: {:?}, Cursor entered window.", time) } glfw::WindowEvent::CursorEnter(false) => println!("Time: {:?}, Cursor left window.", time), glfw::WindowEvent::Scroll(x, y) => window.set_title(&format!( "Time: {:?}, Scroll 
offset: ({:?}, {:?})", time, x, y )), glfw::WindowEvent::Key(key, scancode, action, mods) => { println!( "Time: {:?}, Key: {:?}, ScanCode: {:?}, Action: {:?}, Modifiers: [{:?}]", time, key, scancode, action, mods ); match (key, action) { (Key::Escape, Action::Press) => window.set_should_close(true), (Key::R, Action::Press) => { // Resize should cause the window to "refresh" let (window_width, window_height) = window.get_size(); window.set_size(window_width + 1, window_height); window.set_size(window_width, window_height); } _ => {} } } glfw::WindowEvent::FileDrop(paths) => { println!("Time: {:?}, Files dropped: {:?}", time, paths) } glfw::WindowEvent::Maximize(maximized) => { println!("Time: {:?}, Window maximized: {:?}.", time, maximized) } glfw::WindowEvent::ContentScale(xscale, yscale) => println!( "Time: {:?}, Content scale x: {:?}, Content scale y: {:?}", time, xscale, yscale ), } }
bvssvni/glfw-rs
examples/events.rs
Rust
apache-2.0
5,806
/* Copyright 2018 The OpenEBS Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package command import ( goflag "flag" "sync" startcontroller "github.com/openebs/maya/cmd/cstor-volume-mgmt/controller/start-controller" "github.com/openebs/maya/cmd/cstor-volume-mgmt/volume" serverclient "github.com/openebs/maya/pkg/cstor/volume/serverclient/v1alpha1" targetserver "github.com/openebs/maya/pkg/cstor/volume/targetserver" "github.com/spf13/cobra" ) // CmdStartOptions has flags for starting CStorVolume watcher. type CmdStartOptions struct { kubeconfig string port string } // NewCmdStart starts gRPC server and watcher for CStorVolume. 
func NewCmdStart() *cobra.Command { options := CmdStartOptions{} cmd := &cobra.Command{ Use: "start", Short: "starts CStorVolume gRPC and watcher", Long: `The grpc server would be serving snapshot requests whereas the watcher would be watching for add, updat, delete events`, Run: func(cmd *cobra.Command, args []string) { var wg sync.WaitGroup wg.Add(1) go func() { targetserver.StartTargetServer(options.kubeconfig) wg.Done() }() wg.Add(1) go func() { serverclient.StartServer(volume.UnixSockVar, options.port) wg.Done() }() wg.Add(1) go func() { startcontroller.StartControllers(options.kubeconfig) wg.Done() }() wg.Wait() }, } goflag.CommandLine.Parse([]string{}) cmd.Flags().StringVar(&options.kubeconfig, "kubeconfig", "", `kubeconfig needs to be specified if out of cluster`) cmd.Flags().StringVarP(&options.port, "port", "p", options.port, "port on which the server should listen on") return cmd }
openebs/maya
cmd/cstor-volume-mgmt/app/command/start.go
GO
apache-2.0
2,131
// Copyright (c) 2015 Alachisoft
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//------------------------------------------------------------------------------
// <auto-generated>
//     This code was generated by a tool.
//
//     Changes to this file may cause incorrect behavior and will be lost if
//     the code is regenerated.
// </auto-generated>
//------------------------------------------------------------------------------

// Generated from: CacheStoppedEventResponse.proto
// Note: requires additional types generated from: EventId.proto
// NOTE(review): auto-generated by protobuf-net — do not hand-edit the members;
// change the .proto and regenerate instead.
namespace Alachisoft.NCache.Common.Protobuf
{
    /// <summary>
    /// Protobuf response message carrying a "cache stopped" event
    /// (per the message name; see CacheStoppedEventResponse.proto).
    /// </summary>
    [global::System.Serializable, global::ProtoBuf.ProtoContract(Name=@"CacheStoppedEventResponse")]
    public partial class CacheStoppedEventResponse : global::ProtoBuf.IExtensible
    {
      public CacheStoppedEventResponse() {}

      private string _cacheId = "";
      /// <summary>Identifier of the cache (proto field 1, optional, defaults to "").</summary>
      [global::ProtoBuf.ProtoMember(1, IsRequired = false, Name=@"cacheId", DataFormat = global::ProtoBuf.DataFormat.Default)]
      [global::System.ComponentModel.DefaultValue("")]
      public string cacheId
      {
        get { return _cacheId; }
        set { _cacheId = value; }
      }

      private Alachisoft.NCache.Common.Protobuf.EventId _eventId = null;
      /// <summary>Event identifier for this notification (proto field 2, optional).</summary>
      [global::ProtoBuf.ProtoMember(2, IsRequired = false, Name=@"eventId", DataFormat = global::ProtoBuf.DataFormat.Default)]
      [global::System.ComponentModel.DefaultValue(null)]
      public Alachisoft.NCache.Common.Protobuf.EventId eventId
      {
        get { return _eventId; }
        set { _eventId = value; }
      }

      private global::ProtoBuf.IExtension extensionObject;
      // Extension-field storage required by protobuf-net's IExtensible contract.
      global::ProtoBuf.IExtension global::ProtoBuf.IExtensible.GetExtensionObject(bool createIfMissing)
      {
        return global::ProtoBuf.Extensible.GetExtensionObject(ref extensionObject, createIfMissing);
      }
    }
}
modulexcite/NCache
Src/NCCommon/Protobuf/CacheStoppedEventResponse.cs
C#
apache-2.0
2,316
/**
 *  Copyright 2014-2016 CyberVision, Inc.
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

package org.kaaproject.kaa.server.operations.service.akka.actors.supervision;

import org.kaaproject.kaa.server.operations.service.akka.AkkaContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import scala.concurrent.duration.Duration;

import akka.actor.OneForOneStrategy;
import akka.actor.SupervisorStrategy;
import akka.actor.SupervisorStrategy.Directive;
import akka.japi.Function;

/**
 * Builds the Akka {@link SupervisorStrategy} used by the various operations
 * service actors. Every strategy is a {@link OneForOneStrategy} with unlimited
 * retries (-1) and an infinite time window, and every strategy escalates
 * {@link Error}s; they differ only in how non-Error throwables are handled
 * (resume vs. restart).
 */
public final class SupervisionStrategyFactory {

    private static final Logger LOG = LoggerFactory.getLogger(SupervisionStrategyFactory.class);

    /** Static factory only; not instantiable. */
    private SupervisionStrategyFactory() {
    }

    /**
     * Strategy for IO router actors: resume on any non-Error throwable.
     * (The {@code context} argument is currently unused.)
     */
    public static SupervisorStrategy createIORouterStrategy(AkkaContext context) {
        return buildResumeOrEscalateStrategy();
    }

    /**
     * Strategy for the operations actor: resume on RuntimeException,
     * restart on other exceptions, escalate Errors.
     */
    public static SupervisorStrategy createOpsActorStrategy(AkkaContext context) {
        return buildResumeOnRuntimeErrorStrategy();
    }

    /**
     * Strategy for tenant actors: resume on RuntimeException,
     * restart on other exceptions, escalate Errors.
     */
    public static SupervisorStrategy createTenantActorStrategy(AkkaContext context) {
        return buildResumeOnRuntimeErrorStrategy();
    }

    /**
     * Strategy for application actors: restart on any non-Error throwable.
     */
    public static SupervisorStrategy createApplicationActorStrategy(AkkaContext context) {
        return buildRestartOrEscalateStrategy();
    }

    // Escalate Errors; resume the failing child for everything else.
    private static SupervisorStrategy buildResumeOrEscalateStrategy() {
        return new OneForOneStrategy(-1, Duration.Inf(), new Function<Throwable, SupervisorStrategy.Directive>() {
            @Override
            public Directive apply(Throwable t) throws Exception {
                logException(t);
                if (t instanceof Error) {
                    return OneForOneStrategy.escalate();
                } else {
                    return OneForOneStrategy.resume();
                }
            }
        });
    }

    // Escalate Errors; restart the failing child for everything else.
    private static SupervisorStrategy buildRestartOrEscalateStrategy() {
        return new OneForOneStrategy(-1, Duration.Inf(), new Function<Throwable, SupervisorStrategy.Directive>() {
            @Override
            public Directive apply(Throwable t) throws Exception {
                logException(t);
                if (t instanceof Error) {
                    return OneForOneStrategy.escalate();
                } else {
                    return OneForOneStrategy.restart();
                }
            }
        });
    }

    // Escalate Errors, resume on RuntimeException (likely transient),
    // restart on any other checked exception.
    private static SupervisorStrategy buildResumeOnRuntimeErrorStrategy() {
        return new OneForOneStrategy(-1, Duration.Inf(), new Function<Throwable, SupervisorStrategy.Directive>() {
            @Override
            public Directive apply(Throwable t) throws Exception {
                logException(t);
                if (t instanceof Error) {
                    return OneForOneStrategy.escalate();
                } else if (t instanceof RuntimeException) {
                    return OneForOneStrategy.resume();
                } else {
                    return OneForOneStrategy.restart();
                }
            }
        });
    }

    // Central logging hook so every supervised failure is recorded with its stack trace.
    private static void logException(Throwable t) {
        LOG.error("Supervisor strategy got exception: {}", t.getMessage(), t);
    }
}
Deepnekroz/kaa
server/node/src/main/java/org/kaaproject/kaa/server/operations/service/akka/actors/supervision/SupervisionStrategyFactory.java
Java
apache-2.0
3,672
require File.expand_path(File.dirname(__FILE__) + '/spec_helper') require 'digest/md5' module Mrg module Grid module Config describe Node do before(:each) do setup_rhubarb @store = Store.new reconstitute_db end after(:each) do teardown_rhubarb end include BaseDBFixture it "should consider group deletion to be a configuration change for nodes" do node = @store.addNode("test") group = @store.addExplicitGroup("group1") @store.addParam("PARAM1") group.modifyParams("ADD", {"PARAM1"=>"value1"}, {}) node.modifyMemberships("ADD", %w{group1}, {}) @store.activateConfiguration old_version = node.getConfig()["WALLABY_CONFIG_VERSION"] @store.removeGroup("group1") @store.activateConfiguration new_version = node.getConfig()["WALLABY_CONFIG_VERSION"] new_version.should_not == old_version node.getConfig().keys.should_not include("PARAM1") end end end end end
willb/configuration-store
spec/bz796406_spec.rb
Ruby
apache-2.0
1,129
/* * Copyright (c) 2008-2020, Hazelcast, Inc. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.hazelcast.map.impl; import com.hazelcast.map.listener.MapListener; import static com.hazelcast.map.impl.MapListenerAdaptors.createMapListenerAdaptor; /** * Contains support methods for creating various {@link com.hazelcast.map.impl.ListenerAdapter ListenerAdapter} * * @see com.hazelcast.map.impl.MapListenerAdaptors */ public final class ListenerAdapters { private ListenerAdapters() { } public static <T> ListenerAdapter<T> createListenerAdapter(Object listener) { if (listener instanceof ListenerAdapter) { return ((ListenerAdapter<T>) listener); } if (listener instanceof MapListener) { return createMapListenerAdaptor((MapListener) listener); } throw new IllegalArgumentException("Not a valid type to create a listener: " + listener.getClass().getSimpleName()); } }
mdogan/hazelcast
hazelcast/src/main/java/com/hazelcast/map/impl/ListenerAdapters.java
Java
apache-2.0
1,501
package com.alibaba.json.bvt.parser; import org.junit.Assert; import junit.framework.TestCase; import com.alibaba.fastjson.JSON; public class JSONScannerTest__x extends TestCase { public void test_x() throws Exception { StringBuilder buf = new StringBuilder(); buf.append("\""); for (int i = 0; i < 16; ++i) { for (int j = 0; j < 16; ++j) { buf.append("\\x"); buf.append(Integer.toHexString(i)); buf.append(Integer.toHexString(j)); } } buf.append("\""); String jsonString = (String) JSON.parse(buf.toString()); Assert.assertEquals(16 * 16, jsonString.length()); for (int i = 0; i < 16 * 16; ++i) { char c = jsonString.charAt(i); if ((int) c != i) { Assert.fail(); } } } }
alibaba/fastjson
src/test/java/com/alibaba/json/bvt/parser/JSONScannerTest__x.java
Java
apache-2.0
910
@echo off setlocal enableextensions set config=%1 if "%config%" == "" ( set config=Release ) set version= if not "%PackageVersion%" == "" ( set version=-Version %PackageVersion% ) REM Clean echo Cleaning... del /q src\NuPeek\bin\Release\* REM Build DotPeek 1.0 version %WINDIR%\Microsoft.NET\Framework\v4.0.30319\msbuild src\NuPeek.1.0.sln /p:Configuration="%config%" /t:Clean,Rebuild /m /v:M /fl /flp:LogFile=msbuild.log;Verbosity=Normal /nr:false mkdir install\NuPeek.1.0 2> NUL copy /y src\NuPeek\bin\Release\*.1.0.dll install\NuPeek.1.0\ copy /y src\NuPeek\bin\Release\NuGet.Core.dll install\NuPeek.1.0\ REM Clean echo Cleaning... del /q src\NuPeek\bin\Release\* REM Build DotPeek 1.1 version %WINDIR%\Microsoft.NET\Framework\v4.0.30319\msbuild src\NuPeek.1.1.sln /p:Configuration="%config%" /t:Clean,Rebuild /m /v:M /fl /flp:LogFile=msbuild.log;Verbosity=Normal /nr:false mkdir install\NuPeek.1.1 2> NUL copy /y src\NuPeek\bin\Release\*.1.1.dll install\NuPeek.1.1\ copy /y src\NuPeek\bin\Release\NuGet.Core.dll install\NuPeek.1.1\ REM Clean echo Cleaning... del /q src\NuPeek\bin\Release\* REM Build DotPeek 1.2 version %WINDIR%\Microsoft.NET\Framework\v4.0.30319\msbuild src\NuPeek.1.2.sln /p:Configuration="%config%" /t:Clean,Rebuild /m /v:M /fl /flp:LogFile=msbuild.log;Verbosity=Normal /nr:false mkdir install\NuPeek.1.2 2> NUL copy /y src\NuPeek\bin\Release\*.1.2.dll install\NuPeek.1.2\ copy /y src\NuPeek\bin\Release\NuGet.Core.dll install\NuPeek.1.2\ REM Build DotPeek 1.3 version %WINDIR%\Microsoft.NET\Framework\v4.0.30319\msbuild src\NuPeek.1.2.sln /p:Configuration="%config%" /t:Clean,Rebuild /m /v:M /fl /flp:LogFile=msbuild.log;Verbosity=Normal /nr:false mkdir install\NuPeek.1.3 2> NUL copy /y src\NuPeek\bin\Release\*.1.3.dll install\NuPeek.1.3\ copy /y src\NuPeek\bin\Release\NuGet.Core.dll install\NuPeek.1.3\
modulexcite/dotpeek-nupeek
build.bat
Batchfile
apache-2.0
1,847
/*
 * Copyright 2010-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.opsworks.model;

import java.io.Serializable;

/**
 * Contains the response to a <code>DescribeLoadBasedAutoScaling</code> request.
 * <p>
 * NOTE(review): this follows the AWS SDK codegen model-class pattern — hand
 * edits are likely to be overwritten on regeneration; confirm before changing.
 */
public class DescribeLoadBasedAutoScalingResult implements Serializable,
        Cloneable {

    /**
     * Per-layer <code>LoadBasedAutoScalingConfiguration</code> entries;
     * lazily initialized to an empty list by the getter.
     */
    private com.amazonaws.internal.SdkInternalList<LoadBasedAutoScalingConfiguration> loadBasedAutoScalingConfigurations;

    /**
     * Returns the per-layer configurations, never <code>null</code>:
     * lazily creates an empty internal list on first access.
     */
    public java.util.List<LoadBasedAutoScalingConfiguration> getLoadBasedAutoScalingConfigurations() {
        if (loadBasedAutoScalingConfigurations == null) {
            loadBasedAutoScalingConfigurations = new com.amazonaws.internal.SdkInternalList<LoadBasedAutoScalingConfiguration>();
        }
        return loadBasedAutoScalingConfigurations;
    }

    /**
     * Replaces the per-layer configurations with a copy of the given
     * collection; <code>null</code> clears the field entirely.
     */
    public void setLoadBasedAutoScalingConfigurations(
            java.util.Collection<LoadBasedAutoScalingConfiguration> loadBasedAutoScalingConfigurations) {
        if (loadBasedAutoScalingConfigurations == null) {
            this.loadBasedAutoScalingConfigurations = null;
            return;
        }

        this.loadBasedAutoScalingConfigurations = new com.amazonaws.internal.SdkInternalList<LoadBasedAutoScalingConfiguration>(
                loadBasedAutoScalingConfigurations);
    }

    /**
     * Fluent varargs variant: <b>appends</b> to the existing list (if any).
     * Use {@link #setLoadBasedAutoScalingConfigurations(java.util.Collection)}
     * to replace existing values instead.
     *
     * @return this object, for call chaining
     */
    public DescribeLoadBasedAutoScalingResult withLoadBasedAutoScalingConfigurations(
            LoadBasedAutoScalingConfiguration... loadBasedAutoScalingConfigurations) {
        if (this.loadBasedAutoScalingConfigurations == null) {
            setLoadBasedAutoScalingConfigurations(new com.amazonaws.internal.SdkInternalList<LoadBasedAutoScalingConfiguration>(
                    loadBasedAutoScalingConfigurations.length));
        }
        for (LoadBasedAutoScalingConfiguration ele : loadBasedAutoScalingConfigurations) {
            this.loadBasedAutoScalingConfigurations.add(ele);
        }
        return this;
    }

    /**
     * Fluent collection variant: <b>replaces</b> the list (delegates to the
     * setter, so <code>null</code> clears it).
     *
     * @return this object, for call chaining
     */
    public DescribeLoadBasedAutoScalingResult withLoadBasedAutoScalingConfigurations(
            java.util.Collection<LoadBasedAutoScalingConfiguration> loadBasedAutoScalingConfigurations) {
        setLoadBasedAutoScalingConfigurations(loadBasedAutoScalingConfigurations);
        return this;
    }

    /**
     * Returns a string representation of this object; useful for testing and
     * debugging.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        if (getLoadBasedAutoScalingConfigurations() != null)
            sb.append("LoadBasedAutoScalingConfigurations: "
                    + getLoadBasedAutoScalingConfigurations());
        sb.append("}");
        return sb.toString();
    }

    // Equality is based solely on the configurations list.
    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;

        if (obj instanceof DescribeLoadBasedAutoScalingResult == false)
            return false;
        DescribeLoadBasedAutoScalingResult other = (DescribeLoadBasedAutoScalingResult) obj;
        if (other.getLoadBasedAutoScalingConfigurations() == null
                ^ this.getLoadBasedAutoScalingConfigurations() == null)
            return false;
        if (other.getLoadBasedAutoScalingConfigurations() != null
                && other.getLoadBasedAutoScalingConfigurations().equals(
                        this.getLoadBasedAutoScalingConfigurations()) == false)
            return false;
        return true;
    }

    // Consistent with equals(): hashes only the configurations list.
    @Override
    public int hashCode() {
        final int prime = 31;
        int hashCode = 1;

        hashCode = prime
                * hashCode
                + ((getLoadBasedAutoScalingConfigurations() == null) ? 0
                        : getLoadBasedAutoScalingConfigurations().hashCode());
        return hashCode;
    }

    // Shallow clone via Object.clone(); the class is Cloneable so the
    // CloneNotSupportedException branch is unreachable in practice.
    @Override
    public DescribeLoadBasedAutoScalingResult clone() {
        try {
            return (DescribeLoadBasedAutoScalingResult) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new IllegalStateException(
                    "Got a CloneNotSupportedException from Object.clone() "
                            + "even though we're Cloneable!", e);
        }
    }
}
sdole/aws-sdk-java
aws-java-sdk-opsworks/src/main/java/com/amazonaws/services/opsworks/model/DescribeLoadBasedAutoScalingResult.java
Java
apache-2.0
6,974
import {parseQueryString} from '#core/types/string/url'; import {BrowserController, RequestBank} from '#testing/test-helper'; const config = describes.sandboxed.configure().skipEdge().skipSafari(); config.run('amp-story analytics', {}, () => { const extensions = ['amp-story:1.0', 'amp-analytics', 'amp-social-share']; describes.integration( 'amp-story analytics', { body: ` <amp-story standalone supports-landscape> <amp-story-page id="page-1"> <amp-story-grid-layer template="horizontal"> <p>First page</p> <a href="google.com" data-vars-link-id="myLink" id="link-1">Link</a> <p id="right-1">Click me</p> </amp-story-grid-layer> </amp-story-page> <amp-story-page id="page-2"> <amp-story-grid-layer template="horizontal"> <p id="left-2">Left side</p> <p>Center</p> <p id="right-2">Click me</p> </amp-story-grid-layer> </amp-story-page> <amp-story-social-share layout="nodisplay"> <script type="application/json"> { "shareProviders": [ { "provider": "facebook", "data-param-app_id": "1682114265451337", "data-param-href": "https://fr-fr.facebook.com/LaRochePosayFrance/" }, { "provider": "twitter", "data-param-url": "https://twitter.com/larocheposayfr?lang=fr" } ] } </script> </amp-story-social-share> </amp-story> <amp-analytics> <script type="application/json"> { "requests": { "endpoint": "${RequestBank.getUrl()}" }, "triggers": { "trackPageview": { "on": "story-page-visible", "request": "endpoint", "extraUrlParams": { "pageVisible": "\${storyPageId}" } }, "trackFocusedState": { "on": "story-focus", "request": "endpoint", "tagName": "a", "extraUrlParams": { "focusedLink": "\${linkId}" } } }, "extraUrlParams": { "pageVisible": "\${storyPageId}", "muted": false, "unmuted": false, "focusedLink": "\${linkId}" } } </script> </amp-analytics>`, extensions, }, (env) => { let browser; let clickAndWait; let clickAtPosition; let doc; beforeEach(async () => { browser = new BrowserController(env.win); clickAndWait = async (selector) => { browser.click(selector); await browser.wait(1000); }; 
clickAtPosition = async (selector, clientX = 0, clientY = 0) => { doc = env.win.document; const element = doc.querySelector(selector); const clickEvent = new MouseEvent('click', {clientX, clientY}); element.dispatchEvent(clickEvent); }; env.iframe.style.height = '732px'; env.iframe.style.width = '412px'; await browser.waitForElementLayout('amp-analytics'); return browser.waitForElementLayout('amp-story'); }); afterEach(() => { return RequestBank.tearDown(); }); it('should send analytics event when landing on a page', async () => { await browser.waitForElementLayout('#page-1[active]'); const req = await RequestBank.withdraw(); const q = parseQueryString(req.url.substr(1)); expect(q['pageVisible']).to.equal('page-1'); }); it('should send analytics event when navigating', async () => { await browser.waitForElementLayout('#page-1[active]'); await clickAndWait('#right-1'); await browser.waitForElementLayout('#page-2[active]'); const req = await RequestBank.withdraw(); const q = parseQueryString(req.url.substr(1)); expect(q['pageVisible']).to.equal('page-2'); }); it('should send same event twice when repeat option is absent in storyspec', async () => { await browser.waitForElementLayout('#page-1[active]'); await clickAndWait('#right-1'); await browser.waitForElementLayout('#page-2[active]'); // Go back to page 1. 
clickAtPosition('#left-2', 10); await browser.wait(1000); await browser.waitForElementLayout('#page-1[active]'); const req = await RequestBank.withdraw(); const q = parseQueryString(req.url.substr(1)); expect(q['pageVisible']).to.equal('page-1'); }); it('should send data vars attribute when specified', async () => { await browser.waitForElementLayout('#page-1[active]'); browser.click('#link-1'); await browser.wait(1000); const req = await RequestBank.withdraw(); const q = parseQueryString(req.url.substr(1)); expect(q['focusedLink']).to.equal('myLink'); }); } ); describes.integration( 'repeat in storySpec', { body: ` <amp-story standalone supports-landscape> <amp-story-page id="page-1"> <amp-story-grid-layer template="horizontal"> <p>Left side</p> <p>Center</p> <p id="right-1">Click me</p> </amp-story-grid-layer> </amp-story-page> <amp-story-page id="page-2"> <amp-story-grid-layer template="horizontal"> <p id="left-2">Left side</p> <p>Center</p> <p id="right-2">Click me</p> </amp-story-grid-layer> </amp-story-page> </amp-story> <amp-analytics> <script type="application/json"> { "requests": { "endpoint": "${RequestBank.getUrl()}" }, "triggers": { "trackPageview": { "on": "story-page-visible", "request": "endpoint", "storySpec": { "repeat": false }, "extraUrlParams": { "pageVisible": "\${storyPageId}" } } }, "extraUrlParams": { "pageVisible": "\${storyPageId}" } } </script> </amp-analytics>`, extensions, }, (env) => { let browser; let clickAtPosition; let doc; beforeEach(async () => { browser = new BrowserController(env.win); clickAtPosition = async (selector, clientX = 0, clientY = 0) => { doc = env.win.document; const element = doc.querySelector(selector); const clickEvent = new MouseEvent('click', {clientX, clientY}); element.dispatchEvent(clickEvent); }; env.iframe.style.height = '732px'; env.iframe.style.width = '412px'; await browser.waitForElementLayout('amp-analytics'); return browser.waitForElementLayout('amp-story'); }); afterEach(() => { return 
RequestBank.tearDown(); }); it('should not send same analytics event twice when repeat option is present', async () => { await browser.waitForElementLayout('#page-1[active]'); browser.click('#page-1'); await browser.wait(1000); await browser.waitForElementLayout('#page-2[active]'); // Go back to page 1. clickAtPosition('#left-2', 10); await browser.wait(1000); const req = await RequestBank.withdraw(); const q = parseQueryString(req.url.substr(1)); expect(q['pageVisible']).to.equal('page-2'); }); } ); });
jpettitt/amphtml
test/integration/test-amp-story-analytics.js
JavaScript
apache-2.0
7,557
/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #pragma once #include <aws/ec2/EC2_EXPORTS.h> #include <aws/ec2/EC2Request.h> #include <aws/core/utils/memory/stl/AWSString.h> #include <aws/ec2/model/FpgaImageAttributeName.h> #include <aws/ec2/model/OperationType.h> #include <aws/core/utils/memory/stl/AWSVector.h> #include <aws/ec2/model/LoadPermissionModifications.h> #include <utility> namespace Aws { namespace EC2 { namespace Model { /** */ class AWS_EC2_API ModifyFpgaImageAttributeRequest : public EC2Request { public: ModifyFpgaImageAttributeRequest(); // Service request name is the Operation name which will send this request out, // each operation should has unique request name, so that we can get operation's name from this request. // Note: this is not true for response, multiple operations may have the same response name, // so we can not get operation's name from response. inline virtual const char* GetServiceRequestName() const override { return "ModifyFpgaImageAttribute"; } Aws::String SerializePayload() const override; protected: void DumpBodyToUrl(Aws::Http::URI& uri ) const override; public: /** * <p>Checks whether you have the required permissions for the action, without * actually making the request, and provides an error response. If you have the * required permissions, the error response is <code>DryRunOperation</code>. * Otherwise, it is <code>UnauthorizedOperation</code>.</p> */ inline bool GetDryRun() const{ return m_dryRun; } /** * <p>Checks whether you have the required permissions for the action, without * actually making the request, and provides an error response. If you have the * required permissions, the error response is <code>DryRunOperation</code>. 
* Otherwise, it is <code>UnauthorizedOperation</code>.</p> */ inline bool DryRunHasBeenSet() const { return m_dryRunHasBeenSet; } /** * <p>Checks whether you have the required permissions for the action, without * actually making the request, and provides an error response. If you have the * required permissions, the error response is <code>DryRunOperation</code>. * Otherwise, it is <code>UnauthorizedOperation</code>.</p> */ inline void SetDryRun(bool value) { m_dryRunHasBeenSet = true; m_dryRun = value; } /** * <p>Checks whether you have the required permissions for the action, without * actually making the request, and provides an error response. If you have the * required permissions, the error response is <code>DryRunOperation</code>. * Otherwise, it is <code>UnauthorizedOperation</code>.</p> */ inline ModifyFpgaImageAttributeRequest& WithDryRun(bool value) { SetDryRun(value); return *this;} /** * <p>The ID of the AFI.</p> */ inline const Aws::String& GetFpgaImageId() const{ return m_fpgaImageId; } /** * <p>The ID of the AFI.</p> */ inline bool FpgaImageIdHasBeenSet() const { return m_fpgaImageIdHasBeenSet; } /** * <p>The ID of the AFI.</p> */ inline void SetFpgaImageId(const Aws::String& value) { m_fpgaImageIdHasBeenSet = true; m_fpgaImageId = value; } /** * <p>The ID of the AFI.</p> */ inline void SetFpgaImageId(Aws::String&& value) { m_fpgaImageIdHasBeenSet = true; m_fpgaImageId = std::move(value); } /** * <p>The ID of the AFI.</p> */ inline void SetFpgaImageId(const char* value) { m_fpgaImageIdHasBeenSet = true; m_fpgaImageId.assign(value); } /** * <p>The ID of the AFI.</p> */ inline ModifyFpgaImageAttributeRequest& WithFpgaImageId(const Aws::String& value) { SetFpgaImageId(value); return *this;} /** * <p>The ID of the AFI.</p> */ inline ModifyFpgaImageAttributeRequest& WithFpgaImageId(Aws::String&& value) { SetFpgaImageId(std::move(value)); return *this;} /** * <p>The ID of the AFI.</p> */ inline ModifyFpgaImageAttributeRequest& WithFpgaImageId(const char* 
value) { SetFpgaImageId(value); return *this;} /** * <p>The name of the attribute.</p> */ inline const FpgaImageAttributeName& GetAttribute() const{ return m_attribute; } /** * <p>The name of the attribute.</p> */ inline bool AttributeHasBeenSet() const { return m_attributeHasBeenSet; } /** * <p>The name of the attribute.</p> */ inline void SetAttribute(const FpgaImageAttributeName& value) { m_attributeHasBeenSet = true; m_attribute = value; } /** * <p>The name of the attribute.</p> */ inline void SetAttribute(FpgaImageAttributeName&& value) { m_attributeHasBeenSet = true; m_attribute = std::move(value); } /** * <p>The name of the attribute.</p> */ inline ModifyFpgaImageAttributeRequest& WithAttribute(const FpgaImageAttributeName& value) { SetAttribute(value); return *this;} /** * <p>The name of the attribute.</p> */ inline ModifyFpgaImageAttributeRequest& WithAttribute(FpgaImageAttributeName&& value) { SetAttribute(std::move(value)); return *this;} /** * <p>The operation type.</p> */ inline const OperationType& GetOperationType() const{ return m_operationType; } /** * <p>The operation type.</p> */ inline bool OperationTypeHasBeenSet() const { return m_operationTypeHasBeenSet; } /** * <p>The operation type.</p> */ inline void SetOperationType(const OperationType& value) { m_operationTypeHasBeenSet = true; m_operationType = value; } /** * <p>The operation type.</p> */ inline void SetOperationType(OperationType&& value) { m_operationTypeHasBeenSet = true; m_operationType = std::move(value); } /** * <p>The operation type.</p> */ inline ModifyFpgaImageAttributeRequest& WithOperationType(const OperationType& value) { SetOperationType(value); return *this;} /** * <p>The operation type.</p> */ inline ModifyFpgaImageAttributeRequest& WithOperationType(OperationType&& value) { SetOperationType(std::move(value)); return *this;} /** * <p>The AWS account IDs. 
This parameter is valid only when modifying the * <code>loadPermission</code> attribute.</p> */ inline const Aws::Vector<Aws::String>& GetUserIds() const{ return m_userIds; } /** * <p>The AWS account IDs. This parameter is valid only when modifying the * <code>loadPermission</code> attribute.</p> */ inline bool UserIdsHasBeenSet() const { return m_userIdsHasBeenSet; } /** * <p>The AWS account IDs. This parameter is valid only when modifying the * <code>loadPermission</code> attribute.</p> */ inline void SetUserIds(const Aws::Vector<Aws::String>& value) { m_userIdsHasBeenSet = true; m_userIds = value; } /** * <p>The AWS account IDs. This parameter is valid only when modifying the * <code>loadPermission</code> attribute.</p> */ inline void SetUserIds(Aws::Vector<Aws::String>&& value) { m_userIdsHasBeenSet = true; m_userIds = std::move(value); } /** * <p>The AWS account IDs. This parameter is valid only when modifying the * <code>loadPermission</code> attribute.</p> */ inline ModifyFpgaImageAttributeRequest& WithUserIds(const Aws::Vector<Aws::String>& value) { SetUserIds(value); return *this;} /** * <p>The AWS account IDs. This parameter is valid only when modifying the * <code>loadPermission</code> attribute.</p> */ inline ModifyFpgaImageAttributeRequest& WithUserIds(Aws::Vector<Aws::String>&& value) { SetUserIds(std::move(value)); return *this;} /** * <p>The AWS account IDs. This parameter is valid only when modifying the * <code>loadPermission</code> attribute.</p> */ inline ModifyFpgaImageAttributeRequest& AddUserIds(const Aws::String& value) { m_userIdsHasBeenSet = true; m_userIds.push_back(value); return *this; } /** * <p>The AWS account IDs. This parameter is valid only when modifying the * <code>loadPermission</code> attribute.</p> */ inline ModifyFpgaImageAttributeRequest& AddUserIds(Aws::String&& value) { m_userIdsHasBeenSet = true; m_userIds.push_back(std::move(value)); return *this; } /** * <p>The AWS account IDs. 
This parameter is valid only when modifying the * <code>loadPermission</code> attribute.</p> */ inline ModifyFpgaImageAttributeRequest& AddUserIds(const char* value) { m_userIdsHasBeenSet = true; m_userIds.push_back(value); return *this; } /** * <p>The user groups. This parameter is valid only when modifying the * <code>loadPermission</code> attribute.</p> */ inline const Aws::Vector<Aws::String>& GetUserGroups() const{ return m_userGroups; } /** * <p>The user groups. This parameter is valid only when modifying the * <code>loadPermission</code> attribute.</p> */ inline bool UserGroupsHasBeenSet() const { return m_userGroupsHasBeenSet; } /** * <p>The user groups. This parameter is valid only when modifying the * <code>loadPermission</code> attribute.</p> */ inline void SetUserGroups(const Aws::Vector<Aws::String>& value) { m_userGroupsHasBeenSet = true; m_userGroups = value; } /** * <p>The user groups. This parameter is valid only when modifying the * <code>loadPermission</code> attribute.</p> */ inline void SetUserGroups(Aws::Vector<Aws::String>&& value) { m_userGroupsHasBeenSet = true; m_userGroups = std::move(value); } /** * <p>The user groups. This parameter is valid only when modifying the * <code>loadPermission</code> attribute.</p> */ inline ModifyFpgaImageAttributeRequest& WithUserGroups(const Aws::Vector<Aws::String>& value) { SetUserGroups(value); return *this;} /** * <p>The user groups. This parameter is valid only when modifying the * <code>loadPermission</code> attribute.</p> */ inline ModifyFpgaImageAttributeRequest& WithUserGroups(Aws::Vector<Aws::String>&& value) { SetUserGroups(std::move(value)); return *this;} /** * <p>The user groups. This parameter is valid only when modifying the * <code>loadPermission</code> attribute.</p> */ inline ModifyFpgaImageAttributeRequest& AddUserGroups(const Aws::String& value) { m_userGroupsHasBeenSet = true; m_userGroups.push_back(value); return *this; } /** * <p>The user groups. 
This parameter is valid only when modifying the * <code>loadPermission</code> attribute.</p> */ inline ModifyFpgaImageAttributeRequest& AddUserGroups(Aws::String&& value) { m_userGroupsHasBeenSet = true; m_userGroups.push_back(std::move(value)); return *this; } /** * <p>The user groups. This parameter is valid only when modifying the * <code>loadPermission</code> attribute.</p> */ inline ModifyFpgaImageAttributeRequest& AddUserGroups(const char* value) { m_userGroupsHasBeenSet = true; m_userGroups.push_back(value); return *this; } /** * <p>The product codes. After you add a product code to an AFI, it can't be * removed. This parameter is valid only when modifying the * <code>productCodes</code> attribute.</p> */ inline const Aws::Vector<Aws::String>& GetProductCodes() const{ return m_productCodes; } /** * <p>The product codes. After you add a product code to an AFI, it can't be * removed. This parameter is valid only when modifying the * <code>productCodes</code> attribute.</p> */ inline bool ProductCodesHasBeenSet() const { return m_productCodesHasBeenSet; } /** * <p>The product codes. After you add a product code to an AFI, it can't be * removed. This parameter is valid only when modifying the * <code>productCodes</code> attribute.</p> */ inline void SetProductCodes(const Aws::Vector<Aws::String>& value) { m_productCodesHasBeenSet = true; m_productCodes = value; } /** * <p>The product codes. After you add a product code to an AFI, it can't be * removed. This parameter is valid only when modifying the * <code>productCodes</code> attribute.</p> */ inline void SetProductCodes(Aws::Vector<Aws::String>&& value) { m_productCodesHasBeenSet = true; m_productCodes = std::move(value); } /** * <p>The product codes. After you add a product code to an AFI, it can't be * removed. 
This parameter is valid only when modifying the * <code>productCodes</code> attribute.</p> */ inline ModifyFpgaImageAttributeRequest& WithProductCodes(const Aws::Vector<Aws::String>& value) { SetProductCodes(value); return *this;} /** * <p>The product codes. After you add a product code to an AFI, it can't be * removed. This parameter is valid only when modifying the * <code>productCodes</code> attribute.</p> */ inline ModifyFpgaImageAttributeRequest& WithProductCodes(Aws::Vector<Aws::String>&& value) { SetProductCodes(std::move(value)); return *this;} /** * <p>The product codes. After you add a product code to an AFI, it can't be * removed. This parameter is valid only when modifying the * <code>productCodes</code> attribute.</p> */ inline ModifyFpgaImageAttributeRequest& AddProductCodes(const Aws::String& value) { m_productCodesHasBeenSet = true; m_productCodes.push_back(value); return *this; } /** * <p>The product codes. After you add a product code to an AFI, it can't be * removed. This parameter is valid only when modifying the * <code>productCodes</code> attribute.</p> */ inline ModifyFpgaImageAttributeRequest& AddProductCodes(Aws::String&& value) { m_productCodesHasBeenSet = true; m_productCodes.push_back(std::move(value)); return *this; } /** * <p>The product codes. After you add a product code to an AFI, it can't be * removed. 
This parameter is valid only when modifying the * <code>productCodes</code> attribute.</p> */ inline ModifyFpgaImageAttributeRequest& AddProductCodes(const char* value) { m_productCodesHasBeenSet = true; m_productCodes.push_back(value); return *this; } /** * <p>The load permission for the AFI.</p> */ inline const LoadPermissionModifications& GetLoadPermission() const{ return m_loadPermission; } /** * <p>The load permission for the AFI.</p> */ inline bool LoadPermissionHasBeenSet() const { return m_loadPermissionHasBeenSet; } /** * <p>The load permission for the AFI.</p> */ inline void SetLoadPermission(const LoadPermissionModifications& value) { m_loadPermissionHasBeenSet = true; m_loadPermission = value; } /** * <p>The load permission for the AFI.</p> */ inline void SetLoadPermission(LoadPermissionModifications&& value) { m_loadPermissionHasBeenSet = true; m_loadPermission = std::move(value); } /** * <p>The load permission for the AFI.</p> */ inline ModifyFpgaImageAttributeRequest& WithLoadPermission(const LoadPermissionModifications& value) { SetLoadPermission(value); return *this;} /** * <p>The load permission for the AFI.</p> */ inline ModifyFpgaImageAttributeRequest& WithLoadPermission(LoadPermissionModifications&& value) { SetLoadPermission(std::move(value)); return *this;} /** * <p>A description for the AFI.</p> */ inline const Aws::String& GetDescription() const{ return m_description; } /** * <p>A description for the AFI.</p> */ inline bool DescriptionHasBeenSet() const { return m_descriptionHasBeenSet; } /** * <p>A description for the AFI.</p> */ inline void SetDescription(const Aws::String& value) { m_descriptionHasBeenSet = true; m_description = value; } /** * <p>A description for the AFI.</p> */ inline void SetDescription(Aws::String&& value) { m_descriptionHasBeenSet = true; m_description = std::move(value); } /** * <p>A description for the AFI.</p> */ inline void SetDescription(const char* value) { m_descriptionHasBeenSet = true; 
m_description.assign(value); } /** * <p>A description for the AFI.</p> */ inline ModifyFpgaImageAttributeRequest& WithDescription(const Aws::String& value) { SetDescription(value); return *this;} /** * <p>A description for the AFI.</p> */ inline ModifyFpgaImageAttributeRequest& WithDescription(Aws::String&& value) { SetDescription(std::move(value)); return *this;} /** * <p>A description for the AFI.</p> */ inline ModifyFpgaImageAttributeRequest& WithDescription(const char* value) { SetDescription(value); return *this;} /** * <p>A name for the AFI.</p> */ inline const Aws::String& GetName() const{ return m_name; } /** * <p>A name for the AFI.</p> */ inline bool NameHasBeenSet() const { return m_nameHasBeenSet; } /** * <p>A name for the AFI.</p> */ inline void SetName(const Aws::String& value) { m_nameHasBeenSet = true; m_name = value; } /** * <p>A name for the AFI.</p> */ inline void SetName(Aws::String&& value) { m_nameHasBeenSet = true; m_name = std::move(value); } /** * <p>A name for the AFI.</p> */ inline void SetName(const char* value) { m_nameHasBeenSet = true; m_name.assign(value); } /** * <p>A name for the AFI.</p> */ inline ModifyFpgaImageAttributeRequest& WithName(const Aws::String& value) { SetName(value); return *this;} /** * <p>A name for the AFI.</p> */ inline ModifyFpgaImageAttributeRequest& WithName(Aws::String&& value) { SetName(std::move(value)); return *this;} /** * <p>A name for the AFI.</p> */ inline ModifyFpgaImageAttributeRequest& WithName(const char* value) { SetName(value); return *this;} private: bool m_dryRun; bool m_dryRunHasBeenSet; Aws::String m_fpgaImageId; bool m_fpgaImageIdHasBeenSet; FpgaImageAttributeName m_attribute; bool m_attributeHasBeenSet; OperationType m_operationType; bool m_operationTypeHasBeenSet; Aws::Vector<Aws::String> m_userIds; bool m_userIdsHasBeenSet; Aws::Vector<Aws::String> m_userGroups; bool m_userGroupsHasBeenSet; Aws::Vector<Aws::String> m_productCodes; bool m_productCodesHasBeenSet; 
LoadPermissionModifications m_loadPermission; bool m_loadPermissionHasBeenSet; Aws::String m_description; bool m_descriptionHasBeenSet; Aws::String m_name; bool m_nameHasBeenSet; }; } // namespace Model } // namespace EC2 } // namespace Aws
awslabs/aws-sdk-cpp
aws-cpp-sdk-ec2/include/aws/ec2/model/ModifyFpgaImageAttributeRequest.h
C
apache-2.0
18,714
#!/bin/sh echo "Dropping DB schema" mysql -u alfresco -palfresco -e "DROP SCHEMA activiticompatibility" echo "Creating DB schema" mysql -u alfresco -palfresco -e "CREATE SCHEMA activiticompatibility DEFAULT CHARACTER SET utf8 COLLATE utf8_bin" echo "Building dependencies" cd .. mvn clean install -DskipTests cd modules/activiti5-engine/ mvn clean install -DskipTests cd ../.. cd modules/activiti5-compatibility/ mvn clean install -DskipTests cd ../.. echo "Building test data generators" cd modules/activiti5-compatibility-testdata mvn clean package shade:shade echo "Generating test data" cd target java -jar activiti5-compatibility-testdata.jar echo "Running tests" cd ../../activiti5-compatibility-test mvn clean test
stefan-ziel/Activiti
scripts/run-compatibility-tests.sh
Shell
apache-2.0
730
//////////////////////////////////////////////////////////////////////////////// /// DISCLAIMER /// /// Copyright 2014-2021 ArangoDB GmbH, Cologne, Germany /// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany /// /// Licensed under the Apache License, Version 2.0 (the "License"); /// you may not use this file except in compliance with the License. /// You may obtain a copy of the License at /// /// http://www.apache.org/licenses/LICENSE-2.0 /// /// Unless required by applicable law or agreed to in writing, software /// distributed under the License is distributed on an "AS IS" BASIS, /// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. /// See the License for the specific language governing permissions and /// limitations under the License. /// /// Copyright holder is ArangoDB GmbH, Cologne, Germany /// /// @author Jan Steemann //////////////////////////////////////////////////////////////////////////////// #include "QueryList.h" #include "ApplicationFeatures/ApplicationServer.h" #include "Aql/Query.h" #include "Aql/QueryProfile.h" #include "Aql/Timing.h" #include "Basics/Exceptions.h" #include "Basics/ReadLocker.h" #include "Basics/Result.h" #include "Basics/StringUtils.h" #include "Basics/WriteLocker.h" #include "Basics/conversions.h" #include "Basics/system-functions.h" #include "Logger/LogMacros.h" #include "Logger/Logger.h" #include "Logger/LoggerStream.h" #include "RestServer/MetricsFeature.h" #include "RestServer/QueryRegistryFeature.h" #include "VocBase/vocbase.h" #include <velocypack/Builder.h> #include <velocypack/Dumper.h> #include <velocypack/Sink.h> #include <velocypack/StringRef.h> #include <velocypack/Value.h> #include <velocypack/velocypack-aliases.h> using namespace arangodb; using namespace arangodb::aql; QueryEntryCopy::QueryEntryCopy(TRI_voc_tick_t id, std::string const& database, std::string const& user, std::string&& queryString, std::shared_ptr<arangodb::velocypack::Builder> const& bindParameters, 
std::vector<std::string> dataSources, double started, double runTime, QueryExecutionState::ValueType state, bool stream, std::optional<ErrorCode> resultCode) : id(id), database(database), user(user), queryString(std::move(queryString)), bindParameters(bindParameters), dataSources(std::move(dataSources)), started(started), runTime(runTime), state(state), resultCode(resultCode), stream(stream) {} void QueryEntryCopy::toVelocyPack(velocypack::Builder& out) const { auto timeString = TRI_StringTimeStamp(started, Logger::getUseLocalTime()); out.add(VPackValue(VPackValueType::Object)); out.add("id", VPackValue(basics::StringUtils::itoa(id))); out.add("database", VPackValue(database)); out.add("user", VPackValue(user)); out.add("query", VPackValue(queryString)); if (bindParameters != nullptr && !bindParameters->slice().isNone()) { out.add("bindVars", bindParameters->slice()); } else { out.add("bindVars", arangodb::velocypack::Slice::emptyObjectSlice()); } if (!dataSources.empty()) { out.add("dataSources", VPackValue(VPackValueType::Array)); for (auto const& dn : dataSources) { out.add(VPackValue(dn)); } out.close(); } out.add("started", VPackValue(timeString)); out.add("runTime", VPackValue(runTime)); out.add("state", VPackValue(aql::QueryExecutionState::toString(state))); out.add("stream", VPackValue(stream)); if (resultCode.has_value()) { // exit code can only be determined if query is fully finished out.add("exitCode", VPackValue(*resultCode)); } out.close(); } /// @brief create a query list QueryList::QueryList(QueryRegistryFeature& feature) : _queryRegistryFeature(feature), _enabled(feature.trackingEnabled()), _trackSlowQueries(_enabled && feature.trackSlowQueries()), _trackQueryString(feature.trackQueryString()), _trackBindVars(feature.trackBindVars()), _trackDataSources(feature.trackDataSources()), _slowQueryThreshold(feature.slowQueryThreshold()), _slowStreamingQueryThreshold(feature.slowStreamingQueryThreshold()), _maxSlowQueries(defaultMaxSlowQueries), 
_maxQueryStringLength(defaultMaxQueryStringLength) { _current.reserve(32); } /// @brief insert a query bool QueryList::insert(Query* query) { TRI_ASSERT(query != nullptr); // not enabled or no query string if (!enabled() || query->queryString().empty()) { return false; } try { WRITE_LOCKER(writeLocker, _lock); TRI_IF_FAILURE("QueryList::insert") { THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG); } // return whether or not insertion worked bool inserted = _current.insert({query->id(), query}).second; _queryRegistryFeature.trackQueryStart(); return inserted; } catch (...) { return false; } } /// @brief remove a query void QueryList::remove(Query* query) { TRI_ASSERT(query != nullptr); // we're intentionally not checking _enabled here... // note: there is the possibility that a query got inserted when the // tracking was turned on, but is going to be removed when the tracking // is turned off. in this case, removal is forced so the contents of // the list are correct TRI_ASSERT(!query->queryString().empty()); { // acquire the query list's write lock only for a short amount of // time. if we need to insert a slow query later, we will re-acquire // the lock. but the hope is that for the majority of queries this is // not required WRITE_LOCKER(writeLocker, _lock); if (_current.erase(query->id()) == 0) { // not found return; } } // elapsed time since query start double const elapsed = elapsedSince(query->startTime()); _queryRegistryFeature.trackQueryEnd(elapsed); if (!trackSlowQueries()) { return; } bool const isStreaming = query->queryOptions().stream; double threshold = isStreaming ? _slowStreamingQueryThreshold.load(std::memory_order_relaxed) : _slowQueryThreshold.load(std::memory_order_relaxed); // check if we need to push the query into the list of slow queries if (elapsed >= threshold && threshold >= 0.0) { // yes. 
try { TRI_IF_FAILURE("QueryList::remove") { THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG); } _queryRegistryFeature.trackSlowQuery(elapsed); // we calculate the query start timestamp as the current time minus // the elapsed time since query start. this is not 100% accurrate, but // best effort, and saves us from bookkeeping the start timestamp of the // query inside the Query object. double const now = TRI_microtime(); size_t const maxQueryStringLength = _maxQueryStringLength.load(std::memory_order_relaxed); std::string q = extractQueryString(*query, maxQueryStringLength); std::string bindParameters; if (_trackBindVars) { // also log bind variables auto bp = query->bindParameters(); if (bp != nullptr && !bp->slice().isNone()) { bindParameters.append(", bind vars: "); bp->slice().toJson(bindParameters); if (bindParameters.size() > maxQueryStringLength) { bindParameters.resize(maxQueryStringLength - 3); bindParameters.append("..."); } } } std::string dataSources; if (_trackDataSources) { auto const d = query->collectionNames(); if (!d.empty()) { size_t i = 0; dataSources = ", data sources: ["; arangodb::velocypack::StringSink sink(&dataSources); arangodb::velocypack::Dumper dumper(&sink); for (auto const& dn : d) { if (i > 0) { dataSources.push_back(','); } dumper.appendString(dn.data(), dn.size()); ++i; } dataSources.push_back(']'); } } auto resultCode = query->resultCode(); LOG_TOPIC("8bcee", WARN, Logger::QUERIES) << "slow " << (isStreaming ? "streaming " : "") << "query: '" << q << "'" << bindParameters << dataSources << ", database: " << query->vocbase().name() << ", user: " << query->user() << ", id: " << query->id() << ", token: QRY" << query->id() << ", exit code: " << resultCode << ", took: " << Logger::FIXED(elapsed) << " s"; // acquire the query list lock again WRITE_LOCKER(writeLocker, _lock); _slow.emplace_back(query->id(), query->vocbase().name(), query->user(), std::move(q), _trackBindVars ? query->bindParameters() : nullptr, _trackDataSources ? 
query->collectionNames() : std::vector<std::string>(), now - elapsed, /* start timestamp */ elapsed /* run time */, query->killed() ? QueryExecutionState::ValueType::KILLED : QueryExecutionState::ValueType::FINISHED, isStreaming, resultCode); // _slow is an std::list, but since c++11 the size() method of all standard // containers is O(1), so this is ok if (_slow.size() > _maxSlowQueries) { // free first element _slow.pop_front(); } } catch (...) { } } } /// @brief kills a query Result QueryList::kill(TRI_voc_tick_t id) { size_t const maxLength = _maxQueryStringLength.load(std::memory_order_relaxed); READ_LOCKER(writeLocker, _lock); auto it = _current.find(id); if (it == _current.end()) { return {TRI_ERROR_QUERY_NOT_FOUND, "query ID not found in query list"}; } Query* query = (*it).second; killQuery(*query, maxLength, false); return Result(); } /// @brief kills all currently running queries that match the filter function /// (i.e. the filter should return true for a queries to be killed) uint64_t QueryList::kill(std::function<bool(Query&)> const& filter, bool silent) { uint64_t killed = 0; size_t const maxLength = _maxQueryStringLength.load(std::memory_order_relaxed); READ_LOCKER(readLocker, _lock); for (auto& it : _current) { Query& query = *(it.second); if (!filter(query)) { continue; } killQuery(query, maxLength, silent); ++killed; } return killed; } /// @brief get the list of currently running queries std::vector<QueryEntryCopy> QueryList::listCurrent() { std::vector<QueryEntryCopy> result; // reserve room for some queries outside of the lock already, // so we reduce the possibility of having to reserve more room // later result.reserve(16); size_t const maxLength = _maxQueryStringLength.load(std::memory_order_relaxed); double const now = TRI_microtime(); { READ_LOCKER(readLocker, _lock); // reserve the actually needed space result.reserve(_current.size()); for (auto const& it : _current) { Query const* query = it.second; TRI_ASSERT(query != nullptr); // 
elapsed time since query start double const elapsed = elapsedSince(query->startTime()); // we calculate the query start timestamp as the current time minus // the elapsed time since query start. this is not 100% accurrate, but // best effort, and saves us from bookkeeping the start timestamp of the // query inside the Query object. result.emplace_back(query->id(), query->vocbase().name(), query->user(), extractQueryString(*query, maxLength), _trackBindVars ? query->bindParameters() : nullptr, _trackDataSources ? query->collectionNames() : std::vector<std::string>(), now - elapsed /* start timestamp */, elapsed /* run time */, query->killed() ? QueryExecutionState::ValueType::KILLED : query->state(), query->queryOptions().stream, /*resultCode*/ std::nullopt /*not set yet*/); } } return result; } /// @brief get the list of slow queries std::vector<QueryEntryCopy> QueryList::listSlow() { std::vector<QueryEntryCopy> result; // reserve room for some queries outside of the lock already, // so we reduce the possibility of having to reserve more room // later result.reserve(16); { READ_LOCKER(readLocker, _lock); // reserve the actually needed space result.reserve(_slow.size()); for (auto const& it : _slow) { result.emplace_back(it); } } return result; } /// @brief clear the list of slow queries void QueryList::clearSlow() { WRITE_LOCKER(writeLocker, _lock); _slow.clear(); } size_t QueryList::count() { READ_LOCKER(writeLocker, _lock); return _current.size(); } std::string QueryList::extractQueryString(Query const& query, size_t maxLength) const { if (trackQueryString()) { return query.queryString().extract(maxLength); } return "<hidden>"; } void QueryList::killQuery(Query& query, size_t maxLength, bool silent) { std::string msg = "killing AQL query '" + extractQueryString(query, maxLength) + "', id: " + std::to_string(query.id()) + ", token: QRY" + std::to_string(query.id()); if (silent) { LOG_TOPIC("f7722", TRACE, arangodb::Logger::QUERIES) << msg; } else { 
LOG_TOPIC("90113", WARN, arangodb::Logger::QUERIES) << msg; } query.kill(); }
Simran-B/arangodb
arangod/Aql/QueryList.cpp
C++
apache-2.0
13,671
/* * Licensed to Metamarkets Group Inc. (Metamarkets) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. Metamarkets licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package io.druid.segment.data; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.io.Closeables; import com.google.common.primitives.Ints; import com.google.common.primitives.Shorts; import io.druid.collections.ResourceHolder; import io.druid.collections.StupidResourceHolder; import io.druid.java.util.common.IAE; import io.druid.java.util.common.guava.CloseQuietly; import io.druid.java.util.common.io.smoosh.SmooshedFileMapper; import io.druid.query.monomorphicprocessing.RuntimeShapeInspector; import io.druid.segment.CompressedPools; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.nio.IntBuffer; import java.nio.ShortBuffer; import java.nio.channels.WritableByteChannel; import java.util.Iterator; import java.util.List; public class CompressedVSizeIntsIndexedSupplier implements WritableSupplier<IndexedInts> { public static final byte VERSION = 0x2; private final int totalSize; private final int sizePer; private final int numBytes; private final int bigEndianShift; private final int littleEndianMask; private final GenericIndexed<ResourceHolder<ByteBuffer>> baseBuffers; 
private final CompressedObjectStrategy.CompressionStrategy compression; CompressedVSizeIntsIndexedSupplier( int totalSize, int sizePer, int numBytes, GenericIndexed<ResourceHolder<ByteBuffer>> baseBuffers, CompressedObjectStrategy.CompressionStrategy compression ) { Preconditions.checkArgument( sizePer == (1 << Integer.numberOfTrailingZeros(sizePer)), "Number of entries per chunk must be a power of 2" ); this.totalSize = totalSize; this.sizePer = sizePer; this.baseBuffers = baseBuffers; this.compression = compression; this.numBytes = numBytes; this.bigEndianShift = Integer.SIZE - (numBytes << 3); // numBytes * 8 this.littleEndianMask = (int) ((1L << (numBytes << 3)) - 1); // set numBytes * 8 lower bits to 1 } public static int maxIntsInBufferForBytes(int numBytes) { int maxSizePer = (CompressedPools.BUFFER_SIZE - bufferPadding(numBytes)) / numBytes; // round down to the nearest power of 2 return 1 << (Integer.SIZE - 1 - Integer.numberOfLeadingZeros(maxSizePer)); } public static int bufferPadding(int numBytes) { // when numBytes == 3 we need to pad the buffer to allow reading an extra byte // beyond the end of the last value, since we use buffer.getInt() to read values. // for numBytes 1, 2 we remove the need for padding by reading bytes or shorts directly. switch (numBytes) { case Shorts.BYTES: case 1: return 0; default: return Ints.BYTES - numBytes; } } public static int maxIntsInBufferForValue(int maxValue) { return maxIntsInBufferForBytes(VSizeIndexedInts.getNumBytesForMax(maxValue)); } public int size() { return totalSize; } @Override public IndexedInts get() { // optimized versions for int, short, and byte columns if (numBytes == Ints.BYTES) { return new CompressedFullSizeIndexedInts(); } else if (numBytes == Shorts.BYTES) { return new CompressedShortSizeIndexedInts(); } else if (numBytes == 1) { return new CompressedByteSizeIndexedInts(); } else { // default version of everything else, i.e. 
3-bytes per value return new CompressedVSizeIndexedInts(); } } @Override public long getSerializedSize() { return 1 + // version 1 + // numBytes Ints.BYTES + // totalSize Ints.BYTES + // sizePer 1 + // compression id baseBuffers.getSerializedSize(); // data } @Override public void writeToChannel(WritableByteChannel channel) throws IOException { channel.write(ByteBuffer.wrap(new byte[]{VERSION, (byte) numBytes})); channel.write(ByteBuffer.wrap(Ints.toByteArray(totalSize))); channel.write(ByteBuffer.wrap(Ints.toByteArray(sizePer))); channel.write(ByteBuffer.wrap(new byte[]{compression.getId()})); baseBuffers.writeToChannel(channel); } @VisibleForTesting GenericIndexed<ResourceHolder<ByteBuffer>> getBaseBuffers() { return baseBuffers; } public static CompressedVSizeIntsIndexedSupplier fromByteBuffer( ByteBuffer buffer, ByteOrder order, SmooshedFileMapper fileMapper ) { byte versionFromBuffer = buffer.get(); if (versionFromBuffer == VERSION) { final int numBytes = buffer.get(); final int totalSize = buffer.getInt(); final int sizePer = buffer.getInt(); final int chunkBytes = sizePer * numBytes + bufferPadding(numBytes); final CompressedObjectStrategy.CompressionStrategy compression = CompressedObjectStrategy.CompressionStrategy.forId( buffer.get() ); return new CompressedVSizeIntsIndexedSupplier( totalSize, sizePer, numBytes, GenericIndexed.read( buffer, CompressedByteBufferObjectStrategy.getBufferForOrder(order, compression, chunkBytes), fileMapper ), compression ); } throw new IAE("Unknown version[%s]", versionFromBuffer); } public static CompressedVSizeIntsIndexedSupplier fromList( final List<Integer> list, final int maxValue, final int chunkFactor, final ByteOrder byteOrder, CompressedObjectStrategy.CompressionStrategy compression ) { final int numBytes = VSizeIndexedInts.getNumBytesForMax(maxValue); final int chunkBytes = chunkFactor * numBytes + bufferPadding(numBytes); Preconditions.checkArgument( chunkFactor <= maxIntsInBufferForBytes(numBytes), "Chunks must be 
<= 64k bytes. chunkFactor was[%s]", chunkFactor ); return new CompressedVSizeIntsIndexedSupplier( list.size(), chunkFactor, numBytes, GenericIndexed.fromIterable( new Iterable<ResourceHolder<ByteBuffer>>() { @Override public Iterator<ResourceHolder<ByteBuffer>> iterator() { return new Iterator<ResourceHolder<ByteBuffer>>() { int position = 0; @Override public boolean hasNext() { return position < list.size(); } @Override public ResourceHolder<ByteBuffer> next() { ByteBuffer retVal = ByteBuffer .allocate(chunkBytes) .order(byteOrder); if (chunkFactor > list.size() - position) { retVal.limit((list.size() - position) * numBytes); } else { retVal.limit(chunkFactor * numBytes); } final List<Integer> ints = list.subList(position, position + retVal.remaining() / numBytes); final ByteBuffer buf = ByteBuffer .allocate(Ints.BYTES) .order(byteOrder); final boolean bigEndian = byteOrder.equals(ByteOrder.BIG_ENDIAN); for (int value : ints) { buf.putInt(0, value); if (bigEndian) { retVal.put(buf.array(), Ints.BYTES - numBytes, numBytes); } else { retVal.put(buf.array(), 0, numBytes); } } retVal.rewind(); position += retVal.remaining() / numBytes; return StupidResourceHolder.create(retVal); } @Override public void remove() { throw new UnsupportedOperationException(); } }; } }, CompressedByteBufferObjectStrategy.getBufferForOrder(byteOrder, compression, chunkBytes) ), compression ); } private class CompressedFullSizeIndexedInts extends CompressedVSizeIndexedInts { IntBuffer intBuffer; @Override protected void loadBuffer(int bufferNum) { super.loadBuffer(bufferNum); intBuffer = buffer.asIntBuffer(); } @Override protected int _get(int index) { return intBuffer.get(intBuffer.position() + index); } } private class CompressedShortSizeIndexedInts extends CompressedVSizeIndexedInts { ShortBuffer shortBuffer; @Override protected void loadBuffer(int bufferNum) { super.loadBuffer(bufferNum); shortBuffer = buffer.asShortBuffer(); } @Override protected int _get(int index) { // removes the 
need for padding return shortBuffer.get(shortBuffer.position() + index) & 0xFFFF; } } private class CompressedByteSizeIndexedInts extends CompressedVSizeIndexedInts { @Override protected int _get(int index) { // removes the need for padding return buffer.get(buffer.position() + index) & 0xFF; } } private class CompressedVSizeIndexedInts implements IndexedInts { final Indexed<ResourceHolder<ByteBuffer>> singleThreadedBuffers = baseBuffers.singleThreaded(); final int div = Integer.numberOfTrailingZeros(sizePer); final int rem = sizePer - 1; int currIndex = -1; ResourceHolder<ByteBuffer> holder; ByteBuffer buffer; boolean bigEndian; @Override public int size() { return totalSize; } /** * Returns the value at the given index into the column. * <p/> * Assumes the number of entries in each decompression buffers is a power of two. * * @param index index of the value in the column * * @return the value at the given index */ @Override public int get(int index) { // assumes the number of entries in each buffer is a power of 2 final int bufferNum = index >> div; if (bufferNum != currIndex) { loadBuffer(bufferNum); } return _get(index & rem); } /** * Returns the value at the given index in the current decompression buffer * * @param index index of the value in the current buffer * * @return the value at the given index */ protected int _get(final int index) { final int pos = buffer.position() + index * numBytes; // example for numBytes = 3 // big-endian: 0x000c0b0a stored 0c 0b 0a XX, read 0x0c0b0aXX >>> 8 // little-endian: 0x000c0b0a stored 0a 0b 0c XX, read 0xXX0c0b0a & 0x00FFFFFF return bigEndian ? 
buffer.getInt(pos) >>> bigEndianShift : buffer.getInt(pos) & littleEndianMask; } protected void loadBuffer(int bufferNum) { CloseQuietly.close(holder); holder = singleThreadedBuffers.get(bufferNum); buffer = holder.get(); currIndex = bufferNum; bigEndian = buffer.order().equals(ByteOrder.BIG_ENDIAN); } @Override public String toString() { return "CompressedVSizedIntsIndexedSupplier{" + "currIndex=" + currIndex + ", sizePer=" + sizePer + ", numChunks=" + singleThreadedBuffers.size() + ", totalSize=" + totalSize + '}'; } @Override public void close() throws IOException { Closeables.close(holder, false); } @Override public void inspectRuntimeShape(RuntimeShapeInspector inspector) { // ideally should inspect buffer and bigEndian, but at the moment of inspectRuntimeShape() call buffer is likely // to be null and bigEndian = false, because loadBuffer() is not yet called, although during the processing buffer // is not null, hence "visiting" null is not representative, and visiting bigEndian = false could be misleading. inspector.visit("singleThreadedBuffers", singleThreadedBuffers); } } }
andy256/druid
processing/src/main/java/io/druid/segment/data/CompressedVSizeIntsIndexedSupplier.java
Java
apache-2.0
13,000
/*! * ${copyright} */ sap.ui.define(['jquery.sap.global', './Matcher'], function ($, Matcher) { "use strict"; /** * BindingPath - checks if a control has a binding context with the exact same binding path. * * @class BindingPath - checks if a control has a binding context with the exact same binding path * @extends sap.ui.test.matchers.Matcher * @param {object} [mSettings] Map/JSON-object with initial settings for the new BindingPath. * @public * @name sap.ui.test.matchers.BindingPath * @author SAP SE * @since 1.32 */ return Matcher.extend("sap.ui.test.matchers.BindingPath", /** @lends sap.ui.test.matchers.BindingPath.prototype */ { metadata: { publicMethods: ["isMatching"], properties: { /** * The value of the binding path that is used for matching. */ path: { type: "string" }, /** * The name of the binding model that is used for matching. */ modelName: { type: "string" } } }, /** * Checks if the control has a binding context that matches the path * * @param {sap.ui.core.Control} oControl the control that is checked by the matcher * @return {boolean} true if the binding path has a strictly matching value. 
* @public */ isMatching: function (oControl) { var oBindingContext; // check if there is a binding path if (!this.getPath()) { throw new Error(this + " the path needs to be a not empty string"); } // check if there is a model name if (this.getModelName()) { oBindingContext = oControl.getBindingContext(this.getModelName()); } else { oBindingContext = oControl.getBindingContext(); } // check if there is a binding context if (!oBindingContext) { this._oLogger.debug("The control " + oControl + " has no binding context for the model " + this.getModelName()); return false; } // check if the binding context is correct var bResult = this.getPath() === oBindingContext.getPath(); if (!bResult) { this._oLogger.debug("The control " + oControl + " does not " + "have a matching binding context expected " + this.getPath() + " but got " + oBindingContext.getPath()); } return bResult; } }); }, /* bExport= */ true);
SQCLabs/openui5
src/sap.ui.core/src/sap/ui/test/matchers/BindingPath.js
JavaScript
apache-2.0
2,247
/* * Copyright 2016 The BigDL Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intel.analytics.bigdl.dllib.nn import com.intel.analytics.bigdl.dllib.tensor.Tensor import com.intel.analytics.bigdl.dllib.utils.serializer.ModuleSerializationTest import scala.util.Random class MinSerialTest extends ModuleSerializationTest { override def test(): Unit = { val min = Min[Float](2).setName("min") val input = Tensor[Float](5, 5).apply1(_ => Random.nextFloat()) runSerializationTest(min, input) } }
intel-analytics/BigDL
scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/nn/MinSerialTest.scala
Scala
apache-2.0
1,047
/* * Copyright 2000-2014 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jetbrains.plugins.gradle.service.project; import com.intellij.execution.configurations.ParametersList; import com.intellij.externalSystem.JavaProjectData; import com.intellij.openapi.diagnostic.Logger; import com.intellij.openapi.externalSystem.model.DataNode; import com.intellij.openapi.externalSystem.model.ExternalSystemException; import com.intellij.openapi.externalSystem.model.ProjectKeys; import com.intellij.openapi.externalSystem.model.project.*; import com.intellij.openapi.externalSystem.model.task.ExternalSystemTaskId; import com.intellij.openapi.externalSystem.model.task.ExternalSystemTaskNotificationListener; import com.intellij.openapi.externalSystem.service.project.ExternalSystemProjectResolver; import com.intellij.openapi.externalSystem.util.ExternalSystemApiUtil; import com.intellij.openapi.externalSystem.util.ExternalSystemDebugEnvironment; import com.intellij.openapi.module.StdModuleTypes; import com.intellij.openapi.util.Factory; import com.intellij.openapi.util.Key; import com.intellij.openapi.util.KeyValue; import com.intellij.openapi.util.Pair; import com.intellij.openapi.util.io.FileUtil; import com.intellij.openapi.util.text.StringUtil; import com.intellij.util.ArrayUtil; import com.intellij.util.BooleanFunction; import com.intellij.util.Function; import com.intellij.util.containers.ContainerUtil; import com.intellij.util.containers.ContainerUtilRt; import 
com.intellij.util.containers.MultiMap; import org.gradle.tooling.*; import org.gradle.tooling.model.DomainObjectSet; import org.gradle.tooling.model.build.BuildEnvironment; import org.gradle.tooling.model.idea.BasicIdeaProject; import org.gradle.tooling.model.idea.IdeaModule; import org.gradle.tooling.model.idea.IdeaProject; import org.gradle.util.GradleVersion; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import org.jetbrains.plugins.gradle.model.*; import org.jetbrains.plugins.gradle.model.data.GradleSourceSetData; import org.jetbrains.plugins.gradle.remote.impl.GradleLibraryNamesMixer; import org.jetbrains.plugins.gradle.service.execution.UnsupportedCancellationToken; import org.jetbrains.plugins.gradle.settings.ClassHolder; import org.jetbrains.plugins.gradle.settings.GradleExecutionSettings; import org.jetbrains.plugins.gradle.util.GradleConstants; import java.io.File; import java.util.*; import static org.jetbrains.plugins.gradle.service.project.GradleProjectResolverUtil.attachGradleSdkSources; import static org.jetbrains.plugins.gradle.service.project.GradleProjectResolverUtil.getModuleId; /** * @author Denis Zhdanov, Vladislav Soroka * @since 8/8/11 11:09 AM */ public class GradleProjectResolver implements ExternalSystemProjectResolver<GradleExecutionSettings> { private static final Logger LOG = Logger.getInstance("#" + GradleProjectResolver.class.getName()); @NotNull private final GradleExecutionHelper myHelper; private final GradleLibraryNamesMixer myLibraryNamesMixer = new GradleLibraryNamesMixer(); private final MultiMap<ExternalSystemTaskId, CancellationTokenSource> myCancellationMap = MultiMap.create(); public static final Key<Map<String/* module id */, Pair<DataNode<GradleSourceSetData>, ExternalSourceSet>>> RESOLVED_SOURCE_SETS = Key.create("resolvedSourceSets"); public static final Key<Map<String/* output path */, Pair<String /* module id*/, ExternalSystemSourceType>>> MODULES_OUTPUTS = 
Key.create("moduleOutputsMap"); public static final Key<Map<String/* artifact path */, String /* module id*/>> CONFIGURATION_ARTIFACTS = Key.create("gradleArtifactsMap"); // This constructor is called by external system API, see AbstractExternalSystemFacadeImpl class constructor. @SuppressWarnings("UnusedDeclaration") public GradleProjectResolver() { this(new GradleExecutionHelper()); } public GradleProjectResolver(@NotNull GradleExecutionHelper helper) { myHelper = helper; } @Nullable @Override public DataNode<ProjectData> resolveProjectInfo(@NotNull final ExternalSystemTaskId id, @NotNull final String projectPath, final boolean isPreviewMode, @Nullable final GradleExecutionSettings settings, @NotNull final ExternalSystemTaskNotificationListener listener) throws ExternalSystemException, IllegalArgumentException, IllegalStateException { if (settings != null) { myHelper.ensureInstalledWrapper(id, projectPath, settings, listener); } final GradleProjectResolverExtension projectResolverChain = createProjectResolverChain(settings); final DataNode<ProjectData> resultProjectDataNode = myHelper.execute( projectPath, settings, new ProjectConnectionDataNodeFunction( id, projectPath, settings, listener, isPreviewMode, projectResolverChain, false) ); // auto-discover buildSrc project if needed final String buildSrcProjectPath = projectPath + "/buildSrc"; handleBuildSrcProject( resultProjectDataNode, new ProjectConnectionDataNodeFunction(id, buildSrcProjectPath, settings, listener, isPreviewMode, projectResolverChain, true) ); return resultProjectDataNode; } @Override public boolean cancelTask(@NotNull ExternalSystemTaskId id, @NotNull ExternalSystemTaskNotificationListener listener) { synchronized (myCancellationMap) { for (CancellationTokenSource cancellationTokenSource : myCancellationMap.get(id)) { cancellationTokenSource.cancel(); } } return true; } @NotNull private DataNode<ProjectData> doResolveProjectInfo(@NotNull final ProjectResolverContext resolverCtx, @NotNull final 
GradleProjectResolverExtension projectResolverChain, boolean isBuildSrcProject) throws IllegalArgumentException, IllegalStateException { final ProjectImportAction projectImportAction = new ProjectImportAction(resolverCtx.isPreviewMode()); final List<KeyValue<String, String>> extraJvmArgs = new ArrayList<KeyValue<String, String>>(); final List<String> commandLineArgs = ContainerUtil.newArrayList(); final Set<Class> toolingExtensionClasses = ContainerUtil.newHashSet(); if(resolverCtx.isPreviewMode()){ commandLineArgs.add("-Didea.isPreviewMode=true"); final Set<Class> previewLightWeightToolingModels = ContainerUtil.<Class>set(ExternalProjectPreview.class); projectImportAction.addExtraProjectModelClasses(previewLightWeightToolingModels); } final GradleImportCustomizer importCustomizer = GradleImportCustomizer.get(); for (GradleProjectResolverExtension resolverExtension = projectResolverChain; resolverExtension != null; resolverExtension = resolverExtension.getNext()) { // inject ProjectResolverContext into gradle project resolver extensions resolverExtension.setProjectResolverContext(resolverCtx); // pre-import checks resolverExtension.preImportCheck(); if(!resolverCtx.isPreviewMode()){ // register classes of extra gradle project models required for extensions (e.g. 
com.android.builder.model.AndroidProject) projectImportAction.addExtraProjectModelClasses(resolverExtension.getExtraProjectModelClasses()); } if (importCustomizer == null || importCustomizer.useExtraJvmArgs()) { // collect extra JVM arguments provided by gradle project resolver extensions extraJvmArgs.addAll(resolverExtension.getExtraJvmArgs()); } // collect extra command-line arguments commandLineArgs.addAll(resolverExtension.getExtraCommandLineArgs()); // collect tooling extensions classes toolingExtensionClasses.addAll(resolverExtension.getToolingExtensionsClasses()); } final ParametersList parametersList = new ParametersList(); for (KeyValue<String, String> jvmArg : extraJvmArgs) { parametersList.addProperty(jvmArg.getKey(), jvmArg.getValue()); } final BuildEnvironment buildEnvironment = GradleExecutionHelper.getBuildEnvironment(resolverCtx.getConnection()); GradleVersion gradleVersion = null; if (buildEnvironment != null) { gradleVersion = GradleVersion.version(buildEnvironment.getGradle().getGradleVersion()); } BuildActionExecuter<ProjectImportAction.AllModels> buildActionExecutor = resolverCtx.getConnection().action(projectImportAction); File initScript = GradleExecutionHelper.generateInitScript(isBuildSrcProject, toolingExtensionClasses); if (initScript != null) { ContainerUtil.addAll(commandLineArgs, GradleConstants.INIT_SCRIPT_CMD_OPTION, initScript.getAbsolutePath()); } GradleExecutionHelper.prepare( buildActionExecutor, resolverCtx.getExternalSystemTaskId(), resolverCtx.getSettings(), resolverCtx.getListener(), parametersList.getParameters(), commandLineArgs, resolverCtx.getConnection()); resolverCtx.checkCancelled(); ProjectImportAction.AllModels allModels; final CancellationTokenSource cancellationTokenSource = GradleConnector.newCancellationTokenSource(); try { buildActionExecutor.withCancellationToken(cancellationTokenSource.token()); synchronized (myCancellationMap) { myCancellationMap.putValue(resolverCtx.getExternalSystemTaskId(), 
cancellationTokenSource); if (gradleVersion != null && gradleVersion.compareTo(GradleVersion.version("2.1")) < 0) { myCancellationMap.putValue(resolverCtx.getExternalSystemTaskId(), new UnsupportedCancellationToken()); } } allModels = buildActionExecutor.run(); if (allModels == null) { throw new IllegalStateException("Unable to get project model for the project: " + resolverCtx.getProjectPath()); } } catch (UnsupportedVersionException unsupportedVersionException) { resolverCtx.checkCancelled(); // Old gradle distribution version used (before ver. 1.8) // fallback to use ModelBuilder gradle tooling API Class<? extends IdeaProject> aClass = resolverCtx.isPreviewMode() ? BasicIdeaProject.class : IdeaProject.class; ModelBuilder<? extends IdeaProject> modelBuilder = myHelper.getModelBuilder( aClass, resolverCtx.getExternalSystemTaskId(), resolverCtx.getSettings(), resolverCtx.getConnection(), resolverCtx.getListener(), parametersList.getParameters()); final IdeaProject ideaProject = modelBuilder.get(); allModels = new ProjectImportAction.AllModels(ideaProject); } finally { synchronized (myCancellationMap) { myCancellationMap.remove(resolverCtx.getExternalSystemTaskId(), cancellationTokenSource); } } resolverCtx.checkCancelled(); allModels.setBuildEnvironment(buildEnvironment); extractExternalProjectModels(allModels, resolverCtx.isPreviewMode()); resolverCtx.setModels(allModels); // import project data ProjectData projectData = projectResolverChain.createProject(); DataNode<ProjectData> projectDataNode = new DataNode<ProjectData>(ProjectKeys.PROJECT, projectData, null); // import java project data JavaProjectData javaProjectData = projectResolverChain.createJavaProjectData(); projectDataNode.createChild(JavaProjectData.KEY, javaProjectData); IdeaProject ideaProject = resolverCtx.getModels().getIdeaProject(); projectResolverChain.populateProjectExtraModels(ideaProject, projectDataNode); DomainObjectSet<? 
extends IdeaModule> gradleModules = ideaProject.getModules(); if (gradleModules == null || gradleModules.isEmpty()) { throw new IllegalStateException("No modules found for the target project: " + ideaProject); } final Map<String /* module id */, Pair<DataNode<ModuleData>, IdeaModule>> moduleMap = ContainerUtilRt.newHashMap(); final Map<String /* module id */, Pair<DataNode<GradleSourceSetData>, ExternalSourceSet>> sourceSetsMap = ContainerUtil.newHashMap(); projectDataNode.putUserData(RESOLVED_SOURCE_SETS, sourceSetsMap); final Map<String/* output path */, Pair<String /* module id*/, ExternalSystemSourceType>> moduleOutputsMap = ContainerUtil.newTroveMap(FileUtil.PATH_HASHING_STRATEGY); projectDataNode.putUserData(MODULES_OUTPUTS, moduleOutputsMap); final Map<String/* artifact path */, String /* module id*/> artifactsMap = ContainerUtil.newTroveMap(FileUtil.PATH_HASHING_STRATEGY); projectDataNode.putUserData(CONFIGURATION_ARTIFACTS, artifactsMap); // import modules data for (IdeaModule gradleModule : gradleModules) { if (gradleModule == null) { continue; } resolverCtx.checkCancelled(); if (ExternalSystemDebugEnvironment.DEBUG_ORPHAN_MODULES_PROCESSING) { LOG.info(String.format("Importing module data: %s", gradleModule)); } final String moduleName = gradleModule.getName(); if (moduleName == null) { throw new IllegalStateException("Module with undefined name detected: " + gradleModule); } DataNode<ModuleData> moduleDataNode = projectResolverChain.createModule(gradleModule, projectDataNode); String mainModuleId = getModuleId(gradleModule); moduleMap.put(mainModuleId, Pair.create(moduleDataNode, gradleModule)); } File gradleHomeDir = null; // populate modules nodes for (final Pair<DataNode<ModuleData>, IdeaModule> pair : moduleMap.values()) { final DataNode<ModuleData> moduleDataNode = pair.first; final IdeaModule ideaModule = pair.second; if (gradleHomeDir == null) { final BuildScriptClasspathModel buildScriptClasspathModel = resolverCtx.getExtraProject(ideaModule, 
BuildScriptClasspathModel.class); if (buildScriptClasspathModel != null) { gradleHomeDir = buildScriptClasspathModel.getGradleHomeDir(); } } projectResolverChain.populateModuleContentRoots(ideaModule, moduleDataNode); projectResolverChain.populateModuleCompileOutputSettings(ideaModule, moduleDataNode); if (!isBuildSrcProject) { projectResolverChain.populateModuleTasks(ideaModule, moduleDataNode, projectDataNode); } final List<DataNode<? extends ModuleData>> modules = ContainerUtil.newSmartList(); modules.add(moduleDataNode); modules.addAll(ExternalSystemApiUtil.findAll(moduleDataNode, GradleSourceSetData.KEY)); final ExternalSystemSourceType[] sourceTypes = new ExternalSystemSourceType[]{ ExternalSystemSourceType.SOURCE, ExternalSystemSourceType.RESOURCE, ExternalSystemSourceType.TEST, ExternalSystemSourceType.TEST_RESOURCE }; for (DataNode<? extends ModuleData> module : modules) { final ModuleData moduleData = module.getData(); for (ExternalSystemSourceType sourceType : sourceTypes) { final String path = moduleData.getCompileOutputPath(sourceType); if (path != null) { moduleOutputsMap.put(path, Pair.create(moduleData.getId(), sourceType)); } } if (moduleData instanceof GradleSourceSetData) { for (File artifactFile : moduleData.getArtifacts()) { artifactsMap.put(ExternalSystemApiUtil.toCanonicalPath(artifactFile.getAbsolutePath()), moduleData.getId()); } } } } for (final Pair<DataNode<ModuleData>, IdeaModule> pair : moduleMap.values()) { final DataNode<ModuleData> moduleDataNode = pair.first; final IdeaModule ideaModule = pair.second; projectResolverChain.populateModuleDependencies(ideaModule, moduleDataNode, projectDataNode); projectResolverChain.populateModuleExtraModels(ideaModule, moduleDataNode); } mergeSourceSetContentRoots(moduleMap, resolverCtx); mergeLibraryAndModuleDependencyData(projectDataNode, gradleHomeDir, gradleVersion); // ensure unique library names Collection<DataNode<LibraryData>> libraries = ExternalSystemApiUtil.getChildren(projectDataNode, 
ProjectKeys.LIBRARY); myLibraryNamesMixer.mixNames(libraries); return projectDataNode; } private static void mergeLibraryAndModuleDependencyData(DataNode<ProjectData> projectDataNode, @Nullable File gradleHomeDir, @Nullable GradleVersion gradleVersion) { final Map<String, Pair<DataNode<GradleSourceSetData>, ExternalSourceSet>> sourceSetMap = projectDataNode.getUserData(RESOLVED_SOURCE_SETS); assert sourceSetMap != null; final Map<String, Pair<String, ExternalSystemSourceType>> moduleOutputsMap = projectDataNode.getUserData(MODULES_OUTPUTS); assert moduleOutputsMap != null; final Map<String, String> artifactsMap = projectDataNode.getUserData(CONFIGURATION_ARTIFACTS); assert artifactsMap != null; final Collection<DataNode<LibraryDependencyData>> libraryDependencies = ExternalSystemApiUtil.findAllRecursively(projectDataNode, ProjectKeys.LIBRARY_DEPENDENCY); for (DataNode<LibraryDependencyData> libraryDependencyDataNode : libraryDependencies) { if (!libraryDependencyDataNode.getChildren().isEmpty()) continue; final DataNode<?> libraryNodeParent = libraryDependencyDataNode.getParent(); if (libraryNodeParent == null) continue; final LibraryDependencyData libraryDependencyData = libraryDependencyDataNode.getData(); final LibraryData libraryData = libraryDependencyData.getTarget(); final Set<String> libraryPaths = libraryData.getPaths(LibraryPathType.BINARY); if (libraryPaths.isEmpty()) continue; if(StringUtil.isNotEmpty(libraryData.getExternalName())) continue; final LinkedList<String> unprocessedPaths = ContainerUtil.newLinkedList(libraryPaths); while (!unprocessedPaths.isEmpty()) { final String path = unprocessedPaths.remove(); Set<String> targetModuleOutputPaths = null; final String moduleId; final Pair<String, ExternalSystemSourceType> sourceTypePair = moduleOutputsMap.get(path); if (sourceTypePair == null) { moduleId = artifactsMap.get(path); if (moduleId != null) { targetModuleOutputPaths = ContainerUtil.set(path); } } else { moduleId = sourceTypePair.first; } if 
(moduleId == null) continue; final Pair<DataNode<GradleSourceSetData>, ExternalSourceSet> pair = sourceSetMap.get(moduleId); if (pair == null) { continue; } final ModuleData moduleData = pair.first.getData(); if (targetModuleOutputPaths == null) { final Set<String> compileSet = ContainerUtil.newHashSet(); ContainerUtil.addAllNotNull(compileSet, moduleData.getCompileOutputPath(ExternalSystemSourceType.SOURCE), moduleData.getCompileOutputPath(ExternalSystemSourceType.RESOURCE)); if (!compileSet.isEmpty() && libraryPaths.containsAll(compileSet)) { targetModuleOutputPaths = compileSet; } else { final Set<String> testSet = ContainerUtil.newHashSet(); ContainerUtil.addAllNotNull(testSet, moduleData.getCompileOutputPath(ExternalSystemSourceType.TEST), moduleData.getCompileOutputPath(ExternalSystemSourceType.TEST_RESOURCE)); if (compileSet.isEmpty() && libraryPaths.containsAll(testSet)) { targetModuleOutputPaths = testSet; } } } final ModuleData ownerModule = libraryDependencyData.getOwnerModule(); final ModuleDependencyData moduleDependencyData = new ModuleDependencyData(ownerModule, moduleData); moduleDependencyData.setScope(libraryDependencyData.getScope()); if ("test".equals(pair.second.getName())) { moduleDependencyData.setProductionOnTestDependency(true); } final DataNode<ModuleDependencyData> found = ExternalSystemApiUtil.find( libraryNodeParent, ProjectKeys.MODULE_DEPENDENCY, new BooleanFunction<DataNode<ModuleDependencyData>>() { @Override public boolean fun(DataNode<ModuleDependencyData> node) { return moduleDependencyData.equals(node.getData()); } }); if (targetModuleOutputPaths != null) { if (found == null) { libraryNodeParent.createChild(ProjectKeys.MODULE_DEPENDENCY, moduleDependencyData); } libraryPaths.removeAll(targetModuleOutputPaths); unprocessedPaths.removeAll(targetModuleOutputPaths); if (libraryPaths.isEmpty()) { libraryDependencyDataNode.clear(true); break; } continue; } else { // do not add the path as library dependency if another module dependency 
is already contain the path as one of its output paths if (found != null) { libraryPaths.remove(path); if (libraryPaths.isEmpty()) { libraryDependencyDataNode.clear(true); break; } continue; } } final ExternalSourceDirectorySet directorySet = pair.second.getSources().get(sourceTypePair.second); if (directorySet != null) { for (File file : directorySet.getSrcDirs()) { libraryData.addPath(LibraryPathType.SOURCE, file.getAbsolutePath()); } } } if (libraryDependencyDataNode.getParent() != null) { if (libraryPaths.size() > 1) { List<String> toRemove = ContainerUtil.newSmartList(); for (String path : libraryPaths) { final File binaryPath = new File(path); if (binaryPath.isFile()) { final LibraryData extractedLibrary = new LibraryData(libraryDependencyData.getOwner(), ""); extractedLibrary.addPath(LibraryPathType.BINARY, path); if (gradleHomeDir != null && gradleVersion != null) { attachGradleSdkSources(binaryPath, extractedLibrary, gradleHomeDir, gradleVersion); } LibraryDependencyData extractedDependencyData = new LibraryDependencyData( libraryDependencyData.getOwnerModule(), extractedLibrary, LibraryLevel.MODULE); libraryDependencyDataNode.getParent().createChild(ProjectKeys.LIBRARY_DEPENDENCY, extractedDependencyData); toRemove.add(path); } } libraryPaths.removeAll(toRemove); if (libraryPaths.isEmpty()) { libraryDependencyDataNode.clear(true); } } } } } private static Map<String, ExternalProject> extractExternalProjectModels(ProjectImportAction.AllModels models, boolean isPreview) { final ExternalProject externalRootProject = isPreview ? 
models.getExtraProject(null, ExternalProjectPreview.class) : models.getExtraProject(null, ExternalProject.class); if (externalRootProject == null) return Collections.emptyMap(); final DefaultExternalProject wrappedExternalRootProject = new DefaultExternalProject(externalRootProject); models.addExtraProject(wrappedExternalRootProject, ExternalProject.class); final Map<String, ExternalProject> externalProjectsMap = createExternalProjectsMap(wrappedExternalRootProject); DomainObjectSet<? extends IdeaModule> gradleModules = models.getIdeaProject().getModules(); if (gradleModules != null && !gradleModules.isEmpty()) { for (IdeaModule ideaModule : gradleModules) { final ExternalProject externalProject = externalProjectsMap.get(getModuleId(ideaModule)); if (externalProject != null) { models.addExtraProject(externalProject, ExternalProject.class, ideaModule); } } } return externalProjectsMap; } private static Map<String, ExternalProject> createExternalProjectsMap(@Nullable final ExternalProject rootExternalProject) { final Map<String, ExternalProject> externalProjectMap = ContainerUtilRt.newHashMap(); if (rootExternalProject == null) return externalProjectMap; Queue<ExternalProject> queue = new LinkedList<ExternalProject>(); queue.add(rootExternalProject); while (!queue.isEmpty()) { ExternalProject externalProject = queue.remove(); queue.addAll(externalProject.getChildProjects().values()); final String moduleName = externalProject.getName(); final String qName = externalProject.getQName(); String moduleId = StringUtil.isEmpty(qName) || ":".equals(qName) ? 
moduleName : qName; externalProjectMap.put(moduleId, externalProject); } return externalProjectMap; } private static void mergeSourceSetContentRoots(@NotNull Map<String, Pair<DataNode<ModuleData>, IdeaModule>> moduleMap, @NotNull ProjectResolverContext resolverCtx) { class Counter { int count; void increment() { count++; } } final Factory<Counter> counterFactory = new Factory<Counter>() { @Override public Counter create() { return new Counter(); } }; final Map<String, Counter> weightMap = ContainerUtil.newHashMap(); for (final Pair<DataNode<ModuleData>, IdeaModule> pair : moduleMap.values()) { final DataNode<ModuleData> moduleNode = pair.first; for (DataNode<ContentRootData> contentRootNode : ExternalSystemApiUtil.findAll(moduleNode, ProjectKeys.CONTENT_ROOT)) { File file = new File(contentRootNode.getData().getRootPath()); while (file != null) { ContainerUtil.getOrCreate(weightMap, file.getPath(), counterFactory).increment(); file = file.getParentFile(); } } for (DataNode<GradleSourceSetData> sourceSetNode : ExternalSystemApiUtil.findAll(moduleNode, GradleSourceSetData.KEY)) { final Set<String> set = ContainerUtil.newHashSet(); for (DataNode<ContentRootData> contentRootNode : ExternalSystemApiUtil.findAll(sourceSetNode, ProjectKeys.CONTENT_ROOT)) { File file = new File(contentRootNode.getData().getRootPath()); while (file != null) { set.add(file.getPath()); file = file.getParentFile(); } } for (String path : set) { ContainerUtil.getOrCreate(weightMap, path, counterFactory).increment(); } } } for (final Pair<DataNode<ModuleData>, IdeaModule> pair : moduleMap.values()) { final DataNode<ModuleData> moduleNode = pair.first; final ExternalProject externalProject = resolverCtx.getExtraProject(pair.second, ExternalProject.class); if (externalProject == null) continue; final File buildDir = externalProject.getBuildDir(); for (DataNode<GradleSourceSetData> sourceSetNode : ExternalSystemApiUtil.findAll(moduleNode, GradleSourceSetData.KEY)) { final Map<String, 
DataNode<ContentRootData>> sourceSetRoots = ContainerUtil.newLinkedHashMap(); for (DataNode<ContentRootData> contentRootNode : ExternalSystemApiUtil.findAll(sourceSetNode, ProjectKeys.CONTENT_ROOT)) { File root = new File(contentRootNode.getData().getRootPath()); if (FileUtil.isAncestor(buildDir, root, true)) continue; while (weightMap.containsKey(root.getParent()) && weightMap.get(root.getParent()).count <= 1) { root = root.getParentFile(); } DataNode<ContentRootData> mergedContentRootNode = sourceSetRoots.get(root.getPath()); if (mergedContentRootNode == null) { ContentRootData mergedContentRoot = new ContentRootData(GradleConstants.SYSTEM_ID, root.getAbsolutePath()); mergedContentRootNode = sourceSetNode.createChild(ProjectKeys.CONTENT_ROOT, mergedContentRoot); sourceSetRoots.put(root.getPath(), mergedContentRootNode); } for (ExternalSystemSourceType sourceType : ExternalSystemSourceType.values()) { for (ContentRootData.SourceRoot sourceRoot : contentRootNode.getData().getPaths(sourceType)) { mergedContentRootNode.getData().storePath(sourceType, sourceRoot.getPath(), sourceRoot.getPackagePrefix()); } } contentRootNode.clear(true); } } } } private void handleBuildSrcProject(@NotNull final DataNode<ProjectData> resultProjectDataNode, @NotNull final ProjectConnectionDataNodeFunction projectConnectionDataNodeFunction) { if (!new File(projectConnectionDataNodeFunction.myProjectPath).isDirectory()) { return; } if (projectConnectionDataNodeFunction.myIsPreviewMode) { ModuleData buildSrcModuleData = new ModuleData(":buildSrc", GradleConstants.SYSTEM_ID, StdModuleTypes.JAVA.getId(), "buildSrc", projectConnectionDataNodeFunction.myProjectPath, projectConnectionDataNodeFunction.myProjectPath); resultProjectDataNode.createChild(ProjectKeys.MODULE, buildSrcModuleData); return; } final DataNode<ModuleData> buildSrcModuleDataNode = GradleProjectResolverUtil.findModule(resultProjectDataNode, projectConnectionDataNodeFunction.myProjectPath); // check if buildSrc project was 
already exposed in settings.gradle file if (buildSrcModuleDataNode != null) return; final DataNode<ProjectData> buildSrcProjectDataDataNode = myHelper.execute( projectConnectionDataNodeFunction.myProjectPath, projectConnectionDataNodeFunction.mySettings, projectConnectionDataNodeFunction); if (buildSrcProjectDataDataNode != null) { for (DataNode<ModuleData> moduleNode : ExternalSystemApiUtil.getChildren(buildSrcProjectDataDataNode, ProjectKeys.MODULE)) { resultProjectDataNode.addChild(moduleNode); // adjust ide module group final ModuleData moduleData = moduleNode.getData(); if (moduleData.getIdeModuleGroup() != null) { String[] moduleGroup = ArrayUtil.prepend(resultProjectDataNode.getData().getInternalName(), moduleData.getIdeModuleGroup()); moduleData.setIdeModuleGroup(moduleGroup); for (DataNode<GradleSourceSetData> sourceSetNode : ExternalSystemApiUtil.getChildren(moduleNode, GradleSourceSetData.KEY)) { sourceSetNode.getData().setIdeModuleGroup(moduleGroup); } } } } } private class ProjectConnectionDataNodeFunction implements Function<ProjectConnection, DataNode<ProjectData>> { @NotNull private final ExternalSystemTaskId myId; @NotNull private final String myProjectPath; @Nullable private final GradleExecutionSettings mySettings; @NotNull private final ExternalSystemTaskNotificationListener myListener; private final boolean myIsPreviewMode; @NotNull private final GradleProjectResolverExtension myProjectResolverChain; private final boolean myIsBuildSrcProject; public ProjectConnectionDataNodeFunction(@NotNull ExternalSystemTaskId id, @NotNull String projectPath, @Nullable GradleExecutionSettings settings, @NotNull ExternalSystemTaskNotificationListener listener, boolean isPreviewMode, @NotNull GradleProjectResolverExtension projectResolverChain, boolean isBuildSrcProject) { myId = id; myProjectPath = projectPath; mySettings = settings; myListener = listener; myIsPreviewMode = isPreviewMode; myProjectResolverChain = projectResolverChain; myIsBuildSrcProject = 
isBuildSrcProject; } @Override public DataNode<ProjectData> fun(ProjectConnection connection) { try { return doResolveProjectInfo( new ProjectResolverContext(myId, myProjectPath, mySettings, connection, myListener, myIsPreviewMode), myProjectResolverChain, myIsBuildSrcProject); } catch (RuntimeException e) { LOG.info("Gradle project resolve error", e); throw myProjectResolverChain.getUserFriendlyError(e, myProjectPath, null); } } } @NotNull public static GradleProjectResolverExtension createProjectResolverChain(@Nullable final GradleExecutionSettings settings) { GradleProjectResolverExtension projectResolverChain; if (settings != null) { List<ClassHolder<? extends GradleProjectResolverExtension>> extensionClasses = settings.getResolverExtensions(); Deque<GradleProjectResolverExtension> extensions = new ArrayDeque<GradleProjectResolverExtension>(); for (ClassHolder<? extends GradleProjectResolverExtension> holder : extensionClasses) { final GradleProjectResolverExtension extension; try { extension = holder.getTargetClass().newInstance(); } catch (Throwable e) { throw new IllegalArgumentException( String.format("Can't instantiate project resolve extension for class '%s'", holder.getTargetClassName()), e); } final GradleProjectResolverExtension previous = extensions.peekLast(); if (previous != null) { previous.setNext(extension); if (previous.getNext() != extension) { throw new AssertionError("Illegal next resolver got, current resolver class is " + previous.getClass().getName()); } } extensions.add(extension); } projectResolverChain = extensions.peekFirst(); GradleProjectResolverExtension resolverExtension = projectResolverChain; assert resolverExtension != null; while (resolverExtension.getNext() != null) { resolverExtension = resolverExtension.getNext(); } if (!(resolverExtension instanceof BaseGradleProjectResolverExtension)) { throw new AssertionError("Illegal last resolver got of class " + resolverExtension.getClass().getName()); } } else { projectResolverChain 
= new BaseGradleProjectResolverExtension(); } return projectResolverChain; } }
MichaelNedzelsky/intellij-community
plugins/gradle/src/org/jetbrains/plugins/gradle/service/project/GradleProjectResolver.java
Java
apache-2.0
35,093
// Copyright (C) 2020 T. Zachary Laine
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_TEXT_DETAIL_UNPACK_HPP
#define BOOST_TEXT_DETAIL_UNPACK_HPP

#include <boost/text/transcode_iterator.hpp>


namespace boost { namespace text { namespace detail {

    // Tag types recording which UTF encoding an unpacked range traverses, so
    // callers can dispatch on the underlying encoding at compile time.
    struct utf8_tag
    {};
    struct utf16_tag
    {};
    struct utf32_tag
    {};

    // Result of unpacking: the underlying iterator/sentinel pair plus one of
    // the encoding tags above.
    template<typename Tag, typename Iter, typename Sentinel = Iter>
    struct tagged_range
    {
        Iter f_;     // first
        Sentinel l_; // last
        Tag tag_;
    };

    // Dispatcher selected by the code-unit category of Iter (the three
    // is_*_iter traits come from transcode_iterator.hpp).  Exactly one flag
    // is expected to be true; the matching partial specialization below is
    // then chosen.  This primary template intentionally has no call(), so
    // unpacking an iterator of unknown code-unit type fails to compile.
    template<
        typename Iter,
        typename Sentinel,
        bool UTF8 = is_char_iter<Iter>::value,
        bool UTF16 = is_16_iter<Iter>::value,
        bool UTF32 = is_cp_iter<Iter>::value>
    struct unpack_iterator_and_sentinel_impl
    {
    };

    // Iterator over 8-bit code units -> tagged UTF-8.
    template<typename Iter, typename Sentinel>
    struct unpack_iterator_and_sentinel_impl<Iter, Sentinel, true, false, false>
    {
        static constexpr auto call(Iter first, Sentinel last) noexcept
        {
            return tagged_range<utf8_tag, Iter, Sentinel>{first, last};
        }
    };
    // Iterator over 16-bit code units -> tagged UTF-16.
    template<typename Iter, typename Sentinel>
    struct unpack_iterator_and_sentinel_impl<Iter, Sentinel, false, true, false>
    {
        static constexpr auto call(Iter first, Sentinel last) noexcept
        {
            return tagged_range<utf16_tag, Iter, Sentinel>{first, last};
        }
    };
    // Iterator over code points -> tagged UTF-32.
    template<typename Iter, typename Sentinel>
    struct unpack_iterator_and_sentinel_impl<Iter, Sentinel, false, false, true>
    {
        static constexpr auto call(Iter first, Sentinel last) noexcept
        {
            return tagged_range<utf32_tag, Iter, Sentinel>{first, last};
        }
    };

    // Generic entry point: strips cv-qualifiers and forwards to the impl
    // dispatcher above.  The trailing-return decltype makes this overload
    // SFINAE-friendly: it only participates when the impl has a call().
    template<typename Iter, typename Sentinel>
    constexpr auto unpack_iterator_and_sentinel(Iter first, Sentinel last) noexcept
        -> decltype(unpack_iterator_and_sentinel_impl<
                    std::remove_cv_t<Iter>,
                    std::remove_cv_t<Sentinel>>::call(first, last))
    {
        using iterator = std::remove_cv_t<Iter>;
        using sentinel = std::remove_cv_t<Sentinel>;
        return detail::unpack_iterator_and_sentinel_impl<iterator, sentinel>::
            call(first, last);
    }

    // Forward declarations of the transcoding-iterator overloads defined
    // below.  Each overload unwraps one utf_X_to_Y_iterator layer via
    // .base() and recurses; the forward declarations are required because
    // the layer underneath may be a different transcoding iterator whose
    // overload appears later in this file.  For every conversion there are
    // two overloads: one taking a matching iterator pair, and one taking an
    // iterator plus a raw Sentinel.

    // 8 -> 32
    template<typename Iter>
    constexpr auto unpack_iterator_and_sentinel(
        utf_8_to_32_iterator<Iter> first,
        utf_8_to_32_iterator<Iter> last) noexcept;
    template<typename Iter, typename Sentinel>
    constexpr auto unpack_iterator_and_sentinel(
        utf_8_to_32_iterator<Iter, Sentinel> first, Sentinel last) noexcept;
    // 32 -> 8
    template<typename Iter>
    constexpr auto unpack_iterator_and_sentinel(
        utf_32_to_8_iterator<Iter> first,
        utf_32_to_8_iterator<Iter> last) noexcept;
    template<typename Iter, typename Sentinel>
    constexpr auto unpack_iterator_and_sentinel(
        utf_32_to_8_iterator<Iter, Sentinel> first, Sentinel last) noexcept;
    // 16 -> 32
    template<typename Iter>
    constexpr auto unpack_iterator_and_sentinel(
        utf_16_to_32_iterator<Iter> first,
        utf_16_to_32_iterator<Iter> last) noexcept;
    template<typename Iter, typename Sentinel>
    constexpr auto unpack_iterator_and_sentinel(
        utf_16_to_32_iterator<Iter, Sentinel> first, Sentinel last) noexcept;
    // 32 -> 16
    template<typename Iter>
    constexpr auto unpack_iterator_and_sentinel(
        utf_32_to_16_iterator<Iter> first,
        utf_32_to_16_iterator<Iter> last) noexcept;
    template<typename Iter, typename Sentinel>
    constexpr auto unpack_iterator_and_sentinel(
        utf_32_to_16_iterator<Iter, Sentinel> first, Sentinel last) noexcept;
    // 8 -> 16
    template<typename Iter>
    constexpr auto unpack_iterator_and_sentinel(
        utf_8_to_16_iterator<Iter> first,
        utf_8_to_16_iterator<Iter> last) noexcept;
    template<typename Iter, typename Sentinel>
    constexpr auto unpack_iterator_and_sentinel(
        utf_8_to_16_iterator<Iter, Sentinel> first, Sentinel last) noexcept;
    // 16 -> 8
    template<typename Iter>
    constexpr auto unpack_iterator_and_sentinel(
        utf_16_to_8_iterator<Iter> first,
        utf_16_to_8_iterator<Iter> last) noexcept;
    template<typename Iter, typename Sentinel>
    constexpr auto unpack_iterator_and_sentinel(
        utf_16_to_8_iterator<Iter, Sentinel> first, Sentinel last) noexcept;

    // Definitions.  The iterator-pair overloads unwrap both ends; the
    // sentinel overloads unwrap only the iterator and pass the sentinel
    // through unchanged.  Recursion bottoms out at the generic entry point,
    // which tags the fully-unwrapped range by its code-unit type.

    // 8 -> 32
    template<typename Iter>
    constexpr auto unpack_iterator_and_sentinel(
        utf_8_to_32_iterator<Iter> first,
        utf_8_to_32_iterator<Iter> last) noexcept
    {
        return detail::unpack_iterator_and_sentinel(first.base(), last.base());
    }
    template<typename Iter, typename Sentinel>
    constexpr auto unpack_iterator_and_sentinel(
        utf_8_to_32_iterator<Iter, Sentinel> first, Sentinel last) noexcept
    {
        return detail::unpack_iterator_and_sentinel(first.base(), last);
    }
    // 32 -> 8
    template<typename Iter>
    constexpr auto unpack_iterator_and_sentinel(
        utf_32_to_8_iterator<Iter> first,
        utf_32_to_8_iterator<Iter> last) noexcept
    {
        return detail::unpack_iterator_and_sentinel(first.base(), last.base());
    }
    template<typename Iter, typename Sentinel>
    constexpr auto unpack_iterator_and_sentinel(
        utf_32_to_8_iterator<Iter, Sentinel> first, Sentinel last) noexcept
    {
        return detail::unpack_iterator_and_sentinel(first.base(), last);
    }
    // 16 -> 32
    template<typename Iter>
    constexpr auto unpack_iterator_and_sentinel(
        utf_16_to_32_iterator<Iter> first,
        utf_16_to_32_iterator<Iter> last) noexcept
    {
        return detail::unpack_iterator_and_sentinel(first.base(), last.base());
    }
    template<typename Iter, typename Sentinel>
    constexpr auto unpack_iterator_and_sentinel(
        utf_16_to_32_iterator<Iter, Sentinel> first, Sentinel last) noexcept
    {
        return detail::unpack_iterator_and_sentinel(first.base(), last);
    }
    // 32 -> 16
    template<typename Iter>
    constexpr auto unpack_iterator_and_sentinel(
        utf_32_to_16_iterator<Iter> first,
        utf_32_to_16_iterator<Iter> last) noexcept
    {
        return detail::unpack_iterator_and_sentinel(first.base(), last.base());
    }
    template<typename Iter, typename Sentinel>
    constexpr auto unpack_iterator_and_sentinel(
        utf_32_to_16_iterator<Iter, Sentinel> first, Sentinel last) noexcept
    {
        return detail::unpack_iterator_and_sentinel(first.base(), last);
    }
    // 8 -> 16
    template<typename Iter>
    constexpr auto unpack_iterator_and_sentinel(
        utf_8_to_16_iterator<Iter> first,
        utf_8_to_16_iterator<Iter> last) noexcept
    {
        return detail::unpack_iterator_and_sentinel(first.base(), last.base());
    }
    template<typename Iter, typename Sentinel>
    constexpr auto unpack_iterator_and_sentinel(
        utf_8_to_16_iterator<Iter, Sentinel> first, Sentinel last) noexcept
    {
        return detail::unpack_iterator_and_sentinel(first.base(), last);
    }
    // 16 -> 8
    template<typename Iter>
    constexpr auto unpack_iterator_and_sentinel(
        utf_16_to_8_iterator<Iter> first,
        utf_16_to_8_iterator<Iter> last) noexcept
    {
        return detail::unpack_iterator_and_sentinel(first.base(), last.base());
    }
    template<typename Iter, typename Sentinel>
    constexpr auto unpack_iterator_and_sentinel(
        utf_16_to_8_iterator<Iter, Sentinel> first, Sentinel last) noexcept
    {
        return detail::unpack_iterator_and_sentinel(first.base(), last);
    }

}}}

#endif
wiltonlazary/arangodb
3rdParty/iresearch/external/text/include/boost/text/detail/unpack.hpp
C++
apache-2.0
7,706
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.mapper;

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.UnicodeUtil;
import org.elasticsearch.common.bytes.HashedBytesArray;
import org.elasticsearch.common.lucene.BytesRefs;

import java.util.Collection;
import java.util.Collections;
import java.util.List;

/**
 * A composite document identifier made of a mapping {@code type} and a
 * document {@code id}, serialized as {@code type + '#' + id}.  Types may not
 * contain the {@code '#'} delimiter, ids may; every parsing helper therefore
 * splits on the <em>first</em> occurrence of the delimiter.  Static helpers
 * build and split uids both as {@link String}s and as raw UTF-8
 * {@link BytesRef}s.
 */
public final class Uid {

    /** Character separating type from id in the string form of a uid. */
    public static final char DELIMITER = '#';

    /** 0x23 is the single-byte UTF-8 encoding of '#'. */
    public static final byte DELIMITER_BYTE = 0x23;

    /** One-byte {@link BytesRef} holding just the delimiter. */
    public static final BytesRef DELIMITER_BYTES = new BytesRef(new byte[]{DELIMITER_BYTE});

    // Mapping type part of this uid.
    private final String type;

    // Document id part of this uid.
    private final String id;

    public Uid(String type, String id) {
        this.type = type;
        this.id = id;
    }

    /** Returns the mapping type part of this uid. */
    public String type() {
        return type;
    }

    /** Returns the document id part of this uid. */
    public String id() {
        return id;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;

        Uid uid = (Uid) o;

        // Null-safe field-by-field comparison; both type and id may be null.
        if (id != null ? !id.equals(uid.id) : uid.id != null) return false;
        if (type != null ? !type.equals(uid.type) : uid.type != null) return false;

        return true;
    }

    @Override
    public int hashCode() {
        int result = type != null ? type.hashCode() : 0;
        result = 31 * result + (id != null ? id.hashCode() : 0);
        return result;
    }

    /** Returns the serialized {@code type#id} form of this uid. */
    @Override
    public String toString() {
        return createUid(type, id);
    }

    /** Returns the serialized {@code type#id} form of this uid as UTF-8 bytes. */
    public BytesRef toBytesRef() {
        return createUidAsBytes(type, id);
    }

    /** Returns {@code type + '#'}, the string prefix shared by all uids of a type. */
    public static String typePrefix(String type) {
        return type + DELIMITER;
    }

    /** Returns {@code type + '#'} as bytes, built by appending the delimiter to a copy. */
    public static BytesRef typePrefixAsBytes(BytesRef type) {
        BytesRef bytesRef = new BytesRef(type.length + 1);
        bytesRef.append(type);
        bytesRef.append(DELIMITER_BYTES);
        return bytesRef;
    }

    /** Extracts the id part of a serialized uid (everything after the first '#'). */
    public static String idFromUid(String uid) {
        int delimiterIndex = uid.indexOf(DELIMITER); // type is not allowed to have # in it..., ids can
        return uid.substring(delimiterIndex + 1);
    }

    /**
     * Extracts the id part of a serialized uid given as raw bytes.
     * NOTE(review): {@link #splitUidIntoTypeAndId(BytesRef)} returns null when
     * the uid contains no delimiter byte, which would make this throw an NPE —
     * callers appear to rely on well-formed uids; confirm before hardening.
     */
    public static HashedBytesArray idFromUid(BytesRef uid) {
        return splitUidIntoTypeAndId(uid)[1];
    }

    /**
     * Extracts the type part of a serialized uid given as raw bytes.
     * NOTE(review): same null-on-missing-delimiter caveat as {@link #idFromUid(BytesRef)}.
     */
    public static HashedBytesArray typeFromUid(BytesRef uid) {
        return splitUidIntoTypeAndId(uid)[0];
    }

    /** Extracts the type part of a serialized uid (everything before the first '#'). */
    public static String typeFromUid(String uid) {
        int delimiterIndex = uid.indexOf(DELIMITER); // type is not allowed to have # in it..., ids can
        return uid.substring(0, delimiterIndex);
    }

    /** Parses a serialized {@code type#id} string into a {@link Uid}. */
    public static Uid createUid(String uid) {
        int delimiterIndex = uid.indexOf(DELIMITER); // type is not allowed to have # in it..., ids can
        return new Uid(uid.substring(0, delimiterIndex), uid.substring(delimiterIndex + 1));
    }

    /** Builds the UTF-8 byte form of {@code type#id} from two strings. */
    public static BytesRef createUidAsBytes(String type, String id) {
        return createUidAsBytes(new BytesRef(type), new BytesRef(id));
    }

    /** Builds the UTF-8 byte form of {@code type#id} from a string type and byte id. */
    public static BytesRef createUidAsBytes(String type, BytesRef id) {
        return createUidAsBytes(new BytesRef(type), id);
    }

    /**
     * Builds the byte form of {@code type#id} into a freshly allocated
     * {@link BytesRef} of exact size.  The ref's {@code offset} field is used
     * as a temporary write cursor while filling the buffer, then reset to 0
     * before returning.
     */
    public static BytesRef createUidAsBytes(BytesRef type, BytesRef id) {
        final BytesRef ref = new BytesRef(type.length + 1 + id.length);
        System.arraycopy(type.bytes, type.offset, ref.bytes, 0, type.length);
        // Write cursor: place the delimiter right after the type bytes...
        ref.offset = type.length;
        ref.bytes[ref.offset++] = DELIMITER_BYTE;
        // ...then the id bytes after the delimiter.
        System.arraycopy(id.bytes, id.offset, ref.bytes, ref.offset, id.length);
        // Restore a view over the whole buffer.
        ref.offset = 0;
        ref.length = ref.bytes.length;
        return ref;
    }

    /** Builds {@code type#id} into the caller-provided {@code spare} buffer (no allocation here). */
    public static void createUidAsBytes(BytesRef type, BytesRef id, BytesRef spare) {
        spare.copyBytes(type);
        spare.append(DELIMITER_BYTES);
        spare.append(id);
    }

    /** Convenience overload wrapping a single id; see {@link #createTypeUids(Collection, List)}. */
    public static BytesRef[] createTypeUids(Collection<String> types, Object ids) {
        return createTypeUids(types, Collections.singletonList(ids));
    }

    /**
     * Builds the byte-form uid for every (type, id) pair — the cartesian
     * product of {@code types} and {@code ids} — reusing two scratch
     * {@link BytesRef}s for the UTF-8 conversions.
     */
    public static BytesRef[] createTypeUids(Collection<String> types, List<? extends Object> ids) {
        final int numIds = ids.size();
        BytesRef[] uids = new BytesRef[types.size() * ids.size()];
        BytesRef typeBytes = new BytesRef();
        BytesRef idBytes = new BytesRef();
        int index = 0;
        for (String type : types) {
            UnicodeUtil.UTF16toUTF8(type, 0, type.length(), typeBytes);
            for (int i = 0; i < numIds; i++, index++) {
                uids[index] = Uid.createUidAsBytes(typeBytes, BytesRefs.toBytesRef(ids.get(i), idBytes));
            }
        }
        return uids;
    }

    /** Builds the string form {@code type#id}. */
    public static String createUid(String type, String id) {
        return createUid(new StringBuilder(), type, id);
    }

    /** Builds the string form {@code type#id} into the given builder. */
    public static String createUid(StringBuilder sb, String type, String id) {
        return sb.append(type).append(DELIMITER).append(id).toString();
    }

    /** Returns true if the byte range contains the uid delimiter anywhere. */
    public static boolean hasDelimiter(BytesRef uid) {
        final int limit = uid.offset + uid.length;
        for (int i = uid.offset; i < limit; i++) {
            if (uid.bytes[i] == DELIMITER_BYTE) { // 0x23 is equal to '#'
                return true;
            }
        }
        return false;
    }

    // LUCENE 4 UPGRADE: HashedBytesArray or BytesRef as return type?
    /**
     * Splits a serialized uid on the first delimiter byte into
     * {@code [type, id]}.  Returns {@code null} when no delimiter is present
     * (see the NPE caveat on {@link #idFromUid(BytesRef)}).
     */
    public static HashedBytesArray[] splitUidIntoTypeAndId(BytesRef uid) {
        int loc = -1;
        final int limit = uid.offset + uid.length;
        for (int i = uid.offset; i < limit; i++) {
            if (uid.bytes[i] == DELIMITER_BYTE) { // 0x23 is equal to '#'
                loc = i;
                break;
            }
        }

        if (loc == -1) {
            return null;
        }

        // Copy the bytes before the delimiter (type) and after it (id) into
        // independent arrays.
        byte[] type = new byte[loc - uid.offset];
        System.arraycopy(uid.bytes, uid.offset, type, 0, type.length);

        byte[] id = new byte[uid.length - type.length - 1];
        System.arraycopy(uid.bytes, loc + 1, id, 0, id.length);
        return new HashedBytesArray[]{new HashedBytesArray(type), new HashedBytesArray(id)};
    }
}
alexksikes/elasticsearch
src/main/java/org/elasticsearch/index/mapper/Uid.java
Java
apache-2.0
6,728
/**
 * Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
 *
 * Please see distribution for license.
 */
package com.opengamma.strata.math.impl;

import com.opengamma.strata.collect.ArgChecker;

/**
 * Utilities for working with complex numbers.
 * <p>
 * All operations return a new {@link ComplexNumber}; arguments are never mutated.
 * Division, inversion and square root branch on which component has the larger
 * magnitude and work with the ratio of the components, so that intermediate
 * values do not overflow or underflow unnecessarily.
 */
public class ComplexMathUtils {

  /**
   * Adds two complex numbers.
   *
   * @param z1  the first number, not null
   * @param z2  the second number, not null
   * @return z1 + z2
   */
  public static ComplexNumber add(ComplexNumber z1, ComplexNumber z2) {
    ArgChecker.notNull(z1, "z1");
    ArgChecker.notNull(z2, "z2");
    return new ComplexNumber(z1.getReal() + z2.getReal(), z1.getImaginary() + z2.getImaginary());
  }

  /**
   * Adds an arbitrary number of complex numbers.
   * An empty argument list yields 0 + 0i.
   *
   * @param z  the numbers to add, array not null (elements are not individually checked)
   * @return the sum of all arguments
   */
  public static ComplexNumber add(ComplexNumber... z) {
    ArgChecker.notNull(z, "z");
    double res = 0.0;
    double img = 0.0;
    for (ComplexNumber aZ : z) {
      res += aZ.getReal();
      img += aZ.getImaginary();
    }
    return new ComplexNumber(res, img);
  }

  /**
   * Adds a real number to a complex number.
   *
   * @param z  the complex number, not null
   * @param x  the real number
   * @return z + x
   */
  public static ComplexNumber add(ComplexNumber z, double x) {
    ArgChecker.notNull(z, "z");
    return new ComplexNumber(z.getReal() + x, z.getImaginary());
  }

  /**
   * Adds a complex number to a real number.
   *
   * @param x  the real number
   * @param z  the complex number, not null
   * @return x + z
   */
  public static ComplexNumber add(double x, ComplexNumber z) {
    ArgChecker.notNull(z, "z");
    return new ComplexNumber(z.getReal() + x, z.getImaginary());
  }

  /**
   * Returns the argument (phase) of a complex number, in the range (-pi, pi].
   *
   * @param z  the complex number, not null
   * @return atan2(Im(z), Re(z))
   */
  public static double arg(ComplexNumber z) {
    ArgChecker.notNull(z, "z");
    return Math.atan2(z.getImaginary(), z.getReal());
  }

  /**
   * Returns the complex conjugate.
   *
   * @param z  the complex number, not null
   * @return the conjugate of z
   */
  public static ComplexNumber conjugate(ComplexNumber z) {
    ArgChecker.notNull(z, "z");
    return new ComplexNumber(z.getReal(), -z.getImaginary());
  }

  /**
   * Divides one complex number by another.
   * Branches on the larger-magnitude component of the divisor and divides
   * through by it, keeping intermediate values well-scaled.
   *
   * @param z1  the dividend, not null
   * @param z2  the divisor, not null
   * @return z1 / z2
   */
  public static ComplexNumber divide(ComplexNumber z1, ComplexNumber z2) {
    ArgChecker.notNull(z1, "z1");
    ArgChecker.notNull(z2, "z2");
    double a = z1.getReal();
    double b = z1.getImaginary();
    double c = z2.getReal();
    double d = z2.getImaginary();
    if (Math.abs(c) > Math.abs(d)) {
      double dOverC = d / c;
      double denom = c + d * dOverC;
      return new ComplexNumber((a + b * dOverC) / denom, (b - a * dOverC) / denom);
    }
    double cOverD = c / d;
    double denom = c * cOverD + d;
    return new ComplexNumber((a * cOverD + b) / denom, (b * cOverD - a) / denom);
  }

  /**
   * Divides a complex number by a real number.
   *
   * @param z  the dividend, not null
   * @param x  the real divisor
   * @return z / x
   */
  public static ComplexNumber divide(ComplexNumber z, double x) {
    ArgChecker.notNull(z, "z");
    return new ComplexNumber(z.getReal() / x, z.getImaginary() / x);
  }

  /**
   * Divides a real number by a complex number.
   *
   * @param x  the real dividend
   * @param z  the complex divisor, not null
   * @return x / z
   */
  public static ComplexNumber divide(double x, ComplexNumber z) {
    ArgChecker.notNull(z, "z");
    double c = z.getReal();
    double d = z.getImaginary();
    // same scaled-division scheme as divide(ComplexNumber, ComplexNumber)
    if (Math.abs(c) > Math.abs(d)) {
      double dOverC = d / c;
      double denom = c + d * dOverC;
      return new ComplexNumber(x / denom, -x * dOverC / denom);
    }
    double cOverD = c / d;
    double denom = c * cOverD + d;
    return new ComplexNumber(x * cOverD / denom, -x / denom);
  }

  /**
   * Returns the complex exponential e^z.
   *
   * @param z  the exponent, not null
   * @return exp(Re(z)) * (cos(Im(z)) + i sin(Im(z)))
   */
  public static ComplexNumber exp(ComplexNumber z) {
    ArgChecker.notNull(z, "z");
    double mult = Math.exp(z.getReal());
    return new ComplexNumber(mult * Math.cos(z.getImaginary()), mult * Math.sin(z.getImaginary()));
  }

  /**
   * Returns the multiplicative inverse 1 / z, using the same scaled scheme as
   * {@link #divide(double, ComplexNumber)}.
   *
   * @param z  the complex number, not null
   * @return 1 / z
   */
  public static ComplexNumber inverse(ComplexNumber z) {
    ArgChecker.notNull(z, "z");
    double c = z.getReal();
    double d = z.getImaginary();
    if (Math.abs(c) > Math.abs(d)) {
      double dOverC = d / c;
      double denom = c + d * dOverC;
      return new ComplexNumber(1 / denom, -dOverC / denom);
    }
    double cOverD = c / d;
    double denom = c * cOverD + d;
    return new ComplexNumber(cOverD / denom, -1 / denom);
  }

  /**
   * Returns the principal value of log, with z the principal argument of z defined to lie in the interval (-pi, pi]
   * @param z ComplexNumber
   * @return The log
   */
  public static ComplexNumber log(ComplexNumber z) {
    ArgChecker.notNull(z, "z");
    return new ComplexNumber(Math.log(Math.hypot(z.getReal(), z.getImaginary())), Math.atan2(z.getImaginary(), z.getReal()));
  }

  /**
   * Returns the modulus (absolute value) |z|.
   *
   * @param z  the complex number, not null
   * @return hypot(Re(z), Im(z))
   */
  public static double mod(ComplexNumber z) {
    ArgChecker.notNull(z, "z");
    return Math.hypot(z.getReal(), z.getImaginary());
  }

  /**
   * Returns z squared, computed directly as (a^2 - b^2) + 2ab i.
   *
   * @param z  the complex number, not null
   * @return z * z
   */
  public static ComplexNumber square(ComplexNumber z) {
    ArgChecker.notNull(z, "z");
    double a = z.getReal();
    double b = z.getImaginary();
    return new ComplexNumber(a * a - b * b, 2 * a * b);
  }

  /**
   * Multiplies two complex numbers.
   *
   * @param z1  the first number, not null
   * @param z2  the second number, not null
   * @return z1 * z2
   */
  public static ComplexNumber multiply(ComplexNumber z1, ComplexNumber z2) {
    ArgChecker.notNull(z1, "z1");
    ArgChecker.notNull(z2, "z2");
    double a = z1.getReal();
    double b = z1.getImaginary();
    double c = z2.getReal();
    double d = z2.getImaginary();
    return new ComplexNumber(a * c - b * d, a * d + b * c);
  }

  /**
   * Multiplies an arbitrary number of complex numbers.
   *
   * @param z  the numbers to multiply, not null and not empty
   * @return the product of all arguments
   * @throws IllegalArgumentException if the argument list is empty
   */
  public static ComplexNumber multiply(ComplexNumber... z) {
    ArgChecker.notNull(z, "z");
    int n = z.length;
    ArgChecker.isTrue(n > 0, "nothing to multiply");
    if (n == 1) {
      return z[0];
    } else if (n == 2) {
      return multiply(z[0], z[1]);
    } else {
      ComplexNumber product = multiply(z[0], z[1]);
      for (int i = 2; i < n; i++) {
        product = multiply(product, z[i]);
      }
      return product;
    }
  }

  /**
   * Multiplies a real number by the product of an arbitrary number of complex numbers.
   *
   * @param x  the real factor
   * @param z  the complex factors, not null and not empty
   * @return x * z[0] * z[1] * ...
   */
  public static ComplexNumber multiply(double x, ComplexNumber... z) {
    ComplexNumber product = multiply(z);
    return multiply(x, product);
  }

  /**
   * Multiplies a complex number by a real number.
   *
   * @param z  the complex number, not null
   * @param x  the real factor
   * @return z * x
   */
  public static ComplexNumber multiply(ComplexNumber z, double x) {
    ArgChecker.notNull(z, "z");
    return new ComplexNumber(z.getReal() * x, z.getImaginary() * x);
  }

  /**
   * Multiplies a real number by a complex number.
   *
   * @param x  the real factor
   * @param z  the complex number, not null
   * @return x * z
   */
  public static ComplexNumber multiply(double x, ComplexNumber z) {
    ArgChecker.notNull(z, "z");
    return new ComplexNumber(z.getReal() * x, z.getImaginary() * x);
  }

  /**
   * Raises one complex number to the power of another, via the polar form of z1:
   * z1^z2 = exp(z2 * log(z1)).
   *
   * @param z1  the base, not null
   * @param z2  the exponent, not null
   * @return z1 ^ z2 (principal value)
   */
  public static ComplexNumber pow(ComplexNumber z1, ComplexNumber z2) {
    ArgChecker.notNull(z1, "z1");
    ArgChecker.notNull(z2, "z2");
    double mod = mod(z1);
    double arg = arg(z1);
    double mult = Math.pow(mod, z2.getReal()) * Math.exp(-z2.getImaginary() * arg);
    double theta = z2.getReal() * arg + z2.getImaginary() * Math.log(mod);
    return new ComplexNumber(mult * Math.cos(theta), mult * Math.sin(theta));
  }

  /**
   * Raises a complex number to a real power, via the polar form:
   * z^x = |z|^x * (cos(x*arg) + i sin(x*arg)).
   *
   * @param z  the base, not null
   * @param x  the real exponent
   * @return z ^ x (principal value)
   */
  public static ComplexNumber pow(ComplexNumber z, double x) {
    // fix: this was the only public method missing the null guard applied
    // consistently by every other method in this class
    ArgChecker.notNull(z, "z");
    double mod = mod(z);
    double arg = arg(z);
    double mult = Math.pow(mod, x);
    return new ComplexNumber(mult * Math.cos(x * arg), mult * Math.sin(x * arg));
  }

  /**
   * Raises a real number to a complex power.
   *
   * @param x  the real base
   * @param z  the complex exponent, not null
   * @return x ^ z (principal value)
   */
  public static ComplexNumber pow(double x, ComplexNumber z) {
    ArgChecker.notNull(z, "z");
    return pow(new ComplexNumber(x, 0), z);
  }

  /**
   * Returns the principal square root, computed with the scaled branch scheme
   * that works with the ratio of the smaller to the larger component.
   * sqrt(0 + 0i) returns the input unchanged.
   *
   * @param z  the complex number, not null
   * @return the principal square root of z
   */
  public static ComplexNumber sqrt(ComplexNumber z) {
    ArgChecker.notNull(z, "z");
    double c = z.getReal();
    double d = z.getImaginary();
    if (c == 0.0 && d == 0.0) {
      return z;
    }
    // w = sqrt((|z| + |larger component|) / 2), computed without forming |z| directly
    double w;
    if (Math.abs(c) > Math.abs(d)) {
      double dOverC = d / c;
      w = Math.sqrt(Math.abs(c)) * Math.sqrt((1 + Math.sqrt(1 + dOverC * dOverC)) / 2);
    } else {
      double cOverD = c / d;
      w = Math.sqrt(Math.abs(d)) * Math.sqrt((Math.abs(cOverD) + Math.sqrt(1 + cOverD * cOverD)) / 2);
    }
    // choose the root in the right half-plane (principal value)
    if (c >= 0.0) {
      return new ComplexNumber(w, d / 2 / w);
    }
    if (d >= 0.0) {
      return new ComplexNumber(d / 2 / w, w);
    }
    return new ComplexNumber(-d / 2 / w, -w);
  }

  /**
   * Subtracts one complex number from another.
   *
   * @param z1  the minuend, not null
   * @param z2  the subtrahend, not null
   * @return z1 - z2
   */
  public static ComplexNumber subtract(ComplexNumber z1, ComplexNumber z2) {
    ArgChecker.notNull(z1, "z1");
    ArgChecker.notNull(z2, "z2");
    return new ComplexNumber(z1.getReal() - z2.getReal(), z1.getImaginary() - z2.getImaginary());
  }

  /**
   * Subtracts a real number from a complex number.
   *
   * @param z  the minuend, not null
   * @param x  the real subtrahend
   * @return z - x
   */
  public static ComplexNumber subtract(ComplexNumber z, double x) {
    ArgChecker.notNull(z, "z");
    return new ComplexNumber(z.getReal() - x, z.getImaginary());
  }

  /**
   * Subtracts a complex number from a real number.
   *
   * @param x  the real minuend
   * @param z  the complex subtrahend, not null
   * @return x - z
   */
  public static ComplexNumber subtract(double x, ComplexNumber z) {
    ArgChecker.notNull(z, "z");
    return new ComplexNumber(x - z.getReal(), -z.getImaginary());
  }

}
jmptrader/Strata
modules/math/src/main/java/com/opengamma/strata/math/impl/ComplexMathUtils.java
Java
apache-2.0
7,523