| code | language |
|---|---|
---
title: End of August Blooms
date: 2009-08-31 00:00:00 -06:00
categories:
- whats-blooming
layout: post
blog-banner: whats-blooming-now-summer.jpg
post-date: August 31, 2009
post-time: 8:09 AM
blog-image: wbn-default.jpg
---
<div class="text-center">
<p>Look for these beauties as you stroll through the garden.</p>
</div>
<div class="text-center">
<img src="/images/blogs/old-posts/Buddleja davidii 'Pink Delight'.jpg" width="450" height="450" alt="" title="" />
</div>
<br>
<div class="text-center">
<img src="/images/blogs/old-posts/Caryopteris x clandonensis 'First Choice'.jpg" width="450" height="450" alt="" title="" />
</div>
<br>
<div class="text-center">
<img src="/images/blogs/old-posts/Chasmanthium latifolium.jpg" width="450" height="450" alt="" title="" />
</div>
<br>
<div class="text-center">
<img src="/images/blogs/old-posts/Cirsium undulatum.jpg" width="450" height="450" alt="" title="" />
</div>
<br>
<div class="text-center">
<img src="/images/blogs/old-posts/Linaria dalmatica.jpg" width="450" height="450" alt="" title="" />
</div>
<br>
<div class="text-center">
Don't forget to visit the What's Blooming Blog every day for cool and interesting facts about each of these plants.
</div>
|
Markdown
|
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"math"
"time"
"github.com/pingcap/tidb/util/collate"
)
// CompareInt64 returns an integer comparing the int64 x to y.
func CompareInt64(x, y int64) int {
if x < y {
return -1
} else if x == y {
return 0
}
return 1
}
// CompareUint64 returns an integer comparing the uint64 x to y.
func CompareUint64(x, y uint64) int {
if x < y {
return -1
} else if x == y {
return 0
}
return 1
}
// VecCompareUU returns []int64 comparing the []uint64 x to []uint64 y.
func VecCompareUU(x, y []uint64, res []int64) {
n := len(x)
for i := 0; i < n; i++ {
if x[i] < y[i] {
res[i] = -1
} else if x[i] == y[i] {
res[i] = 0
} else {
res[i] = 1
}
}
}
// VecCompareII returns []int64 comparing the []int64 x to []int64 y.
func VecCompareII(x, y, res []int64) {
n := len(x)
for i := 0; i < n; i++ {
if x[i] < y[i] {
res[i] = -1
} else if x[i] == y[i] {
res[i] = 0
} else {
res[i] = 1
}
}
}
// VecCompareUI returns []int64 comparing the []uint64 x to []int64 y.
func VecCompareUI(x []uint64, y, res []int64) {
n := len(x)
for i := 0; i < n; i++ {
if y[i] < 0 || x[i] > math.MaxInt64 {
res[i] = 1
} else if int64(x[i]) < y[i] {
res[i] = -1
} else if int64(x[i]) == y[i] {
res[i] = 0
} else {
res[i] = 1
}
}
}
// VecCompareIU returns []int64 comparing the []int64 x to []uint64 y.
func VecCompareIU(x []int64, y []uint64, res []int64) {
n := len(x)
for i := 0; i < n; i++ {
if x[i] < 0 || uint64(y[i]) > math.MaxInt64 {
res[i] = -1
} else if x[i] < int64(y[i]) {
res[i] = -1
} else if x[i] == int64(y[i]) {
res[i] = 0
} else {
res[i] = 1
}
}
}
// CompareFloat64 returns an integer comparing the float64 x to y.
func CompareFloat64(x, y float64) int {
if x < y {
return -1
} else if x == y {
return 0
}
return 1
}
// CompareString returns an integer comparing the string x to y with the specified collation and length.
func CompareString(x, y, collation string, length int) int {
return collate.GetCollator(collation).Compare(x, y, collate.NewCollatorOption(length))
}
// CompareDuration returns an integer comparing the duration x to y.
func CompareDuration(x, y time.Duration) int {
if x < y {
return -1
} else if x == y {
return 0
}
return 1
}
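// --- Standalone illustrative sketch (separate from the package above) ---
// The mixed signed/unsigned comparisons above (VecCompareUI/VecCompareIU) guard
// against sign and overflow before casting between int64 and uint64. This minimal
// program, assuming only the standard library, shows why the math.MaxInt64 check
// matters; it is a sketch, not part of the tidb types package.
package main

import (
	"fmt"
	"math"
)

// compareUintInt mirrors the per-element logic of VecCompareUI: any negative
// int64 is smaller than any uint64, and a uint64 above math.MaxInt64 is larger
// than any int64, so the int64 cast is only taken when it cannot wrap around.
func compareUintInt(x uint64, y int64) int64 {
	if y < 0 || x > math.MaxInt64 {
		return 1
	}
	if int64(x) < y {
		return -1
	}
	if int64(x) == y {
		return 0
	}
	return 1
}

func main() {
	// A naive int64(x) cast would wrap math.MaxUint64 to -1 and report equality.
	fmt.Println(compareUintInt(math.MaxUint64, -1)) // 1
	fmt.Println(compareUintInt(3, 7))               // -1
}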
|
Go
|
package com.wangshan.service.impl;
import com.wangshan.dao.UserDao;
import com.wangshan.models.User;
import com.wangshan.service.ValidateService;
import com.wangshan.utils.gabriel.EncryptUtil;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
/**
* Created by Administrator on 2015/11/15.
*/
@Service
public class ValidateServiceImpl implements ValidateService {
@Autowired
private UserDao userDao;
@Override
public Boolean validatePassword(String email, String password){
User user = userDao.getUserByEmail(email);
if(user != null && new EncryptUtil().encrypt(password + "-" + user.getSalt(), "SHA-1").equals(user.getPassword())){
return true;
} else {
return false;
}
}
@Override
public Boolean validateMobileRepeat(String mobile){
return false;
}
@Override
public Boolean validateEmailRepeat(String email){
return false;
}
}
|
Java
|
# Mitozus scabridulus Miers SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null
|
Markdown
|
## Versioning & Releasing
Increments in the version should be done by one of the project owners.
The version should follow the standard `major.minor.patch` style; see http://semver.org/, which defines the specification. The examples below paraphrase that specification, and a small illustrative sketch of bumping a version follows them.
### Examples of changes that would warrant a `patch` version change
* Small changes that are completely backwards compatible, normally bug fixes.
* Changes in dependencies.
### Examples of changes that would warrant a `minor` version change
* Introduction of new functionality, without breaking backwards compatibility.
### Examples of changes that would warrant a `major` version change
* Any break in backwards compatibility must result in an increment of the `major` version.
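A minimal sketch of what bumping a `major.minor.patch` version could look like, written in Go; `bump` is a made-up helper for illustration and not part of this project's tooling:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// bump increments the requested part ("major", "minor" or "patch") of a
// major.minor.patch version string and resets the lower-order parts,
// following the rules paraphrased above.
func bump(version, part string) (string, error) {
	fields := strings.Split(version, ".")
	if len(fields) != 3 {
		return "", fmt.Errorf("not a major.minor.patch version: %q", version)
	}
	nums := make([]int, 3)
	for i, f := range fields {
		n, err := strconv.Atoi(f)
		if err != nil {
			return "", fmt.Errorf("invalid version component %q: %v", f, err)
		}
		nums[i] = n
	}
	switch part {
	case "major":
		nums[0], nums[1], nums[2] = nums[0]+1, 0, 0
	case "minor":
		nums[1], nums[2] = nums[1]+1, 0
	case "patch":
		nums[2]++
	default:
		return "", fmt.Errorf("unknown part: %q", part)
	}
	return fmt.Sprintf("%d.%d.%d", nums[0], nums[1], nums[2]), nil
}

func main() {
	v, _ := bump("1.4.2", "minor") // new, backwards-compatible functionality
	fmt.Println(v)                 // prints 1.5.0
}
```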
|
Markdown
|
<?php
/**
* Created by PhpStorm.
* User: Mohammad Eslahi Sani
* Date: 04/10/1394
* Time: 9:06 PM
*/
//dl('php_pdo_sqlsrv_55_ts.dll');
// phpinfo();
if(isset($_SESSION['login'])){
}
elseif(isset($_POST['username']) && isset($_POST['password'])){
$u = $_POST['username'];
$p = $_POST['password'];
// exec("echo username and password are: $u --- $p >> debug.txt");
$serverName = "MMDES"; //serverName\instanceName
// Since UID and PWD are not specified in the $connectionInfo array,
// The connection will be attempted using Windows Authentication.
$connectionInfo = array( "Database"=>"officeAutomation");
$conn = sqlsrv_connect( $serverName, $connectionInfo);
if( $conn ) {
// echo "Connection established.<br />";
}else{
// echo "Connection could not be established.<br />";
// die( print_r( sqlsrv_errors(), true));
exec("echo connection was not established >> debug.txt");
}
// Use a parameterized query so the submitted username cannot inject SQL.
$query = "SELECT * FROM sysUser WHERE Username = ?";
$result = sqlsrv_query( $conn , $query, array($u));
if (!$result)
die( print_r( sqlsrv_errors(), true));
$row = sqlsrv_fetch_array($result);
if( $row['Password'] == $p ){
$query2 = "SELECT firstName,lastName,Gender FROM Person JOIN Employee on Person.NationalID=Employee.NationalID WHERE PersonalID = ?";
$result2 = sqlsrv_query( $conn , $query2, array($row['PersonalID']));
if (!$result2)
die( print_r( sqlsrv_errors(), true));
$row2 = sqlsrv_fetch_array($result2);
// print_r($row2);
$tempAry=array('username'=>$row['Username'],'role'=>$row['Role'],'personalId'=>$row['PersonalID'],
'firstName'=>$row2['firstName'],'lastName'=>$row2['lastName'],'gender'=>$row2['Gender']);
$_SESSION['login'] = $tempAry;
header('location: ');
// print_r($_SESSION);
}
else{
header('location: ?invalid');
die();
}
}
elseif (isset($_GET['invalid'])){
?>
<body>
<div class="container sign-in-container">
<p class="invalid-text">Invalid username or password,<br> Try again!</p>
<form method="post" class="form-signin login-form">
<h2 class="form-signin-heading">Please sign in</h2>
<label for="inputEmail" class="sr-only">Username</label>
<input name="username" type="text" id="inputEmail" class="username-input form-control" placeholder="Username" required autofocus>
<label for="inputPassword" class="password-input sr-only">Password</label>
<input name="password" type="password" id="inputPassword" class="form-control" placeholder="Password" required>
<button class="submit-button btn btn-lg btn-primary btn-block" type="submit">Sign in</button>
</form>
</div> <!-- /container -->
</body>
</html>
<?php
}
else{
?>
<body>
<div class="container sign-in-container">
<form method="post" class="form-signin login-form">
<h2 class="form-signin-heading">Please sign in</h2>
<label for="inputEmail" class="sr-only">Username</label>
<input name="username" type="text" id="inputEmail" class="username-input form-control" placeholder="Username" required autofocus>
<label for="inputPassword" class="password-input sr-only">Password</label>
<input name="password" type="password" id="inputPassword" class="form-control" placeholder="Password" required>
<button class="submit-button btn btn-lg btn-primary btn-block" type="submit">Sign in</button>
</form>
</div> <!-- /container -->
</body>
</html>
<?php } ?>
|
PHP
|
# Croton touranensis Gagnep. SPECIES
#### Status
ACCEPTED
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null
|
Markdown
|
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.util.prop;
import ghidra.util.*;
import java.io.*;
/**
* Handles general storage and retrieval of saveable objects indexed by long
* keys.
*
*/
public class SaveableObjectPropertySet extends PropertySet {
private final static long serialVersionUID = 1;
/**
* Constructor for SaveableObjectPropertySet.
* @param name the name associated with this property set.
*/
public SaveableObjectPropertySet(String name, Class<?> objectClass) {
super(name, objectClass);
if (!Saveable.class.isAssignableFrom(objectClass)) {
throw new IllegalArgumentException("Class "+objectClass+
"does not implement the Saveable interface");
}
try {
objectClass.newInstance();
} catch(Exception e) {
throw new IllegalArgumentException("Class "+objectClass+
"must be public and have a public, no args, constructor");
}
}
/**
* @see PropertySet#getDataSize()
*/
@Override
public int getDataSize() {
return 20;
}
/**
* Stores a saveable object at the given index. Any object currently at
* that index will be replaced by the new object.
* @param index the index at which to store the saveable object.
* @param value the saveable object to store.
*/
public void putObject(long index, Saveable value) {
PropertyPage page = getOrCreatePage(getPageID(index));
int n = page.getSize();
page.addSaveableObject(getPageOffset(index), value);
numProperties += page.getSize() - n;
}
/**
* Retrieves the saveable object stored at the given index.
* @param index the index at which to retrieve the saveable object.
* @return the saveable object stored at the given index or null if no
* object is stored at the index.
*/
public Saveable getObject(long index) {
PropertyPage page = getPage(getPageID(index));
if (page != null) {
return page.getSaveableObject(getPageOffset(index));
}
return null;
}
/* (non-Javadoc)
* @see ghidra.util.prop.PropertySet#moveIndex(long, long)
*/
@Override
protected void moveIndex(long from, long to) {
Saveable value = getObject(from);
remove(from);
putObject(to, value);
}
/**
* saves the property at the given index to the given output stream.
*/
@Override
protected void saveProperty(ObjectOutputStream oos, long index) throws IOException {
Saveable obj = getObject(index);
oos.writeObject(obj.getClass().getName());
obj.save(new ObjectStorageStreamAdapter(oos));
}
/**
* restores the property from the input stream to the given index.
*/
@Override
protected void restoreProperty(ObjectInputStream ois, long index)
throws IOException, ClassNotFoundException {
try {
String className = (String)ois.readObject();
Class<?> c = Class.forName(className);
Saveable obj = (Saveable)c.newInstance();
obj.restore(new ObjectStorageStreamAdapter(ois));
putObject(index, obj);
} catch (Exception e) {
Msg.showError(this, null, null, null, e);
}
}
/**
*
* @see ghidra.util.prop.PropertySet#applyValue(PropertyVisitor, long)
*/
@Override
public void applyValue(PropertyVisitor visitor, long addr) {
Saveable obj = getObject(addr);
if (obj != null) {
visitor.visit(obj);
}
}
}
|
Java
|
using System;
using System.Collections.Generic;
using System.Linq;
using System.Net;
using System.Text.RegularExpressions;
using HtmlAgilityPack;
namespace Html2Markdown.Replacement
{
internal static class HtmlParser
{
private static readonly Regex NoChildren = new Regex(@"<(ul|ol)\b[^>]*>(?:(?!<ul|<ol)[\s\S])*?<\/\1>");
internal static string ReplaceLists(string html)
{
var finalHtml = html;
while (HasNoChildLists(finalHtml))
{
var listToReplace = NoChildren.Match(finalHtml).Value;
var formattedList = ReplaceList(listToReplace);
finalHtml = finalHtml.Replace(listToReplace, formattedList);
}
return finalHtml;
}
private static string ReplaceList(string html)
{
var list = Regex.Match(html, @"<(ul|ol)\b[^>]*>([\s\S]*?)<\/\1>");
var listType = list.Groups[1].Value;
var listItems = Regex.Split(list.Groups[2].Value, "<li[^>]*>");
if(listItems.All(string.IsNullOrEmpty))
{
return String.Empty;
}
listItems = listItems.Skip(1).ToArray();
var counter = 0;
var markdownList = new List<string>();
listItems.ToList().ForEach(listItem =>
{
var listPrefix = (listType.Equals("ol")) ? $"{++counter}. " : "* ";
var finalList = listItem.Replace(@"</li>", string.Empty);
if (finalList.Trim().Length == 0) {
return;
}
finalList = Regex.Replace(finalList, @"^\s+", string.Empty);
finalList = Regex.Replace(finalList, @"\n{2}", $"{Environment.NewLine}{Environment.NewLine} ");
// indent nested lists
finalList = Regex.Replace(finalList, @"\n([ ]*)+(\*|\d+\.)", "\n$1 $2");
markdownList.Add($"{listPrefix}{finalList}");
});
return Environment.NewLine + Environment.NewLine + markdownList.Aggregate((current, item) => current + Environment.NewLine + item);
}
private static bool HasNoChildLists(string html)
{
return NoChildren.Match(html).Success;
}
internal static string ReplacePre(string html)
{
var doc = GetHtmlDocument(html);
var nodes = doc.DocumentNode.SelectNodes("//pre");
if (nodes == null) {
return html;
}
nodes.ToList().ForEach(node =>
{
var tagContents = node.InnerHtml;
var markdown = ConvertPre(tagContents);
ReplaceNode(node, markdown);
});
return doc.DocumentNode.OuterHtml;
}
private static string ConvertPre(string html)
{
var tag = TabsToSpaces(html);
tag = IndentNewLines(tag);
return Environment.NewLine + Environment.NewLine + tag + Environment.NewLine;
}
private static string IndentNewLines(string tag)
{
return tag.Replace(Environment.NewLine, Environment.NewLine + " ");
}
private static string TabsToSpaces(string tag)
{
return tag.Replace("\t", " ");
}
internal static string ReplaceImg(string html)
{
var doc = GetHtmlDocument(html);
var nodes = doc.DocumentNode.SelectNodes("//img");
if (nodes == null) {
return html;
}
nodes.ToList().ForEach(node =>
{
var src = node.Attributes.GetAttributeOrEmpty("src");
var alt = node.Attributes.GetAttributeOrEmpty("alt");
var title = node.Attributes.GetAttributeOrEmpty("title");
var markdown = $@" ? $" \"{title}\"" : "")})";
ReplaceNode(node, markdown);
});
return doc.DocumentNode.OuterHtml;
}
public static string ReplaceAnchor(string html)
{
var doc = GetHtmlDocument(html);
var nodes = doc.DocumentNode.SelectNodes("//a");
if (nodes == null) {
return html;
}
nodes.ToList().ForEach(node =>
{
var linkText = node.InnerHtml;
var href = node.Attributes.GetAttributeOrEmpty("href");
var title = node.Attributes.GetAttributeOrEmpty("title");
var markdown = "";
if (!IsEmptyLink(linkText, href))
{
markdown = $@"[{linkText}]({href}{((title.Length > 0) ? $" \"{title}\"" : "")})";
}
ReplaceNode(node, markdown);
});
return doc.DocumentNode.OuterHtml;
}
public static string ReplaceCode(string html)
{
var finalHtml = html;
var doc = GetHtmlDocument(finalHtml);
var nodes = doc.DocumentNode.SelectNodes("//code");
if (nodes == null) {
return finalHtml;
}
nodes.ToList().ForEach(node =>
{
var code = node.InnerHtml;
string markdown;
if(IsSingleLineCodeBlock(code))
{
markdown = "`" + code + "`";
}
else
{
markdown = ReplaceBreakTagsWithNewLines(code);
markdown = Regex.Replace(markdown, "^\r\n", "");
markdown = Regex.Replace(markdown, "\r\n$", "");
markdown = "```" + Environment.NewLine + markdown + Environment.NewLine + "```";
}
ReplaceNode(node, markdown);
});
return doc.DocumentNode.OuterHtml;
}
private static string ReplaceBreakTagsWithNewLines(string code)
{
return Regex.Replace(code, "<\\s*?/?\\s*?br\\s*?>", "");
}
private static bool IsSingleLineCodeBlock(string code)
{
// single line code blocks do not have new line characters
return code.IndexOf(Environment.NewLine, StringComparison.Ordinal) == -1;
}
public static string ReplaceBlockquote(string html)
{
var doc = GetHtmlDocument(html);
var nodes = doc.DocumentNode.SelectNodes("//blockquote");
if (nodes == null) {
return html;
}
nodes.ToList().ForEach(node =>
{
var quote = node.InnerHtml;
var lines = quote.TrimStart().Split(new[] { Environment.NewLine }, StringSplitOptions.None);
var markdown = "";
lines.ToList().ForEach(line =>
{
markdown += $"> {line.TrimEnd()}{Environment.NewLine}";
});
markdown = Regex.Replace(markdown, @"(>\s\r\n)+$", "");
markdown = Environment.NewLine + Environment.NewLine + markdown + Environment.NewLine + Environment.NewLine;
ReplaceNode(node, markdown);
});
return doc.DocumentNode.OuterHtml;
}
public static string ReplaceEntites(string html)
{
return WebUtility.HtmlDecode(html);
}
public static string ReplaceParagraph(string html)
{
var doc = GetHtmlDocument(html);
var nodes = doc.DocumentNode.SelectNodes("//p");
if (nodes == null) {
return html;
}
nodes.ToList().ForEach(node =>
{
var text = node.InnerHtml;
var markdown = Regex.Replace(text, @"\s+", " ");
markdown = markdown.Replace(Environment.NewLine, " ");
markdown = Environment.NewLine + Environment.NewLine + markdown + Environment.NewLine;
ReplaceNode(node, markdown);
});
return doc.DocumentNode.OuterHtml;
}
private static bool IsEmptyLink(string linkText, string href)
{
var length = linkText.Length + href.Length;
return length == 0;
}
private static HtmlDocument GetHtmlDocument(string html)
{
var doc = new HtmlDocument();
doc.LoadHtml(html);
return doc;
}
private static void ReplaceNode(HtmlNode node, string markdown)
{
if (string.IsNullOrEmpty(markdown))
{
node.ParentNode.RemoveChild(node);
}
else
{
node.ReplaceNodeWithString(markdown);
}
}
}
}
|
C#
|
"""api_server URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
version = 'v1.0'
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'api/%s/' % version, include('apis.urls'))
]
|
Python
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.seda;
import org.apache.camel.CamelExecutionException;
import org.apache.camel.ContextTestSupport;
import org.apache.camel.builder.RouteBuilder;
/**
* @version
*/
public class SedaInOutWithErrorDeadLetterChannelTest extends ContextTestSupport {
public void testInOutWithErrorUsingDLC() throws Exception {
getMockEndpoint("mock:result").expectedMessageCount(0);
getMockEndpoint("mock:dead").expectedMessageCount(1);
try {
template.requestBody("direct:start", "Hello World", String.class);
fail("Should have thrown an exception");
} catch (CamelExecutionException e) {
assertIsInstanceOf(IllegalArgumentException.class, e.getCause());
assertEquals("Damn I cannot do this", e.getCause().getMessage());
}
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() throws Exception {
return new RouteBuilder() {
@Override
public void configure() throws Exception {
errorHandler(deadLetterChannel("mock:dead").maximumRedeliveries(2).redeliveryDelay(0).handled(false));
from("direct:start").to("seda:foo");
from("seda:foo").transform(constant("Bye World"))
.throwException(new IllegalArgumentException("Damn I cannot do this"))
.to("mock:result");
}
};
}
}
|
Java
|
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package coniks provides hashing for maps.
package coniks
import (
"bytes"
"crypto"
"encoding/binary"
"fmt"
"github.com/golang/glog"
"github.com/google/trillian"
"github.com/google/trillian/merkle/hashers"
)
func init() {
hashers.RegisterMapHasher(trillian.HashStrategy_CONIKS_SHA512_256, Default)
hashers.RegisterMapHasher(trillian.HashStrategy_CONIKS_SHA256, New(crypto.SHA256))
}
// Domain separation prefixes
var (
leafIdentifier = []byte("L")
emptyIdentifier = []byte("E")
// Default is the standard CONIKS hasher.
Default = New(crypto.SHA512_256)
// Some zeroes, to avoid allocating temporary slices.
zeroes = make([]byte, 32)
)
// hasher implements the sparse merkle tree hashing algorithm specified in the CONIKS paper.
type hasher struct {
crypto.Hash
}
// New creates a new hashers.TreeHasher using the passed in hash function.
func New(h crypto.Hash) hashers.MapHasher {
return &hasher{Hash: h}
}
// EmptyRoot returns the root of an empty tree.
func (m *hasher) EmptyRoot() []byte {
panic("EmptyRoot() not defined for coniks.Hasher")
}
// HashEmpty returns the hash of an empty branch at a given height.
// A height of 0 indicates the hash of an empty leaf.
// Empty branches within the tree are plain interior nodes e1 = H(e0, e0) etc.
func (m *hasher) HashEmpty(treeID int64, index []byte, height int) []byte {
depth := m.BitLen() - height
buf := bytes.NewBuffer(make([]byte, 0, 32))
h := m.New()
buf.Write(emptyIdentifier)
binary.Write(buf, binary.BigEndian, uint64(treeID))
m.writeMaskedIndex(buf, index, depth)
binary.Write(buf, binary.BigEndian, uint32(depth))
h.Write(buf.Bytes())
r := h.Sum(nil)
if glog.V(5) {
glog.Infof("HashEmpty(%x, %d): %x", index, depth, r)
}
return r
}
// HashLeaf calculates the merkle tree leaf value:
// H(Identifier || treeID || depth || index || dataHash)
func (m *hasher) HashLeaf(treeID int64, index []byte, leaf []byte) []byte {
depth := m.BitLen()
buf := bytes.NewBuffer(make([]byte, 0, 32+len(leaf)))
h := m.New()
buf.Write(leafIdentifier)
binary.Write(buf, binary.BigEndian, uint64(treeID))
m.writeMaskedIndex(buf, index, depth)
binary.Write(buf, binary.BigEndian, uint32(depth))
buf.Write(leaf)
h.Write(buf.Bytes())
p := h.Sum(nil)
if glog.V(5) {
glog.Infof("HashLeaf(%x, %d, %s): %x", index, depth, leaf, p)
}
return p
}
// HashChildren returns the internal Merkle tree node hash of the two child nodes l and r.
// The hashed structure is H(l || r).
func (m *hasher) HashChildren(l, r []byte) []byte {
buf := bytes.NewBuffer(make([]byte, 0, 32+len(l)+len(r)))
h := m.New()
buf.Write(l)
buf.Write(r)
h.Write(buf.Bytes())
p := h.Sum(nil)
if glog.V(5) {
glog.Infof("HashChildren(%x, %x): %x", l, r, p)
}
return p
}
// BitLen returns the number of bits in the hash function.
func (m *hasher) BitLen() int {
return m.Size() * 8
}
// leftmask contains bitmasks indexed such that the left x bits are set. It is
// indexed by byte position from 0-7; 0 is special cased to 0xFF since 8 mod 8
// is 0. leftmask is only used to mask the last byte.
var leftmask = [8]byte{0xFF, 0x80, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC, 0xFE}
// writeMaskedIndex writes the left depth bits of index directly to a Buffer (which never
// returns an error on writes). This is then padded with zero bits to the Size()
// of the index values in use by this hasher. This avoids the need to allocate
// space for and copy a value that will then be discarded immediately.
func (m *hasher) writeMaskedIndex(b *bytes.Buffer, index []byte, depth int) {
if got, want := len(index), m.Size(); got != want {
panic(fmt.Sprintf("index len: %d, want %d", got, want))
}
if got, want := depth, m.BitLen(); got < 0 || got > want {
panic(fmt.Sprintf("depth: %d, want <= %d && >= 0", got, want))
}
prevLen := b.Len()
if depth > 0 {
// Write the first depthBytes, if there are any complete bytes.
depthBytes := depth >> 3
if depthBytes > 0 {
b.Write(index[:depthBytes])
}
// Mask off unwanted bits in the last byte, if there is an incomplete one.
if depth%8 != 0 {
b.WriteByte(index[depthBytes] & leftmask[depth%8])
}
}
// Pad to the correct length with zeros. Allow for future hashers that
// might be > 256 bits.
needZeros := prevLen + len(index) - b.Len()
for needZeros > 0 {
chunkSize := needZeros
if chunkSize > 32 {
chunkSize = 32
}
b.Write(zeroes[:chunkSize])
needZeros -= chunkSize
}
}
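// --- Standalone illustrative sketch (separate from the package above) ---
// HashLeaf above builds H(identifier || treeID || maskedIndex || depth || leaf)
// through a bytes.Buffer before hashing. Assuming only the standard library
// (no trillian types), the same domain-separated construction can be streamed
// directly into the hash, since hash.Hash is an io.Writer. This is a sketch of
// the technique, not the trillian API.
package main

import (
	"crypto/sha512"
	"encoding/binary"
	"fmt"
)

// hashLeafSketch mimics the CONIKS-style leaf hash for a full-depth index:
// prefix "L", big-endian treeID, the index bytes, the depth in bits, the leaf.
func hashLeafSketch(treeID int64, index, leaf []byte) []byte {
	h := sha512.New512_256()
	h.Write([]byte("L"))                                    // leaf domain-separation prefix
	binary.Write(h, binary.BigEndian, uint64(treeID))       // tree identifier
	h.Write(index)                                          // full (unmasked) index
	binary.Write(h, binary.BigEndian, uint32(len(index)*8)) // depth in bits
	h.Write(leaf)                                           // leaf data
	return h.Sum(nil)
}

func main() {
	index := make([]byte, sha512.Size256) // a 256-bit map index, as in the Default hasher
	fmt.Printf("leaf hash: %x\n", hashLeafSketch(1, index, []byte("hello world")))
}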
|
Go
|
/*
* Copyright 2010-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
/*
* Do not modify this file. This file is generated from the waf-2015-08-24.normal.json service model.
*/
using System;
using System.Collections.Generic;
using System.Globalization;
using System.IO;
using System.Net;
using System.Text;
using System.Xml.Serialization;
using Amazon.WAF.Model;
using Amazon.Runtime;
using Amazon.Runtime.Internal;
using Amazon.Runtime.Internal.Transform;
using Amazon.Runtime.Internal.Util;
using ThirdParty.Json.LitJson;
namespace Amazon.WAF.Model.Internal.MarshallTransformations
{
/// <summary>
/// Response Unmarshaller for RuleSummary Object
/// </summary>
public class RuleSummaryUnmarshaller : IUnmarshaller<RuleSummary, XmlUnmarshallerContext>, IUnmarshaller<RuleSummary, JsonUnmarshallerContext>
{
/// <summary>
/// Unmarshalls the response from the service to the response class.
/// </summary>
/// <param name="context"></param>
/// <returns></returns>
RuleSummary IUnmarshaller<RuleSummary, XmlUnmarshallerContext>.Unmarshall(XmlUnmarshallerContext context)
{
throw new NotImplementedException();
}
/// <summary>
/// Unmarshalls the response from the service to the response class.
/// </summary>
/// <param name="context"></param>
/// <returns></returns>
public RuleSummary Unmarshall(JsonUnmarshallerContext context)
{
context.Read();
if (context.CurrentTokenType == JsonToken.Null)
return null;
RuleSummary unmarshalledObject = new RuleSummary();
int targetDepth = context.CurrentDepth;
while (context.ReadAtDepth(targetDepth))
{
if (context.TestExpression("Name", targetDepth))
{
var unmarshaller = StringUnmarshaller.Instance;
unmarshalledObject.Name = unmarshaller.Unmarshall(context);
continue;
}
if (context.TestExpression("RuleId", targetDepth))
{
var unmarshaller = StringUnmarshaller.Instance;
unmarshalledObject.RuleId = unmarshaller.Unmarshall(context);
continue;
}
}
return unmarshalledObject;
}
private static RuleSummaryUnmarshaller _instance = new RuleSummaryUnmarshaller();
/// <summary>
/// Gets the singleton.
/// </summary>
public static RuleSummaryUnmarshaller Instance
{
get
{
return _instance;
}
}
}
}
|
C#
|
# Contributing guidelines
## How to become a contributor and submit your own code
### Contributor License Agreements
We'd love to accept your patches! Before we can take them, we have to jump a couple of legal hurdles.
Please fill out either the individual or corporate Contributor License Agreement (CLA).
* If you are an individual writing original source code and you're sure you own the intellectual property, then you'll need to sign an [individual CLA](http://code.google.com/legal/individual-cla-v1.0.html).
* If you work for a company that wants to allow you to contribute your work, then you'll need to sign a [corporate CLA](http://code.google.com/legal/corporate-cla-v1.0.html).
Follow either of the two links above to access the appropriate CLA and instructions for how to sign and return it. Once we receive it, we'll be able to accept your pull requests.
***NOTE***: Only original source code from you and other people that have signed the CLA can be accepted into the main repository.
### Contributing code
If you have improvements to Copybara, send us your pull requests!
|
Markdown
|
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package endpoints
import (
"fmt"
"net/http"
gpath "path"
"reflect"
"sort"
"strings"
"time"
"unicode"
restful "github.com/emicklei/go-restful"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/endpoints/discovery"
"k8s.io/apiserver/pkg/endpoints/handlers"
"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager"
"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
"k8s.io/apiserver/pkg/endpoints/metrics"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/registry/rest"
genericfilters "k8s.io/apiserver/pkg/server/filters"
utilfeature "k8s.io/apiserver/pkg/util/feature"
)
const (
ROUTE_META_GVK = "x-kubernetes-group-version-kind"
ROUTE_META_ACTION = "x-kubernetes-action"
)
type APIInstaller struct {
group *APIGroupVersion
prefix string // Path prefix where API resources are to be registered.
minRequestTimeout time.Duration
enableAPIResponseCompression bool
}
// Struct capturing information about an action ("GET", "POST", "WATCH", "PROXY", etc).
type action struct {
Verb string // Verb identifying the action ("GET", "POST", "WATCH", "PROXY", etc).
Path string // The path of the action
Params []*restful.Parameter // List of parameters associated with the action.
Namer handlers.ScopeNamer
AllNamespaces bool // true iff the action is namespaced but works on aggregate result for all namespaces
}
// An interface to see if one storage supports overriding its default verb for monitoring
type StorageMetricsOverride interface {
// OverrideMetricsVerb gives a storage object an opportunity to override the verb reported to the metrics endpoint
OverrideMetricsVerb(oldVerb string) (newVerb string)
}
// An interface to see if an object supports swagger documentation as a method
type documentable interface {
SwaggerDoc() map[string]string
}
// toDiscoveryKubeVerb maps an action.Verb to the logical kube verb, used for discovery
var toDiscoveryKubeVerb = map[string]string{
"CONNECT": "", // do not list in discovery.
"DELETE": "delete",
"DELETECOLLECTION": "deletecollection",
"GET": "get",
"LIST": "list",
"PATCH": "patch",
"POST": "create",
"PROXY": "proxy",
"PUT": "update",
"WATCH": "watch",
"WATCHLIST": "watch",
}
// Install handlers for API resources.
func (a *APIInstaller) Install() ([]metav1.APIResource, *restful.WebService, []error) {
var apiResources []metav1.APIResource
var errors []error
ws := a.newWebService()
// Register the paths in a deterministic (sorted) order to get a deterministic swagger spec.
paths := make([]string, len(a.group.Storage))
var i int = 0
for path := range a.group.Storage {
paths[i] = path
i++
}
sort.Strings(paths)
for _, path := range paths {
apiResource, err := a.registerResourceHandlers(path, a.group.Storage[path], ws)
if err != nil {
errors = append(errors, fmt.Errorf("error in registering resource: %s, %v", path, err))
}
if apiResource != nil {
apiResources = append(apiResources, *apiResource)
}
}
return apiResources, ws, errors
}
// newWebService creates a new restful webservice with the api installer's prefix and version.
func (a *APIInstaller) newWebService() *restful.WebService {
ws := new(restful.WebService)
ws.Path(a.prefix)
// a.prefix contains "prefix/group/version"
ws.Doc("API at " + a.prefix)
// Backwards compatibility, we accepted objects with empty content-type at V1.
// If we stop using go-restful, we can default empty content-type to application/json on an
// endpoint by endpoint basis
ws.Consumes("*/*")
mediaTypes, streamMediaTypes := negotiation.MediaTypesForSerializer(a.group.Serializer)
ws.Produces(append(mediaTypes, streamMediaTypes...)...)
ws.ApiVersion(a.group.GroupVersion.String())
return ws
}
// calculate the storage gvk, the gvk objects are converted to before persisted to the etcd.
func getStorageVersionKind(storageVersioner runtime.GroupVersioner, storage rest.Storage, typer runtime.ObjectTyper) (schema.GroupVersionKind, error) {
object := storage.New()
fqKinds, _, err := typer.ObjectKinds(object)
if err != nil {
return schema.GroupVersionKind{}, err
}
gvk, ok := storageVersioner.KindForGroupVersionKinds(fqKinds)
if !ok {
return schema.GroupVersionKind{}, fmt.Errorf("cannot find the storage version kind for %v", reflect.TypeOf(object))
}
return gvk, nil
}
// GetResourceKind returns the external group version kind registered for the given storage
// object. If the storage object is a subresource and has an override supplied for it, it returns
// the group version kind supplied in the override.
func GetResourceKind(groupVersion schema.GroupVersion, storage rest.Storage, typer runtime.ObjectTyper) (schema.GroupVersionKind, error) {
// Let the storage tell us exactly what GVK it has
if gvkProvider, ok := storage.(rest.GroupVersionKindProvider); ok {
return gvkProvider.GroupVersionKind(groupVersion), nil
}
object := storage.New()
fqKinds, _, err := typer.ObjectKinds(object)
if err != nil {
return schema.GroupVersionKind{}, err
}
// a given go type can have multiple potential fully qualified kinds. Find the one that corresponds with the group
// we're trying to register here
fqKindToRegister := schema.GroupVersionKind{}
for _, fqKind := range fqKinds {
if fqKind.Group == groupVersion.Group {
fqKindToRegister = groupVersion.WithKind(fqKind.Kind)
break
}
}
if fqKindToRegister.Empty() {
return schema.GroupVersionKind{}, fmt.Errorf("unable to locate fully qualified kind for %v: found %v when registering for %v", reflect.TypeOf(object), fqKinds, groupVersion)
}
// group is guaranteed to match based on the check above
return fqKindToRegister, nil
}
func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storage, ws *restful.WebService) (*metav1.APIResource, error) {
admit := a.group.Admit
optionsExternalVersion := a.group.GroupVersion
if a.group.OptionsExternalVersion != nil {
optionsExternalVersion = *a.group.OptionsExternalVersion
}
resource, subresource, err := splitSubresource(path)
if err != nil {
return nil, err
}
group, version := a.group.GroupVersion.Group, a.group.GroupVersion.Version
fqKindToRegister, err := GetResourceKind(a.group.GroupVersion, storage, a.group.Typer)
if err != nil {
return nil, err
}
versionedPtr, err := a.group.Creater.New(fqKindToRegister)
if err != nil {
return nil, err
}
defaultVersionedObject := indirectArbitraryPointer(versionedPtr)
kind := fqKindToRegister.Kind
isSubresource := len(subresource) > 0
// If there is a subresource, namespace scoping is defined by the parent resource
namespaceScoped := true
if isSubresource {
parentStorage, ok := a.group.Storage[resource]
if !ok {
return nil, fmt.Errorf("missing parent storage: %q", resource)
}
scoper, ok := parentStorage.(rest.Scoper)
if !ok {
return nil, fmt.Errorf("%q must implement scoper", resource)
}
namespaceScoped = scoper.NamespaceScoped()
} else {
scoper, ok := storage.(rest.Scoper)
if !ok {
return nil, fmt.Errorf("%q must implement scoper", resource)
}
namespaceScoped = scoper.NamespaceScoped()
}
// what verbs are supported by the storage, used to know what verbs we support per path
creater, isCreater := storage.(rest.Creater)
namedCreater, isNamedCreater := storage.(rest.NamedCreater)
lister, isLister := storage.(rest.Lister)
getter, isGetter := storage.(rest.Getter)
getterWithOptions, isGetterWithOptions := storage.(rest.GetterWithOptions)
gracefulDeleter, isGracefulDeleter := storage.(rest.GracefulDeleter)
collectionDeleter, isCollectionDeleter := storage.(rest.CollectionDeleter)
updater, isUpdater := storage.(rest.Updater)
patcher, isPatcher := storage.(rest.Patcher)
watcher, isWatcher := storage.(rest.Watcher)
connecter, isConnecter := storage.(rest.Connecter)
storageMeta, isMetadata := storage.(rest.StorageMetadata)
storageVersionProvider, isStorageVersionProvider := storage.(rest.StorageVersionProvider)
if !isMetadata {
storageMeta = defaultStorageMetadata{}
}
exporter, isExporter := storage.(rest.Exporter)
if !isExporter {
exporter = nil
}
versionedExportOptions, err := a.group.Creater.New(optionsExternalVersion.WithKind("ExportOptions"))
if err != nil {
return nil, err
}
if isNamedCreater {
isCreater = true
}
var versionedList interface{}
if isLister {
list := lister.NewList()
listGVKs, _, err := a.group.Typer.ObjectKinds(list)
if err != nil {
return nil, err
}
versionedListPtr, err := a.group.Creater.New(a.group.GroupVersion.WithKind(listGVKs[0].Kind))
if err != nil {
return nil, err
}
versionedList = indirectArbitraryPointer(versionedListPtr)
}
versionedListOptions, err := a.group.Creater.New(optionsExternalVersion.WithKind("ListOptions"))
if err != nil {
return nil, err
}
versionedCreateOptions, err := a.group.Creater.New(optionsExternalVersion.WithKind("CreateOptions"))
if err != nil {
return nil, err
}
versionedPatchOptions, err := a.group.Creater.New(optionsExternalVersion.WithKind("PatchOptions"))
if err != nil {
return nil, err
}
versionedUpdateOptions, err := a.group.Creater.New(optionsExternalVersion.WithKind("UpdateOptions"))
if err != nil {
return nil, err
}
var versionedDeleteOptions runtime.Object
var versionedDeleterObject interface{}
if isGracefulDeleter {
versionedDeleteOptions, err = a.group.Creater.New(optionsExternalVersion.WithKind("DeleteOptions"))
if err != nil {
return nil, err
}
versionedDeleterObject = indirectArbitraryPointer(versionedDeleteOptions)
}
versionedStatusPtr, err := a.group.Creater.New(optionsExternalVersion.WithKind("Status"))
if err != nil {
return nil, err
}
versionedStatus := indirectArbitraryPointer(versionedStatusPtr)
var (
getOptions runtime.Object
versionedGetOptions runtime.Object
getOptionsInternalKind schema.GroupVersionKind
getSubpath bool
)
if isGetterWithOptions {
getOptions, getSubpath, _ = getterWithOptions.NewGetOptions()
getOptionsInternalKinds, _, err := a.group.Typer.ObjectKinds(getOptions)
if err != nil {
return nil, err
}
getOptionsInternalKind = getOptionsInternalKinds[0]
versionedGetOptions, err = a.group.Creater.New(a.group.GroupVersion.WithKind(getOptionsInternalKind.Kind))
if err != nil {
versionedGetOptions, err = a.group.Creater.New(optionsExternalVersion.WithKind(getOptionsInternalKind.Kind))
if err != nil {
return nil, err
}
}
isGetter = true
}
var versionedWatchEvent interface{}
if isWatcher {
versionedWatchEventPtr, err := a.group.Creater.New(a.group.GroupVersion.WithKind("WatchEvent"))
if err != nil {
return nil, err
}
versionedWatchEvent = indirectArbitraryPointer(versionedWatchEventPtr)
}
var (
connectOptions runtime.Object
versionedConnectOptions runtime.Object
connectOptionsInternalKind schema.GroupVersionKind
connectSubpath bool
)
if isConnecter {
connectOptions, connectSubpath, _ = connecter.NewConnectOptions()
if connectOptions != nil {
connectOptionsInternalKinds, _, err := a.group.Typer.ObjectKinds(connectOptions)
if err != nil {
return nil, err
}
connectOptionsInternalKind = connectOptionsInternalKinds[0]
versionedConnectOptions, err = a.group.Creater.New(a.group.GroupVersion.WithKind(connectOptionsInternalKind.Kind))
if err != nil {
versionedConnectOptions, err = a.group.Creater.New(optionsExternalVersion.WithKind(connectOptionsInternalKind.Kind))
if err != nil {
return nil, err
}
}
}
}
allowWatchList := isWatcher && isLister // watching on lists is allowed only for kinds that support both watch and list.
nameParam := ws.PathParameter("name", "name of the "+kind).DataType("string")
pathParam := ws.PathParameter("path", "path to the resource").DataType("string")
params := []*restful.Parameter{}
actions := []action{}
var resourceKind string
kindProvider, ok := storage.(rest.KindProvider)
if ok {
resourceKind = kindProvider.Kind()
} else {
resourceKind = kind
}
tableProvider, _ := storage.(rest.TableConvertor)
var apiResource metav1.APIResource
if utilfeature.DefaultFeatureGate.Enabled(features.StorageVersionHash) &&
isStorageVersionProvider &&
storageVersionProvider.StorageVersion() != nil {
versioner := storageVersionProvider.StorageVersion()
gvk, err := getStorageVersionKind(versioner, storage, a.group.Typer)
if err != nil {
return nil, err
}
apiResource.StorageVersionHash = discovery.StorageVersionHash(gvk.Group, gvk.Version, gvk.Kind)
}
// Get the list of actions for the given scope.
switch {
case !namespaceScoped:
// Handle non-namespace scoped resources like nodes.
resourcePath := resource
resourceParams := params
itemPath := resourcePath + "/{name}"
nameParams := append(params, nameParam)
proxyParams := append(nameParams, pathParam)
suffix := ""
if isSubresource {
suffix = "/" + subresource
itemPath = itemPath + suffix
resourcePath = itemPath
resourceParams = nameParams
}
apiResource.Name = path
apiResource.Namespaced = false
apiResource.Kind = resourceKind
namer := handlers.ContextBasedNaming{
SelfLinker: a.group.Linker,
ClusterScoped: true,
SelfLinkPathPrefix: gpath.Join(a.prefix, resource) + "/",
SelfLinkPathSuffix: suffix,
}
// Handler for standard REST verbs (GET, PUT, POST and DELETE).
// Add actions at the resource path: /api/apiVersion/resource
actions = appendIf(actions, action{"LIST", resourcePath, resourceParams, namer, false}, isLister)
actions = appendIf(actions, action{"POST", resourcePath, resourceParams, namer, false}, isCreater)
actions = appendIf(actions, action{"DELETECOLLECTION", resourcePath, resourceParams, namer, false}, isCollectionDeleter)
// DEPRECATED in 1.11
actions = appendIf(actions, action{"WATCHLIST", "watch/" + resourcePath, resourceParams, namer, false}, allowWatchList)
// Add actions at the item path: /api/apiVersion/resource/{name}
actions = appendIf(actions, action{"GET", itemPath, nameParams, namer, false}, isGetter)
if getSubpath {
actions = appendIf(actions, action{"GET", itemPath + "/{path:*}", proxyParams, namer, false}, isGetter)
}
actions = appendIf(actions, action{"PUT", itemPath, nameParams, namer, false}, isUpdater)
actions = appendIf(actions, action{"PATCH", itemPath, nameParams, namer, false}, isPatcher)
actions = appendIf(actions, action{"DELETE", itemPath, nameParams, namer, false}, isGracefulDeleter)
// DEPRECATED in 1.11
actions = appendIf(actions, action{"WATCH", "watch/" + itemPath, nameParams, namer, false}, isWatcher)
actions = appendIf(actions, action{"CONNECT", itemPath, nameParams, namer, false}, isConnecter)
actions = appendIf(actions, action{"CONNECT", itemPath + "/{path:*}", proxyParams, namer, false}, isConnecter && connectSubpath)
default:
namespaceParamName := "namespaces"
// Handler for standard REST verbs (GET, PUT, POST and DELETE).
namespaceParam := ws.PathParameter("namespace", "object name and auth scope, such as for teams and projects").DataType("string")
namespacedPath := namespaceParamName + "/{namespace}/" + resource
namespaceParams := []*restful.Parameter{namespaceParam}
resourcePath := namespacedPath
resourceParams := namespaceParams
itemPath := namespacedPath + "/{name}"
nameParams := append(namespaceParams, nameParam)
proxyParams := append(nameParams, pathParam)
itemPathSuffix := ""
if isSubresource {
itemPathSuffix = "/" + subresource
itemPath = itemPath + itemPathSuffix
resourcePath = itemPath
resourceParams = nameParams
}
apiResource.Name = path
apiResource.Namespaced = true
apiResource.Kind = resourceKind
namer := handlers.ContextBasedNaming{
SelfLinker: a.group.Linker,
ClusterScoped: false,
SelfLinkPathPrefix: gpath.Join(a.prefix, namespaceParamName) + "/",
SelfLinkPathSuffix: itemPathSuffix,
}
actions = appendIf(actions, action{"LIST", resourcePath, resourceParams, namer, false}, isLister)
actions = appendIf(actions, action{"POST", resourcePath, resourceParams, namer, false}, isCreater)
actions = appendIf(actions, action{"DELETECOLLECTION", resourcePath, resourceParams, namer, false}, isCollectionDeleter)
// DEPRECATED in 1.11
actions = appendIf(actions, action{"WATCHLIST", "watch/" + resourcePath, resourceParams, namer, false}, allowWatchList)
actions = appendIf(actions, action{"GET", itemPath, nameParams, namer, false}, isGetter)
if getSubpath {
actions = appendIf(actions, action{"GET", itemPath + "/{path:*}", proxyParams, namer, false}, isGetter)
}
actions = appendIf(actions, action{"PUT", itemPath, nameParams, namer, false}, isUpdater)
actions = appendIf(actions, action{"PATCH", itemPath, nameParams, namer, false}, isPatcher)
actions = appendIf(actions, action{"DELETE", itemPath, nameParams, namer, false}, isGracefulDeleter)
// DEPRECATED in 1.11
actions = appendIf(actions, action{"WATCH", "watch/" + itemPath, nameParams, namer, false}, isWatcher)
actions = appendIf(actions, action{"CONNECT", itemPath, nameParams, namer, false}, isConnecter)
actions = appendIf(actions, action{"CONNECT", itemPath + "/{path:*}", proxyParams, namer, false}, isConnecter && connectSubpath)
// list or post across namespace.
// For ex: LIST all pods in all namespaces by sending a LIST request at /api/apiVersion/pods.
// TODO: more strongly type whether a resource allows these actions on "all namespaces" (bulk delete)
if !isSubresource {
actions = appendIf(actions, action{"LIST", resource, params, namer, true}, isLister)
// DEPRECATED in 1.11
actions = appendIf(actions, action{"WATCHLIST", "watch/" + resource, params, namer, true}, allowWatchList)
}
}
// Create Routes for the actions.
// TODO: Add status documentation using Returns()
// Errors (see api/errors/errors.go as well as go-restful router):
// http.StatusNotFound, http.StatusMethodNotAllowed,
// http.StatusUnsupportedMediaType, http.StatusNotAcceptable,
// http.StatusBadRequest, http.StatusUnauthorized, http.StatusForbidden,
// http.StatusRequestTimeout, http.StatusConflict, http.StatusPreconditionFailed,
// http.StatusUnprocessableEntity, http.StatusInternalServerError,
// http.StatusServiceUnavailable
// and api error codes
// Note that if we specify a versioned Status object here, we may need to
// create one for the tests, also
// Success:
// http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent
//
// test/integration/auth_test.go is currently the most comprehensive status code test
for _, s := range a.group.Serializer.SupportedMediaTypes() {
if len(s.MediaTypeSubType) == 0 || len(s.MediaTypeType) == 0 {
return nil, fmt.Errorf("all serializers in the group Serializer must have MediaTypeType and MediaTypeSubType set: %s", s.MediaType)
}
}
mediaTypes, streamMediaTypes := negotiation.MediaTypesForSerializer(a.group.Serializer)
allMediaTypes := append(mediaTypes, streamMediaTypes...)
ws.Produces(allMediaTypes...)
kubeVerbs := map[string]struct{}{}
reqScope := handlers.RequestScope{
Serializer: a.group.Serializer,
ParameterCodec: a.group.ParameterCodec,
Creater: a.group.Creater,
Convertor: a.group.Convertor,
Defaulter: a.group.Defaulter,
Typer: a.group.Typer,
UnsafeConvertor: a.group.UnsafeConvertor,
Authorizer: a.group.Authorizer,
EquivalentResourceMapper: a.group.EquivalentResourceRegistry,
// TODO: Check for the interface on storage
TableConvertor: tableProvider,
// TODO: This seems wrong for cross-group subresources. It makes an assumption that a subresource and its parent are in the same group version. Revisit this.
Resource: a.group.GroupVersion.WithResource(resource),
Subresource: subresource,
Kind: fqKindToRegister,
HubGroupVersion: schema.GroupVersion{Group: fqKindToRegister.Group, Version: runtime.APIVersionInternal},
MetaGroupVersion: metav1.SchemeGroupVersion,
MaxRequestBodyBytes: a.group.MaxRequestBodyBytes,
}
if a.group.MetaGroupVersion != nil {
reqScope.MetaGroupVersion = *a.group.MetaGroupVersion
}
if a.group.OpenAPIModels != nil && utilfeature.DefaultFeatureGate.Enabled(features.ServerSideApply) {
fm, err := fieldmanager.NewFieldManager(
a.group.OpenAPIModels,
a.group.UnsafeConvertor,
a.group.Defaulter,
fqKindToRegister.GroupVersion(),
reqScope.HubGroupVersion,
)
if err != nil {
return nil, fmt.Errorf("failed to create field manager: %v", err)
}
reqScope.FieldManager = fm
}
for _, action := range actions {
producedObject := storageMeta.ProducesObject(action.Verb)
if producedObject == nil {
producedObject = defaultVersionedObject
}
reqScope.Namer = action.Namer
requestScope := "cluster"
var namespaced string
var operationSuffix string
if apiResource.Namespaced {
requestScope = "namespace"
namespaced = "Namespaced"
}
if strings.HasSuffix(action.Path, "/{path:*}") {
requestScope = "resource"
operationSuffix = operationSuffix + "WithPath"
}
if action.AllNamespaces {
requestScope = "cluster"
operationSuffix = operationSuffix + "ForAllNamespaces"
namespaced = ""
}
if kubeVerb, found := toDiscoveryKubeVerb[action.Verb]; found {
if len(kubeVerb) != 0 {
kubeVerbs[kubeVerb] = struct{}{}
}
} else {
return nil, fmt.Errorf("unknown action verb for discovery: %s", action.Verb)
}
routes := []*restful.RouteBuilder{}
// If there is a subresource, kind should be the parent's kind.
if isSubresource {
parentStorage, ok := a.group.Storage[resource]
if !ok {
return nil, fmt.Errorf("missing parent storage: %q", resource)
}
fqParentKind, err := GetResourceKind(a.group.GroupVersion, parentStorage, a.group.Typer)
if err != nil {
return nil, err
}
kind = fqParentKind.Kind
}
verbOverrider, needOverride := storage.(StorageMetricsOverride)
switch action.Verb {
case "GET": // Get a resource.
var handler restful.RouteFunction
if isGetterWithOptions {
handler = restfulGetResourceWithOptions(getterWithOptions, reqScope, isSubresource)
} else {
handler = restfulGetResource(getter, exporter, reqScope)
}
if needOverride {
// need change the reported verb
handler = metrics.InstrumentRouteFunc(verbOverrider.OverrideMetricsVerb(action.Verb), group, version, resource, subresource, requestScope, metrics.APIServerComponent, handler)
} else {
handler = metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, handler)
}
if a.enableAPIResponseCompression {
handler = genericfilters.RestfulWithCompression(handler)
}
doc := "read the specified " + kind
if isSubresource {
doc = "read " + subresource + " of the specified " + kind
}
route := ws.GET(action.Path).To(handler).
Doc(doc).
Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")).
Operation("read"+namespaced+kind+strings.Title(subresource)+operationSuffix).
Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...).
Returns(http.StatusOK, "OK", producedObject).
Writes(producedObject)
if isGetterWithOptions {
if err := AddObjectParams(ws, route, versionedGetOptions); err != nil {
return nil, err
}
}
if isExporter {
if err := AddObjectParams(ws, route, versionedExportOptions); err != nil {
return nil, err
}
}
addParams(route, action.Params)
routes = append(routes, route)
case "LIST": // List all resources of a kind.
doc := "list objects of kind " + kind
if isSubresource {
doc = "list " + subresource + " of objects of kind " + kind
}
handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulListResource(lister, watcher, reqScope, false, a.minRequestTimeout))
if a.enableAPIResponseCompression {
handler = genericfilters.RestfulWithCompression(handler)
}
route := ws.GET(action.Path).To(handler).
Doc(doc).
Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")).
Operation("list"+namespaced+kind+strings.Title(subresource)+operationSuffix).
Produces(append(storageMeta.ProducesMIMETypes(action.Verb), allMediaTypes...)...).
Returns(http.StatusOK, "OK", versionedList).
Writes(versionedList)
if err := AddObjectParams(ws, route, versionedListOptions); err != nil {
return nil, err
}
switch {
case isLister && isWatcher:
doc := "list or watch objects of kind " + kind
if isSubresource {
doc = "list or watch " + subresource + " of objects of kind " + kind
}
route.Doc(doc)
case isWatcher:
doc := "watch objects of kind " + kind
if isSubresource {
doc = "watch " + subresource + "of objects of kind " + kind
}
route.Doc(doc)
}
addParams(route, action.Params)
routes = append(routes, route)
case "PUT": // Update a resource.
doc := "replace the specified " + kind
if isSubresource {
doc = "replace " + subresource + " of the specified " + kind
}
handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulUpdateResource(updater, reqScope, admit))
route := ws.PUT(action.Path).To(handler).
Doc(doc).
Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")).
Operation("replace"+namespaced+kind+strings.Title(subresource)+operationSuffix).
Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...).
Returns(http.StatusOK, "OK", producedObject).
// TODO: in some cases, the API may return a v1.Status instead of the versioned object
// but currently go-restful can't handle multiple different objects being returned.
Returns(http.StatusCreated, "Created", producedObject).
Reads(defaultVersionedObject).
Writes(producedObject)
if err := AddObjectParams(ws, route, versionedUpdateOptions); err != nil {
return nil, err
}
addParams(route, action.Params)
routes = append(routes, route)
case "PATCH": // Partially update a resource
doc := "partially update the specified " + kind
if isSubresource {
doc = "partially update " + subresource + " of the specified " + kind
}
supportedTypes := []string{
string(types.JSONPatchType),
string(types.MergePatchType),
string(types.StrategicMergePatchType),
}
if utilfeature.DefaultFeatureGate.Enabled(features.ServerSideApply) {
supportedTypes = append(supportedTypes, string(types.ApplyPatchType))
}
handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulPatchResource(patcher, reqScope, admit, supportedTypes))
route := ws.PATCH(action.Path).To(handler).
Doc(doc).
Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")).
Consumes(supportedTypes...).
Operation("patch"+namespaced+kind+strings.Title(subresource)+operationSuffix).
Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...).
Returns(http.StatusOK, "OK", producedObject).
Reads(metav1.Patch{}).
Writes(producedObject)
if err := AddObjectParams(ws, route, versionedPatchOptions); err != nil {
return nil, err
}
addParams(route, action.Params)
routes = append(routes, route)
case "POST": // Create a resource.
var handler restful.RouteFunction
if isNamedCreater {
handler = restfulCreateNamedResource(namedCreater, reqScope, admit)
} else {
handler = restfulCreateResource(creater, reqScope, admit)
}
handler = metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, handler)
article := GetArticleForNoun(kind, " ")
doc := "create" + article + kind
if isSubresource {
doc = "create " + subresource + " of" + article + kind
}
route := ws.POST(action.Path).To(handler).
Doc(doc).
Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")).
Operation("create"+namespaced+kind+strings.Title(subresource)+operationSuffix).
Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...).
Returns(http.StatusOK, "OK", producedObject).
// TODO: in some cases, the API may return a v1.Status instead of the versioned object
// but currently go-restful can't handle multiple different objects being returned.
Returns(http.StatusCreated, "Created", producedObject).
Returns(http.StatusAccepted, "Accepted", producedObject).
Reads(defaultVersionedObject).
Writes(producedObject)
if err := AddObjectParams(ws, route, versionedCreateOptions); err != nil {
return nil, err
}
addParams(route, action.Params)
routes = append(routes, route)
case "DELETE": // Delete a resource.
article := GetArticleForNoun(kind, " ")
doc := "delete" + article + kind
if isSubresource {
doc = "delete " + subresource + " of" + article + kind
}
handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulDeleteResource(gracefulDeleter, isGracefulDeleter, reqScope, admit))
route := ws.DELETE(action.Path).To(handler).
Doc(doc).
Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")).
Operation("delete"+namespaced+kind+strings.Title(subresource)+operationSuffix).
Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...).
Writes(versionedStatus).
Returns(http.StatusOK, "OK", versionedStatus).
Returns(http.StatusAccepted, "Accepted", versionedStatus)
if isGracefulDeleter {
route.Reads(versionedDeleterObject)
route.ParameterNamed("body").Required(false)
if err := AddObjectParams(ws, route, versionedDeleteOptions); err != nil {
return nil, err
}
}
addParams(route, action.Params)
routes = append(routes, route)
case "DELETECOLLECTION":
doc := "delete collection of " + kind
if isSubresource {
doc = "delete collection of " + subresource + " of a " + kind
}
handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulDeleteCollection(collectionDeleter, isCollectionDeleter, reqScope, admit))
route := ws.DELETE(action.Path).To(handler).
Doc(doc).
Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")).
Operation("deletecollection"+namespaced+kind+strings.Title(subresource)+operationSuffix).
Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...).
Writes(versionedStatus).
Returns(http.StatusOK, "OK", versionedStatus)
if isCollectionDeleter {
route.Reads(versionedDeleterObject)
route.ParameterNamed("body").Required(false)
if err := AddObjectParams(ws, route, versionedDeleteOptions); err != nil {
return nil, err
}
}
if err := AddObjectParams(ws, route, versionedListOptions); err != nil {
return nil, err
}
addParams(route, action.Params)
routes = append(routes, route)
// deprecated in 1.11
case "WATCH": // Watch a resource.
doc := "watch changes to an object of kind " + kind
if isSubresource {
doc = "watch changes to " + subresource + " of an object of kind " + kind
}
doc += ". deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter."
handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulListResource(lister, watcher, reqScope, true, a.minRequestTimeout))
route := ws.GET(action.Path).To(handler).
Doc(doc).
Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")).
Operation("watch"+namespaced+kind+strings.Title(subresource)+operationSuffix).
Produces(allMediaTypes...).
Returns(http.StatusOK, "OK", versionedWatchEvent).
Writes(versionedWatchEvent)
if err := AddObjectParams(ws, route, versionedListOptions); err != nil {
return nil, err
}
addParams(route, action.Params)
routes = append(routes, route)
// deprecated in 1.11
case "WATCHLIST": // Watch all resources of a kind.
doc := "watch individual changes to a list of " + kind
if isSubresource {
doc = "watch individual changes to a list of " + subresource + " of " + kind
}
doc += ". deprecated: use the 'watch' parameter with a list operation instead."
handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulListResource(lister, watcher, reqScope, true, a.minRequestTimeout))
route := ws.GET(action.Path).To(handler).
Doc(doc).
Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")).
Operation("watch"+namespaced+kind+strings.Title(subresource)+"List"+operationSuffix).
Produces(allMediaTypes...).
Returns(http.StatusOK, "OK", versionedWatchEvent).
Writes(versionedWatchEvent)
if err := AddObjectParams(ws, route, versionedListOptions); err != nil {
return nil, err
}
addParams(route, action.Params)
routes = append(routes, route)
case "CONNECT":
for _, method := range connecter.ConnectMethods() {
connectProducedObject := storageMeta.ProducesObject(method)
if connectProducedObject == nil {
connectProducedObject = "string"
}
doc := "connect " + method + " requests to " + kind
if isSubresource {
doc = "connect " + method + " requests to " + subresource + " of " + kind
}
handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulConnectResource(connecter, reqScope, admit, path, isSubresource))
route := ws.Method(method).Path(action.Path).
To(handler).
Doc(doc).
Operation("connect" + strings.Title(strings.ToLower(method)) + namespaced + kind + strings.Title(subresource) + operationSuffix).
Produces("*/*").
Consumes("*/*").
Writes(connectProducedObject)
if versionedConnectOptions != nil {
if err := AddObjectParams(ws, route, versionedConnectOptions); err != nil {
return nil, err
}
}
addParams(route, action.Params)
routes = append(routes, route)
// transform ConnectMethods to kube verbs
if kubeVerb, found := toDiscoveryKubeVerb[method]; found {
if len(kubeVerb) != 0 {
kubeVerbs[kubeVerb] = struct{}{}
}
}
}
default:
return nil, fmt.Errorf("unrecognized action verb: %s", action.Verb)
}
for _, route := range routes {
route.Metadata(ROUTE_META_GVK, metav1.GroupVersionKind{
Group: reqScope.Kind.Group,
Version: reqScope.Kind.Version,
Kind: reqScope.Kind.Kind,
})
route.Metadata(ROUTE_META_ACTION, strings.ToLower(action.Verb))
ws.Route(route)
}
// Note: update GetAuthorizerAttributes() when adding a custom handler.
}
apiResource.Verbs = make([]string, 0, len(kubeVerbs))
for kubeVerb := range kubeVerbs {
apiResource.Verbs = append(apiResource.Verbs, kubeVerb)
}
sort.Strings(apiResource.Verbs)
if shortNamesProvider, ok := storage.(rest.ShortNamesProvider); ok {
apiResource.ShortNames = shortNamesProvider.ShortNames()
}
if categoriesProvider, ok := storage.(rest.CategoriesProvider); ok {
apiResource.Categories = categoriesProvider.Categories()
}
if gvkProvider, ok := storage.(rest.GroupVersionKindProvider); ok {
gvk := gvkProvider.GroupVersionKind(a.group.GroupVersion)
apiResource.Group = gvk.Group
apiResource.Version = gvk.Version
apiResource.Kind = gvk.Kind
}
// Record the existence of the GVR and the corresponding GVK
a.group.EquivalentResourceRegistry.RegisterKindFor(reqScope.Resource, reqScope.Subresource, fqKindToRegister)
return &apiResource, nil
}
// indirectArbitraryPointer returns *ptrToObject for an arbitrary pointer
func indirectArbitraryPointer(ptrToObject interface{}) interface{} {
return reflect.Indirect(reflect.ValueOf(ptrToObject)).Interface()
}
func appendIf(actions []action, a action, shouldAppend bool) []action {
if shouldAppend {
actions = append(actions, a)
}
return actions
}
func addParams(route *restful.RouteBuilder, params []*restful.Parameter) {
for _, param := range params {
route.Param(param)
}
}
// AddObjectParams converts a runtime.Object into a set of go-restful Param() definitions on the route.
// The object must be a pointer to a struct; only fields at the top level of the struct that are not
// themselves interfaces or structs are used; only fields with a json tag that is non empty (the standard
// Go JSON behavior for omitting a field) become query parameters. The name of the query parameter is
// the JSON field name. If a description struct tag is set on the field, that description is used on the
// query parameter. In essence, it converts a standard JSON top level object into a query param schema.
func AddObjectParams(ws *restful.WebService, route *restful.RouteBuilder, obj interface{}) error {
sv, err := conversion.EnforcePtr(obj)
if err != nil {
return err
}
st := sv.Type()
switch st.Kind() {
case reflect.Struct:
for i := 0; i < st.NumField(); i++ {
name := st.Field(i).Name
sf, ok := st.FieldByName(name)
if !ok {
continue
}
switch sf.Type.Kind() {
case reflect.Interface, reflect.Struct:
case reflect.Ptr:
// TODO: This is a hack to let metav1.Time through. This needs to be fixed in a more generic way eventually. bug #36191
if (sf.Type.Elem().Kind() == reflect.Interface || sf.Type.Elem().Kind() == reflect.Struct) && strings.TrimPrefix(sf.Type.String(), "*") != "metav1.Time" {
continue
}
fallthrough
default:
jsonTag := sf.Tag.Get("json")
if len(jsonTag) == 0 {
continue
}
jsonName := strings.SplitN(jsonTag, ",", 2)[0]
if len(jsonName) == 0 {
continue
}
var desc string
if docable, ok := obj.(documentable); ok {
desc = docable.SwaggerDoc()[jsonName]
}
route.Param(ws.QueryParameter(jsonName, desc).DataType(typeToJSON(sf.Type.String())))
}
}
}
return nil
}
// TODO: this is incomplete, expand as needed.
// Convert the name of a golang type to the name of a JSON type
func typeToJSON(typeName string) string {
switch typeName {
case "bool", "*bool":
return "boolean"
case "uint8", "*uint8", "int", "*int", "int32", "*int32", "int64", "*int64", "uint32", "*uint32", "uint64", "*uint64":
return "integer"
case "float64", "*float64", "float32", "*float32":
return "number"
case "metav1.Time", "*metav1.Time":
return "string"
case "byte", "*byte":
return "string"
case "v1.DeletionPropagation", "*v1.DeletionPropagation":
return "string"
// TODO: Fix these when go-restful supports a way to specify an array query param:
// https://github.com/emicklei/go-restful/issues/225
case "[]string", "[]*string":
return "string"
case "[]int32", "[]*int32":
return "integer"
default:
return typeName
}
}
// defaultStorageMetadata provides default answers to rest.StorageMetadata.
type defaultStorageMetadata struct{}
// defaultStorageMetadata implements rest.StorageMetadata
var _ rest.StorageMetadata = defaultStorageMetadata{}
func (defaultStorageMetadata) ProducesMIMETypes(verb string) []string {
return nil
}
func (defaultStorageMetadata) ProducesObject(verb string) interface{} {
return nil
}
// splitSubresource checks if the given storage path is the path of a subresource and returns
// the resource and subresource components.
func splitSubresource(path string) (string, string, error) {
var resource, subresource string
switch parts := strings.Split(path, "/"); len(parts) {
case 2:
resource, subresource = parts[0], parts[1]
case 1:
resource = parts[0]
default:
// TODO: support deeper paths
return "", "", fmt.Errorf("api_installer allows only one or two segment paths (resource or resource/subresource)")
}
return resource, subresource, nil
}
// GetArticleForNoun returns the article needed for the given noun.
func GetArticleForNoun(noun string, padding string) string {
if noun[len(noun)-2:] != "ss" && noun[len(noun)-1:] == "s" {
// Plurals don't have an article.
// Don't catch words like class
return fmt.Sprintf("%v", padding)
}
article := "a"
if isVowel(rune(noun[0])) {
article = "an"
}
return fmt.Sprintf("%s%s%s", padding, article, padding)
}
// isVowel returns true if the rune is a vowel (case insensitive).
func isVowel(c rune) bool {
vowels := []rune{'a', 'e', 'i', 'o', 'u'}
for _, value := range vowels {
if value == unicode.ToLower(c) {
return true
}
}
return false
}
func restfulListResource(r rest.Lister, rw rest.Watcher, scope handlers.RequestScope, forceWatch bool, minRequestTimeout time.Duration) restful.RouteFunction {
return func(req *restful.Request, res *restful.Response) {
handlers.ListResource(r, rw, &scope, forceWatch, minRequestTimeout)(res.ResponseWriter, req.Request)
}
}
func restfulCreateNamedResource(r rest.NamedCreater, scope handlers.RequestScope, admit admission.Interface) restful.RouteFunction {
return func(req *restful.Request, res *restful.Response) {
handlers.CreateNamedResource(r, &scope, admit)(res.ResponseWriter, req.Request)
}
}
func restfulCreateResource(r rest.Creater, scope handlers.RequestScope, admit admission.Interface) restful.RouteFunction {
return func(req *restful.Request, res *restful.Response) {
handlers.CreateResource(r, &scope, admit)(res.ResponseWriter, req.Request)
}
}
func restfulDeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope handlers.RequestScope, admit admission.Interface) restful.RouteFunction {
return func(req *restful.Request, res *restful.Response) {
handlers.DeleteResource(r, allowsOptions, &scope, admit)(res.ResponseWriter, req.Request)
}
}
func restfulDeleteCollection(r rest.CollectionDeleter, checkBody bool, scope handlers.RequestScope, admit admission.Interface) restful.RouteFunction {
return func(req *restful.Request, res *restful.Response) {
handlers.DeleteCollection(r, checkBody, &scope, admit)(res.ResponseWriter, req.Request)
}
}
func restfulUpdateResource(r rest.Updater, scope handlers.RequestScope, admit admission.Interface) restful.RouteFunction {
return func(req *restful.Request, res *restful.Response) {
handlers.UpdateResource(r, &scope, admit)(res.ResponseWriter, req.Request)
}
}
func restfulPatchResource(r rest.Patcher, scope handlers.RequestScope, admit admission.Interface, supportedTypes []string) restful.RouteFunction {
return func(req *restful.Request, res *restful.Response) {
handlers.PatchResource(r, &scope, admit, supportedTypes)(res.ResponseWriter, req.Request)
}
}
func restfulGetResource(r rest.Getter, e rest.Exporter, scope handlers.RequestScope) restful.RouteFunction {
return func(req *restful.Request, res *restful.Response) {
handlers.GetResource(r, e, &scope)(res.ResponseWriter, req.Request)
}
}
func restfulGetResourceWithOptions(r rest.GetterWithOptions, scope handlers.RequestScope, isSubresource bool) restful.RouteFunction {
return func(req *restful.Request, res *restful.Response) {
handlers.GetResourceWithOptions(r, &scope, isSubresource)(res.ResponseWriter, req.Request)
}
}
func restfulConnectResource(connecter rest.Connecter, scope handlers.RequestScope, admit admission.Interface, restPath string, isSubresource bool) restful.RouteFunction {
return func(req *restful.Request, res *restful.Response) {
handlers.ConnectResource(connecter, &scope, admit, restPath, isSubresource)(res.ResponseWriter, req.Request)
}
}
|
Java
|
# Obione tularensis (Coville) Ulbr. SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null
|
Java
|
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head>
<meta http-equiv="Content-Type" content="application/xhtml+xml; charset=UTF-8" />
<meta name="generator" content="AsciiDoc 8.6.8" />
<title>git-filter-branch(1)</title>
<style type="text/css">
/* Shared CSS for AsciiDoc xhtml11 and html5 backends */
/* Default font. */
body {
font-family: Georgia,serif;
}
/* Title font. */
h1, h2, h3, h4, h5, h6,
div.title, caption.title,
thead, p.table.header,
#toctitle,
#author, #revnumber, #revdate, #revremark,
#footer {
font-family: Arial,Helvetica,sans-serif;
}
body {
margin: 1em 5% 1em 5%;
}
a {
color: blue;
text-decoration: underline;
}
a:visited {
color: fuchsia;
}
em {
font-style: italic;
color: navy;
}
strong {
font-weight: bold;
color: #083194;
}
h1, h2, h3, h4, h5, h6 {
color: #527bbd;
margin-top: 1.2em;
margin-bottom: 0.5em;
line-height: 1.3;
}
h1, h2, h3 {
border-bottom: 2px solid silver;
}
h2 {
padding-top: 0.5em;
}
h3 {
float: left;
}
h3 + * {
clear: left;
}
h5 {
font-size: 1.0em;
}
div.sectionbody {
margin-left: 0;
}
hr {
border: 1px solid silver;
}
p {
margin-top: 0.5em;
margin-bottom: 0.5em;
}
ul, ol, li > p {
margin-top: 0;
}
ul > li { color: #aaa; }
ul > li > * { color: black; }
.monospaced, code, pre {
font-family: "Courier New", Courier, monospace;
font-size: inherit;
color: navy;
padding: 0;
margin: 0;
}
#author {
color: #527bbd;
font-weight: bold;
font-size: 1.1em;
}
#email {
}
#revnumber, #revdate, #revremark {
}
#footer {
font-size: small;
border-top: 2px solid silver;
padding-top: 0.5em;
margin-top: 4.0em;
}
#footer-text {
float: left;
padding-bottom: 0.5em;
}
#footer-badges {
float: right;
padding-bottom: 0.5em;
}
#preamble {
margin-top: 1.5em;
margin-bottom: 1.5em;
}
div.imageblock, div.exampleblock, div.verseblock,
div.quoteblock, div.literalblock, div.listingblock, div.sidebarblock,
div.admonitionblock {
margin-top: 1.0em;
margin-bottom: 1.5em;
}
div.admonitionblock {
margin-top: 2.0em;
margin-bottom: 2.0em;
margin-right: 10%;
color: #606060;
}
div.content { /* Block element content. */
padding: 0;
}
/* Block element titles. */
div.title, caption.title {
color: #527bbd;
font-weight: bold;
text-align: left;
margin-top: 1.0em;
margin-bottom: 0.5em;
}
div.title + * {
margin-top: 0;
}
td div.title:first-child {
margin-top: 0.0em;
}
div.content div.title:first-child {
margin-top: 0.0em;
}
div.content + div.title {
margin-top: 0.0em;
}
div.sidebarblock > div.content {
background: #ffffee;
border: 1px solid #dddddd;
border-left: 4px solid #f0f0f0;
padding: 0.5em;
}
div.listingblock > div.content {
border: 1px solid #dddddd;
border-left: 5px solid #f0f0f0;
background: #f8f8f8;
padding: 0.5em;
}
div.quoteblock, div.verseblock {
padding-left: 1.0em;
margin-left: 1.0em;
margin-right: 10%;
border-left: 5px solid #f0f0f0;
color: #888;
}
div.quoteblock > div.attribution {
padding-top: 0.5em;
text-align: right;
}
div.verseblock > pre.content {
font-family: inherit;
font-size: inherit;
}
div.verseblock > div.attribution {
padding-top: 0.75em;
text-align: left;
}
/* DEPRECATED: Pre version 8.2.7 verse style literal block. */
div.verseblock + div.attribution {
text-align: left;
}
div.admonitionblock .icon {
vertical-align: top;
font-size: 1.1em;
font-weight: bold;
text-decoration: underline;
color: #527bbd;
padding-right: 0.5em;
}
div.admonitionblock td.content {
padding-left: 0.5em;
border-left: 3px solid #dddddd;
}
div.exampleblock > div.content {
border-left: 3px solid #dddddd;
padding-left: 0.5em;
}
div.imageblock div.content { padding-left: 0; }
span.image img { border-style: none; }
a.image:visited { color: white; }
dl {
margin-top: 0.8em;
margin-bottom: 0.8em;
}
dt {
margin-top: 0.5em;
margin-bottom: 0;
font-style: normal;
color: navy;
}
dd > *:first-child {
margin-top: 0.1em;
}
ul, ol {
list-style-position: outside;
}
ol.arabic {
list-style-type: decimal;
}
ol.loweralpha {
list-style-type: lower-alpha;
}
ol.upperalpha {
list-style-type: upper-alpha;
}
ol.lowerroman {
list-style-type: lower-roman;
}
ol.upperroman {
list-style-type: upper-roman;
}
div.compact ul, div.compact ol,
div.compact p, div.compact p,
div.compact div, div.compact div {
margin-top: 0.1em;
margin-bottom: 0.1em;
}
tfoot {
font-weight: bold;
}
td > div.verse {
white-space: pre;
}
div.hdlist {
margin-top: 0.8em;
margin-bottom: 0.8em;
}
div.hdlist tr {
padding-bottom: 15px;
}
dt.hdlist1.strong, td.hdlist1.strong {
font-weight: bold;
}
td.hdlist1 {
vertical-align: top;
font-style: normal;
padding-right: 0.8em;
color: navy;
}
td.hdlist2 {
vertical-align: top;
}
div.hdlist.compact tr {
margin: 0;
padding-bottom: 0;
}
.comment {
background: yellow;
}
.footnote, .footnoteref {
font-size: 0.8em;
}
span.footnote, span.footnoteref {
vertical-align: super;
}
#footnotes {
margin: 20px 0 20px 0;
padding: 7px 0 0 0;
}
#footnotes div.footnote {
margin: 0 0 5px 0;
}
#footnotes hr {
border: none;
border-top: 1px solid silver;
height: 1px;
text-align: left;
margin-left: 0;
width: 20%;
min-width: 100px;
}
div.colist td {
padding-right: 0.5em;
padding-bottom: 0.3em;
vertical-align: top;
}
div.colist td img {
margin-top: 0.3em;
}
@media print {
#footer-badges { display: none; }
}
#toc {
margin-bottom: 2.5em;
}
#toctitle {
color: #527bbd;
font-size: 1.1em;
font-weight: bold;
margin-top: 1.0em;
margin-bottom: 0.1em;
}
div.toclevel0, div.toclevel1, div.toclevel2, div.toclevel3, div.toclevel4 {
margin-top: 0;
margin-bottom: 0;
}
div.toclevel2 {
margin-left: 2em;
font-size: 0.9em;
}
div.toclevel3 {
margin-left: 4em;
font-size: 0.9em;
}
div.toclevel4 {
margin-left: 6em;
font-size: 0.9em;
}
span.aqua { color: aqua; }
span.black { color: black; }
span.blue { color: blue; }
span.fuchsia { color: fuchsia; }
span.gray { color: gray; }
span.green { color: green; }
span.lime { color: lime; }
span.maroon { color: maroon; }
span.navy { color: navy; }
span.olive { color: olive; }
span.purple { color: purple; }
span.red { color: red; }
span.silver { color: silver; }
span.teal { color: teal; }
span.white { color: white; }
span.yellow { color: yellow; }
span.aqua-background { background: aqua; }
span.black-background { background: black; }
span.blue-background { background: blue; }
span.fuchsia-background { background: fuchsia; }
span.gray-background { background: gray; }
span.green-background { background: green; }
span.lime-background { background: lime; }
span.maroon-background { background: maroon; }
span.navy-background { background: navy; }
span.olive-background { background: olive; }
span.purple-background { background: purple; }
span.red-background { background: red; }
span.silver-background { background: silver; }
span.teal-background { background: teal; }
span.white-background { background: white; }
span.yellow-background { background: yellow; }
span.big { font-size: 2em; }
span.small { font-size: 0.6em; }
span.underline { text-decoration: underline; }
span.overline { text-decoration: overline; }
span.line-through { text-decoration: line-through; }
div.unbreakable { page-break-inside: avoid; }
/*
* xhtml11 specific
*
* */
div.tableblock {
margin-top: 1.0em;
margin-bottom: 1.5em;
}
div.tableblock > table {
border: 3px solid #527bbd;
}
thead, p.table.header {
font-weight: bold;
color: #527bbd;
}
p.table {
margin-top: 0;
}
/* Because the table frame attribute is overriden by CSS in most browsers. */
div.tableblock > table[frame="void"] {
border-style: none;
}
div.tableblock > table[frame="hsides"] {
border-left-style: none;
border-right-style: none;
}
div.tableblock > table[frame="vsides"] {
border-top-style: none;
border-bottom-style: none;
}
/*
* html5 specific
*
* */
table.tableblock {
margin-top: 1.0em;
margin-bottom: 1.5em;
}
thead, p.tableblock.header {
font-weight: bold;
color: #527bbd;
}
p.tableblock {
margin-top: 0;
}
table.tableblock {
border-width: 3px;
border-spacing: 0px;
border-style: solid;
border-color: #527bbd;
border-collapse: collapse;
}
th.tableblock, td.tableblock {
border-width: 1px;
padding: 4px;
border-style: solid;
border-color: #527bbd;
}
table.tableblock.frame-topbot {
border-left-style: hidden;
border-right-style: hidden;
}
table.tableblock.frame-sides {
border-top-style: hidden;
border-bottom-style: hidden;
}
table.tableblock.frame-none {
border-style: hidden;
}
th.tableblock.halign-left, td.tableblock.halign-left {
text-align: left;
}
th.tableblock.halign-center, td.tableblock.halign-center {
text-align: center;
}
th.tableblock.halign-right, td.tableblock.halign-right {
text-align: right;
}
th.tableblock.valign-top, td.tableblock.valign-top {
vertical-align: top;
}
th.tableblock.valign-middle, td.tableblock.valign-middle {
vertical-align: middle;
}
th.tableblock.valign-bottom, td.tableblock.valign-bottom {
vertical-align: bottom;
}
/*
* manpage specific
*
* */
body.manpage h1 {
padding-top: 0.5em;
padding-bottom: 0.5em;
border-top: 2px solid silver;
border-bottom: 2px solid silver;
}
body.manpage h2 {
border-style: none;
}
body.manpage div.sectionbody {
margin-left: 3em;
}
@media print {
body.manpage div#toc { display: none; }
}
</style>
<script type="text/javascript">
/*<![CDATA[*/
var asciidoc = {  // Namespace.
/////////////////////////////////////////////////////////////////////
// Table Of Contents generator
/////////////////////////////////////////////////////////////////////
toc: function (toclevels) {
function getText(el) {
var text = "";
for (var i = el.firstChild; i != null; i = i.nextSibling) {
if (i.nodeType == 3 /* Node.TEXT_NODE */)
text += i.data;
else if (i.firstChild != null)
text += getText(i);
}
return text;
}
function TocEntry(el, text, toclevel) {
this.element = el;
this.text = text;
this.toclevel = toclevel;
}
function tocEntries(el, toclevels) {
var result = new Array;
var re = new RegExp('[hH]([1-'+(toclevels+1)+'])');
// Function that scans the DOM tree for header elements (the DOM2
// nodeIterator API would be a better technique but not supported by all
// browsers).
var iterate = function (el) {
for (var i = el.firstChild; i != null; i = i.nextSibling) {
if (i.nodeType == 1 /* Node.ELEMENT_NODE */) {
var mo = re.exec(i.tagName);
if (mo && (i.getAttribute("class") || i.getAttribute("className")) != "float") {
result[result.length] = new TocEntry(i, getText(i), mo[1]-1);
}
iterate(i);
}
}
}
iterate(el);
return result;
}
var toc = document.getElementById("toc");
if (!toc) {
return;
}
// Delete existing TOC entries in case we're reloading the TOC.
var tocEntriesToRemove = [];
var i;
for (i = 0; i < toc.childNodes.length; i++) {
var entry = toc.childNodes[i];
if (entry.nodeName.toLowerCase() == 'div'
&& entry.getAttribute("class")
&& entry.getAttribute("class").match(/^toclevel/))
tocEntriesToRemove.push(entry);
}
for (i = 0; i < tocEntriesToRemove.length; i++) {
toc.removeChild(tocEntriesToRemove[i]);
}
// Rebuild TOC entries.
var entries = tocEntries(document.getElementById("content"), toclevels);
for (var i = 0; i < entries.length; ++i) {
var entry = entries[i];
if (entry.element.id == "")
entry.element.id = "_toc_" + i;
var a = document.createElement("a");
a.href = "#" + entry.element.id;
a.appendChild(document.createTextNode(entry.text));
var div = document.createElement("div");
div.appendChild(a);
div.className = "toclevel" + entry.toclevel;
toc.appendChild(div);
}
if (entries.length == 0)
toc.parentNode.removeChild(toc);
},
/////////////////////////////////////////////////////////////////////
// Footnotes generator
/////////////////////////////////////////////////////////////////////
/* Based on footnote generation code from:
* http://www.brandspankingnew.net/archive/2005/07/format_footnote.html
*/
footnotes: function () {
// Delete existing footnote entries in case we're reloading the footnodes.
var i;
var noteholder = document.getElementById("footnotes");
if (!noteholder) {
return;
}
var entriesToRemove = [];
for (i = 0; i < noteholder.childNodes.length; i++) {
var entry = noteholder.childNodes[i];
if (entry.nodeName.toLowerCase() == 'div' && entry.getAttribute("class") == "footnote")
entriesToRemove.push(entry);
}
for (i = 0; i < entriesToRemove.length; i++) {
noteholder.removeChild(entriesToRemove[i]);
}
// Rebuild footnote entries.
var cont = document.getElementById("content");
var spans = cont.getElementsByTagName("span");
var refs = {};
var n = 0;
for (i=0; i<spans.length; i++) {
if (spans[i].className == "footnote") {
n++;
var note = spans[i].getAttribute("data-note");
if (!note) {
// Use [\s\S] in place of . so multi-line matches work.
// Because JavaScript has no s (dotall) regex flag.
note = spans[i].innerHTML.match(/\s*\[([\s\S]*)]\s*/)[1];
spans[i].innerHTML =
"[<a id='_footnoteref_" + n + "' href='#_footnote_" + n +
"' title='View footnote' class='footnote'>" + n + "</a>]";
spans[i].setAttribute("data-note", note);
}
noteholder.innerHTML +=
"<div class='footnote' id='_footnote_" + n + "'>" +
"<a href='#_footnoteref_" + n + "' title='Return to text'>" +
n + "</a>. " + note + "</div>";
var id =spans[i].getAttribute("id");
if (id != null) refs["#"+id] = n;
}
}
if (n == 0)
noteholder.parentNode.removeChild(noteholder);
else {
// Process footnoterefs.
for (i=0; i<spans.length; i++) {
if (spans[i].className == "footnoteref") {
var href = spans[i].getElementsByTagName("a")[0].getAttribute("href");
href = href.match(/#.*/)[0]; // Because IE return full URL.
n = refs[href];
spans[i].innerHTML =
"[<a href='#_footnote_" + n +
"' title='View footnote' class='footnote'>" + n + "</a>]";
}
}
}
},
install: function(toclevels) {
var timerId;
function reinstall() {
asciidoc.footnotes();
if (toclevels) {
asciidoc.toc(toclevels);
}
}
function reinstallAndRemoveTimer() {
clearInterval(timerId);
reinstall();
}
timerId = setInterval(reinstall, 500);
if (document.addEventListener)
document.addEventListener("DOMContentLoaded", reinstallAndRemoveTimer, false);
else
window.onload = reinstallAndRemoveTimer;
}
}
asciidoc.install();
/*]]>*/
</script>
</head>
<body class="manpage">
<div id="header">
<h1>
git-filter-branch(1) Manual Page
</h1>
<h2>NAME</h2>
<div class="sectionbody">
<p>git-filter-branch -
Rewrite branches
</p>
</div>
</div>
<div id="content">
<div class="sect1">
<h2 id="_synopsis">SYNOPSIS</h2>
<div class="sectionbody">
<div class="verseblock">
<pre class="content"><em>git filter-branch</em> [--env-filter <command>] [--tree-filter <command>]
[--index-filter <command>] [--parent-filter <command>]
[--msg-filter <command>] [--commit-filter <command>]
[--tag-name-filter <command>] [--subdirectory-filter <directory>]
[--prune-empty]
[--original <namespace>] [-d <directory>] [-f | --force]
[--] [<rev-list options>…]</pre>
<div class="attribution">
</div></div>
</div>
</div>
<div class="sect1">
<h2 id="_description">DESCRIPTION</h2>
<div class="sectionbody">
<div class="paragraph"><p>Lets you rewrite git revision history by rewriting the branches mentioned
in the <rev-list options>, applying custom filters on each revision.
Those filters can modify each tree (e.g. removing a file or running
a perl rewrite on all files) or information about each commit.
Otherwise, all information (including original commit times or merge
information) will be preserved.</p></div>
<div class="paragraph"><p>The command will only rewrite the <em>positive</em> refs mentioned in the
command line (e.g. if you pass <em>a..b</em>, only <em>b</em> will be rewritten).
If you specify no filters, the commits will be recommitted without any
changes, which would normally have no effect. Nevertheless, this may be
useful in the future for compensating for some git bugs or such,
therefore such a usage is permitted.</p></div>
<div class="paragraph"><p><strong>NOTE</strong>: This command honors <code>.git/info/grafts</code> file and refs in
the <code>refs/replace/</code> namespace.
If you have any grafts or replacement refs defined, running this command
will make them permanent.</p></div>
<div class="paragraph"><p><strong>WARNING</strong>! The rewritten history will have different object names for all
the objects and will not converge with the original branch. You will not
be able to easily push and distribute the rewritten branch on top of the
original branch. Please do not use this command if you do not know the
full implications, and avoid using it anyway, if a simple single commit
would suffice to fix your problem. (See the "RECOVERING FROM UPSTREAM
REBASE" section in <a href="git-rebase.html">git-rebase(1)</a> for further information about
rewriting published history.)</p></div>
<div class="paragraph"><p>Always verify that the rewritten version is correct: The original refs,
if different from the rewritten ones, will be stored in the namespace
<em>refs/original/</em>.</p></div>
<div class="paragraph"><p>Note that since this operation is very I/O expensive, it might
be a good idea to redirect the temporary directory off-disk with the
<em>-d</em> option, e.g. on tmpfs. Reportedly the speedup is very noticeable.</p></div>
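<div class="paragraph"><p>As an illustration only (the scratch directory below is a placeholder for any
tmpfs mount on your system), a tree filter run with its working area redirected
off-disk might look like this:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch -d /dev/shm/git-rewrite-scratch --tree-filter 'rm -f filename' HEAD</code></pre>
</div></div>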
<div class="sect2">
<h3 id="_filters">Filters</h3>
<div class="paragraph"><p>The filters are applied in the order as listed below. The <command>
argument is always evaluated in the shell context using the <em>eval</em> command
(with the notable exception of the commit filter, for technical reasons).
Prior to that, the $GIT_COMMIT environment variable will be set to contain
the id of the commit being rewritten. Also, GIT_AUTHOR_NAME,
GIT_AUTHOR_EMAIL, GIT_AUTHOR_DATE, GIT_COMMITTER_NAME, GIT_COMMITTER_EMAIL,
and GIT_COMMITTER_DATE are set according to the current commit. The values
of these variables after the filters have run, are used for the new commit.
If any evaluation of <command> returns a non-zero exit status, the whole
operation will be aborted.</p></div>
<div class="paragraph"><p>A <em>map</em> function is available that takes an "original sha1 id" argument
and outputs a "rewritten sha1 id" if the commit has been already
rewritten, and "original sha1 id" otherwise; the <em>map</em> function can
return several ids on separate lines if your commit filter emitted
multiple commits.</p></div>
</div>
</div>
</div>
<div class="sect1">
<h2 id="_options">OPTIONS</h2>
<div class="sectionbody">
<div class="dlist"><dl>
<dt class="hdlist1">
--env-filter <command>
</dt>
<dd>
<p>
This filter may be used if you only need to modify the environment
in which the commit will be performed. Specifically, you might
want to rewrite the author/committer name/email/time environment
variables (see <a href="git-commit-tree.html">git-commit-tree(1)</a> for details). Do not forget
to re-export the variables.
</p>
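<div class="paragraph"><p>As a minimal sketch (the e-mail addresses are placeholders), an environment
filter that rewrites one author address and re-exports the variable might look
like this:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch --env-filter '
if [ "$GIT_AUTHOR_EMAIL" = "old@example.com" ];
then
GIT_AUTHOR_EMAIL="new@example.com";
export GIT_AUTHOR_EMAIL;
fi' HEAD</code></pre>
</div></div>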
</dd>
<dt class="hdlist1">
--tree-filter <command>
</dt>
<dd>
<p>
This is the filter for rewriting the tree and its contents.
The argument is evaluated in shell with the working
directory set to the root of the checked out tree. The new tree
is then used as-is (new files are auto-added, disappeared files
are auto-removed - neither .gitignore files nor any other ignore
rules <strong>HAVE ANY EFFECT</strong>!).
</p>
</dd>
<dt class="hdlist1">
--index-filter <command>
</dt>
<dd>
<p>
This is the filter for rewriting the index. It is similar to the
tree filter but does not check out the tree, which makes it much
faster. Frequently used with <code>git rm --cached
--ignore-unmatch ...</code>, see EXAMPLES below. For hairy
cases, see <a href="git-update-index.html">git-update-index(1)</a>.
</p>
</dd>
<dt class="hdlist1">
--parent-filter <command>
</dt>
<dd>
<p>
This is the filter for rewriting the commit’s parent list.
It will receive the parent string on stdin and shall output
the new parent string on stdout. The parent string is in
the format described in <a href="git-commit-tree.html">git-commit-tree(1)</a>: empty for
the initial commit, "-p parent" for a normal commit and
"-p parent1 -p parent2 -p parent3 …" for a merge commit.
</p>
</dd>
<dt class="hdlist1">
--msg-filter <command>
</dt>
<dd>
<p>
This is the filter for rewriting the commit messages.
The argument is evaluated in the shell with the original
commit message on standard input; its standard output is
used as the new commit message.
</p>
</dd>
<dt class="hdlist1">
--commit-filter <command>
</dt>
<dd>
<p>
This is the filter for performing the commit.
If this filter is specified, it will be called instead of the
<em>git commit-tree</em> command, with arguments of the form
"<TREE_ID> [(-p <PARENT_COMMIT_ID>)…]" and the log message on
stdin. The commit id is expected on stdout.
</p>
<div class="paragraph"><p>As a special extension, the commit filter may emit multiple
commit ids; in that case, the rewritten children of the original commit will
have all of them as parents.</p></div>
<div class="paragraph"><p>You can use the <em>map</em> convenience function in this filter, and other
convenience functions, too. For example, calling <em>skip_commit "$@"</em>
will leave out the current commit (but not its changes! If you want
that, use <em>git rebase</em> instead).</p></div>
<div class="paragraph"><p>You can also use the <code>git_commit_non_empty_tree "$@"</code> instead of
<code>git commit-tree "$@"</code> if you don’t wish to keep commits with a single parent
and that makes no change to the tree.</p></div>
</dd>
<dt class="hdlist1">
--tag-name-filter <command>
</dt>
<dd>
<p>
This is the filter for rewriting tag names. When passed,
it will be called for every tag ref that points to a rewritten
object (or to a tag object which points to a rewritten object).
The original tag name is passed via standard input, and the new
tag name is expected on standard output.
</p>
<div class="paragraph"><p>The original tags are not deleted, but can be overwritten;
use "--tag-name-filter cat" to simply update the tags. In this
case, be very careful and make sure you have the old tags
backed up in case the conversion has gone wrong.</p></div>
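<div class="paragraph"><p>For instance, to rewrite every tag in place while filtering all refs (combine
this with whatever other filters you are actually running; the same invocation
appears in the shrinking checklist below):</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch --tag-name-filter cat -- --all</code></pre>
</div></div>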
<div class="paragraph"><p>Nearly proper rewriting of tag objects is supported. If the tag has
a message attached, a new tag object will be created with the same message,
author, and timestamp. If the tag has a signature attached, the
signature will be stripped. It is by definition impossible to preserve
signatures. The reason this is only "nearly" proper is that, ideally, if
the tag did not change (it points to the same object, has the same name, etc.),
it should retain its signature. That is not the case: signatures will always
be removed, buyer beware. There is also no support for changing the
author or timestamp (or the tag message for that matter). Tags which point
to other tags will be rewritten to point to the underlying commit.</p></div>
</dd>
<dt class="hdlist1">
--subdirectory-filter <directory>
</dt>
<dd>
<p>
Only look at the history which touches the given subdirectory.
The result will contain that directory (and only that) as its
project root. Implies <a href="#Remap_to_ancestor">[Remap_to_ancestor]</a>.
</p>
</dd>
<dt class="hdlist1">
--prune-empty
</dt>
<dd>
<p>
Some kinds of filters will generate empty commits that leave the tree
untouched. This switch allows git-filter-branch to ignore such
commits. It only applies to commits that have exactly one
parent; merge commits are therefore kept. Also, this
option is not compatible with the use of <em>--commit-filter</em>; however, you
can use the function <em>git_commit_non_empty_tree "$@"</em> instead
of the <code>git commit-tree "$@"</code> idiom in your commit filter to achieve
the same effect.
</p>
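<div class="paragraph"><p>A short sketch (the filename is a placeholder) combining this switch with the
index filter shown in the EXAMPLES section:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch --prune-empty --index-filter 'git rm --cached --ignore-unmatch filename' HEAD</code></pre>
</div></div>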
</dd>
<dt class="hdlist1">
--original <namespace>
</dt>
<dd>
<p>
Use this option to set the namespace where the original commits
will be stored. The default value is <em>refs/original</em>.
</p>
</dd>
<dt class="hdlist1">
-d <directory>
</dt>
<dd>
<p>
Use this option to set the path to the temporary directory used for
rewriting. When applying a tree filter, the command needs to
temporarily check out the tree to some directory, which may consume
considerable space in case of large projects. By default it
does this in the <em>.git-rewrite/</em> directory but you can override
that choice by this parameter.
</p>
</dd>
<dt class="hdlist1">
-f
</dt>
<dt class="hdlist1">
--force
</dt>
<dd>
<p>
<em>git filter-branch</em> refuses to start with an existing temporary
directory or when there are already refs starting with
<em>refs/original/</em>, unless forced.
</p>
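<div class="paragraph"><p>For example (a sketch only; the filename is a placeholder), to re-run a rewrite
even though <em>refs/original/</em> still holds backups from a previous run:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch -f --tree-filter 'rm -f filename' HEAD</code></pre>
</div></div>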
</dd>
<dt class="hdlist1">
<rev-list options>…
</dt>
<dd>
<p>
Arguments for <em>git rev-list</em>. All positive refs included by
these options are rewritten. You may also specify options
such as <em>--all</em>, but you must use <em>--</em> to separate them from
the <em>git filter-branch</em> options. Implies <a href="#Remap_to_ancestor">[Remap_to_ancestor]</a>.
</p>
</dd>
</dl></div>
<div class="sect2">
<h3 id="Remap_to_ancestor">Remap to ancestor</h3>
<div class="paragraph"><p>By using <a href="rev-list.html">rev-list(1)</a> arguments, e.g., path limiters, you can limit the
set of revisions which get rewritten. However, positive refs on the command
line are distinguished: we don’t let them be excluded by such limiters. For
this purpose, they are instead rewritten to point at the nearest ancestor that
was not excluded.</p></div>
</div>
</div>
</div>
<div class="sect1">
<h2 id="_examples">Examples</h2>
<div class="sectionbody">
<div class="paragraph"><p>Suppose you want to remove a file (containing confidential information
or copyright violation) from all commits:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch --tree-filter 'rm filename' HEAD</code></pre>
</div></div>
<div class="paragraph"><p>However, if the file is absent from the tree of some commit,
a simple <code>rm filename</code> will fail for that tree and commit.
Thus you may instead want to use <code>rm -f filename</code> as the script.</p></div>
<div class="paragraph"><p>Using <code>--index-filter</code> with <em>git rm</em> yields a significantly faster
version. Like with using <code>rm filename</code>, <code>git rm --cached filename</code>
will fail if the file is absent from the tree of a commit. If you
want to "completely forget" a file, it does not matter when it entered
history, so we also add <code>--ignore-unmatch</code>:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch --index-filter 'git rm --cached --ignore-unmatch filename' HEAD</code></pre>
</div></div>
<div class="paragraph"><p>Now, you will get the rewritten history saved in HEAD.</p></div>
<div class="paragraph"><p>To rewrite the repository to look as if <code>foodir/</code> had been its project
root, and discard all other history:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch --subdirectory-filter foodir -- --all</code></pre>
</div></div>
<div class="paragraph"><p>Thus you can, e.g., turn a library subdirectory into a repository of
its own. Note the <code>--</code> that separates <em>filter-branch</em> options from
revision options, and the <code>--all</code> to rewrite all branches and tags.</p></div>
<div class="paragraph"><p>To set a commit (which typically is at the tip of another
history) to be the parent of the current initial commit, in
order to paste the other history behind the current history:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch --parent-filter 'sed "s/^\$/-p <graft-id>/"' HEAD</code></pre>
</div></div>
<div class="paragraph"><p>(if the parent string is empty - which happens when we are dealing with
the initial commit - add graftcommit as a parent). Note that this assumes
history with a single root (that is, no merge without common ancestors
happened). If this is not the case, use:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch --parent-filter \
'test $GIT_COMMIT = <commit-id> && echo "-p <graft-id>" || cat' HEAD</code></pre>
</div></div>
<div class="paragraph"><p>or even simpler:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>echo "$commit-id $graft-id" >> .git/info/grafts
git filter-branch $graft-id..HEAD</code></pre>
</div></div>
<div class="paragraph"><p>To remove commits authored by "Darl McBribe" from the history:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch --commit-filter '
if [ "$GIT_AUTHOR_NAME" = "Darl McBribe" ];
then
skip_commit "$@";
else
git commit-tree "$@";
fi' HEAD</code></pre>
</div></div>
<div class="paragraph"><p>The function <em>skip_commit</em> is defined as follows:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>skip_commit()
{
shift;
while [ -n "$1" ];
do
shift;
map "$1";
shift;
done;
}</code></pre>
</div></div>
<div class="paragraph"><p>The shift magic first throws away the tree id and then the -p
parameters. Note that this handles merges properly! In case Darl
committed a merge between P1 and P2, it will be propagated properly
and all children of the merge will become merge commits with P1,P2
as their parents instead of the merge commit.</p></div>
<div class="paragraph"><p><strong>NOTE</strong> the changes introduced by the commits, and which are not reverted
by subsequent commits, will still be in the rewritten branch. If you want
to throw out <em>changes</em> together with the commits, you should use the
interactive mode of <em>git rebase</em>.</p></div>
<div class="paragraph"><p>You can rewrite the commit log messages using <code>--msg-filter</code>. For
example, <em>git-svn-id</em> strings in a repository created by <em>git svn</em> can
be removed this way:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch --msg-filter '
sed -e "/^git-svn-id:/d"
'</code></pre>
</div></div>
<div class="paragraph"><p>If you need to add <em>Acked-by</em> lines to, say, the last 10 commits (none
of which is a merge), use this command:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch --msg-filter '
cat &&
echo "Acked-by: Bugs Bunny <bunny@bugzilla.org>"
' HEAD~10..HEAD</code></pre>
</div></div>
<div class="paragraph"><p>To restrict rewriting to only part of the history, specify a revision
range in addition to the new branch name. The new branch name will
point to the top-most revision that a <em>git rev-list</em> of this range
will print.</p></div>
<div class="paragraph"><p>Consider this history:</p></div>
<div class="listingblock">
<div class="content">
<pre><code> D--E--F--G--H
/ /
A--B-----C</code></pre>
</div></div>
<div class="paragraph"><p>To rewrite only commits D,E,F,G,H, but leave A, B and C alone, use:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch ... C..H</code></pre>
</div></div>
<div class="paragraph"><p>To rewrite commits E,F,G,H, use one of these:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch ... C..H --not D
git filter-branch ... D..H --not C</code></pre>
</div></div>
<div class="paragraph"><p>To move the whole tree into a subdirectory, or remove it from there:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch --index-filter \
'git ls-files -s | sed "s-\t\"*-&newsubdir/-" |
GIT_INDEX_FILE=$GIT_INDEX_FILE.new \
git update-index --index-info &&
mv "$GIT_INDEX_FILE.new" "$GIT_INDEX_FILE"' HEAD</code></pre>
</div></div>
</div>
</div>
<div class="sect1">
<h2 id="_checklist_for_shrinking_a_repository">Checklist for Shrinking a Repository</h2>
<div class="sectionbody">
<div class="paragraph"><p>git-filter-branch is often used to get rid of a subset of files,
usually with some combination of <code>--index-filter</code> and
<code>--subdirectory-filter</code>. People expect the resulting repository to
be smaller than the original, but you need a few more steps to
actually make it smaller, because git tries hard not to lose your
objects until you tell it to. First make sure that:</p></div>
<div class="ulist"><ul>
<li>
<p>
You really removed all variants of a filename, if a blob was moved
over its lifetime. <code>git log --name-only --follow --all -- filename</code>
can help you find renames.
</p>
</li>
<li>
<p>
You really filtered all refs: use <code>--tag-name-filter cat -- --all</code>
when calling git-filter-branch.
</p>
</li>
</ul></div>
<div class="paragraph"><p>Then there are two ways to get a smaller repository. A safer way is
to clone, that keeps your original intact.</p></div>
<div class="ulist"><ul>
<li>
<p>
Clone it with <code>git clone file:///path/to/repo</code>. The clone
will not have the removed objects. See <a href="git-clone.html">git-clone(1)</a>. (Note
that cloning with a plain path just hardlinks everything!)
</p>
</li>
</ul></div>
<div class="paragraph"><p>If you really don’t want to clone it, for whatever reasons, check the
following points instead (in this order). This is a very destructive
approach, so <strong>make a backup</strong> or go back to cloning it. You have been
warned.</p></div>
<div class="ulist"><ul>
<li>
<p>
Remove the original refs backed up by git-filter-branch: say <code>git
for-each-ref --format="%(refname)" refs/original/ | xargs -n 1 git
update-ref -d</code>.
</p>
</li>
<li>
<p>
Expire all reflogs with <code>git reflog expire --expire=now --all</code>.
</p>
</li>
<li>
<p>
Garbage collect all unreferenced objects with <code>git gc --prune=now</code>
(or if your git-gc is not new enough to support arguments to
<code>--prune</code>, use <code>git repack -ad; git prune</code> instead).
</p>
</li>
</ul></div>
</div>
</div>
<div class="sect1">
<h2 id="_git">GIT</h2>
<div class="sectionbody">
<div class="paragraph"><p>Part of the <a href="git.html">git(1)</a> suite</p></div>
</div>
</div>
</div>
<div id="footnotes"><hr /></div>
<div id="footer">
<div id="footer-text">
Last updated 2012-09-18 15:30:10 PDT
</div>
</div>
</body>
</html>
|
Java
|
// -*- coding: us-ascii-unix -*-
// Copyright 2012 Lukas Kemmer
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You
// may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
#include <cassert>
#include "text/utf8.hh"
#include "text/utf8-string.hh"
namespace faint{
inline bool outside(const std::string& data, size_t pos){
return utf8::num_characters(data) <= pos;
}
utf8_string::utf8_string(size_t n, const utf8_char& ch){
for (size_t i = 0; i != n; i++){
m_data += ch.str();
}
}
utf8_string::utf8_string(const utf8_char& ch)
: utf8_string(1, ch)
{}
utf8_string::utf8_string(const char* str)
: m_data(str)
{}
utf8_string::utf8_string(const std::string& str)
: m_data(str)
{}
utf8_char utf8_string::at(size_t pos) const{
if (outside(m_data, pos)){
throw std::out_of_range("utf8_string::at invalid string position");
}
return operator[](pos);
}
utf8_char utf8_string::back() const{
assert(!m_data.empty());
return operator[](size() - 1);
}
utf8_char utf8_string::front() const{
assert(!m_data.empty());
return operator[](0);
}
size_t utf8_string::bytes() const{
return m_data.size();
}
void utf8_string::clear(){
m_data.clear();
}
utf8_string utf8_string::substr(size_t pos, size_t n) const{
if (outside(m_data, pos)){
throw std::out_of_range("utf8_string::substr invalid string position");
}
size_t startByte = utf8::char_num_to_byte_num_checked(pos, m_data);
size_t numBytes = (n == utf8_string::npos) ?
std::string::npos :
utf8::char_num_to_byte_num_clamped(pos + n, m_data) - startByte;
return utf8_string(m_data.substr(startByte, numBytes));
}
const char* utf8_string::c_str() const{
return m_data.c_str();
}
const std::string& utf8_string::str() const{
return m_data;
}
size_t utf8_string::size() const{
return utf8::num_characters(m_data);
}
bool utf8_string::empty() const{
return m_data.empty();
}
utf8_string& utf8_string::erase(size_t pos, size_t n){
if (outside(m_data, pos)){
throw std::out_of_range("utf8_string::erase invalid string position");
}
size_t startByte = utf8::char_num_to_byte_num_clamped(pos, m_data);
size_t numBytes = (n == npos ? npos :
utf8::char_num_to_byte_num_clamped(pos + n, m_data) - startByte);
m_data.erase(startByte, numBytes);
return *this;
}
utf8_string& utf8_string::insert(size_t pos, const utf8_string& inserted){
if (pos > utf8::num_characters(m_data)){
throw std::out_of_range("invalid insertion index");
}
m_data.insert(utf8::char_num_to_byte_num_checked(pos, m_data), inserted.str());
return *this;
}
utf8_string& utf8_string::insert(size_t pos, size_t num, const utf8_char& c){
if (pos > utf8::num_characters(m_data)){
throw std::out_of_range("invalid insertion index");
}
insert(pos, utf8_string(num, c));
return *this;
}
utf8_char utf8_string::operator[](size_t i) const{
size_t pos = utf8::char_num_to_byte_num_checked(i, m_data);
size_t numBytes = faint::utf8::prefix_num_bytes(m_data[pos]);
return utf8_char(m_data.substr(pos, numBytes));
}
size_t utf8_string::find(const utf8_char& ch, size_t start) const{
// Since the leading byte has a unique pattern, using regular
// std::string find should be OK, I think.
size_t pos = m_data.find(ch.str(),
utf8::char_num_to_byte_num_checked(start, m_data));
if (pos == npos){
return pos;
}
return utf8::byte_num_to_char_num(pos, m_data);
}
size_t utf8_string::find_last_of(const utf8_string& s, size_t inPos) const{
const size_t endPos = inPos == npos ? size() : inPos;
for (size_t i = 0; i != endPos; i++){
auto pos = endPos - i - 1;
if (s.find((*this)[pos]) != utf8_string::npos){
return pos;
}
}
return utf8_string::npos;
}
size_t utf8_string::rfind(const utf8_char& ch, size_t start) const{
// Since the leading byte has a unique pattern, using regular
// std::string rfind should be OK, I think.
if (m_data.empty()){
return npos;
}
size_t startByte = (start == npos) ? m_data.size() - 1 :
utf8::char_num_to_byte_num_checked(start, m_data);
size_t pos = m_data.rfind(ch.str(), startByte);
return pos == npos ? npos :
utf8::byte_num_to_char_num(pos, m_data);
}
utf8_string& utf8_string::operator=(const utf8_string& other){
if (&other == this){
return *this;
}
m_data = other.m_data;
return *this;
}
utf8_string& utf8_string::operator+=(const utf8_char& ch){
m_data += ch.str();
return *this;
}
utf8_string& utf8_string::operator+=(const utf8_string& str){
m_data += str.str();
return *this;
}
utf8_string operator+(const utf8_string& lhs, const utf8_char& rhs){
return utf8_string(lhs.str() + rhs.str());
}
utf8_string operator+(const utf8_string& lhs, const utf8_string& rhs){
return utf8_string(lhs.str() + rhs.str());
}
utf8_string operator+(const utf8_char& lhs, const utf8_string& rhs){
return utf8_string(lhs.str() + rhs.str());
}
const size_t utf8_string::npos(std::string::npos);
bool utf8_string::operator<(const utf8_string& s) const{
return m_data < s.m_data;
}
bool is_ascii(const utf8_string& s){
const std::string& bytes = s.str();
for (char ch : bytes){
if (utf8::prefix_num_bytes(ch) != 1){
return false;
}
}
return true;
}
std::ostream& operator<<(std::ostream& o, const utf8_string& s){
o << s.str();
return o;
}
bool operator==(const utf8_string& lhs, const utf8_string& rhs){
return lhs.str() == rhs.str();
}
bool operator!=(const utf8_string& lhs, const utf8_string& rhs){
return !(lhs == rhs);
}
utf8_string_const_iterator begin(const utf8_string& s){
return utf8_string_const_iterator(s, 0);
}
utf8_string_const_iterator end(const utf8_string& s){
return utf8_string_const_iterator(s, s.size());
}
} // namespace
|
Java
|
package com.asura.monitor.platform.dao;
import com.asura.framework.base.paging.PagingResult;
import com.asura.framework.base.paging.SearchMap;
import com.asura.framework.dao.mybatis.base.MybatisDaoContext;
import com.asura.framework.dao.mybatis.paginator.domain.PageBounds;
import com.asura.common.dao.BaseDao;
import com.asura.monitor.platform.entity.MonitorPlatformServerEntity;
import org.springframework.stereotype.Repository;
import javax.annotation.Resource;
/**
* <p></p>
* <p/>
* <PRE>
* <BR>
* <BR>-----------------------------------------------
* <BR>
* </PRE>
*
* @author zhaozq14
* @version 1.0
* @date 2016-11-07 11:35:05
* @since 1.0
*/
@Repository("com.asura.monitor.configure.dao.MonitorPlatformServerDao")
public class MonitorPlatformServerDao extends BaseDao<MonitorPlatformServerEntity>{
@Resource(name="monitor.MybatisDaoContext")
private MybatisDaoContext mybatisDaoContext;
/**
* Pages through platform server records using the given MyBatis statement.
*
* @param searchMap  query conditions to apply
* @param pageBounds paging bounds for the result page
* @param sqlId      id of the MyBatis statement to execute, relative to this DAO's namespace
* @return a page of MonitorPlatformServerEntity results
*/
public PagingResult<MonitorPlatformServerEntity> findAll(SearchMap searchMap, PageBounds pageBounds, String sqlId){
return mybatisDaoContext.findForPage(this.getClass().getName()+"."+sqlId,MonitorPlatformServerEntity.class,searchMap,pageBounds);
}
}
|
Java
|
/**
* Jakarta Bean Validation TCK
*
* License: Apache License, Version 2.0
* See the license.txt file in the root directory or <http://www.apache.org/licenses/LICENSE-2.0>.
*/
package org.hibernate.beanvalidation.tck.tests.constraints.constraintdefinition;
import static org.hibernate.beanvalidation.tck.util.ConstraintViolationAssert.assertNoViolations;
import static org.hibernate.beanvalidation.tck.util.ConstraintViolationAssert.assertThat;
import static org.hibernate.beanvalidation.tck.util.ConstraintViolationAssert.violationOf;
import static org.testng.Assert.assertEquals;
import java.util.Set;
import jakarta.validation.ConstraintViolation;
import jakarta.validation.Validator;
import jakarta.validation.constraints.Size;
import jakarta.validation.groups.Default;
import jakarta.validation.metadata.ConstraintDescriptor;
import org.hibernate.beanvalidation.tck.beanvalidation.Sections;
import org.hibernate.beanvalidation.tck.tests.AbstractTCKTest;
import org.hibernate.beanvalidation.tck.util.TestUtil;
import org.jboss.arquillian.container.test.api.Deployment;
import org.jboss.shrinkwrap.api.spec.WebArchive;
import org.jboss.test.audit.annotations.SpecAssertion;
import org.jboss.test.audit.annotations.SpecVersion;
import org.testng.annotations.Test;
/**
* @author Hardy Ferentschik
* @author Guillaume Smet
*/
@SpecVersion(spec = "beanvalidation", version = "3.0.0")
public class ConstraintDefinitionsTest extends AbstractTCKTest {
@Deployment
public static WebArchive createTestArchive() {
return webArchiveBuilder()
.withTestClassPackage( ConstraintDefinitionsTest.class )
.build();
}
@Test
@SpecAssertion(section = Sections.CONSTRAINTSDEFINITIONIMPLEMENTATION_CONSTRAINTDEFINITION_PROPERTIES, id = "a")
@SpecAssertion(section = Sections.CONSTRAINTSDEFINITIONIMPLEMENTATION_MULTIPLECONSTRAINTS, id = "a")
public void testConstraintWithCustomAttributes() {
Validator validator = TestUtil.getValidatorUnderTest();
Set<ConstraintDescriptor<?>> descriptors = validator.getConstraintsForClass( Person.class )
.getConstraintsForProperty( "lastName" )
.getConstraintDescriptors();
assertEquals( descriptors.size(), 2, "There should be two constraints on the lastName property." );
for ( ConstraintDescriptor<?> descriptor : descriptors ) {
assertEquals(
descriptor.getAnnotation().annotationType().getName(),
AlwaysValid.class.getName(),
"Wrong annotation type."
);
}
Set<ConstraintViolation<Person>> constraintViolations = validator.validate( new Person( "John", "Doe" ) );
assertThat( constraintViolations ).containsOnlyViolations(
violationOf( AlwaysValid.class )
);
}
@Test
@SpecAssertion(section = Sections.CONSTRAINTSDEFINITIONIMPLEMENTATION_MULTIPLECONSTRAINTS, id = "a")
@SpecAssertion(section = Sections.CONSTRAINTSDEFINITIONIMPLEMENTATION_MULTIPLECONSTRAINTS, id = "b")
public void testRepeatableConstraint() {
Validator validator = TestUtil.getValidatorUnderTest();
Set<ConstraintDescriptor<?>> descriptors = validator.getConstraintsForClass( Movie.class )
.getConstraintsForProperty( "title" )
.getConstraintDescriptors();
assertEquals( descriptors.size(), 2, "There should be two constraints on the title property." );
for ( ConstraintDescriptor<?> descriptor : descriptors ) {
assertEquals(
descriptor.getAnnotation().annotationType().getName(),
Size.class.getName(),
"Wrong annotation type."
);
}
Set<ConstraintViolation<Movie>> constraintViolations = validator.validate( new Movie( "Title" ) );
assertNoViolations( constraintViolations );
constraintViolations = validator.validate( new Movie( "A" ) );
assertThat( constraintViolations ).containsOnlyViolations(
violationOf( Size.class )
);
constraintViolations = validator.validate( new Movie( "A movie title far too long that does not respect the constraint" ) );
assertThat( constraintViolations ).containsOnlyViolations(
violationOf( Size.class )
);
}
@Test
@SpecAssertion(section = Sections.CONSTRAINTSDEFINITIONIMPLEMENTATION_CONSTRAINTDEFINITION_PROPERTIES_GROUPS, id = "d")
public void testDefaultGroupAssumedWhenNoGroupsSpecified() {
Validator validator = TestUtil.getValidatorUnderTest();
ConstraintDescriptor<?> descriptor = validator.getConstraintsForClass( Person.class )
.getConstraintsForProperty( "firstName" )
.getConstraintDescriptors()
.iterator()
.next();
Set<Class<?>> groups = descriptor.getGroups();
assertEquals( groups.size(), 1, "The group set should only contain one entry." );
assertEquals( groups.iterator().next(), Default.class, "The Default group should be returned." );
}
}
|
Java
|
/*
* Copyright (c) 2008-2017, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.map.impl;
import com.hazelcast.config.MaxSizeConfig;
import com.hazelcast.core.IFunction;
import com.hazelcast.nio.serialization.Data;
import com.hazelcast.nio.serialization.SerializableByConvention;
import com.hazelcast.spi.partition.IPartitionService;
import com.hazelcast.util.CollectionUtil;
import com.hazelcast.util.UnmodifiableIterator;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.NoSuchElementException;
import static com.hazelcast.config.MaxSizeConfig.MaxSizePolicy.PER_NODE;
import static com.hazelcast.util.MapUtil.createHashMap;
import static com.hazelcast.util.Preconditions.checkNotNull;
public final class MapKeyLoaderUtil {
private MapKeyLoaderUtil() {
}
/**
* Returns the role for the map key loader based on the passed parameters.
* The partition owner of the map name partition is the sender.
* The first replica of the map name partition is the sender backup.
* Other partition owners are receivers and other partition replicas do
* not have a role.
*
* @param isPartitionOwner if this is the partition owner
* @param isMapNamePartition if this is the partition containing the map name
* @param isMapNamePartitionFirstReplica if this is the first replica for the partition
* containing the map name
* @return the map key loader role
*/
static MapKeyLoader.Role assignRole(boolean isPartitionOwner, boolean isMapNamePartition,
boolean isMapNamePartitionFirstReplica) {
if (isMapNamePartition) {
if (isPartitionOwner) {
// map-name partition owner is the SENDER
return MapKeyLoader.Role.SENDER;
} else {
if (isMapNamePartitionFirstReplica) {
// first replica of the map-name partition is the SENDER_BACKUP
return MapKeyLoader.Role.SENDER_BACKUP;
} else {
// other replicas of the map-name partition do not have a role
return MapKeyLoader.Role.NONE;
}
}
} else {
// ordinary partition owners are RECEIVERs, otherwise no role
return isPartitionOwner ? MapKeyLoader.Role.RECEIVER : MapKeyLoader.Role.NONE;
}
}
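// Illustrative mapping of the parameters above to roles (not part of the
// original file; derived directly from the branches of assignRole, "*" means
// the value does not matter):
//   assignRole(true,  true,  *    ) -> SENDER        (owner of the map-name partition)
//   assignRole(false, true,  true ) -> SENDER_BACKUP (first replica of the map-name partition)
//   assignRole(false, true,  false) -> NONE
//   assignRole(true,  false, *    ) -> RECEIVER      (owner of any other partition)
//   assignRole(false, false, *    ) -> NONE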
/**
* Transforms an iterator of entries to an iterator of entry batches
* where each batch is represented as a map from entry key to
* list of entry values.
* The maximum size of the entry value list in any batch is
* determined by the {@code maxBatch} parameter. Only one
* entry value list may have the {@code maxBatch} size; the other
* lists will be smaller.
*
* @param entries the entries to be batched
* @param maxBatch the maximum size of an entry group in a single batch
* @return an iterator with entry batches
*/
static Iterator<Map<Integer, List<Data>>> toBatches(final Iterator<Entry<Integer, Data>> entries,
final int maxBatch) {
return new UnmodifiableIterator<Map<Integer, List<Data>>>() {
@Override
public boolean hasNext() {
return entries.hasNext();
}
@Override
public Map<Integer, List<Data>> next() {
if (!entries.hasNext()) {
throw new NoSuchElementException();
}
return nextBatch(entries, maxBatch);
}
};
}
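// Illustrative usage of toBatches() (not part of the original file); the
// source iterator and the batch size of 1000 are assumptions:
//
//   Iterator<Entry<Integer, Data>> keysByPartition = ...; // partition ID -> serialized key
//   Iterator<Map<Integer, List<Data>>> batches = toBatches(keysByPartition, 1000);
//   while (batches.hasNext()) {
//       Map<Integer, List<Data>> batch = batches.next(); // at most one list reaches 1000 entries
//       // dispatch the batch to the partitions that own the keys
//   }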
/**
* Groups entries by the entry key. Entries are grouped until one
* group reaches {@code maxBatch} entries or until the
* {@code entries} have been exhausted.
*
* @param entries the entries to be grouped by key
* @param maxBatch the maximum size of a group
* @return the grouped entries by entry key
*/
private static Map<Integer, List<Data>> nextBatch(Iterator<Entry<Integer, Data>> entries, int maxBatch) {
Map<Integer, List<Data>> batch = createHashMap(maxBatch);
while (entries.hasNext()) {
Entry<Integer, Data> e = entries.next();
List<Data> partitionKeys = CollectionUtil.addToValueList(batch, e.getKey(), e.getValue());
if (partitionKeys.size() >= maxBatch) {
break;
}
}
return batch;
}
/**
* Returns the configured maximum entry count per node if the max
* size policy is {@link MaxSizeConfig.MaxSizePolicy#PER_NODE}
* and is not the default, otherwise returns {@code -1}.
*
* @param maxSizeConfig the max size configuration
* @return the max size per node or {@code -1} if not configured or is the default
* @see MaxSizeConfig#getMaxSizePolicy()
* @see MaxSizeConfig#getSize()
*/
public static int getMaxSizePerNode(MaxSizeConfig maxSizeConfig) {
// max size or -1 if policy is different or not set
double maxSizePerNode = maxSizeConfig.getMaxSizePolicy() == PER_NODE ? maxSizeConfig.getSize() : -1D;
if (maxSizePerNode == MaxSizeConfig.DEFAULT_MAX_SIZE) {
// unlimited
return -1;
}
return (int) maxSizePerNode;
}
/**
* Returns an {@link IFunction} that transforms a {@link Data}
* parameter into a map entry whose key is the partition ID
* and whose value is the provided parameter.
*
* @param partitionService the partition service
*/
static IFunction<Data, Entry<Integer, Data>> toPartition(final IPartitionService partitionService) {
return new DataToEntry(partitionService);
}
@SerializableByConvention
private static class DataToEntry implements IFunction<Data, Entry<Integer, Data>> {
private final IPartitionService partitionService;
public DataToEntry(IPartitionService partitionService) {
this.partitionService = partitionService;
}
@Override
public Entry<Integer, Data> apply(Data input) {
// reject null keys loaded by the MapLoader
checkNotNull(input, "Key loaded by a MapLoader cannot be null.");
Integer partition = partitionService.getPartitionId(input);
return new MapEntrySimple<Integer, Data>(partition, input);
}
}
}
|
Java
|
<div class="btn-group mw-version-selector">
<button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown">
<span class="descriptor">Version </span>
<span class="descriptor-sm">V. </span>
{{currentVersionModel.attributes[versionNumberKey]}}
<span ng-if="currentVersionModel.attributes.published" mw-icon="rln-icon published"></span>
</button>
<ul class="version-dropdown dropdown-menu pull-right" style="min-width:100%" role="menu">
<li ng-repeat="version in versionCollection.models" ng-class="{active:(version.attributes.uuid === currentVersionModel.attributes.uuid)}">
<a ng-href="{{getUrl(version.attributes.uuid)}}">
{{version.attributes[versionNumberKey]}}
<span ng-if="version.attributes.published" mw-icon="rln-icon published"></span>
</a>
</li>
</ul>
</div>
|
Java
|
## Slurm :o:
| Field    | Value                       |
| -------- | --------------------------- |
| title | Slurm |
| status | 10 |
| section | Cluster Resource Management |
| keywords | Cluster Resource Management |
The Simple Linux Utility for Resource Management (SLURM) workload
manager is an open source, scalable cluster resource management tool
used for job scheduling on small to large multi-core Linux
clusters. SLURM has three key functions. First, it allocates
resources to users for some duration with exclusive and/or
non-exclusive access. Second, it enables users to start, execute and
monitor jobs on the resources allocated to them. Finally, it
arbitrates contention for resources by maintaining a queue of pending
work [@www-slurmSchedmdSite]. The SLURM architecture has the
following components: a centralized manager that monitors resources
and work (optionally with a backup manager), a daemon on each server
providing fault-tolerant communications, an optional daemon for
clusters with multiple managers, and tools to initiate, terminate and
report on jobs, including a graphical view of the network topology.
It also provides around twenty additional plugins for functionality
such as accounting, advanced reservation, gang scheduling, backfill
scheduling and multifactor job prioritization. Though originally
developed for Linux, SLURM also provides full support on platforms
such as AIX, FreeBSD, NetBSD and Solaris [@www-slurmPlatformsSite]
[@www-slurm].
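As a minimal, illustrative sketch (the script name, resource values and
commands below are generic assumptions rather than details from the cited
sources), a SLURM job is usually described by a batch script containing
`#SBATCH` directives and handed to the scheduler with `sbatch`:

```bash
#!/bin/bash
#SBATCH --job-name=demo        # name shown in the queue
#SBATCH --nodes=1              # number of nodes to allocate
#SBATCH --ntasks=4             # number of tasks (processes) to run
#SBATCH --time=00:10:00        # walltime limit (HH:MM:SS)
#SBATCH --output=demo-%j.out   # output file; %j expands to the job id

srun hostname                  # launch the tasks on the allocated resources
```

Submitting the script with `sbatch demo.sh` places the job in the queue, and
`squeue` shows its state while it waits for and then uses the allocation.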
|
Java
|
<?php
defined('ABSPATH') or die('No script kiddies please!');
/**
* Outputs numbered pagination links for archive and blog listing pages.
*
* @return void
*/
function Yonk_numeric_posts_nav() {
if(is_singular())
return;
global $wp_query;
/** Stop execution if there's only 1 page */
if($wp_query->max_num_pages <= 1)
return;
$paged = get_query_var('paged') ? absint(get_query_var('paged')) : 1;
$max = intval($wp_query->max_num_pages);
/** Add current page to the array */
if ($paged >= 1)
$links[] = $paged;
/** Add the pages around the current page to the array */
if ($paged >= 3) {
$links[] = $paged - 1;
$links[] = $paged - 2;
}
if (($paged + 2) <= $max) {
$links[] = $paged + 2;
$links[] = $paged + 1;
}
echo '<div class="navigation"><ul class="pagination">' . "\n";
/** Previous Post Link */
if (get_previous_posts_link())
printf( '<li>%s</li>' . "\n", get_previous_posts_link() );
/** Link to first page, plus ellipses if necessary */
if (!in_array(1, $links )) {
$class = 1 == $paged ? ' class="active"' : '';
printf('<li%s><a href="%s" aria-label="Previous"><span aria-hidden="true">%s</span></a></li>' . "\n", $class, esc_url(get_pagenum_link(1)), '1');
if (!in_array(2, $links))
echo '<li>…</li>';
}
/** Link to current page, plus 2 pages in either direction if necessary */
sort($links);
foreach ((array)$links as $link) {
$class = $paged == $link ? ' class="active"' : '';
printf('<li%s><a href="%s">%s</a></li>' . "\n", $class, esc_url(get_pagenum_link($link)), $link);
}
/** Link to last page, plus ellipses if necessary */
if (!in_array($max, $links)) {
if (!in_array($max - 1, $links))
echo '<li>…</li>' . "\n";
$class = $paged == $max ? ' class="active"' : '';
printf('<li%s><a href="%s" aria-label="Next"><span aria-hidden="true">%s</span></a></li>' . "\n", $class, esc_url(get_pagenum_link($max)), $max);
}
/** Next Post Link */
if (get_next_posts_link())
printf('<li>%s</li>' . "\n", get_next_posts_link());
echo '</ul></div>' . "\n";
}
|
Java
|
package org.targettest.org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.targettest.org.apache.lucene.document.Document;
import org.targettest.org.apache.lucene.document.FieldSelector;
import org.targettest.org.apache.lucene.index.DirectoryReader.MultiTermDocs;
import org.targettest.org.apache.lucene.index.DirectoryReader.MultiTermEnum;
import org.targettest.org.apache.lucene.index.DirectoryReader.MultiTermPositions;
import org.targettest.org.apache.lucene.search.DefaultSimilarity;
import org.targettest.org.apache.lucene.search.FieldCache;
/** An IndexReader which reads multiple indexes, appending
* their content. */
public class MultiReader extends IndexReader implements Cloneable {
protected IndexReader[] subReaders;
private int[] starts; // 1st docno for each segment
private boolean[] decrefOnClose; // remember which subreaders to decRef on close
private Map<String,byte[]> normsCache = new HashMap<String,byte[]>();
private int maxDoc = 0;
private int numDocs = -1;
private boolean hasDeletions = false;
/**
* <p>Construct a MultiReader aggregating the named set of (sub)readers.
* Directory locking for delete, undeleteAll, and setNorm operations is
* left to the subreaders. </p>
* <p>Note that all subreaders are closed if this MultiReader is closed.</p>
* @param subReaders set of (sub)readers
* @throws IOException
*/
public MultiReader(IndexReader... subReaders) {
initialize(subReaders, true);
}
/**
* <p>Construct a MultiReader aggregating the named set of (sub)readers.
* Directory locking for delete, undeleteAll, and setNorm operations is
* left to the subreaders. </p>
* @param closeSubReaders indicates whether the subreaders should be closed
* when this MultiReader is closed
* @param subReaders set of (sub)readers
* @throws IOException
*/
public MultiReader(IndexReader[] subReaders, boolean closeSubReaders) {
initialize(subReaders, closeSubReaders);
}
private void initialize(IndexReader[] subReaders, boolean closeSubReaders) {
this.subReaders = subReaders.clone();
starts = new int[subReaders.length + 1]; // build starts array
decrefOnClose = new boolean[subReaders.length];
for (int i = 0; i < subReaders.length; i++) {
starts[i] = maxDoc;
maxDoc += subReaders[i].maxDoc(); // compute maxDocs
if (!closeSubReaders) {
subReaders[i].incRef();
decrefOnClose[i] = true;
} else {
decrefOnClose[i] = false;
}
if (subReaders[i].hasDeletions())
hasDeletions = true;
}
starts[subReaders.length] = maxDoc;
}
/**
* Tries to reopen the subreaders.
* <br>
* If one or more subreaders could be re-opened (i. e. subReader.reopen()
* returned a new instance != subReader), then a new MultiReader instance
* is returned, otherwise this instance is returned.
* <p>
* A re-opened instance might share one or more subreaders with the old
* instance. Index modification operations result in undefined behavior
* when performed before the old instance is closed.
* (see {@link IndexReader#reopen()}).
* <p>
* If subreaders are shared, then the reference count of those
* readers is increased to ensure that the subreaders remain open
* until the last referring reader is closed.
*
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
@Override
public synchronized IndexReader reopen() throws CorruptIndexException, IOException {
return doReopen(false);
}
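// Illustrative usage of reopen() (not part of the original file); the
// directories and variable names are assumptions:
//
//   IndexReader reader = new MultiReader(IndexReader.open(dir1), IndexReader.open(dir2));
//   ...
//   IndexReader reopened = reader.reopen(); // a new instance only if some subreader changed
//   if (reopened != reader) {
//       reader.close();                     // the caller still closes the old instance
//       reader = reopened;
//   }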
/**
* Clones the subreaders.
* (see {@link IndexReader#clone()}).
* <br>
* <p>
* If subreaders are shared, then the reference count of those
* readers is increased to ensure that the subreaders remain open
* until the last referring reader is closed.
*/
@Override
public synchronized Object clone() {
try {
return doReopen(true);
} catch (Exception ex) {
throw new RuntimeException(ex);
}
}
/**
* If clone is true then we clone each of the subreaders
* @param doClone
* @return New IndexReader, or same one (this) if
* reopen/clone is not necessary
* @throws CorruptIndexException
* @throws IOException
*/
protected IndexReader doReopen(boolean doClone) throws CorruptIndexException, IOException {
ensureOpen();
boolean reopened = false;
IndexReader[] newSubReaders = new IndexReader[subReaders.length];
boolean success = false;
try {
for (int i = 0; i < subReaders.length; i++) {
if (doClone)
newSubReaders[i] = (IndexReader) subReaders[i].clone();
else
newSubReaders[i] = subReaders[i].reopen();
// if at least one of the subreaders was updated we remember that
// and return a new MultiReader
if (newSubReaders[i] != subReaders[i]) {
reopened = true;
}
}
success = true;
} finally {
if (!success && reopened) {
for (int i = 0; i < newSubReaders.length; i++) {
if (newSubReaders[i] != subReaders[i]) {
try {
newSubReaders[i].close();
} catch (IOException ignore) {
// keep going - we want to clean up as much as possible
}
}
}
}
}
if (reopened) {
boolean[] newDecrefOnClose = new boolean[subReaders.length];
for (int i = 0; i < subReaders.length; i++) {
if (newSubReaders[i] == subReaders[i]) {
newSubReaders[i].incRef();
newDecrefOnClose[i] = true;
}
}
MultiReader mr = new MultiReader(newSubReaders);
mr.decrefOnClose = newDecrefOnClose;
return mr;
} else {
return this;
}
}
@Override
public TermFreqVector[] getTermFreqVectors(int n) throws IOException {
ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].getTermFreqVectors(n - starts[i]); // dispatch to segment
}
@Override
public TermFreqVector getTermFreqVector(int n, String field)
throws IOException {
ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].getTermFreqVector(n - starts[i], field);
}
@Override
public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException {
ensureOpen();
int i = readerIndex(docNumber); // find segment num
subReaders[i].getTermFreqVector(docNumber - starts[i], field, mapper);
}
@Override
public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException {
ensureOpen();
int i = readerIndex(docNumber); // find segment num
subReaders[i].getTermFreqVector(docNumber - starts[i], mapper);
}
@Override
public boolean isOptimized() {
return false;
}
@Override
public int numDocs() {
// Don't call ensureOpen() here (it could affect performance)
// NOTE: multiple threads may wind up init'ing
// numDocs... but that's harmless
if (numDocs == -1) { // check cache
int n = 0; // cache miss--recompute
for (int i = 0; i < subReaders.length; i++)
n += subReaders[i].numDocs(); // sum from readers
numDocs = n;
}
return numDocs;
}
@Override
public int maxDoc() {
// Don't call ensureOpen() here (it could affect performance)
return maxDoc;
}
// inherit javadoc
@Override
public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].document(n - starts[i], fieldSelector); // dispatch to segment reader
}
@Override
public boolean isDeleted(int n) {
// Don't call ensureOpen() here (it could affect performance)
int i = readerIndex(n); // find segment num
return subReaders[i].isDeleted(n - starts[i]); // dispatch to segment reader
}
@Override
public boolean hasDeletions() {
// Don't call ensureOpen() here (it could affect performance)
return hasDeletions;
}
@Override
protected void doDelete(int n) throws CorruptIndexException, IOException {
numDocs = -1; // invalidate cache
int i = readerIndex(n); // find segment num
subReaders[i].deleteDocument(n - starts[i]); // dispatch to segment reader
hasDeletions = true;
}
@Override
protected void doUndeleteAll() throws CorruptIndexException, IOException {
for (int i = 0; i < subReaders.length; i++)
subReaders[i].undeleteAll();
hasDeletions = false;
numDocs = -1; // invalidate cache
}
private int readerIndex(int n) { // find reader for doc n:
return DirectoryReader.readerIndex(n, this.starts, this.subReaders.length);
}
@Override
public boolean hasNorms(String field) throws IOException {
ensureOpen();
for (int i = 0; i < subReaders.length; i++) {
if (subReaders[i].hasNorms(field)) return true;
}
return false;
}
@Override
public synchronized byte[] norms(String field) throws IOException {
ensureOpen();
byte[] bytes = normsCache.get(field);
if (bytes != null)
return bytes; // cache hit
if (!hasNorms(field))
return null;
bytes = new byte[maxDoc()];
for (int i = 0; i < subReaders.length; i++)
subReaders[i].norms(field, bytes, starts[i]);
normsCache.put(field, bytes); // update cache
return bytes;
}
@Override
public synchronized void norms(String field, byte[] result, int offset)
throws IOException {
ensureOpen();
byte[] bytes = normsCache.get(field);
if (bytes==null && !hasNorms(field)) {
Arrays.fill(result, offset, result.length, DefaultSimilarity.encodeNorm(1.0f));
} else if (bytes != null) { // cache hit
System.arraycopy(bytes, 0, result, offset, maxDoc());
} else {
for (int i = 0; i < subReaders.length; i++) { // read from segments
subReaders[i].norms(field, result, offset + starts[i]);
}
}
}
@Override
protected void doSetNorm(int n, String field, byte value)
throws CorruptIndexException, IOException {
synchronized (normsCache) {
normsCache.remove(field); // clear cache
}
int i = readerIndex(n); // find segment num
subReaders[i].setNorm(n-starts[i], field, value); // dispatch
}
@Override
public TermEnum terms() throws IOException {
ensureOpen();
return new MultiTermEnum(this, subReaders, starts, null);
}
@Override
public TermEnum terms(Term term) throws IOException {
ensureOpen();
return new MultiTermEnum(this, subReaders, starts, term);
}
@Override
public int docFreq(Term t) throws IOException {
ensureOpen();
int total = 0; // sum freqs in segments
for (int i = 0; i < subReaders.length; i++)
total += subReaders[i].docFreq(t);
return total;
}
@Override
public TermDocs termDocs() throws IOException {
ensureOpen();
return new MultiTermDocs(this, subReaders, starts);
}
@Override
public TermPositions termPositions() throws IOException {
ensureOpen();
return new MultiTermPositions(this, subReaders, starts);
}
@Override
protected void doCommit(Map<String,String> commitUserData) throws IOException {
for (int i = 0; i < subReaders.length; i++)
subReaders[i].commit(commitUserData);
}
@Override
protected synchronized void doClose() throws IOException {
for (int i = 0; i < subReaders.length; i++) {
if (decrefOnClose[i]) {
subReaders[i].decRef();
} else {
subReaders[i].close();
}
}
// NOTE: only needed in case someone had asked for
// FieldCache for top-level reader (which is generally
// not a good idea):
FieldCache.DEFAULT.purge(this);
}
@Override
public Collection<String> getFieldNames (IndexReader.FieldOption fieldNames) {
ensureOpen();
return DirectoryReader.getFieldNames(fieldNames, this.subReaders);
}
/**
* Checks recursively if all subreaders are up to date.
*/
@Override
public boolean isCurrent() throws CorruptIndexException, IOException {
for (int i = 0; i < subReaders.length; i++) {
if (!subReaders[i].isCurrent()) {
return false;
}
}
// all subreaders are up to date
return true;
}
/** Not implemented.
* @throws UnsupportedOperationException
*/
@Override
public long getVersion() {
throw new UnsupportedOperationException("MultiReader does not support this method.");
}
@Override
public IndexReader[] getSequentialSubReaders() {
return subReaders;
}
}
|
Java
|
/*
* Copyright Strimzi authors.
* License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
*/
package io.strimzi.systemtest.kafka;
import io.fabric8.kubernetes.api.model.ConfigMap;
import io.fabric8.kubernetes.api.model.HasMetadata;
import io.fabric8.kubernetes.api.model.PersistentVolumeClaim;
import io.fabric8.kubernetes.api.model.Pod;
import io.fabric8.kubernetes.api.model.Quantity;
import io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder;
import io.fabric8.kubernetes.api.model.Secret;
import io.fabric8.kubernetes.api.model.SecurityContextBuilder;
import io.fabric8.kubernetes.api.model.Service;
import io.fabric8.kubernetes.api.model.apps.StatefulSet;
import io.fabric8.kubernetes.client.dsl.base.CustomResourceDefinitionContext;
import io.strimzi.api.kafka.Crds;
import io.strimzi.api.kafka.KafkaTopicList;
import io.strimzi.api.kafka.model.EntityOperatorSpec;
import io.strimzi.api.kafka.model.EntityTopicOperatorSpec;
import io.strimzi.api.kafka.model.EntityUserOperatorSpec;
import io.strimzi.api.kafka.model.Kafka;
import io.strimzi.api.kafka.model.KafkaClusterSpec;
import io.strimzi.api.kafka.model.KafkaResources;
import io.strimzi.api.kafka.model.KafkaTopic;
import io.strimzi.api.kafka.model.SystemProperty;
import io.strimzi.api.kafka.model.SystemPropertyBuilder;
import io.strimzi.api.kafka.model.ZookeeperClusterSpec;
import io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener;
import io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder;
import io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType;
import io.strimzi.api.kafka.model.storage.JbodStorage;
import io.strimzi.api.kafka.model.storage.JbodStorageBuilder;
import io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder;
import io.strimzi.operator.common.model.Labels;
import io.strimzi.systemtest.AbstractST;
import io.strimzi.systemtest.Constants;
import io.strimzi.systemtest.Environment;
import io.strimzi.systemtest.resources.operator.SetupClusterOperator;
import io.strimzi.systemtest.annotations.OpenShiftOnly;
import io.strimzi.systemtest.annotations.ParallelNamespaceTest;
import io.strimzi.systemtest.cli.KafkaCmdClient;
import io.strimzi.systemtest.kafkaclients.internalClients.InternalKafkaClient;
import io.strimzi.systemtest.resources.ResourceOperation;
import io.strimzi.systemtest.resources.crd.KafkaResource;
import io.strimzi.systemtest.resources.crd.KafkaTopicResource;
import io.strimzi.systemtest.templates.crd.KafkaClientsTemplates;
import io.strimzi.systemtest.templates.crd.KafkaTemplates;
import io.strimzi.systemtest.templates.crd.KafkaTopicTemplates;
import io.strimzi.systemtest.templates.crd.KafkaUserTemplates;
import io.strimzi.systemtest.utils.StUtils;
import io.strimzi.systemtest.utils.kafkaUtils.KafkaTopicUtils;
import io.strimzi.systemtest.utils.kafkaUtils.KafkaUtils;
import io.strimzi.systemtest.utils.kubeUtils.controllers.ConfigMapUtils;
import io.strimzi.systemtest.utils.kubeUtils.controllers.DeploymentUtils;
import io.strimzi.systemtest.utils.kubeUtils.controllers.StatefulSetUtils;
import io.strimzi.systemtest.utils.kubeUtils.objects.PersistentVolumeClaimUtils;
import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils;
import io.strimzi.systemtest.utils.kubeUtils.objects.ServiceUtils;
import io.strimzi.test.TestUtils;
import io.strimzi.test.executor.ExecResult;
import io.strimzi.test.timemeasuring.Operation;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.hamcrest.CoreMatchers;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.extension.ExtensionContext;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.stream.Collectors;
import static io.strimzi.api.kafka.model.KafkaResources.kafkaStatefulSetName;
import static io.strimzi.api.kafka.model.KafkaResources.zookeeperStatefulSetName;
import static io.strimzi.systemtest.Constants.CRUISE_CONTROL;
import static io.strimzi.systemtest.Constants.INTERNAL_CLIENTS_USED;
import static io.strimzi.systemtest.Constants.LOADBALANCER_SUPPORTED;
import static io.strimzi.systemtest.Constants.REGRESSION;
import static io.strimzi.systemtest.Constants.STATEFUL_SET;
import static io.strimzi.systemtest.utils.StUtils.configMap2Properties;
import static io.strimzi.systemtest.utils.StUtils.stringToProperties;
import static io.strimzi.test.TestUtils.fromYamlString;
import static io.strimzi.test.TestUtils.map;
import static io.strimzi.test.k8s.KubeClusterResource.cmdKubeClient;
import static io.strimzi.test.k8s.KubeClusterResource.kubeClient;
import static java.util.Arrays.asList;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.emptyOrNullString;
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.hasItems;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
import static org.junit.jupiter.api.Assumptions.assumeFalse;
@Tag(REGRESSION)
@SuppressWarnings("checkstyle:ClassFanOutComplexity")
class KafkaST extends AbstractST {
private static final Logger LOGGER = LogManager.getLogger(KafkaST.class);
private static final String TEMPLATE_PATH = TestUtils.USER_PATH + "/../packaging/examples/templates/cluster-operator";
public static final String NAMESPACE = "kafka-cluster-test";
private static final String OPENSHIFT_CLUSTER_NAME = "openshift-my-cluster";
@ParallelNamespaceTest
@OpenShiftOnly
void testDeployKafkaClusterViaTemplate(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
cluster.createCustomResources(extensionContext, TEMPLATE_PATH);
String templateName = "strimzi-ephemeral";
cmdKubeClient(namespaceName).createResourceAndApply(templateName, map("CLUSTER_NAME", OPENSHIFT_CLUSTER_NAME));
StatefulSetUtils.waitForAllStatefulSetPodsReady(namespaceName, KafkaResources.zookeeperStatefulSetName(OPENSHIFT_CLUSTER_NAME), 3, ResourceOperation.getTimeoutForResourceReadiness(STATEFUL_SET));
StatefulSetUtils.waitForAllStatefulSetPodsReady(namespaceName, KafkaResources.kafkaStatefulSetName(OPENSHIFT_CLUSTER_NAME), 3, ResourceOperation.getTimeoutForResourceReadiness(STATEFUL_SET));
DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaResources.entityOperatorDeploymentName(OPENSHIFT_CLUSTER_NAME), 1);
//Testing docker images
testDockerImagesForKafkaCluster(OPENSHIFT_CLUSTER_NAME, NAMESPACE, namespaceName, 3, 3, false);
//Testing labels
verifyLabelsForKafkaCluster(NAMESPACE, namespaceName, OPENSHIFT_CLUSTER_NAME, templateName);
LOGGER.info("Deleting Kafka cluster {} after test", OPENSHIFT_CLUSTER_NAME);
cmdKubeClient(namespaceName).deleteByName("Kafka", OPENSHIFT_CLUSTER_NAME);
//Wait for kafka deletion
cmdKubeClient(namespaceName).waitForResourceDeletion(Kafka.RESOURCE_KIND, OPENSHIFT_CLUSTER_NAME);
kubeClient(namespaceName).listPods(namespaceName).stream()
.filter(p -> p.getMetadata().getName().startsWith(OPENSHIFT_CLUSTER_NAME))
.forEach(p -> PodUtils.deletePodWithWait(p.getMetadata().getName()));
StatefulSetUtils.waitForStatefulSetDeletion(namespaceName, KafkaResources.kafkaStatefulSetName(OPENSHIFT_CLUSTER_NAME));
StatefulSetUtils.waitForStatefulSetDeletion(namespaceName, KafkaResources.zookeeperStatefulSetName(OPENSHIFT_CLUSTER_NAME));
DeploymentUtils.waitForDeploymentDeletion(namespaceName, KafkaResources.entityOperatorDeploymentName(OPENSHIFT_CLUSTER_NAME));
}
@ParallelNamespaceTest
void testEODeletion(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
// Get pod name to check termination process
Pod pod = kubeClient(namespaceName).listPods(namespaceName).stream()
.filter(p -> p.getMetadata().getName().startsWith(KafkaResources.entityOperatorDeploymentName(clusterName)))
.findAny()
.orElseThrow();
assertThat("Entity operator pod does not exist", pod, notNullValue());
LOGGER.info("Setting entity operator to null");
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> kafka.getSpec().setEntityOperator(null), namespaceName);
// Wait until the EO deployment (TO + UO) is removed
DeploymentUtils.waitForDeploymentDeletion(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName));
PodUtils.deletePodWithWait(namespaceName, pod.getMetadata().getName());
LOGGER.info("Entity operator was deleted");
}
@ParallelNamespaceTest
@SuppressWarnings({"checkstyle:MethodLength", "checkstyle:JavaNCSS"})
void testCustomAndUpdatedValues(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
LinkedHashMap<String, String> envVarGeneral = new LinkedHashMap<>();
envVarGeneral.put("TEST_ENV_1", "test.env.one");
envVarGeneral.put("TEST_ENV_2", "test.env.two");
LinkedHashMap<String, String> envVarUpdated = new LinkedHashMap<>();
envVarUpdated.put("TEST_ENV_2", "updated.test.env.two");
envVarUpdated.put("TEST_ENV_3", "test.env.three");
// Kafka Broker config
Map<String, Object> kafkaConfig = new HashMap<>();
kafkaConfig.put("offsets.topic.replication.factor", "1");
kafkaConfig.put("transaction.state.log.replication.factor", "1");
kafkaConfig.put("default.replication.factor", "1");
Map<String, Object> updatedKafkaConfig = new HashMap<>();
updatedKafkaConfig.put("offsets.topic.replication.factor", "2");
updatedKafkaConfig.put("transaction.state.log.replication.factor", "2");
updatedKafkaConfig.put("default.replication.factor", "2");
// Zookeeper Config
Map<String, Object> zookeeperConfig = new HashMap<>();
zookeeperConfig.put("tickTime", "2000");
zookeeperConfig.put("initLimit", "5");
zookeeperConfig.put("syncLimit", "2");
zookeeperConfig.put("autopurge.purgeInterval", "1");
Map<String, Object> updatedZookeeperConfig = new HashMap<>();
updatedZookeeperConfig.put("tickTime", "2500");
updatedZookeeperConfig.put("initLimit", "3");
updatedZookeeperConfig.put("syncLimit", "5");
final int initialDelaySeconds = 30;
final int timeoutSeconds = 10;
final int updatedInitialDelaySeconds = 31;
final int updatedTimeoutSeconds = 11;
final int periodSeconds = 10;
final int successThreshold = 1;
final int failureThreshold = 3;
final int updatedPeriodSeconds = 5;
final int updatedFailureThreshold = 1;
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 2)
.editSpec()
.editKafka()
.withNewReadinessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endReadinessProbe()
.withNewLivenessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endLivenessProbe()
.withConfig(kafkaConfig)
.withNewTemplate()
.withNewKafkaContainer()
.withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
.endKafkaContainer()
.endTemplate()
.endKafka()
.editZookeeper()
.withReplicas(2)
.withNewReadinessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.endReadinessProbe()
.withNewLivenessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.endLivenessProbe()
.withConfig(zookeeperConfig)
.withNewTemplate()
.withNewZookeeperContainer()
.withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
.endZookeeperContainer()
.endTemplate()
.endZookeeper()
.editEntityOperator()
.withNewTemplate()
.withNewTopicOperatorContainer()
.withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
.endTopicOperatorContainer()
.withNewUserOperatorContainer()
.withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
.endUserOperatorContainer()
.withNewTlsSidecarContainer()
.withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
.endTlsSidecarContainer()
.endTemplate()
.editUserOperator()
.withNewReadinessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endReadinessProbe()
.withNewLivenessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endLivenessProbe()
.endUserOperator()
.editTopicOperator()
.withNewReadinessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endReadinessProbe()
.withNewLivenessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endLivenessProbe()
.endTopicOperator()
.withNewTlsSidecar()
.withNewReadinessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endReadinessProbe()
.withNewLivenessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endLivenessProbe()
.endTlsSidecar()
.endEntityOperator()
.endSpec()
.build());
final Map<String, String> kafkaSnapshot = StatefulSetUtils.ssSnapshot(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName));
final Map<String, String> zkSnapshot = StatefulSetUtils.ssSnapshot(namespaceName, KafkaResources.zookeeperStatefulSetName(clusterName));
final Map<String, String> eoPod = DeploymentUtils.depSnapshot(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName));
LOGGER.info("Verify values before update");
checkReadinessLivenessProbe(namespaceName, kafkaStatefulSetName(clusterName), "kafka", initialDelaySeconds, timeoutSeconds,
periodSeconds, successThreshold, failureThreshold);
checkKafkaConfiguration(namespaceName, kafkaStatefulSetName(clusterName), kafkaConfig, clusterName);
checkSpecificVariablesInContainer(namespaceName, kafkaStatefulSetName(clusterName), "kafka", envVarGeneral);
String kafkaConfiguration = kubeClient().getConfigMap(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName)).getData().get("server.config");
assertThat(kafkaConfiguration, containsString("offsets.topic.replication.factor=1"));
assertThat(kafkaConfiguration, containsString("transaction.state.log.replication.factor=1"));
assertThat(kafkaConfiguration, containsString("default.replication.factor=1"));
String kafkaConfigurationFromPod = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "cat", "/tmp/strimzi.properties").out();
assertThat(kafkaConfigurationFromPod, containsString("offsets.topic.replication.factor=1"));
assertThat(kafkaConfigurationFromPod, containsString("transaction.state.log.replication.factor=1"));
assertThat(kafkaConfigurationFromPod, containsString("default.replication.factor=1"));
LOGGER.info("Testing Zookeepers");
checkReadinessLivenessProbe(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", initialDelaySeconds, timeoutSeconds,
periodSeconds, successThreshold, failureThreshold);
checkComponentConfiguration(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", "ZOOKEEPER_CONFIGURATION", zookeeperConfig);
checkSpecificVariablesInContainer(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", envVarGeneral);
LOGGER.info("Checking configuration of TO and UO");
checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "topic-operator", initialDelaySeconds, timeoutSeconds,
periodSeconds, successThreshold, failureThreshold);
checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "topic-operator", envVarGeneral);
checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "user-operator", initialDelaySeconds, timeoutSeconds,
periodSeconds, successThreshold, failureThreshold);
checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "user-operator", envVarGeneral);
checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "tls-sidecar", initialDelaySeconds, timeoutSeconds,
periodSeconds, successThreshold, failureThreshold);
checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "tls-sidecar", envVarGeneral);
LOGGER.info("Updating configuration of Kafka cluster");
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
KafkaClusterSpec kafkaClusterSpec = k.getSpec().getKafka();
kafkaClusterSpec.getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
kafkaClusterSpec.getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
kafkaClusterSpec.getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
kafkaClusterSpec.getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
kafkaClusterSpec.getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
kafkaClusterSpec.getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
kafkaClusterSpec.getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
kafkaClusterSpec.getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
kafkaClusterSpec.setConfig(updatedKafkaConfig);
kafkaClusterSpec.getTemplate().getKafkaContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));
ZookeeperClusterSpec zookeeperClusterSpec = k.getSpec().getZookeeper();
zookeeperClusterSpec.getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
zookeeperClusterSpec.getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
zookeeperClusterSpec.getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
zookeeperClusterSpec.getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
zookeeperClusterSpec.getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
zookeeperClusterSpec.getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
zookeeperClusterSpec.getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
zookeeperClusterSpec.getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
zookeeperClusterSpec.setConfig(updatedZookeeperConfig);
zookeeperClusterSpec.getTemplate().getZookeeperContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));
// Configuring TO and UO to use new values for InitialDelaySeconds and TimeoutSeconds
EntityOperatorSpec entityOperatorSpec = k.getSpec().getEntityOperator();
entityOperatorSpec.getTopicOperator().getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
entityOperatorSpec.getTopicOperator().getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
entityOperatorSpec.getTopicOperator().getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
entityOperatorSpec.getTopicOperator().getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
entityOperatorSpec.getTopicOperator().getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
entityOperatorSpec.getTopicOperator().getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
entityOperatorSpec.getTopicOperator().getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
entityOperatorSpec.getTopicOperator().getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
entityOperatorSpec.getUserOperator().getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
entityOperatorSpec.getUserOperator().getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
entityOperatorSpec.getUserOperator().getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
entityOperatorSpec.getUserOperator().getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
entityOperatorSpec.getUserOperator().getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
entityOperatorSpec.getUserOperator().getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
entityOperatorSpec.getUserOperator().getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
entityOperatorSpec.getUserOperator().getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
entityOperatorSpec.getTlsSidecar().getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
entityOperatorSpec.getTlsSidecar().getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
entityOperatorSpec.getTlsSidecar().getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
entityOperatorSpec.getTlsSidecar().getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
entityOperatorSpec.getTlsSidecar().getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
entityOperatorSpec.getTlsSidecar().getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
entityOperatorSpec.getTlsSidecar().getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
entityOperatorSpec.getTlsSidecar().getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
entityOperatorSpec.getTemplate().getTopicOperatorContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));
entityOperatorSpec.getTemplate().getUserOperatorContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));
entityOperatorSpec.getTemplate().getTlsSidecarContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));
}, namespaceName);
StatefulSetUtils.waitTillSsHasRolled(namespaceName, KafkaResources.zookeeperStatefulSetName(clusterName), 2, zkSnapshot);
StatefulSetUtils.waitTillSsHasRolled(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName), 2, kafkaSnapshot);
DeploymentUtils.waitTillDepHasRolled(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoPod);
KafkaUtils.waitForKafkaReady(namespaceName, clusterName);
LOGGER.info("Verify values after update");
checkReadinessLivenessProbe(namespaceName, kafkaStatefulSetName(clusterName), "kafka", updatedInitialDelaySeconds, updatedTimeoutSeconds,
updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
checkKafkaConfiguration(namespaceName, kafkaStatefulSetName(clusterName), updatedKafkaConfig, clusterName);
checkSpecificVariablesInContainer(namespaceName, kafkaStatefulSetName(clusterName), "kafka", envVarUpdated);
kafkaConfiguration = kubeClient(namespaceName).getConfigMap(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName)).getData().get("server.config");
assertThat(kafkaConfiguration, containsString("offsets.topic.replication.factor=2"));
assertThat(kafkaConfiguration, containsString("transaction.state.log.replication.factor=2"));
assertThat(kafkaConfiguration, containsString("default.replication.factor=2"));
kafkaConfigurationFromPod = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "cat", "/tmp/strimzi.properties").out();
assertThat(kafkaConfigurationFromPod, containsString("offsets.topic.replication.factor=2"));
assertThat(kafkaConfigurationFromPod, containsString("transaction.state.log.replication.factor=2"));
assertThat(kafkaConfigurationFromPod, containsString("default.replication.factor=2"));
LOGGER.info("Testing Zookeepers");
checkReadinessLivenessProbe(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", updatedInitialDelaySeconds, updatedTimeoutSeconds,
updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
checkComponentConfiguration(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", "ZOOKEEPER_CONFIGURATION", updatedZookeeperConfig);
checkSpecificVariablesInContainer(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", envVarUpdated);
LOGGER.info("Getting entity operator to check configuration of TO and UO");
checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "topic-operator", updatedInitialDelaySeconds, updatedTimeoutSeconds,
updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "topic-operator", envVarUpdated);
checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "user-operator", updatedInitialDelaySeconds, updatedTimeoutSeconds,
updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "user-operator", envVarUpdated);
checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "tls-sidecar", updatedInitialDelaySeconds, updatedTimeoutSeconds,
updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "tls-sidecar", envVarUpdated);
}
@ParallelNamespaceTest
void testJvmAndResources(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
ArrayList<SystemProperty> javaSystemProps = new ArrayList<>();
javaSystemProps.add(new SystemPropertyBuilder().withName("javax.net.debug")
.withValue("verbose").build());
Map<String, String> jvmOptionsXX = new HashMap<>();
jvmOptionsXX.put("UseG1GC", "true");
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 1, 1)
.editSpec()
.editKafka()
.withResources(new ResourceRequirementsBuilder()
.addToLimits("memory", new Quantity("1.5Gi"))
.addToLimits("cpu", new Quantity("1"))
.addToRequests("memory", new Quantity("1Gi"))
.addToRequests("cpu", new Quantity("50m"))
.build())
.withNewJvmOptions()
.withXmx("1g")
.withXms("512m")
.withXx(jvmOptionsXX)
.endJvmOptions()
.endKafka()
.editZookeeper()
.withResources(
new ResourceRequirementsBuilder()
.addToLimits("memory", new Quantity("1G"))
.addToLimits("cpu", new Quantity("0.5"))
.addToRequests("memory", new Quantity("0.5G"))
.addToRequests("cpu", new Quantity("25m"))
.build())
.withNewJvmOptions()
.withXmx("1G")
.withXms("512M")
.withXx(jvmOptionsXX)
.endJvmOptions()
.endZookeeper()
.withNewEntityOperator()
.withNewTopicOperator()
.withResources(
new ResourceRequirementsBuilder()
.addToLimits("memory", new Quantity("1024Mi"))
.addToLimits("cpu", new Quantity("500m"))
.addToRequests("memory", new Quantity("384Mi"))
.addToRequests("cpu", new Quantity("0.025"))
.build())
.withNewJvmOptions()
.withXmx("2G")
.withXms("1024M")
.withJavaSystemProperties(javaSystemProps)
.endJvmOptions()
.endTopicOperator()
.withNewUserOperator()
.withResources(
new ResourceRequirementsBuilder()
.addToLimits("memory", new Quantity("512M"))
.addToLimits("cpu", new Quantity("300m"))
.addToRequests("memory", new Quantity("256M"))
.addToRequests("cpu", new Quantity("30m"))
.build())
.withNewJvmOptions()
.withXmx("1G")
.withXms("512M")
.withJavaSystemProperties(javaSystemProps)
.endJvmOptions()
.endUserOperator()
.endEntityOperator()
.endSpec()
.build());
// Make snapshots of the Kafka cluster to make sure that there is no rolling update after CO reconciliation
final String zkStsName = KafkaResources.zookeeperStatefulSetName(clusterName);
final String kafkaStsName = kafkaStatefulSetName(clusterName);
final String eoDepName = KafkaResources.entityOperatorDeploymentName(clusterName);
final Map<String, String> zkPods = StatefulSetUtils.ssSnapshot(namespaceName, zkStsName);
final Map<String, String> kafkaPods = StatefulSetUtils.ssSnapshot(namespaceName, kafkaStsName);
final Map<String, String> eoPods = DeploymentUtils.depSnapshot(namespaceName, eoDepName);
assertResources(namespaceName, KafkaResources.kafkaPodName(clusterName, 0), "kafka",
"1536Mi", "1", "1Gi", "50m");
assertExpectedJavaOpts(namespaceName, KafkaResources.kafkaPodName(clusterName, 0), "kafka",
"-Xmx1g", "-Xms512m", "-XX:+UseG1GC");
assertResources(namespaceName, KafkaResources.zookeeperPodName(clusterName, 0), "zookeeper",
"1G", "500m", "500M", "25m");
assertExpectedJavaOpts(namespaceName, KafkaResources.zookeeperPodName(clusterName, 0), "zookeeper",
"-Xmx1G", "-Xms512M", "-XX:+UseG1GC");
Optional<Pod> pod = kubeClient(namespaceName).listPods(namespaceName)
.stream().filter(p -> p.getMetadata().getName().startsWith(KafkaResources.entityOperatorDeploymentName(clusterName)))
.findFirst();
assertThat("EO pod does not exist", pod.isPresent(), is(true));
assertResources(namespaceName, pod.get().getMetadata().getName(), "topic-operator",
"1Gi", "500m", "384Mi", "25m");
assertResources(namespaceName, pod.get().getMetadata().getName(), "user-operator",
"512M", "300m", "256M", "30m");
assertExpectedJavaOpts(namespaceName, pod.get().getMetadata().getName(), "topic-operator",
"-Xmx2G", "-Xms1024M", null);
assertExpectedJavaOpts(namespaceName, pod.get().getMetadata().getName(), "user-operator",
"-Xmx1G", "-Xms512M", null);
String eoPod = eoPods.keySet().toArray()[0].toString();
kubeClient(namespaceName).getPod(namespaceName, eoPod).getSpec().getContainers().forEach(container -> {
if (!container.getName().equals("tls-sidecar")) {
LOGGER.info("Check if -D java options are present in {}", container.getName());
String javaSystemProp = container.getEnv().stream().filter(envVar ->
envVar.getName().equals("STRIMZI_JAVA_SYSTEM_PROPERTIES")).findFirst().orElseThrow().getValue();
String javaOpts = container.getEnv().stream().filter(envVar ->
envVar.getName().equals("STRIMZI_JAVA_OPTS")).findFirst().orElseThrow().getValue();
assertThat(javaSystemProp, is("-Djavax.net.debug=verbose"));
if (container.getName().equals("topic-operator")) {
assertThat(javaOpts, is("-Xms1024M -Xmx2G"));
}
if (container.getName().equals("user-operator")) {
assertThat(javaOpts, is("-Xms512M -Xmx1G"));
}
}
});
LOGGER.info("Checking no rolling update for Kafka cluster");
StatefulSetUtils.waitForNoRollingUpdate(namespaceName, zkStsName, zkPods);
StatefulSetUtils.waitForNoRollingUpdate(namespaceName, kafkaStsName, kafkaPods);
DeploymentUtils.waitForNoRollingUpdate(namespaceName, eoDepName, eoPods);
}
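// Verifies that the Topic Operator keeps KafkaTopic resources and topics created via the pod CLI in sync: create, update partitions, and delete from both sides.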
@ParallelNamespaceTest
void testForTopicOperator(ExtensionContext extensionContext) throws InterruptedException {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
final String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
final String cliTopicName = "topic-from-cli";
//Creating topics for testing
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
KafkaTopicUtils.waitForKafkaTopicReady(namespaceName, topicName);
assertThat(KafkaTopicResource.kafkaTopicClient().inNamespace(namespaceName).withName(topicName).get().getMetadata().getName(), is(topicName));
assertThat(KafkaCmdClient.listTopicsUsingPodCli(namespaceName, clusterName, 0), hasItem(topicName));
KafkaCmdClient.createTopicUsingPodCli(namespaceName, clusterName, 0, cliTopicName, 1, 1);
assertThat(KafkaCmdClient.listTopicsUsingPodCli(namespaceName, clusterName, 0), hasItems(topicName, cliTopicName));
assertThat(cmdKubeClient(namespaceName).list(KafkaTopic.RESOURCE_KIND), hasItems(cliTopicName, topicName));
//Updating first topic using pod CLI
KafkaCmdClient.updateTopicPartitionsCountUsingPodCli(namespaceName, clusterName, 0, topicName, 2);
KafkaUtils.waitForKafkaReady(namespaceName, clusterName);
assertThat(KafkaCmdClient.describeTopicUsingPodCli(namespaceName, clusterName, 0, topicName),
hasItems("PartitionCount:2"));
KafkaTopic testTopic = fromYamlString(cmdKubeClient().get(KafkaTopic.RESOURCE_KIND, topicName), KafkaTopic.class);
assertThat(testTopic, is(CoreMatchers.notNullValue()));
assertThat(testTopic.getSpec(), is(CoreMatchers.notNullValue()));
assertThat(testTopic.getSpec().getPartitions(), is(Integer.valueOf(2)));
//Updating second topic via KafkaTopic update
KafkaTopicResource.replaceTopicResourceInSpecificNamespace(cliTopicName, topic -> topic.getSpec().setPartitions(2), namespaceName);
KafkaUtils.waitForKafkaReady(namespaceName, clusterName);
assertThat(KafkaCmdClient.describeTopicUsingPodCli(namespaceName, clusterName, 0, cliTopicName),
hasItems("PartitionCount:2"));
testTopic = fromYamlString(cmdKubeClient(namespaceName).get(KafkaTopic.RESOURCE_KIND, cliTopicName), KafkaTopic.class);
assertThat(testTopic, is(CoreMatchers.notNullValue()));
assertThat(testTopic.getSpec(), is(CoreMatchers.notNullValue()));
assertThat(testTopic.getSpec().getPartitions(), is(Integer.valueOf(2)));
//Deleting first topic by deletion of CM
cmdKubeClient(namespaceName).deleteByName(KafkaTopic.RESOURCE_KIND, cliTopicName);
//Deleting another topic using pod CLI
KafkaCmdClient.deleteTopicUsingPodCli(namespaceName, clusterName, 0, topicName);
KafkaTopicUtils.waitForKafkaTopicDeletion(namespaceName, topicName);
//Checking all topics were deleted
Thread.sleep(Constants.TIMEOUT_TEARDOWN);
List<String> topics = KafkaCmdClient.listTopicsUsingPodCli(namespaceName, clusterName, 0);
assertThat(topics, not(hasItems(topicName)));
assertThat(topics, not(hasItems(cliTopicName)));
}
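// Verifies that removing the Topic Operator from the Entity Operator spec removes its container and that re-adding it restores the container.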
@ParallelNamespaceTest
void testRemoveTopicOperatorFromEntityOperator(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
LOGGER.info("Deploying Kafka cluster {}", clusterName);
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
String eoPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName))
.get(0).getMetadata().getName();
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getEntityOperator().setTopicOperator(null), namespaceName);
// Waiting until the EO pod is recreated without the TO
PodUtils.deletePodWithWait(namespaceName, eoPodName);
DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1);
PodUtils.waitUntilPodContainersCount(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 2);
//Checking that TO was removed
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
pod.getSpec().getContainers().forEach(container -> {
assertThat(container.getName(), not(containsString("topic-operator")));
});
});
eoPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName))
.get(0).getMetadata().getName();
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getEntityOperator().setTopicOperator(new EntityTopicOperatorSpec()), namespaceName);
// Waiting until the EO pod is recreated with the TO
PodUtils.deletePodWithWait(namespaceName, eoPodName);
DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1);
//Checking that TO was created
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
pod.getSpec().getContainers().forEach(container -> {
assertThat(container.getName(), anyOf(
containsString("topic-operator"),
containsString("user-operator"),
containsString("tls-sidecar"))
);
});
});
}
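// Verifies that removing the User Operator from the Entity Operator spec removes its container and that re-adding it restores the container.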
@ParallelNamespaceTest
void testRemoveUserOperatorFromEntityOperator(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
LOGGER.info("Deploying Kafka cluster {}", clusterName);
String operationId = timeMeasuringSystem.startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
String eoPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName))
.get(0).getMetadata().getName();
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getEntityOperator().setUserOperator(null), namespaceName);
// Waiting until the EO pod is recreated without the UO
PodUtils.deletePodWithWait(namespaceName, eoPodName);
DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1);
PodUtils.waitUntilPodContainersCount(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 2);
//Checking that UO was removed
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
pod.getSpec().getContainers().forEach(container -> {
assertThat(container.getName(), not(containsString("user-operator")));
});
});
eoPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName))
.get(0).getMetadata().getName();
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getEntityOperator().setUserOperator(new EntityUserOperatorSpec()), namespaceName);
// Waiting until the EO pod is recreated with the UO
PodUtils.deletePodWithWait(namespaceName, eoPodName);
DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1);
//Checking that UO was created
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
pod.getSpec().getContainers().forEach(container -> {
assertThat(container.getName(), anyOf(
containsString("topic-operator"),
containsString("user-operator"),
containsString("tls-sidecar"))
);
});
});
timeMeasuringSystem.stopOperation(operationId, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
assertNoCoErrorsLogged(NAMESPACE, timeMeasuringSystem.getDurationInSeconds(extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName(), operationId));
}
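// Verifies that removing both operators scales the Entity Operator deployment down to zero pods and that re-adding them brings the deployment back.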
@ParallelNamespaceTest
void testRemoveUserAndTopicOperatorsFromEntityOperator(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
// TODO issue #4152 - temporarily disabled for namespace-scoped RBAC environments
assumeFalse(Environment.isNamespaceRbacScope());
LOGGER.info("Deploying Kafka cluster {}", clusterName);
String operationId = timeMeasuringSystem.startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
String eoDeploymentName = KafkaResources.entityOperatorDeploymentName(clusterName);
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
k.getSpec().getEntityOperator().setTopicOperator(null);
k.getSpec().getEntityOperator().setUserOperator(null);
}, namespaceName);
PodUtils.waitUntilPodStabilityReplicasCount(namespaceName, eoDeploymentName, 0);
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
k.getSpec().getEntityOperator().setTopicOperator(new EntityTopicOperatorSpec());
k.getSpec().getEntityOperator().setUserOperator(new EntityUserOperatorSpec());
}, namespaceName);
DeploymentUtils.waitForDeploymentReady(namespaceName, eoDeploymentName);
//Checking that EO was created
kubeClient().listPodsByPrefixInName(namespaceName, eoDeploymentName).forEach(pod -> {
pod.getSpec().getContainers().forEach(container -> {
assertThat(container.getName(), anyOf(
containsString("topic-operator"),
containsString("user-operator"),
containsString("tls-sidecar"))
);
});
});
timeMeasuringSystem.stopOperation(operationId, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
assertNoCoErrorsLogged(NAMESPACE, timeMeasuringSystem.getDurationInSeconds(extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName(), operationId));
}
@ParallelNamespaceTest
void testEntityOperatorWithoutTopicOperator(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
LOGGER.info("Deploying Kafka cluster without TO in EO");
String operationId = timeMeasuringSystem.startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3)
.editSpec()
.withNewEntityOperator()
.withNewUserOperator()
.endUserOperator()
.endEntityOperator()
.endSpec()
.build());
timeMeasuringSystem.stopOperation(operationId, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
assertNoCoErrorsLogged(NAMESPACE, timeMeasuringSystem.getDurationInSeconds(extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName(), operationId));
//Checking that TO was not deployed
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
pod.getSpec().getContainers().forEach(container -> {
assertThat(container.getName(), not(containsString("topic-operator")));
});
});
}
@ParallelNamespaceTest
void testEntityOperatorWithoutUserOperator(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
LOGGER.info("Deploying Kafka cluster without UO in EO");
String operationId = timeMeasuringSystem.startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3)
.editSpec()
.withNewEntityOperator()
.withNewTopicOperator()
.endTopicOperator()
.endEntityOperator()
.endSpec()
.build());
timeMeasuringSystem.stopOperation(operationId, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
assertNoCoErrorsLogged(NAMESPACE, timeMeasuringSystem.getDurationInSeconds(extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName(), operationId));
//Checking that UO was not deployed
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
pod.getSpec().getContainers().forEach(container -> {
assertThat(container.getName(), not(containsString("user-operator")));
});
});
}
@ParallelNamespaceTest
void testEntityOperatorWithoutUserAndTopicOperators(ExtensionContext extensionContext) {
String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
LOGGER.info("Deploying Kafka cluster without UO and TO in EO");
String operationId = timeMeasuringSystem.startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3)
.editSpec()
.withNewEntityOperator()
.endEntityOperator()
.endSpec()
.build());
timeMeasuringSystem.stopOperation(operationId, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
assertNoCoErrorsLogged(NAMESPACE, timeMeasuringSystem.getDurationInSeconds(extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName(), operationId));
//Checking that EO was not deployed
assertThat("EO should not be deployed", kubeClient().listPodsByPrefixInName(KafkaResources.entityOperatorDeploymentName(clusterName)).size(), is(0));
}
@ParallelNamespaceTest
void testTopicWithoutLabels(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
// Negative scenario: create a topic without any labels and make sure that the TO does not handle it
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
// Creating topic without any label
resourceManager.createResource(extensionContext, false, KafkaTopicTemplates.topic(clusterName, "topic-without-labels", 1, 1, 1)
.editMetadata()
.withLabels(null)
.endMetadata()
.build());
// Checking that resource was created
assertThat(cmdKubeClient(namespaceName).list("kafkatopic"), hasItems("topic-without-labels"));
// Checking that the TO did not handle the new topic and that it was not created in Kafka
assertThat(KafkaCmdClient.listTopicsUsingPodCli(namespaceName, clusterName, 0), not(hasItems("topic-without-labels")));
// Checking TO logs
String tOPodName = cmdKubeClient(namespaceName).listResourcesByLabel("pod", Labels.STRIMZI_NAME_LABEL + "=" + clusterName + "-entity-operator").get(0);
String tOlogs = kubeClient(namespaceName).logsInSpecificNamespace(namespaceName, tOPodName, "topic-operator");
assertThat(tOlogs, not(containsString("Created topic 'topic-without-labels'")));
//Deleting topic
cmdKubeClient(namespaceName).deleteByName("kafkatopic", "topic-without-labels");
KafkaTopicUtils.waitForKafkaTopicDeletion(namespaceName, "topic-without-labels");
//Checking all topics were deleted
List<String> topics = KafkaCmdClient.listTopicsUsingPodCli(namespaceName, clusterName, 0);
assertThat(topics, not(hasItems("topic-without-labels")));
}
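// Verifies PVC names, labels and sizes for a JBOD setup with mixed deleteClaim values, and that PVCs are removed according to deleteClaim after the cluster is deleted.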
@ParallelNamespaceTest
void testKafkaJBODDeleteClaimsTrueFalse(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final int kafkaReplicas = 2;
final String diskSizeGi = "10";
JbodStorage jbodStorage = new JbodStorageBuilder().withVolumes(
new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize(diskSizeGi + "Gi").build(),
new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize(diskSizeGi + "Gi").build()).build();
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaJBOD(clusterName, kafkaReplicas, jbodStorage).build());
// kafka cluster already deployed
verifyVolumeNamesAndLabels(namespaceName, clusterName, kafkaReplicas, 2, diskSizeGi);
final int volumesCount = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).size();
LOGGER.info("Deleting cluster");
cmdKubeClient(namespaceName).deleteByName("kafka", clusterName);
LOGGER.info("Waiting for PVC deletion");
PersistentVolumeClaimUtils.waitForPVCDeletion(namespaceName, volumesCount, jbodStorage, clusterName);
}
@ParallelNamespaceTest
void testKafkaJBODDeleteClaimsTrue(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final int kafkaReplicas = 2;
final String diskSizeGi = "10";
JbodStorage jbodStorage = new JbodStorageBuilder().withVolumes(
new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(0).withSize(diskSizeGi + "Gi").build(),
new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize(diskSizeGi + "Gi").build()).build();
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaJBOD(clusterName, kafkaReplicas, jbodStorage).build());
// kafka cluster already deployed
verifyVolumeNamesAndLabels(namespaceName, clusterName, kafkaReplicas, 2, diskSizeGi);
final int volumesCount = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).size();
LOGGER.info("Deleting cluster");
cmdKubeClient(namespaceName).deleteByName("kafka", clusterName);
LOGGER.info("Waiting for PVC deletion");
PersistentVolumeClaimUtils.waitForPVCDeletion(namespaceName, volumesCount, jbodStorage, clusterName);
}
@ParallelNamespaceTest
void testKafkaJBODDeleteClaimsFalse(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final int kafkaReplicas = 2;
final String diskSizeGi = "10";
JbodStorage jbodStorage = new JbodStorageBuilder().withVolumes(
new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize(diskSizeGi + "Gi").build(),
new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(1).withSize(diskSizeGi + "Gi").build()).build();
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaJBOD(clusterName, kafkaReplicas, jbodStorage).build());
// kafka cluster already deployed
verifyVolumeNamesAndLabels(namespaceName, clusterName, kafkaReplicas, 2, diskSizeGi);
int volumesCount = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).size();
LOGGER.info("Deleting cluster");
cmdKubeClient(namespaceName).deleteByName("kafka", clusterName);
LOGGER.info("Waiting for PVC deletion");
PersistentVolumeClaimUtils.waitForPVCDeletion(namespaceName, volumesCount, jbodStorage, clusterName);
}
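// Verifies that JBOD volumes are provisioned with the requested sizes and that the cluster can still produce and consume messages.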
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testPersistentStorageSize(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final String[] diskSizes = {"70Gi", "20Gi"};
final int kafkaRepl = 2;
final int diskCount = 2;
JbodStorage jbodStorage = new JbodStorageBuilder()
.withVolumes(
new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize(diskSizes[0]).build(),
new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(1).withSize(diskSizes[1]).build()
).build();
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, kafkaRepl)
.editSpec()
.editKafka()
.withStorage(jbodStorage)
.endKafka()
.editZookeeper()
.withReplicas(1)
.endZookeeper()
.endSpec()
.build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());
List<PersistentVolumeClaim> volumes = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream().filter(
persistentVolumeClaim -> persistentVolumeClaim.getMetadata().getName().contains(clusterName)).collect(Collectors.toList());
checkStorageSizeForVolumes(volumes, diskSizes, kafkaRepl, diskCount);
String kafkaClientsPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
.withUsingPodName(kafkaClientsPodName)
.withTopicName(topicName)
.withNamespaceName(namespaceName)
.withClusterName(clusterName)
.withMessageCount(MESSAGE_COUNT)
.withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
.build();
LOGGER.info("Checking produced and consumed messages to pod:{}", kafkaClientsPodName);
internalKafkaClient.checkProducedAndConsumedMessages(
internalKafkaClient.sendMessagesPlain(),
internalKafkaClient.receiveMessagesPlain()
);
}
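// Verifies that adding an external loadbalancer listener triggers a rolling update and regenerates the per-broker certificates.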
@ParallelNamespaceTest
@Tag(LOADBALANCER_SUPPORTED)
void testRegenerateCertExternalAddressChange(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
LOGGER.info("Creating kafka without external listener");
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 1).build());
final String brokerSecret = clusterName + "-kafka-brokers";
Secret secretsWithoutExt = kubeClient(namespaceName).getSecret(namespaceName, brokerSecret);
LOGGER.info("Editing kafka with external listener");
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
List<GenericKafkaListener> lst = asList(
new GenericKafkaListenerBuilder()
.withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
.withPort(9092)
.withType(KafkaListenerType.INTERNAL)
.withTls(false)
.build(),
new GenericKafkaListenerBuilder()
.withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
.withPort(9094)
.withType(KafkaListenerType.LOADBALANCER)
.withTls(true)
.withNewConfiguration()
.withFinalizers(LB_FINALIZERS)
.endConfiguration()
.build()
);
kafka.getSpec().getKafka().setListeners(lst);
}, namespaceName);
StatefulSetUtils.waitTillSsHasRolled(namespaceName, kafkaStatefulSetName(clusterName), 3, StatefulSetUtils.ssSnapshot(namespaceName, kafkaStatefulSetName(clusterName)));
Secret secretsWithExt = kubeClient(namespaceName).getSecret(namespaceName, brokerSecret);
LOGGER.info("Checking secrets");
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)).forEach(kafkaPod -> {
String kafkaPodName = kafkaPod.getMetadata().getName();
assertThat(secretsWithExt.getData().get(kafkaPodName + ".crt"), is(not(secretsWithoutExt.getData().get(kafkaPodName + ".crt"))));
assertThat(secretsWithExt.getData().get(kafkaPodName + ".key"), is(not(secretsWithoutExt.getData().get(kafkaPodName + ".key"))));
});
}
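// Verifies that adding, changing and removing custom labels on the Kafka CR is propagated to services, config maps, stateful sets and pods without breaking message flow.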
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testLabelModificationDoesNotBreakCluster(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
Map<String, String> labels = new HashMap<>();
final String[] labelKeys = {"label-name-1", "label-name-2", ""};
final String[] labelValues = {"name-of-the-label-1", "name-of-the-label-2", ""};
labels.put(labelKeys[0], labelValues[0]);
labels.put(labelKeys[1], labelValues[1]);
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 1)
.editMetadata()
.withLabels(labels)
.endMetadata()
.build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());
final String kafkaClientsPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
.withUsingPodName(kafkaClientsPodName)
.withTopicName(topicName)
.withNamespaceName(namespaceName)
.withClusterName(clusterName)
.withMessageCount(MESSAGE_COUNT)
.withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
.build();
Map<String, String> kafkaPods = StatefulSetUtils.ssSnapshot(namespaceName, kafkaStatefulSetName(clusterName));
LOGGER.info("Waiting for kafka stateful set labels changed {}", labels);
StatefulSetUtils.waitForStatefulSetLabelsChange(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName), labels);
LOGGER.info("Getting labels from stateful set resource");
StatefulSet statefulSet = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName));
LOGGER.info("Verifying default labels in the Kafka CR");
assertThat("Label exists in stateful set with concrete value",
labelValues[0].equals(statefulSet.getSpec().getTemplate().getMetadata().getLabels().get(labelKeys[0])));
assertThat("Label exists in stateful set with concrete value",
labelValues[1].equals(statefulSet.getSpec().getTemplate().getMetadata().getLabels().get(labelKeys[1])));
labelValues[0] = "new-name-of-the-label-1";
labelValues[1] = "new-name-of-the-label-2";
labelKeys[2] = "label-name-3";
labelValues[2] = "name-of-the-label-3";
LOGGER.info("Setting new values of labels from {} to {} | from {} to {} and adding one {} with value {}",
"name-of-the-label-1", labelValues[0], "name-of-the-label-2", labelValues[1], labelKeys[2], labelValues[2]);
LOGGER.info("Edit kafka labels in Kafka CR");
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, resource -> {
resource.getMetadata().getLabels().put(labelKeys[0], labelValues[0]);
resource.getMetadata().getLabels().put(labelKeys[1], labelValues[1]);
resource.getMetadata().getLabels().put(labelKeys[2], labelValues[2]);
}, namespaceName);
labels.put(labelKeys[0], labelValues[0]);
labels.put(labelKeys[1], labelValues[1]);
labels.put(labelKeys[2], labelValues[2]);
LOGGER.info("Waiting for kafka service labels changed {}", labels);
ServiceUtils.waitForServiceLabelsChange(namespaceName, KafkaResources.brokersServiceName(clusterName), labels);
LOGGER.info("Verifying kafka labels via services");
Service service = kubeClient(namespaceName).getService(namespaceName, KafkaResources.brokersServiceName(clusterName));
verifyPresentLabels(labels, service);
LOGGER.info("Waiting for kafka config map labels changed {}", labels);
ConfigMapUtils.waitForConfigMapLabelsChange(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName), labels);
LOGGER.info("Verifying kafka labels via config maps");
ConfigMap configMap = kubeClient(namespaceName).getConfigMap(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName));
verifyPresentLabels(labels, configMap);
LOGGER.info("Waiting for kafka stateful set labels changed {}", labels);
StatefulSetUtils.waitForStatefulSetLabelsChange(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName), labels);
LOGGER.info("Verifying kafka labels via stateful set");
statefulSet = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName));
verifyPresentLabels(labels, statefulSet);
StatefulSetUtils.waitTillSsHasRolled(namespaceName, kafkaStatefulSetName(clusterName), 3, kafkaPods);
LOGGER.info("Verifying via kafka pods");
labels = kubeClient(namespaceName).getPod(namespaceName, KafkaResources.kafkaPodName(clusterName, 0)).getMetadata().getLabels();
assertThat("Label exists in kafka pods", labelValues[0].equals(labels.get(labelKeys[0])));
assertThat("Label exists in kafka pods", labelValues[1].equals(labels.get(labelKeys[1])));
assertThat("Label exists in kafka pods", labelValues[2].equals(labels.get(labelKeys[2])));
LOGGER.info("Removing labels: {} -> {}, {} -> {}, {} -> {}", labelKeys[0], labels.get(labelKeys[0]),
labelKeys[1], labels.get(labelKeys[1]), labelKeys[2], labels.get(labelKeys[2]));
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, resource -> {
resource.getMetadata().getLabels().remove(labelKeys[0]);
resource.getMetadata().getLabels().remove(labelKeys[1]);
resource.getMetadata().getLabels().remove(labelKeys[2]);
}, namespaceName);
labels.remove(labelKeys[0]);
labels.remove(labelKeys[1]);
labels.remove(labelKeys[2]);
LOGGER.info("Waiting for kafka service labels deletion {}", labels.toString());
ServiceUtils.waitForServiceLabelsDeletion(namespaceName, KafkaResources.brokersServiceName(clusterName), labelKeys[0], labelKeys[1], labelKeys[2]);
LOGGER.info("Verifying kafka labels via services");
service = kubeClient(namespaceName).getService(namespaceName, KafkaResources.brokersServiceName(clusterName));
verifyNullLabels(labelKeys, service);
LOGGER.info("Verifying kafka labels via config maps");
ConfigMapUtils.waitForConfigMapLabelsDeletion(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName), labelKeys[0], labelKeys[1], labelKeys[2]);
configMap = kubeClient(namespaceName).getConfigMap(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName));
verifyNullLabels(labelKeys, configMap);
LOGGER.info("Waiting for kafka stateful set labels changed {}", labels);
String statefulSetName = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)).getMetadata().getName();
StatefulSetUtils.waitForStatefulSetLabelsDeletion(namespaceName, statefulSetName, labelKeys[0], labelKeys[1], labelKeys[2]);
statefulSet = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName));
LOGGER.info("Verifying kafka labels via stateful set");
verifyNullLabels(labelKeys, statefulSet);
StatefulSetUtils.waitTillSsHasRolled(namespaceName, kafkaStatefulSetName(clusterName), 3, kafkaPods);
LOGGER.info("Waiting for kafka pod labels deletion {}", labels.toString());
PodUtils.waitUntilPodLabelsDeletion(namespaceName, KafkaResources.kafkaPodName(clusterName, 0), labelKeys[0], labelKeys[1], labelKeys[2]);
labels = kubeClient(namespaceName).getPod(namespaceName, KafkaResources.kafkaPodName(clusterName, 0)).getMetadata().getLabels();
LOGGER.info("Verifying via kafka pods");
verifyNullLabels(labelKeys, labels);
internalKafkaClient.checkProducedAndConsumedMessages(
internalKafkaClient.sendMessagesPlain(),
internalKafkaClient.receiveMessagesPlain()
);
}
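// Verifies that pods, stateful sets, services, secrets and config maps created by the operator carry the expected strimzi.io app domain labels.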
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testAppDomainLabels(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 1).build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());
final String kafkaClientsPodName =
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
.withUsingPodName(kafkaClientsPodName)
.withTopicName(topicName)
.withNamespaceName(namespaceName)
.withClusterName(clusterName)
.withMessageCount(MESSAGE_COUNT)
.withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
.build();
Map<String, String> labels;
LOGGER.info("---> PODS <---");
List<Pod> pods = kubeClient(namespaceName).listPods(namespaceName, clusterName).stream()
.filter(pod -> pod.getMetadata().getName().startsWith(clusterName))
.filter(pod -> !pod.getMetadata().getName().startsWith(clusterName + "-" + Constants.KAFKA_CLIENTS))
.collect(Collectors.toList());
for (Pod pod : pods) {
LOGGER.info("Getting labels from {} pod", pod.getMetadata().getName());
verifyAppLabels(pod.getMetadata().getLabels());
}
LOGGER.info("---> STATEFUL SETS <---");
LOGGER.info("Getting labels from stateful set of kafka resource");
labels = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)).getMetadata().getLabels();
verifyAppLabels(labels);
LOGGER.info("Getting labels from stateful set of zookeeper resource");
labels = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.zookeeperStatefulSetName(clusterName)).getMetadata().getLabels();
verifyAppLabels(labels);
LOGGER.info("---> SERVICES <---");
List<Service> services = kubeClient(namespaceName).listServices(namespaceName).stream()
.filter(service -> service.getMetadata().getName().startsWith(clusterName))
.collect(Collectors.toList());
for (Service service : services) {
LOGGER.info("Getting labels from {} service", service.getMetadata().getName());
verifyAppLabels(service.getMetadata().getLabels());
}
LOGGER.info("---> SECRETS <---");
List<Secret> secrets = kubeClient(namespaceName).listSecrets(namespaceName).stream()
.filter(secret -> secret.getMetadata().getName().startsWith(clusterName) && secret.getType().equals("Opaque"))
.collect(Collectors.toList());
for (Secret secret : secrets) {
LOGGER.info("Getting labels from {} secret", secret.getMetadata().getName());
verifyAppLabelsForSecretsAndConfigMaps(secret.getMetadata().getLabels());
}
LOGGER.info("---> CONFIG MAPS <---");
List<ConfigMap> configMaps = kubeClient(namespaceName).listConfigMapsInSpecificNamespace(namespaceName, clusterName);
for (ConfigMap configMap : configMaps) {
LOGGER.info("Getting labels from {} config map", configMap.getMetadata().getName());
verifyAppLabelsForSecretsAndConfigMaps(configMap.getMetadata().getLabels());
}
internalKafkaClient.checkProducedAndConsumedMessages(
internalKafkaClient.sendMessagesPlain(),
internalKafkaClient.receiveMessagesPlain()
);
}
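// Verifies that the User Operator only reconciles KafkaUsers that belong to its own cluster and ignores users of other clusters.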
@ParallelNamespaceTest
void testUOListeningOnlyUsersInSameCluster(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
final String firstClusterName = "my-cluster-1";
final String secondClusterName = "my-cluster-2";
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(firstClusterName, 3, 1).build());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(secondClusterName, 3, 1).build());
resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(firstClusterName, userName).build());
LOGGER.info("Verifying that user {} in cluster {} is created", userName, firstClusterName);
String entityOperatorPodName = kubeClient(namespaceName).listPodNamesInSpecificNamespace(namespaceName, Labels.STRIMZI_NAME_LABEL, KafkaResources.entityOperatorDeploymentName(firstClusterName)).get(0);
String uOLogs = kubeClient(namespaceName).logsInSpecificNamespace(namespaceName, entityOperatorPodName, "user-operator");
assertThat(uOLogs, containsString("User " + userName + " in namespace " + namespaceName + " was ADDED"));
LOGGER.info("Verifying that user {} in cluster {} is not created", userName, secondClusterName);
entityOperatorPodName = kubeClient(namespaceName).listPodNamesInSpecificNamespace(namespaceName, Labels.STRIMZI_NAME_LABEL, KafkaResources.entityOperatorDeploymentName(secondClusterName)).get(0);
uOLogs = kubeClient(namespaceName).logsInSpecificNamespace(namespaceName, entityOperatorPodName, "user-operator");
assertThat(uOLogs, not(containsString("User " + userName + " in namespace " + namespaceName + " was ADDED")));
LOGGER.info("Verifying that user belongs to {} cluster", firstClusterName);
String kafkaUserResource = cmdKubeClient(namespaceName).getResourceAsYaml("kafkauser", userName);
assertThat(kafkaUserResource, containsString(Labels.STRIMZI_CLUSTER_LABEL + ": " + firstClusterName));
}
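// Verifies that produced messages are written to the topic's log segment on disk and survive deletion and restart of the Kafka pods.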
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testMessagesAreStoredInDisk(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 1, 1).build());
Map<String, String> kafkaPodsSnapshot = StatefulSetUtils.ssSnapshot(namespaceName, kafkaStatefulSetName(clusterName));
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName, 1, 1).build());
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());
final String kafkaClientsPodName =
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
.withUsingPodName(kafkaClientsPodName)
.withTopicName(topicName)
.withNamespaceName(namespaceName)
.withClusterName(clusterName)
.withMessageCount(MESSAGE_COUNT)
.withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
.build();
TestUtils.waitFor("KafkaTopic creation inside kafka pod", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT,
() -> cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash",
"-c", "cd /var/lib/kafka/data/kafka-log0; ls -1").out().contains(topicName));
String topicDirNameInPod = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash",
"-c", "cd /var/lib/kafka/data/kafka-log0; ls -1 | sed -n '/" + topicName + "/p'").out();
String commandToGetDataFromTopic =
"cd /var/lib/kafka/data/kafka-log0/" + topicDirNameInPod + "/;cat 00000000000000000000.log";
LOGGER.info("Executing command {} in {}", commandToGetDataFromTopic, KafkaResources.kafkaPodName(clusterName, 0));
String topicData = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0),
"/bin/bash", "-c", commandToGetDataFromTopic).out();
LOGGER.info("Topic {} is present in kafka broker {} with no data", topicName, KafkaResources.kafkaPodName(clusterName, 0));
assertThat("Topic contains data", topicData, emptyOrNullString());
internalKafkaClient.checkProducedAndConsumedMessages(
internalKafkaClient.sendMessagesPlain(),
internalKafkaClient.receiveMessagesPlain()
);
LOGGER.info("Executing command {} in {}", commandToGetDataFromTopic, KafkaResources.kafkaPodName(clusterName, 0));
topicData = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c",
commandToGetDataFromTopic).out();
assertThat("Topic has no data", topicData, notNullValue());
List<Pod> kafkaPods = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName));
for (Pod kafkaPod : kafkaPods) {
LOGGER.info("Deleting kafka pod {}", kafkaPod.getMetadata().getName());
kubeClient(namespaceName).deletePod(namespaceName, kafkaPod);
}
LOGGER.info("Wait for kafka to rolling restart ...");
StatefulSetUtils.waitTillSsHasRolled(namespaceName, kafkaStatefulSetName(clusterName), 1, kafkaPodsSnapshot);
LOGGER.info("Executing command {} in {}", commandToGetDataFromTopic, KafkaResources.kafkaPodName(clusterName, 0));
topicData = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c",
commandToGetDataFromTopic).out();
assertThat("Topic has no data", topicData, notNullValue());
}
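// Verifies that consuming messages creates all 100 configured __consumer_offsets partitions in the broker's log directory.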
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testConsumerOffsetFiles(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final Map<String, Object> kafkaConfig = new HashMap<>();
kafkaConfig.put("offsets.topic.replication.factor", "3");
kafkaConfig.put("offsets.topic.num.partitions", "100");
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 1)
.editSpec()
.editKafka()
.withConfig(kafkaConfig)
.endKafka()
.endSpec()
.build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName, 3, 1).build());
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());
final String kafkaClientsPodName =
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
.withUsingPodName(kafkaClientsPodName)
.withTopicName(topicName)
.withNamespaceName(namespaceName)
.withClusterName(clusterName)
.withMessageCount(MESSAGE_COUNT)
.withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
.build();
String commandToGetFiles = "cd /var/lib/kafka/data/kafka-log0/;" +
"ls -1 | sed -n \"s#__consumer_offsets-\\([0-9]*\\)#\\1#p\" | sort -V";
LOGGER.info("Executing command {} in {}", commandToGetFiles, KafkaResources.kafkaPodName(clusterName, 0));
String result = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0),
"/bin/bash", "-c", commandToGetFiles).out();
// TODO / FIXME
//assertThat("Folder kafka-log0 has data in files:\n" + result, result.equals(""));
LOGGER.info("Result: \n" + result);
internalKafkaClient.checkProducedAndConsumedMessages(
internalKafkaClient.sendMessagesPlain(),
internalKafkaClient.receiveMessagesPlain()
);
LOGGER.info("Executing command {} in {}", commandToGetFiles, KafkaResources.kafkaPodName(clusterName, 0));
result = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0),
"/bin/bash", "-c", commandToGetFiles).out();
StringBuilder stringToMatch = new StringBuilder();
for (int i = 0; i < 100; i++) {
stringToMatch.append(i).append("\n");
}
assertThat("Folder kafka-log0 doesn't contain 100 files", result, containsString(stringToMatch.toString()));
}
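// Verifies that labels and annotations from the PVC templates are applied to Kafka and ZooKeeper PVCs and can be updated through the Kafka CR.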
@ParallelNamespaceTest
void testLabelsAndAnnotationForPVC(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String labelAnnotationKey = "testKey";
final String firstValue = "testValue";
final String changedValue = "editedTestValue";
Map<String, String> pvcLabel = new HashMap<>();
pvcLabel.put(labelAnnotationKey, firstValue);
Map<String, String> pvcAnnotation = pvcLabel;
Map<String, String> statefulSetLabels = new HashMap<>();
statefulSetLabels.put("app.kubernetes.io/part-of", "some-app");
statefulSetLabels.put("app.kubernetes.io/managed-by", "some-app");
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 1)
.editSpec()
.editKafka()
.withNewTemplate()
.withNewStatefulset()
.withNewMetadata()
.withLabels(statefulSetLabels)
.endMetadata()
.endStatefulset()
.withNewPersistentVolumeClaim()
.withNewMetadata()
.addToLabels(pvcLabel)
.addToAnnotations(pvcAnnotation)
.endMetadata()
.endPersistentVolumeClaim()
.endTemplate()
.withStorage(new JbodStorageBuilder().withVolumes(
new PersistentClaimStorageBuilder()
.withDeleteClaim(false)
.withId(0)
.withSize("20Gi")
.build(),
new PersistentClaimStorageBuilder()
.withDeleteClaim(true)
.withId(1)
.withSize("10Gi")
.build())
.build())
.endKafka()
.editZookeeper()
.withNewTemplate()
.withNewPersistentVolumeClaim()
.withNewMetadata()
.addToLabels(pvcLabel)
.addToAnnotations(pvcAnnotation)
.endMetadata()
.endPersistentVolumeClaim()
.endTemplate()
.withNewPersistentClaimStorage()
.withDeleteClaim(false)
.withId(0)
.withSize("3Gi")
.endPersistentClaimStorage()
.endZookeeper()
.endSpec()
.build());
LOGGER.info("Check if Kubernetes labels are applied");
Map<String, String> actualStatefulSetLabels = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)).getMetadata().getLabels();
assertThat(actualStatefulSetLabels.get("app.kubernetes.io/part-of"), is("some-app"));
assertThat(actualStatefulSetLabels.get("app.kubernetes.io/managed-by"), is("some-app"));
LOGGER.info("Kubernetes labels are correctly set and present");
List<PersistentVolumeClaim> pvcs = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream().filter(
persistentVolumeClaim -> persistentVolumeClaim.getMetadata().getName().contains(clusterName)).collect(Collectors.toList());
assertThat(pvcs.size(), is(7));
for (PersistentVolumeClaim pvc : pvcs) {
LOGGER.info("Verifying that PVC label {} - {} = {}", pvc.getMetadata().getName(), firstValue, pvc.getMetadata().getLabels().get(labelAnnotationKey));
assertThat(firstValue, is(pvc.getMetadata().getLabels().get(labelAnnotationKey)));
assertThat(firstValue, is(pvc.getMetadata().getAnnotations().get(labelAnnotationKey)));
}
pvcLabel.put(labelAnnotationKey, changedValue);
pvcAnnotation.put(labelAnnotationKey, changedValue);
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
LOGGER.info("Replacing kafka && zookeeper labels and annotations from {} to {}", labelAnnotationKey, changedValue);
kafka.getSpec().getKafka().getTemplate().getPersistentVolumeClaim().getMetadata().setLabels(pvcLabel);
kafka.getSpec().getKafka().getTemplate().getPersistentVolumeClaim().getMetadata().setAnnotations(pvcAnnotation);
kafka.getSpec().getZookeeper().getTemplate().getPersistentVolumeClaim().getMetadata().setLabels(pvcLabel);
kafka.getSpec().getZookeeper().getTemplate().getPersistentVolumeClaim().getMetadata().setAnnotations(pvcAnnotation);
}, namespaceName);
PersistentVolumeClaimUtils.waitUntilPVCLabelsChange(namespaceName, clusterName, pvcLabel, labelAnnotationKey);
PersistentVolumeClaimUtils.waitUntilPVCAnnotationChange(namespaceName, clusterName, pvcAnnotation, labelAnnotationKey);
KafkaUtils.waitForKafkaReady(namespaceName, clusterName);
pvcs = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream().filter(
persistentVolumeClaim -> persistentVolumeClaim.getMetadata().getName().contains(clusterName)).collect(Collectors.toList());
LOGGER.info(pvcs.toString());
assertThat(pvcs.size(), is(7));
for (PersistentVolumeClaim pvc : pvcs) {
LOGGER.info("Verifying replaced PVC label {} - {} = {}", pvc.getMetadata().getName(), firstValue, pvc.getMetadata().getLabels().get(labelAnnotationKey));
assertThat(pvc.getMetadata().getLabels().get(labelAnnotationKey), is(changedValue));
assertThat(pvc.getMetadata().getAnnotations().get(labelAnnotationKey), is(changedValue));
}
}
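// Negative test: internal topic replication factor and min ISR settings higher than spec.kafka.replicas must be reported as invalid in the Kafka status.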
@ParallelNamespaceTest
void testKafkaOffsetsReplicationFactorHigherThanReplicas(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, false, KafkaTemplates.kafkaEphemeral(clusterName, 3, 1)
.editSpec()
.editKafka()
.addToConfig("offsets.topic.replication.factor", 4)
.addToConfig("transaction.state.log.min.isr", 4)
.addToConfig("transaction.state.log.replication.factor", 4)
.endKafka()
.endSpec().build());
KafkaUtils.waitUntilKafkaStatusConditionContainsMessage(clusterName, namespaceName,
"Kafka configuration option .* should be set to " + 3 + " or less because 'spec.kafka.replicas' is " + 3);
}
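// Verifies that all Strimzi containers (Kafka, ZooKeeper, Entity Operator, Kafka Exporter, Cruise Control) can run with a read-only root filesystem while the cluster stays functional.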
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
@Tag(CRUISE_CONTROL)
void testReadOnlyRootFileSystem(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3)
.editSpec()
.editKafka()
.withNewTemplate()
.withNewKafkaContainer()
.withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
.endKafkaContainer()
.endTemplate()
.endKafka()
.editZookeeper()
.withNewTemplate()
.withNewZookeeperContainer()
.withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
.endZookeeperContainer()
.endTemplate()
.endZookeeper()
.editEntityOperator()
.withNewTemplate()
.withNewTlsSidecarContainer()
.withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
.endTlsSidecarContainer()
.withNewTopicOperatorContainer()
.withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
.endTopicOperatorContainer()
.withNewUserOperatorContainer()
.withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
.endUserOperatorContainer()
.endTemplate()
.endEntityOperator()
.editOrNewKafkaExporter()
.withNewTemplate()
.withNewContainer()
.withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
.endContainer()
.endTemplate()
.endKafkaExporter()
.editOrNewCruiseControl()
.withNewTemplate()
.withNewTlsSidecarContainer()
.withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
.endTlsSidecarContainer()
.withNewCruiseControlContainer()
.withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
.endCruiseControlContainer()
.endTemplate()
.endCruiseControl()
.endSpec()
.build());
KafkaUtils.waitForKafkaReady(namespaceName, clusterName);
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());
final String kafkaClientsPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
.withUsingPodName(kafkaClientsPodName)
.withTopicName(topicName)
.withNamespaceName(namespaceName)
.withClusterName(clusterName)
.withMessageCount(MESSAGE_COUNT)
.withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
.build();
LOGGER.info("Checking produced and consumed messages to pod:{}", kafkaClientsPodName);
internalKafkaClient.checkProducedAndConsumedMessages(
internalKafkaClient.sendMessagesPlain(),
internalKafkaClient.receiveMessagesPlain()
);
}
protected void checkKafkaConfiguration(String namespaceName, String podNamePrefix, Map<String, Object> config, String clusterName) {
LOGGER.info("Checking kafka configuration");
List<Pod> pods = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, podNamePrefix);
Properties properties = configMap2Properties(kubeClient(namespaceName).getConfigMap(namespaceName, clusterName + "-kafka-config"));
for (Map.Entry<String, Object> property : config.entrySet()) {
String key = property.getKey();
Object val = property.getValue();
assertThat(properties.keySet().contains(key), is(true));
assertThat(properties.getProperty(key), is(val));
}
for (Pod pod: pods) {
ExecResult result = cmdKubeClient(namespaceName).execInPod(pod.getMetadata().getName(), "/bin/bash", "-c", "cat /tmp/strimzi.properties");
Properties execProperties = stringToProperties(result.out());
for (Map.Entry<String, Object> property : config.entrySet()) {
String key = property.getKey();
Object val = property.getValue();
assertThat(execProperties.keySet().contains(key), is(true));
assertThat(execProperties.getProperty(key), is(val));
}
}
}
void checkStorageSizeForVolumes(List<PersistentVolumeClaim> volumes, String[] diskSizes, int kafkaRepl, int diskCount) {
int k = 0;
for (int i = 0; i < kafkaRepl; i++) {
for (int j = 0; j < diskCount; j++) {
LOGGER.info("Checking volume {} and size of storage {}", volumes.get(k).getMetadata().getName(),
volumes.get(k).getSpec().getResources().getRequests().get("storage"));
assertThat(volumes.get(k).getSpec().getResources().getRequests().get("storage"), is(new Quantity(diskSizes[i])));
k++;
}
}
}
void verifyVolumeNamesAndLabels(String namespaceName, String clusterName, int kafkaReplicas, int diskCountPerReplica, String diskSizeGi) {
ArrayList<String> pvcs = new ArrayList<>();
kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream()
.filter(pvc -> pvc.getMetadata().getName().contains(clusterName + "-kafka"))
.forEach(volume -> {
String volumeName = volume.getMetadata().getName();
pvcs.add(volumeName);
LOGGER.info("Checking labels for volume:" + volumeName);
assertThat(volume.getMetadata().getLabels().get(Labels.STRIMZI_CLUSTER_LABEL), is(clusterName));
assertThat(volume.getMetadata().getLabels().get(Labels.STRIMZI_KIND_LABEL), is(Kafka.RESOURCE_KIND));
assertThat(volume.getMetadata().getLabels().get(Labels.STRIMZI_NAME_LABEL), is(clusterName.concat("-kafka")));
assertThat(volume.getSpec().getResources().getRequests().get("storage"), is(new Quantity(diskSizeGi, "Gi")));
});
LOGGER.info("Checking PVC names included in JBOD array");
for (int i = 0; i < kafkaReplicas; i++) {
for (int j = 0; j < diskCountPerReplica; j++) {
assertThat(pvcs.contains("data-" + j + "-" + clusterName + "-kafka-" + i), is(true));
}
}
LOGGER.info("Checking PVC on Kafka pods");
for (int i = 0; i < kafkaReplicas; i++) {
ArrayList<String> dataSourcesOnPod = new ArrayList<>();
ArrayList<String> pvcsOnPod = new ArrayList<>();
LOGGER.info("Getting list of mounted data sources and PVCs on Kafka pod " + i);
for (int j = 0; j < diskCountPerReplica; j++) {
dataSourcesOnPod.add(kubeClient(namespaceName).getPod(namespaceName, clusterName.concat("-kafka-" + i))
.getSpec().getVolumes().get(j).getName());
pvcsOnPod.add(kubeClient(namespaceName).getPod(namespaceName, clusterName.concat("-kafka-" + i))
.getSpec().getVolumes().get(j).getPersistentVolumeClaim().getClaimName());
}
LOGGER.info("Verifying mounted data sources and PVCs on Kafka pod " + i);
for (int j = 0; j < diskCountPerReplica; j++) {
assertThat(dataSourcesOnPod.contains("data-" + j), is(true));
assertThat(pvcsOnPod.contains("data-" + j + "-" + clusterName + "-kafka-" + i), is(true));
}
}
}
void verifyPresentLabels(Map<String, String> labels, HasMetadata resources) {
for (Map.Entry<String, String> label : labels.entrySet()) {
assertThat("Label exists with concrete value in HasMetadata(Services, CM, STS) resources",
label.getValue().equals(resources.getMetadata().getLabels().get(label.getKey())));
}
}
void verifyNullLabels(String[] labelKeys, Map<String, String> labels) {
for (String labelKey : labelKeys) {
assertThat(labels.get(labelKey), nullValue());
}
}
void verifyNullLabels(String[] labelKeys, HasMetadata resources) {
for (String labelKey : labelKeys) {
assertThat(resources.getMetadata().getLabels().get(labelKey), nullValue());
}
}
void verifyAppLabels(Map<String, String> labels) {
LOGGER.info("Verifying labels {}", labels);
assertThat("Label " + Labels.STRIMZI_CLUSTER_LABEL + " is not present", labels.containsKey(Labels.STRIMZI_CLUSTER_LABEL));
assertThat("Label " + Labels.STRIMZI_KIND_LABEL + " is not present", labels.containsKey(Labels.STRIMZI_KIND_LABEL));
assertThat("Label " + Labels.STRIMZI_NAME_LABEL + " is not present", labels.containsKey(Labels.STRIMZI_NAME_LABEL));
}
void verifyAppLabelsForSecretsAndConfigMaps(Map<String, String> labels) {
LOGGER.info("Verifying labels {}", labels);
assertThat("Label " + Labels.STRIMZI_CLUSTER_LABEL + " is not present", labels.containsKey(Labels.STRIMZI_CLUSTER_LABEL));
assertThat("Label " + Labels.STRIMZI_KIND_LABEL + " is not present", labels.containsKey(Labels.STRIMZI_KIND_LABEL));
}
@BeforeAll
void setup(ExtensionContext extensionContext) {
install = new SetupClusterOperator.SetupClusterOperatorBuilder()
.withExtensionContext(extensionContext)
.withNamespace(NAMESPACE)
.withWatchingNamespaces(Constants.WATCH_ALL_NAMESPACES)
.createInstallation()
.runInstallation();
}
protected void afterEachMayOverride(ExtensionContext extensionContext) throws Exception {
resourceManager.deleteResources(extensionContext);
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
if (cluster.getListOfDeployedResources().contains(TEMPLATE_PATH)) {
cluster.deleteCustomResources(extensionContext, TEMPLATE_PATH);
}
if (KafkaResource.kafkaClient().inNamespace(namespaceName).withName(OPENSHIFT_CLUSTER_NAME).get() != null) {
cmdKubeClient(namespaceName).deleteByName(Kafka.RESOURCE_KIND, OPENSHIFT_CLUSTER_NAME);
}
kubeClient(namespaceName).listPods(namespaceName).stream()
.filter(p -> p.getMetadata().getName().startsWith(OPENSHIFT_CLUSTER_NAME))
.forEach(p -> PodUtils.deletePodWithWait(p.getMetadata().getName()));
kubeClient(namespaceName).getClient().customResources(CustomResourceDefinitionContext.fromCrd(Crds.kafkaTopic()), KafkaTopic.class, KafkaTopicList.class).inNamespace(namespaceName).delete();
kubeClient(namespaceName).getClient().persistentVolumeClaims().inNamespace(namespaceName).delete();
}
}
|
Java
|
# Tanarius kingii (Hook.f.) Kuntze SPECIES
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null
|
Java
|
import os,json
from cgi import escape
def unescape(s):
s = s.replace("&lt;", "<")
s = s.replace("&gt;", ">")
# this has to be last:
s = s.replace("&amp;", "&")
return s
class FilesystemMixin:
def h_fs_get(_,path,eltName=''):
from stat import S_ISDIR
data = (escape(open(path).read())
if not S_ISDIR(os.stat(path).st_mode)
else [(p,S_ISDIR(os.stat(path+'/'+p).st_mode))
for p in os.listdir(path)])
_.ws.send(json.dumps({"method":"fs_get","result":[path,data,eltName]}))
pass
def h_fs_put(_,path,data):
f=open(path,'w')
for x in data: f.write(unescape(x))
f.close()
pass
def h_fs_system(_,path,eltName='',cwd=None):
import subprocess as sp
import shlex
data=sp.Popen(shlex.split(path),cwd=cwd,stdout=sp.PIPE, stderr=sp.PIPE).communicate()
_.ws.send(json.dumps({"method":"fs_system","result":[path,data,eltName]}));
pass
def h_fs_mkdir (_,path): os.mkdir(path)
def h_fs_rmdir (_,path): os.rmdir(path)
def h_fs_touch (_,path): open(path,'w').close()
def h_fs_unlink(_,path): os.unlink(path)
pass
class FsApp(FilesystemMixin):
def __init__(_,ws):_.ws=ws
|
Java
|
# Tephrosia retamoides var. genuina R.Vig. VARIETY
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null
|
Java
|
# Marasmius microhaedinus Singer SPECIES
#### Status
ACCEPTED
#### According to
Index Fungorum
#### Published in
Sydowia 18(1-6): 260, 338 (1965)
#### Original name
Marasmius microhaedinus Singer
### Remarks
null
|
Java
|
# Bryum lamprostegum C. Müller, 1853 SPECIES
#### Status
ACCEPTED
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null
|
Java
|
# Malus domestica var. asiatica (Nakai) Ponomar. VARIETY
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null
|
Java
|
# Zanthoxylum ochroxylum DC. SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null
|
Java
|
# Ponerorchis hemipilioides (Finet) Soó SPECIES
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null
|
Java
|
# Navicula directa var. remota (Grunow) Cleve VARIETY
#### Status
ACCEPTED
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null
|
Java
|
# Sisymbrium nudum (Bél. ex Boiss.) Boiss. SPECIES
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null
|
Java
|
# Anthemis cossyrensis Guss. SPECIES
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null
|
Java
|
# Lepidodendron crenatum SPECIES
#### Status
ACCEPTED
#### According to
Interim Register of Marine and Nonmarine Genera
#### Published in
null
#### Original name
null
### Remarks
null
|
Java
|
# Pleroma erigeron Spruce ex Triana SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null
|
Java
|
# Ruagea insignis (C.DC.) T.D.Penn. SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
Cabralea insignis C.DC.
### Remarks
null
|
Java
|
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"database/sql"
"database/sql/driver"
"fmt"
"hash/crc32"
"strings"
"time"
pbinlog "github.com/cwen0/cdb-syncer/protocol"
"github.com/go-sql-driver/mysql"
"github.com/juju/errors"
"github.com/ngaut/log"
"github.com/pingcap/tidb/ast"
tddl "github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/infoschema"
tmysql "github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/parser"
"github.com/pingcap/tidb/terror"
)
type job struct {
tp pbinlog.BinlogType
sql string
args []interface{}
key string
retry bool
pos Position
}
func newJob(tp pbinlog.BinlogType, sql string, args []interface{}, key string, retry bool, pos Position) *job {
return &job{tp: tp, sql: sql, args: args, key: key, retry: retry, pos: pos}
}
func genHashKey(key string) uint32 {
return crc32.ChecksumIEEE([]byte(key))
}
func genPKey(rows []*pbinlog.Row) string {
var values []string
for _, row := range rows {
values = append(values, row.GetColumnValue())
}
return strings.Join(values, ",")
}
func genInsertSQL(binlog *pbinlog.Binlog) (string, string, []interface{}, error) {
var sql string
var values []string
sql += "replace into " + binlog.GetDbName() + "." + binlog.GetTableName() + "("
rows := binlog.GetRows()
for _, row := range rows {
sql += row.GetColumnName() + ","
values = append(values, row.GetColumnValue())
}
sql = sql[0:len(sql)-1] + ") values ("
for range rows {
sql += "?,"
}
sql = sql[0:len(sql)-1] + ")"
return sql, binlog.GetTableName() + genPKey(binlog.GetPrimaryKey()), stringToInterface(values), nil
}
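// Illustration only (not part of the original source): for a binlog row set such as
// {id: "1", name: "a"} on table db.t, genInsertSQL builds
//   replace into db.t(id,name) values (?,?)
// with args ["1", "a"], and the job key is the table name plus the primary-key values.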
func genUpdateSQL(binlog *pbinlog.Binlog) (string, string, []interface{}, error) {
var sql string
var values []string
sql += "update " + binlog.GetDbName() + "." + binlog.GetTableName() + " set "
rows := binlog.GetRows()
for _, row := range rows {
sql += row.GetColumnName() + "=?,"
values = append(values, row.GetColumnValue())
}
sql = sql[0:len(sql)-1] + " where 1=1 "
for _, row := range binlog.GetPrimaryKey() {
sql += " and " + row.GetColumnName() + " = ? "
values = append(values, row.GetColumnValue())
}
return sql, binlog.GetTableName() + genPKey(binlog.GetPrimaryKey()), stringToInterface(values), nil
}
func genDeleteSQL(binlog *pbinlog.Binlog) (string, string, []interface{}, error) {
var sql string
var values []string
sql += "delete from " + binlog.GetDbName() + "." + binlog.GetTableName() + " where 1=1 "
for _, row := range binlog.GetPrimaryKey() {
sql += " and " + row.GetColumnName() + " = ? "
values = append(values, row.GetColumnValue())
}
return sql, binlog.GetTableName() + genPKey(binlog.GetPrimaryKey()), stringToInterface(values), nil
}
func genDdlSQL(binlog *pbinlog.Binlog) ([]string, string, []interface{}, error) {
var sqls []string
empty := make([]interface{}, 0)
rows := binlog.GetRows()
for _, row := range rows {
tmpSqls, ok, err := resolveDDLSQL(row.GetSql())
if err != nil {
return sqls, "", empty, errors.Errorf("parse ddk sql: %v failed: %v", row.GetSql(), err)
}
if !ok {
continue
}
for _, sql := range tmpSqls {
//var sql string
//if binlog.GetDbName() != "" {
//sql += "use " + binlog.GetDbName() + ";"
//}
//sql += s + ";"
sqls = append(sqls, sql)
}
}
return sqls, "", empty, nil
}
func ignoreDDLError(err error) bool {
mysqlErr, ok := errors.Cause(err).(*mysql.MySQLError)
if !ok {
return false
}
errCode := terror.ErrCode(mysqlErr.Number)
switch errCode {
case infoschema.ErrDatabaseExists.Code(), infoschema.ErrDatabaseNotExists.Code(), infoschema.ErrDatabaseDropExists.Code(),
infoschema.ErrTableExists.Code(), infoschema.ErrTableNotExists.Code(), infoschema.ErrTableDropExists.Code(),
infoschema.ErrColumnExists.Code(), infoschema.ErrColumnNotExists.Code(),
infoschema.ErrIndexExists.Code(), tddl.ErrCantDropFieldOrKey.Code():
return true
default:
return false
}
}
func isRetryableError(err error) bool {
if err == driver.ErrBadConn {
return true
}
var e error
for {
e = errors.Cause(err)
if err == e {
break
}
err = e
}
mysqlErr, ok := err.(*mysql.MySQLError)
if ok {
if mysqlErr.Number == tmysql.ErrUnknown {
return true
}
return false
}
return true
}
func querySQL(db *sql.DB, query string) (*sql.Rows, error) {
var (
err error
rows *sql.Rows
)
for i := 0; i < maxRetryCount; i++ {
if i > 0 {
log.Warnf("query sql retry %d - %s", i, query)
time.Sleep(retryTimeout)
}
log.Debugf("[query][sql]%s", query)
rows, err = db.Query(query)
if err != nil {
if !isRetryableError(err) {
return rows, errors.Trace(err)
}
log.Warnf("[query][sql]%s[error]%v", query, err)
continue
}
return rows, nil
}
if err != nil {
log.Errorf("query sql[%s] failed %v", query, errors.ErrorStack(err))
return nil, errors.Trace(err)
}
return nil, errors.Errorf("query sql[%s] failed", query)
}
func executeSQL(db *sql.DB, sqls []string, args [][]interface{}, retry bool) error {
if len(sqls) == 0 {
return nil
}
var (
err error
txn *sql.Tx
)
retryCount := 1
if retry {
retryCount = maxRetryCount
}
LOOP:
for i := 0; i < retryCount; i++ {
if i > 0 {
log.Warnf("exec sql retry %d - %v - %v", i, sqls, args)
time.Sleep(retryTimeout)
}
txn, err = db.Begin()
if err != nil {
log.Errorf("exec sqls[%v] begin failed %v", sqls, errors.ErrorStack(err))
continue
}
for i := range sqls {
log.Debugf("[exec][sql]%s[args]%v", sqls[i], args[i])
_, err = txn.Exec(sqls[i], args[i]...)
if err != nil {
if !isRetryableError(err) {
rerr := txn.Rollback()
if rerr != nil {
log.Errorf("[exec][sql]%s[args]%v[error]%v", sqls[i], args[i], rerr)
}
break LOOP
}
log.Warnf("[exec][sql]%s[args]%v[error]%v", sqls[i], args[i], err)
rerr := txn.Rollback()
if rerr != nil {
log.Errorf("[exec][sql]%s[args]%v[error]%v", sqls[i], args[i], rerr)
}
continue LOOP
}
}
err = txn.Commit()
if err != nil {
log.Errorf("exec sqls[%v] commit failed %v", sqls, errors.ErrorStack(err))
continue
}
return nil
}
if err != nil {
log.Errorf("exec sqls[%v] failed %v", sqls, errors.ErrorStack(err))
return errors.Trace(err)
}
return errors.Errorf("exec sqls[%v] failed", sqls)
}
func createDB(cfg DBConfig) (*sql.DB, error) {
dbDSN := fmt.Sprintf("%s:%s@tcp(%s:%d)/?charset=utf8&interpolateParams=true", cfg.User, cfg.Password, cfg.Host, cfg.Port)
db, err := sql.Open("mysql", dbDSN)
if err != nil {
return nil, errors.Trace(err)
}
return db, nil
}
func closeDB(db *sql.DB) error {
if db == nil {
return nil
}
return errors.Trace(db.Close())
}
func createDBs(cfg DBConfig, count int) ([]*sql.DB, error) {
dbs := make([]*sql.DB, 0, count)
for i := 0; i < count; i++ {
db, err := createDB(cfg)
if err != nil {
return nil, errors.Trace(err)
}
dbs = append(dbs, db)
}
return dbs, nil
}
func closeDBs(dbs ...*sql.DB) {
for _, db := range dbs {
err := closeDB(db)
if err != nil {
log.Errorf("close db failed - %v", err)
}
}
}
func parserDDLTableName(sql string) (TableName, error) {
stmt, err := parser.New().ParseOneStmt(sql, "", "")
if err != nil {
return TableName{}, errors.Trace(err)
}
var res TableName
switch v := stmt.(type) {
case *ast.CreateDatabaseStmt:
res = genTableName(v.Name, "")
case *ast.DropDatabaseStmt:
res = genTableName(v.Name, "")
case *ast.CreateIndexStmt:
res = genTableName(v.Table.Schema.L, v.Table.Name.L)
case *ast.CreateTableStmt:
res = genTableName(v.Table.Schema.L, v.Table.Name.L)
case *ast.DropIndexStmt:
res = genTableName(v.Table.Schema.L, v.Table.Name.L)
case *ast.TruncateTableStmt:
res = genTableName(v.Table.Schema.L, v.Table.Name.L)
case *ast.DropTableStmt:
if len(v.Tables) != 1 {
return res, errors.Errorf("may resovle DDL sql failed")
}
res = genTableName(v.Tables[0].Schema.L, v.Tables[0].Name.L)
default:
return res, errors.Errorf("unkown DDL type")
}
return res, nil
}
func genTableName(schema string, table string) TableName {
return TableName{Schema: schema, Name: table}
}
// resolveDDLSQL resolve to one ddl sql
// example: drop table test.a,test2.b -> drop table test.a; drop table test2.b;
func resolveDDLSQL(sql string) (sqls []string, ok bool, err error) {
stmt, err := parser.New().ParseOneStmt(sql, "", "")
if err != nil {
log.Errorf("Parser SQL error: %s", sql)
return nil, false, errors.Trace(err)
}
_, isDDL := stmt.(ast.DDLNode)
if !isDDL {
sqls = append(sqls, sql)
return
}
switch v := stmt.(type) {
case *ast.DropTableStmt:
var ex string
if v.IfExists {
ex = "if exists"
}
for _, t := range v.Tables {
var db string
if t.Schema.O != "" {
db = fmt.Sprintf("`%s`.", t.Schema.O)
}
s := fmt.Sprintf("drop table %s %s`%s`", ex, db, t.Name.O)
sqls = append(sqls, s)
}
default:
sqls = append(sqls, sql)
}
return sqls, true, nil
}
|
Java
|
package com.basicalgorithms.coding_games;
import java.util.HashSet;
import java.util.Objects;
import java.util.Scanner;
import java.util.Set;
/**
* Original question: https://www.codingame.com/multiplayer/bot-programming/coders-strike-back
*/
public class CodersStrikeBack {
static double longestDist = Integer.MIN_VALUE;
static Point initialPoint = null;
static boolean hasFinishedOneLap;
static Point from = null;
static Point lastCheckpoint = null;
static final Set<Point> visitedCheckPoints = new HashSet<>();
static boolean hasBoosted = false;
public static void main(String args[]) {
Scanner in = new Scanner(System.in);
// game loop
while (true) {
int x = in.nextInt();
int y = in.nextInt();
int nextCheckpointX = in.nextInt(); // x position of the next check point
int nextCheckpointY = in.nextInt(); // y position of the next check point
int nextCheckpointDist = in.nextInt(); // distance to the next checkpoint
int nextCheckpointAngle = in.nextInt(); // angle between your pod orientation and the direction of the next checkpoint
int opponentX = in.nextInt();
int opponentY = in.nextInt();
// Write an action using System.out.println()
// To debug: System.err.println("Debug messages...");
// You have to output the target position
// followed by the power (0 <= thrust <= 100)
// i.e.: "x y thrust"
final Point nextCheckpoint = new Point(nextCheckpointX, nextCheckpointY);
final Point currentPosition = new Point(x, y);
final Point enemyPosition = new Point(opponentX, opponentY);
if (visitedCheckPoints.size() > 1 && enemyInRange(currentPosition, enemyPosition)) {
ramEnemyShip(currentPosition, enemyPosition);
} else {
cruise(currentPosition, nextCheckpoint, nextCheckpointAngle);
}
if (!nextCheckpoint.equals(lastCheckpoint)) {
from = lastCheckpoint;
}
lastCheckpoint = nextCheckpoint;
}
}
private static void ramEnemyShip(final Point currentPosition, final Point enemyPosition) {
sailToDestination((enemyPosition.x), enemyPosition.y, "100");
}
private static boolean enemyInRange(final Point currentPosition, final Point enemyPosition) {
return getDistant(currentPosition, enemyPosition) <= 1000;
}
private static void cruise(
final Point currentPosition,
final Point nextCheckpoint,
final int nextCheckpointAngle) {
if (initialPoint == null) {
initialPoint = currentPosition;
}
int thrust = isWithinAngle(nextCheckpointAngle) ? 100 : 0;
String power = String.valueOf(thrust);
visitedCheckPoints.add(nextCheckpoint);
System.err.println(
"Checkpoint added:" + " nextCheckpointX=" + nextCheckpoint.x + ", nextCheckpointY=" + nextCheckpoint.y);
for (final Point visitedCheckPoint : visitedCheckPoints) {
System.err.println("Visited checkpoint: (" + visitedCheckPoint.x + ", " + visitedCheckPoint.y + ")");
}
if (shouldSlowDown(currentPosition, nextCheckpoint)) {
power = String.valueOf(35);
}
if (hasFinishedOneLap(nextCheckpoint) &&
isLongestDistant(from, nextCheckpoint) &&
isWithinSharpAngle(nextCheckpointAngle) &&
!hasBoosted) {
power = "BOOST";
hasBoosted = true;
System.err.println("Boosted!!!");
}
sailToDestination(nextCheckpoint.x, nextCheckpoint.y, power);
}
private static boolean shouldSlowDown(
final Point currentPosition,
final Point nextCheckpoint) {
return getDistant(currentPosition, nextCheckpoint) < 1000;
}
private static void sailToDestination(final int nextCheckpointX, final int nextCheckpointY, final String power) {
System.out.println(nextCheckpointX + " " + nextCheckpointY + " " + power);
System.err.println("Thrust:" + power);
}
private static boolean isWithinAngle(final int nextCheckpointAngle) {
return -90 < nextCheckpointAngle && nextCheckpointAngle < 90;
}
private static boolean isWithinSharpAngle(final int nextCheckpointAngle) {
return -15 < nextCheckpointAngle && nextCheckpointAngle < 15;
}
private static boolean hasFinishedOneLap(final Point point) {
if (hasFinishedOneLap) {
return true;
}
if (initialPoint == null) { return false; }
hasFinishedOneLap = getDistant(initialPoint, point) <= 600;
return hasFinishedOneLap;
}
private static boolean isLongestDistant(final Point from, final Point endPoint) {
if (from == null) {
return false;
}
System.err.println("Start Point: (" + from.x + ", " + from.y + "); End Point: ("
+ endPoint.x + ", " + endPoint.y + ") ");
double dist = getDistant(from, endPoint);
System.err.println("dist=" + dist + ", longestDist=" + longestDist);
if (dist >= longestDist) {
longestDist = dist;
return true;
}
return false;
}
private static double getDistant(final Point from, final Point endPoint) {
return Math.sqrt(Math.pow(from.x - endPoint.x, 2) + Math.pow(from.y - endPoint.y, 2));
}
private static class Point {
final int x;
final int y;
private Point(final int t1, final int t2) {
this.x = t1;
this.y = t2;
}
@Override
public boolean equals(final Object o) {
if (this == o) { return true; }
if (!(o instanceof Point)) { return false; }
final Point point = (Point) o;
return x == point.x &&
y == point.y;
}
@Override
public int hashCode() {
return Objects.hash(x, y);
}
}
}
|
Java
|
'use strict';
var path = require('path');
var util = require('util');
module.exports = function(grunt) {
grunt.registerMultiTask('vjslanguages', 'A Grunt plugin for compiling VideoJS language assets.', function() {
var createLanguageFile = function(languageName, languageData, jsFilePath) {
var jsTemplate = 'videojs.addLanguage("' + languageName + '",' + JSON.stringify(languageData,null,' ') + ');';
grunt.file.write(jsFilePath, jsTemplate);
grunt.log.writeln('- [' + languageName +'] Language Built. File "' + jsFilePath + '" created.');
};
this.files.forEach(function(f) {
var languageName, languageData, jsFilePath;
// Multiple Files Case
if(util.isArray(f.src)){
for(var i =0; i < f.src.length; i++) {
languageName = path.basename(f.src[i], '.json');
languageData = grunt.file.readJSON(f.src[i]);
jsFilePath = path.join(f.dest, languageName + '.js');
createLanguageFile(languageName, languageData, jsFilePath);
}
}
// Singular File Case
else {
languageName = path.basename(f.src, '.json');
languageData = grunt.file.readJSON(f.src);
jsFilePath = path.join(f.dest, languageName + '.js');
createLanguageFile(languageName, languageData, jsFilePath);
}
});
});
};
|
Java
|
//
// WJAuthorView.h
// Qiushibaike framework
//
// Created by 孙文君 on 15/6/30.
// Copyright (c) 2015 sunwenjun. All rights reserved.
//
#import <UIKit/UIKit.h>
@class WJFrameAuthor,WJAuthor;
@interface WJAuthorView : UIView
//@property(nonatomic,strong)WJAuthor *author;
@property(nonatomic,strong)WJFrameAuthor *authorFrame;
@end
|
Java
|
//
// AddHomeViewController.h
// AirTouch
//
// Created by kenny on 15/8/12.
// Copyright (c) 2015 Honeywell. All rights reserved.
//
#import "BaseViewController.h"
#import "IContainerViewControllerDelegate.h"
@interface AddHomeViewController : BaseViewController
@property (nonatomic, weak) id<IContainerViewControllerDelegate> delegate;
@end
|
Java
|
<?php
/**
* Zend Framework
*
* LICENSE
*
* This source file is subject to the new BSD license that is bundled
* with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://framework.zend.com/license/new-bsd
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to license@zend.com so we can send you a copy immediately.
*
* @category Zend
* @package Zend_Filter
* @copyright Copyright (c) 2005-2008 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
* @version $Id: BaseName.php 8064 2008-02-16 10:58:39Z thomas $
*/
/**
* @see Zend_Filter_Interface
*/
require_once 'Zend/Filter/Interface.php';
/**
* @category Zend
* @package Zend_Filter
* @copyright Copyright (c) 2005-2008 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
*/
class Zend_Filter_BaseName implements Zend_Filter_Interface
{
/**
* Defined by Zend_Filter_Interface
*
* Returns basename($value)
*
* @param string $value
* @return string
*/
public function filter($value)
{
return basename((string) $value);
}
}
|
Java
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
tests for catalog module
"""
import os
import fabric.api
from fabric.operations import _AttributeString
from mock import patch
from prestoadmin import catalog
from prestoadmin.util import constants
from prestoadmin.util.exception import ConfigurationError, \
ConfigFileNotFoundError
from prestoadmin.standalone.config import PRESTO_STANDALONE_USER_GROUP
from prestoadmin.util.local_config_util import get_catalog_directory
from tests.unit.base_unit_case import BaseUnitCase
class TestCatalog(BaseUnitCase):
def setUp(self):
super(TestCatalog, self).setUp(capture_output=True)
@patch('prestoadmin.catalog.os.path.isfile')
def test_add_not_exist(self, isfile_mock):
isfile_mock.return_value = False
self.assertRaisesRegexp(ConfigurationError,
'Configuration for catalog dummy not found',
catalog.add, 'dummy')
@patch('prestoadmin.catalog.validate')
@patch('prestoadmin.catalog.deploy_files')
@patch('prestoadmin.catalog.os.path.isfile')
def test_add_exists(self, isfile_mock, deploy_mock, validate_mock):
isfile_mock.return_value = True
catalog.add('tpch')
filenames = ['tpch.properties']
deploy_mock.assert_called_with(filenames,
get_catalog_directory(),
constants.REMOTE_CATALOG_DIR,
PRESTO_STANDALONE_USER_GROUP)
validate_mock.assert_called_with(filenames)
@patch('prestoadmin.catalog.deploy_files')
@patch('prestoadmin.catalog.os.path.isdir')
@patch('prestoadmin.catalog.os.listdir')
@patch('prestoadmin.catalog.validate')
def test_add_all(self, mock_validate, listdir_mock, isdir_mock,
deploy_mock):
catalogs = ['tpch.properties', 'another.properties']
listdir_mock.return_value = catalogs
catalog.add()
deploy_mock.assert_called_with(catalogs,
get_catalog_directory(),
constants.REMOTE_CATALOG_DIR,
PRESTO_STANDALONE_USER_GROUP)
@patch('prestoadmin.catalog.deploy_files')
@patch('prestoadmin.catalog.os.path.isdir')
def test_add_all_fails_if_dir_not_there(self, isdir_mock, deploy_mock):
isdir_mock.return_value = False
self.assertRaisesRegexp(ConfigFileNotFoundError,
r'Cannot add catalogs because directory .+'
r' does not exist',
catalog.add)
self.assertFalse(deploy_mock.called)
@patch('prestoadmin.catalog.sudo')
@patch('prestoadmin.catalog.os.path.exists')
@patch('prestoadmin.catalog.os.remove')
def test_remove(self, local_rm_mock, exists_mock, sudo_mock):
script = ('if [ -f /etc/presto/catalog/tpch.properties ] ; '
'then rm /etc/presto/catalog/tpch.properties ; '
'else echo "Could not remove catalog \'tpch\'. '
'No such file \'/etc/presto/catalog/tpch.properties\'"; fi')
exists_mock.return_value = True
fabric.api.env.host = 'localhost'
catalog.remove('tpch')
sudo_mock.assert_called_with(script)
local_rm_mock.assert_called_with(get_catalog_directory() +
'/tpch.properties')
@patch('prestoadmin.catalog.sudo')
@patch('prestoadmin.catalog.os.path.exists')
def test_remove_failure(self, exists_mock, sudo_mock):
exists_mock.return_value = False
fabric.api.env.host = 'localhost'
out = _AttributeString()
out.succeeded = False
sudo_mock.return_value = out
self.assertRaisesRegexp(SystemExit,
'\\[localhost\\] Failed to remove catalog tpch.',
catalog.remove,
'tpch')
@patch('prestoadmin.catalog.sudo')
@patch('prestoadmin.catalog.os.path.exists')
def test_remove_no_such_file(self, exists_mock, sudo_mock):
exists_mock.return_value = False
fabric.api.env.host = 'localhost'
error_msg = ('Could not remove catalog tpch: No such file ' +
os.path.join(get_catalog_directory(), 'tpch.properties'))
out = _AttributeString(error_msg)
out.succeeded = True
sudo_mock.return_value = out
self.assertRaisesRegexp(SystemExit,
'\\[localhost\\] %s' % error_msg,
catalog.remove,
'tpch')
@patch('prestoadmin.catalog.os.listdir')
@patch('prestoadmin.catalog.os.path.isdir')
def test_warning_if_connector_dir_empty(self, isdir_mock, listdir_mock):
isdir_mock.return_value = True
listdir_mock.return_value = []
catalog.add()
self.assertEqual('\nWarning: Directory %s is empty. No catalogs will'
' be deployed\n\n' % get_catalog_directory(),
self.test_stderr.getvalue())
@patch('prestoadmin.catalog.os.listdir')
@patch('prestoadmin.catalog.os.path.isdir')
def test_add_permission_denied(self, isdir_mock, listdir_mock):
isdir_mock.return_value = True
error_msg = ('Permission denied')
listdir_mock.side_effect = OSError(13, error_msg)
fabric.api.env.host = 'localhost'
self.assertRaisesRegexp(SystemExit, '\[localhost\] %s' % error_msg,
catalog.add)
@patch('prestoadmin.catalog.os.remove')
@patch('prestoadmin.catalog.remove_file')
def test_remove_os_error(self, remove_file_mock, remove_mock):
fabric.api.env.host = 'localhost'
error = OSError(13, 'Permission denied')
remove_mock.side_effect = error
self.assertRaisesRegexp(OSError, 'Permission denied',
catalog.remove, 'tpch')
@patch('prestoadmin.catalog.secure_create_directory')
@patch('prestoadmin.util.fabricapi.put')
def test_deploy_files(self, put_mock, create_dir_mock):
local_dir = '/my/local/dir'
remote_dir = '/my/remote/dir'
catalog.deploy_files(['a', 'b'], local_dir, remote_dir,
PRESTO_STANDALONE_USER_GROUP)
create_dir_mock.assert_called_with(remote_dir, PRESTO_STANDALONE_USER_GROUP)
put_mock.assert_any_call('/my/local/dir/a', remote_dir, use_sudo=True,
mode=0600)
put_mock.assert_any_call('/my/local/dir/b', remote_dir, use_sudo=True,
mode=0600)
@patch('prestoadmin.catalog.os.path.isfile')
@patch("__builtin__.open")
def test_validate(self, open_mock, is_file_mock):
is_file_mock.return_value = True
file_obj = open_mock.return_value.__enter__.return_value
file_obj.read.return_value = 'connector.noname=example'
self.assertRaisesRegexp(ConfigurationError,
'Catalog configuration example.properties '
'does not contain connector.name',
catalog.add, 'example')
@patch('prestoadmin.catalog.os.path.isfile')
def test_validate_fail(self, is_file_mock):
is_file_mock.return_value = True
self.assertRaisesRegexp(
SystemExit,
'Error validating ' + os.path.join(get_catalog_directory(), 'example.properties') + '\n\n'
'Underlying exception:\n No such file or directory',
catalog.add, 'example')
@patch('prestoadmin.catalog.get')
@patch('prestoadmin.catalog.files.exists')
@patch('prestoadmin.catalog.ensure_directory_exists')
@patch('prestoadmin.catalog.os.path.exists')
def test_gather_connectors(self, path_exists, ensure_dir_exists,
files_exists, get_mock):
fabric.api.env.host = 'any_host'
path_exists.return_value = False
files_exists.return_value = True
catalog.gather_catalogs('local_config_dir')
get_mock.assert_called_once_with(
constants.REMOTE_CATALOG_DIR, 'local_config_dir/any_host/catalog', use_sudo=True)
# if remote catalog dir does not exist
get_mock.reset_mock()
files_exists.return_value = False
results = catalog.gather_catalogs('local_config_dir')
self.assertEqual([], results)
self.assertFalse(get_mock.called)
|
Java
|
$(document).ready(function(){
$("#inc_tab #tb1").removeClass();
$("#inc_tab #tb4").addClass("active");
$("#user_name").blur(function(){
var user_name = $.trim($(this).val());
$(this).val(user_name);
if (user_name.length==0){
$(this).parent().find("#user_name_null_warn").show();
$(this).parent().find("#user_name_exist_warn").hide();
return;
}
$(this).parent().find("#user_name_null_warn").hide();
var user_id = $(this).parent().find("#user_id").val();
var obj = $(this).parent().find("#user_name_exist_warn");
$.post(app.global.variable.base_path +"user/name/verify", {user_id:user_id, user_name:user_name}, function(data) {
if(data.toString().length > 0){
obj.show();
}else{
obj.hide();
}
})
})
$('#user_save_cancel').click(function(){
window.location.href=app.global.variable.base_path +'user/list';
})
selectRoleChange();
})
function selectRoleChange(){
var obj = $("#select_role_id");
var role_id_obj = obj.parent().find("#role_id");
$("#role_authority_"+role_id_obj.val()).hide();
$("#role_authority_"+obj.val()).show();
role_id_obj.val(obj.val());
}
function user_sava_check(){
var obj = $("#user_editor_form");
var valid = true;
obj.find(".functionWarn").each(function(){
if($(this).is(":visible")){
valid = false;
}
})
// User name
var user_name = obj.find("#user_name").val();
if(isSpace(user_name)){
obj.find("#user_name_null_warn").show();
valid = false;
}else{
obj.find("#user_name_null_warn").hide();
}
return valid;
}
|
Java
|
# Entyloma ficariae A.A. Fisch. Waldh., 1877 SPECIES
#### Status
ACCEPTED
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
Bull. Soc. nat. Moscou, Biol. 52: 309 (1877)
#### Original name
Entyloma ficariae A.A. Fisch. Waldh., 1877
### Remarks
null
|
Java
|
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.jdisc.client;
import com.google.inject.AbstractModule;
import com.google.inject.Inject;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
/**
* @author Simon Thoresen Hult
*/
public class ClientDriverTestCase {
@Test
public void requireThatApplicationInstanceInjectionWorks() throws Exception {
MyModule module = new MyModule();
ClientDriver.runApplication(new MyApplication(module));
assertEquals(5, module.state);
}
@Test
public void requireThatApplicationClassInjectionWorks() throws Exception {
MyModule module = new MyModule();
ClientDriver.runApplication(MyApplication.class, module);
assertEquals(5, module.state);
}
private static class MyApplication implements ClientApplication {
final MyModule module;
@Inject
MyApplication(MyModule module) {
this.module = module;
module.state = 1;
}
@Override
public void start() {
if (++module.state != 2) {
throw new IllegalStateException();
}
}
@Override
public void run() {
if (++module.state != 3) {
throw new IllegalStateException();
}
}
@Override
public void stop() {
if (++module.state != 4) {
throw new IllegalStateException();
}
}
@Override
public void destroy() {
if (++module.state != 5) {
throw new IllegalStateException();
}
}
}
private static class MyModule extends AbstractModule {
int state = 0;
@Override
protected void configure() {
bind(MyModule.class).toInstance(this);
}
}
}
|
Java
|
Public Class mysqlSettings
Private Sub btnSalvar_Click(ByVal sender As System.Object, ByVal e As System.EventArgs) Handles btnSalvar.Click
'Store the values in their respective global variables
_varglobal.ip = txtIp.Text
_varglobal.pass = txtPass.Text
_varglobal.user = txtUser.Text
'Close the form
Me.Close()
End Sub
Private Sub mysqlSettings_Load(ByVal sender As System.Object, ByVal e As System.EventArgs) Handles MyBase.Load
'Store the connection parameters in their respective variables
_varglobal.ip = txtIp.Text
_varglobal.pass = txtPass.Text
_varglobal.user = txtUser.Text
End Sub
End Class
|
Java
|
package sample.multiversion;
public interface Core {
String getVersion();
String getDependencyVersion();
}
|
Java
|
package org.example;
import org.camunda.bpm.spring.boot.starter.annotation.EnableProcessApplication;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
@SpringBootApplication
@EnableProcessApplication("dynamic-tenant-designation")
public class CamundaApplication {
public static void main(String... args) {
SpringApplication.run(CamundaApplication.class, args);
}
}
|
Java
|
#!/usr/bin/env perl
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<helpdesk@ensembl.org>.
=cut
use warnings;
use strict;
use Bio::EnsEMBL::Registry;
use Bio::EnsEMBL::Utils::Sequence qw(reverse_comp expand);
use Getopt::Long;
use Fcntl qw( LOCK_SH LOCK_EX );
use Progress;
# A hard-coded hash containing the subroutines to call for each check
my %ALLELE_PREDICATE = (
4 => \&novariation_alleles,
13 => \&illegal_character_alleles,
14 => \&ambiguous_alleles
);
my %SUBSNP_PREDICATE = (
);
my %VARIATION_ALLELE_PREDICATE = (
11 => \&mismatched_allele_string,
12 => \&multiple_alleles
);
my %VARIATION_FEATURE_PREDICATE = (
1 => \&multiple_mappings,
2 => \&reference_mismatch,
3 => \&multiple_alleles,
5 => \&no_mapping,
13 => \&illegal_character_alleles,
14 => \&ambiguous_alleles,
15 => \&inconsistent_coords
);
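# Illustration only (not part of the original script): these hashes act as dispatch tables keyed by
# failed_description_id, so the main loops below can invoke any single check generically, e.g.
#   $VARIATION_FEATURE_PREDICATE{1}->(\@vf_arr, {});   # runs multiple_mappings() on the grouped rows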
# Accepted alleles
my @ACCEPTED_ALLELE = (
'HGMD_MUTATION'
);
my %AMBIG_REGEXP_HASH = (
'M' => '[AC]',
'R' => '[AG]',
'W' => '[AT]',
'S' => '[CG]',
'Y' => '[CT]',
'K' => '[GT]',
'V' => '[ACG]',
'H' => '[ACT]',
'D' => '[AGT]',
'B' => '[CGT]',
'X' => '[ACGT]',
'N' => '[ACGT]'
);
# Get a string containing the possible ambiguity nucleotides
my $AMBIGUITIES = join("",keys(%AMBIG_REGEXP_HASH));
# Add the code for uracil in case some allele should have that
%AMBIG_REGEXP_HASH = (%AMBIG_REGEXP_HASH,('U' => 'T'));
# The maximum number of mappings before the variation is flagged
my $MAX_MAP_WEIGHT = 3;
# The maximum number of different alleles a variation is permitted to have
my $MAX_ALLELES = 3;
# The option definitions
my @defs = (
'registry_file=s',
'qc=s@',
'output_dir=s',
'variation_id_range=s',
'task_management_file=s',
'task_id=i',
'species=s',
'group=s',
'scope=s',
'parallelize=i',
'source_id=i@',
'help!'
);
# Parse the command line and store the results in the options hash
my %options;
GetOptions(\%options,@defs);
# Check that we got a registry configuration file
die ("You need to provide a registry configuration file") unless (defined($options{'registry_file'}));
# Check that a species was specified
die ("You need to provide a species") unless (defined($options{'species'}));
# If no output dir was specified, use the current working one
my $outdir = $options{'output_dir'};
$outdir ||= "";
# Append a slash if we have a directory
if (length($outdir)) {
$outdir .= "/";
}
# Load the registry and get a DBAdaptor to the variation database we're processing (or the group specified on the command line)
my $registry = 'Bio::EnsEMBL::Registry';
$registry->load_all($options{'registry_file'});
my $species = $options{'species'};
my $group = $options{'group'};
$group ||= 'variation';
my $dba = $registry->get_DBAdaptor($species,$group) or die ("Could not get a DBAdaptor for $species - $group");
# If the option to parallelize was specified, we will chunk the task into the desired sizes and create the corresponding task management file
if ($options{'parallelize'}) {
# Check that a desired task_management_file was specified
die ("You must specify a file where the task parameters will be written") unless (defined($options{'task_management_file'}));
my $chunksize = $options{'parallelize'};
# Get the min and max variation_ids and simply assume that the data is evenly distributed on average w.r.t. variation_id
my $stmt = qq{
SELECT
MIN(variation_id),
MAX(variation_id)
FROM
variation
};
my ($min_id,$max_id) = @{$dba->dbc->db_handle->selectall_arrayref($stmt)->[0]};
# Divide the id range into chunks and write to management file
open (TASK,">",$options{'task_management_file'}) or die ("Could not open " . $options{'task_management_file'} . " for writing");
my $offset = $min_id;
my $task_id = 0;
while ($offset <= $max_id) {
$task_id++;
print TASK join("\t",($task_id,$offset,($offset+$chunksize-1))) . "\n";
$offset += $chunksize;
}
close(TASK);
print STDOUT "The task has been divided into chunks of $chunksize. The parameters have been written to " . $options{'task_management_file'} . ". You should submit this as a job array over the indexes 1-$task_id\n";
exit(0);
}
# We will probably need a core dbadaptor as well so create one
my $dba_core = $registry->get_DBAdaptor($species,'core') or warn ("Could not get a DBAdaptor for $species - core");
# Get the range of variations we should work on. This can either be specified by:
# 1. A variation_id range specified on the command line
# 2. Provided in a task management file specified on the command line. This overrides a specified range.
# If this is the case then a job index corresponding to a row in the task management file must be specified.
# This can either be done on the command line or through the LSB_JOBINDEX environment variable (which gets set by LSF in a jobarray submission).
# The latter overrides the former.
# 3. None of the above, in which case all variations will be processed
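# Hypothetical invocations for illustration (the script and file names are placeholders, not from this repo):
#   perl flag_failed.pl -registry_file ensembl.registry -species human -variation_id_range 1000,2000
#   perl flag_failed.pl -registry_file ensembl.registry -species human -task_management_file tasks.txt -task_id 7
# Under LSF, the LSB_JOBINDEX environment variable takes the place of -task_id for each element of the job array.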
my ($lower_id,$upper_id);
if (defined($options{'task_management_file'})) {
my $job_index = $ENV{'LSB_JOBINDEX'};
$job_index ||= $options{'task_id'};
# Check that we have a job index
die ("A task management file was specified but not a task index, can not proceed") unless (defined($job_index));
# Get the variation_id range for this job index
open(TASK,"<",$options{'task_management_file'}) or die ("Could not open task management file " . $options{'task_management_file'} . " for parsing");
while (<TASK>) {
chomp;
my @arr = split(/\s+/,$_);
($lower_id,$upper_id) = ($arr[1],$arr[2]) if ($arr[0] == $job_index);
}
close(TASK);
# Check that we could find the range
die ("Could not find the corresponding variation_id range for task index $job_index") unless (defined($lower_id) && defined($upper_id));
# Print the job assignment to STDERR
print STDERR "Job $job_index works on range $lower_id - $upper_id ";
}
# Else, we check for a comma-separated range
elsif (defined($options{'variation_id_range'})) {
($lower_id,$upper_id) = split(",",$options{'variation_id_range'});
}
my $failed_variation_file = $outdir . "failed_variation.txt";
my $failed_allele_file = $outdir . "failed_allele.txt";
my $loadfile = {
'variation' => $failed_variation_file,
'allele' => $failed_allele_file
};
### Now, get the data from the database
# Get the haplotype seq region ids
our $HAPLOTYPE_IDS = get_haplotype_seq_region_ids($dba_core);
# Get the failed description ids
my %failed_description = %{get_failed_description($dba,$options{'qc'})};
my @failed_description_ids = keys(%failed_description);
# A hash to hold the variation_ids and the tests that it failed
my %failed_variation;
# A hash to hold the allele_ids and the tests that it failed
my %failed_allele;
# Check if we should do the checking for variations
my $scope = lc($options{'scope'});
$scope ||= 'variation';
if ($scope eq 'variation') {
# Loop over the variation features and flag them as appropriate
# If a variation_id range was specified, create a condition on it
my $condition = get_range_condition($lower_id,$upper_id,"v");
# If a source_id condition was specified, append this to the condition
$condition .= " AND " . get_source_condition($options{'source_id'},"v");
my $stmt = qq{
SELECT
v.variation_id,
v.name,
vf.variation_feature_id,
vf.seq_region_id,
vf.seq_region_start,
vf.seq_region_end,
vf.seq_region_strand,
vf.allele_string,
ras.ref_allele,
ra.seq_region_strand,
'variation'
FROM
variation v LEFT JOIN
variation_feature vf ON (
vf.variation_id = v.variation_id
) LEFT JOIN
(
tmp_ref_allele ra JOIN
tmp_ref_allele_seq ras ON (
ras.ref_allele_seq_id = ra.ref_allele_seq_id
)
) ON (
ra.variation_feature_id = vf.variation_feature_id
)
WHERE
$condition
ORDER BY
v.variation_id;
};
my $sth = $dba->dbc->prepare($stmt);
# Execute the query
$sth->execute();
# Loop over the variation features
my @vf_arr;
my @row = $sth->fetchrow_array();
while (@row) {
# Add the row to the array grouping the same variation_ids into an array
push(@vf_arr,[@row]);
# Get the next row
my @nextrow = $sth->fetchrow_array();
# If we are switching variation or we have no more rows, do the checks
if (!scalar(@nextrow) || $nextrow[0] != $row[0]) {
# Execute the predicates
if (scalar(@vf_arr)) {
my @failed;
# Cache the results in a hash
my $cache = {};
map {
push(@failed,$_) if (exists($VARIATION_FEATURE_PREDICATE{$_}) && $VARIATION_FEATURE_PREDICATE{$_}->(\@vf_arr,$cache));
} @failed_description_ids;
$failed_variation{$row[0]} = \@failed if (scalar(@failed));
}
# Empty the variation array
splice(@vf_arr);
}
@row = @nextrow;
}
}
if ($scope eq 'allele') {
# Loop over the variation features and flag them as appropriate
# If a variation_id range was specified, create a condition on it
my $condition = get_range_condition($lower_id,$upper_id,"a");
my $stmt = qq{
SELECT
a.allele_id,
a.subsnp_id,
a.variation_id,
vf.seq_region_id,
vf.allele_string,
vf.seq_region_end,
vf.seq_region_strand,
a.allele,
NULL,
NULL,
'allele'
FROM
allele a LEFT JOIN
variation_feature vf ON (
vf.variation_id = a.variation_id
)
WHERE
$condition
ORDER BY
a.variation_id,
a.subsnp_id;
};
my $sth = $dba->dbc->prepare($stmt);
# Execute the query
$sth->execute();
# Loop over the joined rows. We'll send off checks both for individual alleles, subsnps and variations
my @variation;
my @subsnp;
my @allele;
my @row = $sth->fetchrow_array();
while (@row) {
# Variation array
push(@variation,[@row]);
push(@subsnp,[@row]);
push(@allele,[@row]);
# Get the next row
my @nextrow = $sth->fetchrow_array();
# If we are switching allele or we have no more rows, do the checks for alleles
if (!scalar(@nextrow) || $nextrow[0] != $row[0]) {
# Execute the predicates
if (scalar(@allele)) {
my @failed;
# Cache the results in a hash
my $cache = {};
map {
push(@failed,$_) if (exists($ALLELE_PREDICATE{$_}) && $ALLELE_PREDICATE{$_}->(\@allele,$cache));
} @failed_description_ids;
if (scalar(@failed)) {
map {$failed_allele{$_->[0]} = \@failed} @allele;
}
}
# Empty the array
splice(@allele);
}
# If we are switching subsnp or we have no more rows, do the checks for subsnp
if (!scalar(@nextrow) || $nextrow[1] != $row[1]) {
# Execute the predicates
if (scalar(@subsnp)) {
my @failed;
# Cache the results in a hash
my $cache = {};
map {
push(@failed,$_) if (exists($SUBSNP_PREDICATE{$_}) && $SUBSNP_PREDICATE{$_}->(\@subsnp,$cache));
} @failed_description_ids;
if (scalar(@failed)) {
map {$failed_allele{$_->[0]} = \@failed} @subsnp;
}
}
# Empty the array
splice(@subsnp);
}
# If we are switching variation or we have no more rows, do the checks for variations
if (!scalar(@nextrow) || $nextrow[2] != $row[2]) {
# Execute the predicates
if (scalar(@variation)) {
my @failed;
# Cache the results in a hash
my $cache = {};
map {
push(@failed,$_) if (exists($VARIATION_ALLELE_PREDICATE{$_}) && $VARIATION_ALLELE_PREDICATE{$_}->(\@variation,$cache));
} @failed_description_ids;
if (scalar(@failed)) {
$failed_variation{$row[2]} = \@failed;
}
}
# Empty the variation feature array
splice(@variation);
}
@row = @nextrow;
}
}
foreach my $scope (('variation','allele')) {
my %h;
if ($scope eq 'variation') {
%h = %failed_variation;
}
else {
%h = %failed_allele;
}
# Only dump to file if we have any results
next unless (scalar(keys(%h)));
# Open the loadfile (append) and get a lock on it
open(LOAD,">>",$loadfile->{$scope}) or die ("Could not open loadfile " . $loadfile->{$scope} . " for writing");
flock(LOAD,LOCK_EX);
# Write the ids and the failed_description_id to the load file
foreach my $id (keys(%h)) {
map {print LOAD "$id\t$_\n"} @{$h{$id}};
}
close(LOAD);
}
# If we finished successfully, print that to STDERR
print STDERR " Finished ok!\n";
# Check if a variation is mapped to more than the maximum allowed number of (non-haplotype) genomic locations
sub multiple_mappings {
my $variation_features = shift;
my $cache = shift;
# If the result of this test has been cached return it
my $failed_description_id = 1;
unless (exists($cache->{$failed_description_id})) {
$cache->{$failed_description_id} = _multiple_mappings($variation_features,$cache);
}
return $cache->{$failed_description_id};
}
sub _multiple_mappings {
my $variation_features = shift;
my $cache = shift;
my $count = 0;
foreach my $vf (@{$variation_features}) {
next unless (defined($vf->[3]));
next if (grep {$vf->[3] == $_} @{$HAPLOTYPE_IDS});
$count++;
return 1 if ($count > $MAX_MAP_WEIGHT);
}
return 0;
}
# Check if the allele string provided by dbSNP is in agreement with the alleles of all subsnps belonging to the variation
sub mismatched_allele_string {
my $rows = shift;
my $cache = shift;
# If the result of this test has been cached return it
my $failed_description_id = 11;
unless (exists($cache->{$failed_description_id})) {
$cache->{$failed_description_id} = _mismatched_allele_string($rows,$cache);
}
return $cache->{$failed_description_id};
}
sub _mismatched_allele_string {
my $rows = shift;
my $cache = shift;
# If this variation has no mapping, it won't have any allele string associated
return 0 if (no_mapping($rows,$cache));
# Get the unique alleles from the subsnps
my %ss = map {$_->[7] => 1} @{$rows};
# Get the unique alleles from the variation feature allele string
my %vf = map {map {$_ => 1} split(/\//,$_->[4])} @{$rows};
# Check that all subsnp alleles are present in the allele_string
map {return 1 unless (exists($vf{$_}))} keys(%ss);
# Check that all allele_string alleles are present in the subsnp alleles
map {return 1 unless (exists($ss{$_}))} keys(%vf);
return 0;
}
# Check if a variation has no mappings
sub no_mapping {
my $rows = shift;
my $cache = shift;
# If the result of this test has been cached return it
my $failed_description_id = 5;
unless (exists($cache->{$failed_description_id})) {
$cache->{$failed_description_id} = _no_mapping($rows,$cache);
}
return $cache->{$failed_description_id};
}
sub _no_mapping {
my $rows = shift;
my $cache = shift;
return (defined($rows->[0][3]) ? 0 : 1);
}
# Check if the coordinates given for a variation is not compatible with its allele string
sub inconsistent_coords {
my $rows = shift;
my $cache = shift;
# If the result of this test has been cached return it
my $failed_description_id = 15;
unless (exists($cache->{$failed_description_id})) {
$cache->{$failed_description_id} = _inconsistent_coords($rows,$cache);
}
return $cache->{$failed_description_id};
}
sub _inconsistent_coords {
my $rows = shift;
my $cache = shift;
# If this variation has no mappings, it shouldn't be classified as inconsistent
return 0 if (no_mapping($rows,$cache));
# If this variation contains illegal characters, there's no point in checking for inconsistent coordinates
return 0 if (illegal_character_alleles($rows,$cache));
# The only cases we accept are when the position is a deletion or when at least one of the alleles is the same length as the position
foreach my $variation_feature (@{$rows}) {
expand(\$variation_feature->[7]);
my $ref_len = ($variation_feature->[5] - $variation_feature->[4] + 1);
# Matching lengths or deletion and insertion in allele string?
next if (grep {($_ eq '-' && $ref_len == 0) || (length($_) == $ref_len)} split(/\//,$variation_feature->[7]));
# Else, this is inconsistent coordinates
return 1;
}
return 0;
}
# Check if the allele string alleles do not agree with the reference sequence
sub reference_mismatch {
my $rows = shift;
my $cache = shift;
# If the result of this test has been cached return it
my $failed_description_id = 2;
unless (exists($cache->{$failed_description_id})) {
$cache->{$failed_description_id} = _reference_mismatch($rows,$cache);
}
return $cache->{$failed_description_id};
}
sub _reference_mismatch {
my $rows = shift;
my $cache = shift;
# If this variation has no mappings, it shouldn't be classified as a mismatch
return 0 if (no_mapping($rows,$cache));
# Get the unique reference alleles
my $ref_allele = _unique_reference_allele($rows);
# Get the unique allele strings
my $allele_string = _unique_allele_string($rows);
# Loop over the allele strings and match them to the reference alleles
foreach my $as (@{$allele_string}) {
expand(\$as);
map {
my $allele = $_;
return 0 if (grep {mismatch($allele,$_) == 0} @{$ref_allele});
} split(/\//,$as);
}
# Nothing matched
return 1;
}
# Check if a sequence (possibly) ambiguous mismatches another
sub mismatch {
my $allele = shift;
my $reference = shift;
# If they match
return 0 if ($allele eq $reference);
# Return mismatch if the allele doesn't contain ambiguity codes
return 1 unless (ambiguous(\$allele));
# Turn the sequence into regexps if necessary
ambiguity_to_regexp(\$allele);
# By now, the allele should only contain nucleotide characters and brackets.
# Do a regexp matching
return 0 if ($reference =~ m/^$allele$/);
return 1;
}
# Check if the allele string contains too many single nucleotide alleles
sub multiple_alleles {
my $rows = shift;
my $cache = shift;
# If the result of this test has been cached return it
my $failed_description_id = ($rows->[0][10] eq 'variation' ? 3 : 12);
unless (exists($cache->{$failed_description_id})) {
$cache->{$failed_description_id} = _multiple_alleles($rows,$cache);
}
return $cache->{$failed_description_id};
}
sub _multiple_alleles {
my $rows = shift;
my $cache = shift;
# If this variation has no mappings, it won't have any allele strings
#return 0 if (no_mapping($rows,$cache) && $rows->[0][10] eq 'variation');
# Get the unique allele strings
my $allele_string = _unique_allele_string($rows);
foreach my $a_string (@{$allele_string}) {
expand(\$a_string);
my $count = grep {$_ =~ m/^[ACGT]$/i} split(/\//,$a_string);
return 1 if ($count > $MAX_ALLELES);
}
return 0;
}
# Check if a variation's allele strings contain ambiguity codes
sub ambiguous_alleles {
my $rows = shift;
my $cache = shift;
# If the result of this test has been cached return it
my $failed_description_id = 14;
unless (exists($cache->{$failed_description_id})) {
$cache->{$failed_description_id} = _ambiguous_alleles($rows,$cache);
}
return $cache->{$failed_description_id};
}
sub _ambiguous_alleles {
my $rows = shift;
my $cache = shift;
my @alleles;
# Check if we are dealing with a variation feature or alleles
if ($rows->[0][10] eq 'variation') {
# If this variation has no mappings, it won't have any illegal characters in the allele_string
#return 0 if (no_mapping($rows,$cache));
# Get the unique allele strings
my $allele_string = _unique_allele_string($rows);
map {push(@alleles,split(/\//,$_))} @{$allele_string};
}
else {
push(@alleles,$rows->[0][7]);
}
foreach my $allele (@alleles) {
# Expand the allele
expand(\$allele);
# Report the allele if it contains 'illegal' characters
return 1 if (ambiguous(\$allele));
}
return 0;
}
# Check if an allele contains ambiguity codes, but make sure that it doesn't contain 'illegal' characters
sub ambiguous {
my $allele_ref = shift;
return (${$allele_ref} =~ m/[$AMBIGUITIES]/i && !illegal_characters($allele_ref));
}
# Check if a variation's allele strings contain illegal characters
sub illegal_character_alleles {
my $rows = shift;
my $cache = shift;
# If the result of this test has been cached return it
my $failed_description_id = 13;
unless (exists($cache->{$failed_description_id})) {
$cache->{$failed_description_id} = _illegal_character_alleles($rows,$cache);
}
return $cache->{$failed_description_id};
}
sub _illegal_character_alleles {
my $rows = shift;
my $cache = shift;
my @alleles;
# Check if we are dealing with a variation feature or alleles
if ($rows->[0][10] eq 'variation') {
# If this variation has no mappings, it won't have any illegal characters in the allele_string
#return 0 if (no_mapping($rows,$cache));
# Get the unique allele strings
my $allele_string = _unique_allele_string($rows);
map {push(@alleles,split(/\//,$_))} @{$allele_string};
}
else {
push(@alleles,$rows->[0][7]);
}
foreach my $allele (@alleles) {
# Expand the allele
expand(\$allele);
# Report the allele if it contains 'illegal' characters
return 1 if (illegal_characters(\$allele));
}
return 0;
}
# Check if an allele is a 'NOVARIATION'
sub novariation_alleles {
my $rows = shift;
my $cache = shift;
# If the result of this test has been cached return it
my $failed_description_id = 4;
unless (exists($cache->{$failed_description_id})) {
$cache->{$failed_description_id} = _novariation_alleles($rows,$cache);
}
return $cache->{$failed_description_id};
}
sub _novariation_alleles {
my $rows = shift;
my $cache = shift;
return 1 if (grep {novariation(\$_->[7])} @{$rows});
return 0;
}
# Keep a list of accepted alleles that won't be flagged as containing illegal characters. Check if an allele is in this list
sub accepted {
my $allele_ref = shift;
map {return 1 if ($_ eq ${$allele_ref})} @ACCEPTED_ALLELE;
return 0;
}
# Check if an allele is 'NOVARIATION'
sub novariation {
my $allele_ref = shift;
return (${$allele_ref} eq 'NOVARIATION');
}
# Check if an allele contains 'illegal' characters
sub illegal_characters {
my $allele_ref = shift;
return (${$allele_ref} =~ m/[^ACGTU\-$AMBIGUITIES]/i && !accepted($allele_ref) && !novariation($allele_ref));
}
# Replace ambiguity codes in a sequence with a suitable regular expression
sub ambiguity_to_regexp {
my $seq_ref = shift;
${$seq_ref} =~ s/([U$AMBIGUITIES])/$AMBIG_REGEXP_HASH{$1}/ig;
};
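# Worked example (illustrative, not in the original script): with %AMBIG_REGEXP_HASH above,
#   my $a = 'ARY';
#   ambiguity_to_regexp(\$a);   # $a is now 'A[AG][CT]'
# which lets mismatch() compare an ambiguous allele against the reference with a plain regexp.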
# Private method to get the unique allele strings from variation features
sub _unique_allele_string {
my $variation_features = shift;
# Check first if this is just a single row
return [$variation_features->[0][7]] if (scalar(@{$variation_features}) == 1);
# Get the unique allele strings
my %allele_string;
map {
$allele_string{$_->[7]}++;
} @{$variation_features};
my @unique = keys(%allele_string);
# If it is alleles rather than a variation we're looking at, create an allele string from the alleles
if ($variation_features->[0][10] eq 'allele') {
my $as = join("/",@unique);
@unique = ($as);
}
return \@unique;
}
# Private method to get the reference alleles from variation features
sub _unique_reference_allele {
my $variation_features = shift;
# Check first if this is just a single row
if (scalar(@{$variation_features}) == 1) {
# Flip the reference allele if necessary
reverse_comp(\$variation_features->[0][8]) unless ($variation_features->[0][9] == $variation_features->[0][6]);
return [$variation_features->[0][8]];
}
# Get the unique reference alleles
my %ref_allele;
map {
# Flip the reference allele if necessary
reverse_comp(\$_->[8]) unless ($_->[9] == $_->[6]);
$ref_allele{$_->[8]}++;
} @{$variation_features};
my @unique = keys(%ref_allele);
return \@unique;
}
sub get_haplotype_seq_region_ids {
my $dba_core = shift;
# The haplotype regions have attribs 'non reference'. So do the LRGs however, so filter by name to exclude these
my $stmt = qq{
SELECT
sr.seq_region_id
FROM
seq_region sr JOIN
seq_region_attrib sra ON (
sra.seq_region_id = sr.seq_region_id
) JOIN
attrib_type at ON (
at.attrib_type_id = sra.attrib_type_id
)
WHERE
sr.name NOT LIKE 'lrg%' AND
at.name LIKE 'non reference'
};
my $haplotype_ids = $dba_core->dbc->db_handle->selectcol_arrayref($stmt);
return $haplotype_ids;
}
sub get_range_condition {
my $lower_id = shift;
my $upper_id = shift;
my $alias = shift;
return " 1 " unless (defined($lower_id) && defined($upper_id));
return (defined($alias) ? " $alias\." : " ") . qq{variation_id BETWEEN $lower_id AND $upper_id };
}
sub get_source_condition {
my $ids = shift;
my $alias = shift;
return " 1 " unless (defined($ids) && scalar(@{$ids}));
my $condition = " (" . (defined($alias) ? "$alias\." : "") . "source_id = " . join(" OR " . (defined($alias) ? "$alias\." : "") . "source_id = ",@{$ids}) . ") ";
return $condition;
}
sub get_failed_description {
my $dba = shift;
my $ids = shift;
my $condition = " 1 ";
if (defined($ids) && scalar(@{$ids})) {
$condition = " failed_description_id IN (" . join(",",@{$ids}) . ") ";
}
my $stmt = qq{
SELECT
failed_description_id,
description
FROM
failed_description
WHERE
$condition
};
# Get a hashref of the descriptions with the failed_description_id as key
my $description = $dba->dbc->db_handle->selectall_hashref($stmt,'failed_description_id');
return $description;
}
=head
# Loop over the failed_description_ids and for each, call the corresponding subroutine. Each check will return a hashref with arrayrefs of failed variation_ids and allele_ids, respectively, and we write these to the corresponding dump file.
foreach my $failed_description_id (keys(%{$failed_description})) {
# Print some progress information to stdout
print STDOUT Progress::location() . "\tFlagging variations/alleles for '" . $failed_description->{$failed_description_id}{'description'} . "' (failed_description_id = $failed_description_id)\n";
# Warn and skip if we don't know how to perform this check
unless (exists($PREDICATE{$failed_description_id})) {
warn ("Can not determine the corresponding subroutine to use for consistency check '" . $failed_description->{$failed_description_id}{'description'} . "' (failed_description_id = $failed_description_id). Skipping");
next;
}
# Call the checking subroutine
my $routine = $PREDICATE{$failed_description_id};
my $flagged = $routine->($dba,$lower_id,$upper_id,$dba_core);
# Loop over the flagged variations and alleles and write them to the dump files
foreach my $type (('variation','allele')) {
# Get the ids that were returned
my $ids = $flagged->{$type} || [];
# If no ids were flagged, skip
next unless (scalar(@{$ids}));
# Print some progress information to stdout
print STDOUT Progress::location() . "\tDumping flagged " . $type . "s to loadfile\n";
# Open the loadfile (append) and get a lock on it
open(LOAD,">>",$loadfile->{$type}) or die ("Could not open loadfile " . $loadfile->{$type} . " for writing");
flock(LOAD,LOCK_EX);
# Write the ids and the failed_description_id to the load file
while (my $id = shift(@{$ids})) {
print LOAD join("\t",($id,$failed_description_id)) . "\n";
}
close(LOAD);
}
}
sub get_haplotype_condition {
my $dba_core = shift;
my $haplotype_seq_region_ids = get_haplotype_seq_region_ids($dba_core);
return " 1 " unless (defined($haplotype_seq_region_ids) && scalar(@{$haplotype_seq_region_ids}));
return " seq_region_id NOT IN (" . join(",",@{$haplotype_seq_region_ids}) . ") ";
}
# Check if a variation is mapped to more than the maximum allowed number of (non-haplotype) genomic locations
sub multiple_mappings {
my $dba = shift;
my $lower_id = shift;
my $upper_id = shift;
my $dba_core = shift;
# If a range was specified, create a condition on it
my $condition = get_range_condition($lower_id,$upper_id);
$condition .= " AND " . get_haplotype_condition($dba_core);
my $stmt = qq{
SELECT
variation_id
FROM
variation_feature
WHERE
$condition
};
# Add the group and condition on maximum mappings
$stmt .= qq{
GROUP BY
variation_id
HAVING
COUNT(*) > $MAX_MAP_WEIGHT
};
# Execute the query and get the result
my $flagged_variation_ids = $dba->dbc->db_handle->selectcol_arrayref($stmt);
# Return a hashref with the result
return {'variation' => $flagged_variation_ids};
}
# Check whether the variation has at least one allele that matches the reference
sub reference_mismatch {
my $dba = shift;
my $lower_id = shift;
my $upper_id = shift;
# If a range was specified, create a condition on it
my $condition = get_range_condition($lower_id,$upper_id);
# Statement to get the variation alleles
my $stmt = qq{
SELECT
allele_id,
subsnp_id,
variation_id,
allele
FROM
allele
WHERE
$condition
ORDER BY
variation_id
};
my $sth = $dba->dbc->prepare($stmt);
# Statement to get the reference sequence for each variation_feature
$stmt = qq{
SELECT
ras.ref_allele,
ra.seq_region_strand
FROM
variation_feature vf JOIN
tmp_ref_allele ra ON (
ra.variation_feature_id = vf.variation_feature_id
) JOIN
tmp_ref_allele_seq ras ON (
ras.ref_allele_seq_id = ra.ref_allele_seq_id
)
WHERE
vf.variation_id = ?
};
my $seq_sth = $dba->dbc->prepare($stmt);
# Get the alleles
$sth->execute();
my ($allele_id,$subsnp_id,$variation_id,$allele,$refseq,$refstrand,$last_variation_id);
$sth->bind_columns(\$allele_id,\$subsnp_id,\$variation_id,\$allele);
$last_variation_id = -1;
while ($sth->fetch()) {
# If we switched variation, get the possible reference sequences
if ($variation_id != $last_variation_id) {
$seq_sth->execute($variation_id);
$seq_sth->bind_columns(\$refseq,\$refstrand);
}
}
}
# Check that a variation does not have more than the maximum allowed number of single-nucleotide alleles (based on subsnps)
sub multiple_alleles {
my $dba = shift;
my $lower_id = shift;
my $upper_id = shift;
# If a range was specified, create a condition on it
my $condition = get_range_condition($lower_id,$upper_id);
# Statement to get the alleles for the variation_id range
my $stmt = qq{
SELECT
variation_id,
allele
FROM
allele
WHERE
$condition
ORDER BY
variation_id
};
my $sth = $dba->dbc->prepare($stmt);
# Execute the statement and bind the result columns
$sth->execute();
my ($variation_id,$allele,$last_variation_id,$last_flagged);
$sth->bind_columns(\$variation_id,\$allele);
my %alleles;
$last_variation_id = -1;
# An array to hold the variation_id for flagged variations
my @flagged;
# Loop over the alleles
while ($sth->fetch()) {
# Reset the allele hash and the flagged status if we are moving to a new variation_id
if ($variation_id != $last_variation_id) {
%alleles = ();
$last_flagged = 0;
$last_variation_id = $variation_id;
}
# Skip if we have already flagged this variation
next if ($last_flagged);
# If this is a single bp allele and it's not a deletion, add it to the hash
if (length($allele) == 1 && $allele ne '-') {
$alleles{$allele}++;
# Check the size of the hash and flag the variation if it is greater than the maximum number of allowed alleles
if (scalar(keys(%alleles)) > $MAX_ALLELES) {
push(@flagged,$variation_id);
$last_flagged = 1;
}
}
}
# Return the flagged variations
return {'variation' => \@flagged};
}
# Check that the variation has a mapping to the genome
sub no_mapping {
my $dba = shift;
my $lower_id = shift;
my $upper_id = shift;
# If a range was specified, create a condition on it
my $condition = get_range_condition($lower_id,$upper_id,"v");
# Statement to check for unmapped variations
my $stmt = qq{
SELECT
v.variation_id
FROM
variation v LEFT JOIN
variation_feature vf ON (
vf.variation_id = v.variation_id
)
WHERE
$condition AND
vf.variation_feature_id IS NULL
};
# Execute the query and get the result
my $flagged_variation_ids = $dba->dbc->db_handle->selectcol_arrayref($stmt);
return {'variation' => $flagged_variation_ids};
}
# Check if this is a 'NoVariation'
sub no_variation {
my $dba = shift;
my $lower_id = shift;
my $upper_id = shift;
# If a range was specified, create a condition on it
my $condition = get_range_condition($lower_id,$upper_id);
# Statement to get the alleles that are 'NoVariation'
my $stmt = qq{
SELECT
allele_id,
variation_id
FROM
allele
WHERE
$condition AND
allele LIKE 'novariation'
};
return _check_allele_variation($dba,$stmt);
}
# Check that there are no disallowed (e.g. 'N') alleles
sub disallowed_alleles {
my $dba = shift;
my $lower_id = shift;
my $upper_id = shift;
# If a range was specified, create a condition on it
my $condition = get_range_condition($lower_id,$upper_id);
# Define a number of regexps for things that we do allow and catch the rest
my $normal_variation = '^[-ACGT]+$';
my $microsatellite = '^\\([ACGTMRWSYKVHDBXN]+\\)[0-9]+';
my $novariation = '^NOVARIATION$';
my $hgmd = '^HGMD_MUTATION$';
# Statement to catch non-accepted alleles
my $stmt = qq{
SELECT
allele_id,
variation_id
FROM
allele
WHERE
$condition AND
allele NOT REGEXP '$normal_variation' AND
allele NOT REGEXP '$microsatellite' AND
allele NOT REGEXP '$novariation' AND
allele NOT REGEXP '$hgmd'
};
return _check_allele_variation($dba,$stmt);
}
# 'internal' function that checks alleles and whether all alleles for the corresponding variation have failed
sub _check_allele_variation {
my $dba = shift;
my $stmt = shift;
my $sth = $dba->dbc->prepare($stmt);
$sth->execute();
my ($allele_id,$variation_id);
$sth->bind_columns(\$allele_id,\$variation_id);
my %variation_ids;
my @flagged_alleles;
my @flagged_variations;
# Loop over the alleles and flag them. At the same time, count the number of alleles for each variation_id that has this allele string
while ($sth->fetch()) {
push(@flagged_alleles,$allele_id);
$variation_ids{$variation_id}++;
}
# In order to determine if the variation should be flagged as well as the allele, count the number of alleles for each variation and see if it corresponds to the number of failed alleles
$stmt = qq{
SELECT
COUNT(*)
FROM
allele
WHERE
variation_id = ?
};
$sth = $dba->dbc->prepare($stmt);
# Loop over the variation_ids concerned
while (my ($variation_id,$count) = each(%variation_ids)) {
$sth->execute($variation_id);
# If the count matches the number of alleles, we should flag the variation as well
if ($count == $sth->fetchrow_arrayref()->[0]) {
push(@flagged_variations,$variation_id);
}
}
# Return the flagged variations and alleles
return {'variation' => \@flagged_variations, 'allele' => \@flagged_alleles};
}
=cut
|
Java
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"fmt"
"io/ioutil"
"os"
"strings"
)
var (
yamlPaths = flag.String("yaml", "", "comma-separated list of input YAML files")
printText = flag.Bool("print-text", false, "print generated proto in text format to stdout")
outputPath = flag.String("output", "", "output path to save generated protobuf data")
)
func errExit(format string, a ...interface{}) {
fmt.Fprintf(os.Stderr, format, a...)
os.Exit(1)
}
func main() {
flag.Parse()
yamlFiles := strings.Split(*yamlPaths, ",")
if len(yamlFiles) == 0 || yamlFiles[0] == "" {
errExit("Must specify one or more YAML files with --yaml\n")
}
if !*printText && *outputPath == "" {
errExit("Must set --print-text or --output\n")
}
if *printText && *outputPath != "" {
errExit("Cannot set both --print-text and --output\n")
}
var c Config
for _, file := range yamlFiles {
b, err := ioutil.ReadFile(file)
if err != nil {
errExit("IO Error : Cannot Read File %s : %v\n", file, err)
}
if err = c.Update(b); err != nil {
errExit("Error parsing file %s : %v\n", file, err)
}
}
if *printText {
if err := c.MarshalText(os.Stdout); err != nil {
errExit("err printing proto: %v", err)
}
} else {
b, err := c.MarshalBytes()
if err != nil {
errExit("err encoding proto: %v", err)
}
if err = ioutil.WriteFile(*outputPath, b, 0644); err != nil {
errExit("IO Error : Cannot Write File %v\n", outputPath)
}
}
}
|
Java
|
package org.galaxy.myhttp;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* To work on unit tests, switch the Test Artifact in the Build Variants view.
*/
public class ExampleUnitTest {
@Test
public void addition_isCorrect() throws Exception {
assertEquals(4, 2 + 2);
}
}
|
Java
|
---
layout: post
title: 'kazam, a Screen Recording Tool for Ubuntu'
date: '2017-05-07'
header-img: "img/post-bg-unix.jpg"
tags:
- Ubuntu
author: 'Bro Qiang'
---
# Screen Recording Software for Ubuntu
There are plenty of screen recorders available, but I personally find kazam the most comfortable to use.
## Installation
```shell
sudo apt-get install kazam
```
## Usage
Press the shortcut `Alt+F2` and type `kazam` to launch the program.
## Keyboard Shortcuts
- Start recording - `Win+R`
- Pause - `Win+P`
- Finish and save - `Win+F`
## Fixing Heavy Microphone Noise
Use the alsamixer tool:
```shell
$ sudo alsamixer
```
Change every setting shown in red to green. After I did that, though, my microphone volume became very low; I'm not sure whether Ubuntu's support is the problem or my microphone is just bad...
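If you'd rather not use the interactive alsamixer screen, the capture level can usually also be set from the command line with `amixer`. The snippet below is only a sketch: it assumes the default ALSA sound card, and control names such as `Capture` and `Mic Boost` vary between machines (list yours with `amixer scontrols`).
```shell
# Assumes the default sound card; actual control names depend on your hardware
amixer set Capture 80% cap      # raise the capture volume and enable capturing
amixer set 'Mic Boost' 2        # if this control exists, a small boost can help a quiet mic
```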
|
Java
|
<Global.Microsoft.VisualBasic.CompilerServices.DesignerGenerated()> _
Partial Class Scanning_keluar_Edit
Inherits System.Windows.Forms.Form
'Form overrides dispose to clean up the component list.
<System.Diagnostics.DebuggerNonUserCode()> _
Protected Overrides Sub Dispose(ByVal disposing As Boolean)
Try
If disposing AndAlso components IsNot Nothing Then
components.Dispose()
End If
Finally
MyBase.Dispose(disposing)
End Try
End Sub
'Required by the Windows Form Designer
Private components As System.ComponentModel.IContainer
'NOTE: The following procedure is required by the Windows Form Designer
'It can be modified using the Windows Form Designer.
'Do not modify it using the code editor.
<System.Diagnostics.DebuggerStepThrough()> _
Private Sub InitializeComponent()
Me.components = New System.ComponentModel.Container
Dim resources As System.ComponentModel.ComponentResourceManager = New System.ComponentModel.ComponentResourceManager(GetType(Scanning_keluar_Edit))
Me.LBLNama = New System.Windows.Forms.Label
Me.ListView1 = New System.Windows.Forms.ListView
Me.FileName = New System.Windows.Forms.ColumnHeader
Me.Lokasi = New System.Windows.Forms.ColumnHeader
Me.BTNScan = New System.Windows.Forms.Button
Me.GroupBox1 = New System.Windows.Forms.GroupBox
Me.BTNHapus = New System.Windows.Forms.Button
Me.BTNSImpan = New System.Windows.Forms.Button
Me.Label1 = New System.Windows.Forms.Label
Me._twain32 = New Saraff.Twain.Twain32(Me.components)
Me.Label12 = New System.Windows.Forms.Label
Me.Label23 = New System.Windows.Forms.Label
Me.GroupBox3 = New System.Windows.Forms.GroupBox
Me.BTNTutup = New System.Windows.Forms.Button
Me.picboxDeleteAll = New System.Windows.Forms.PictureBox
Me.picboxDelete = New System.Windows.Forms.PictureBox
Me.PictureBox1 = New System.Windows.Forms.PictureBox
Me.ToolTip1 = New System.Windows.Forms.ToolTip(Me.components)
Me.GroupBox1.SuspendLayout()
Me.GroupBox3.SuspendLayout()
CType(Me.picboxDeleteAll, System.ComponentModel.ISupportInitialize).BeginInit()
CType(Me.picboxDelete, System.ComponentModel.ISupportInitialize).BeginInit()
CType(Me.PictureBox1, System.ComponentModel.ISupportInitialize).BeginInit()
Me.SuspendLayout()
'
'LBLNama
'
Me.LBLNama.BorderStyle = System.Windows.Forms.BorderStyle.FixedSingle
Me.LBLNama.Font = New System.Drawing.Font("Microsoft Sans Serif", 9.75!, System.Drawing.FontStyle.Regular, System.Drawing.GraphicsUnit.Point, CType(0, Byte))
Me.LBLNama.Location = New System.Drawing.Point(94, 34)
Me.LBLNama.Name = "LBLNama"
Me.LBLNama.Size = New System.Drawing.Size(133, 23)
Me.LBLNama.TabIndex = 5
Me.ToolTip1.SetToolTip(Me.LBLNama, "Nama File")
'
'ListView1
'
Me.ListView1.Columns.AddRange(New System.Windows.Forms.ColumnHeader() {Me.FileName, Me.Lokasi})
Me.ListView1.Dock = System.Windows.Forms.DockStyle.Bottom
Me.ListView1.Location = New System.Drawing.Point(3, 19)
Me.ListView1.Name = "ListView1"
Me.ListView1.Size = New System.Drawing.Size(230, 151)
Me.ListView1.TabIndex = 0
Me.ListView1.UseCompatibleStateImageBehavior = False
Me.ListView1.View = System.Windows.Forms.View.Details
'
'FileName
'
Me.FileName.Text = "Nama File"
'
'Lokasi
'
Me.Lokasi.Text = "Lokasi"
'
'BTNScan
'
Me.BTNScan.Font = New System.Drawing.Font("Microsoft Sans Serif", 9.75!, System.Drawing.FontStyle.Regular, System.Drawing.GraphicsUnit.Point, CType(0, Byte))
Me.BTNScan.Location = New System.Drawing.Point(65, 74)
Me.BTNScan.Name = "BTNScan"
Me.BTNScan.Size = New System.Drawing.Size(122, 43)
Me.BTNScan.TabIndex = 6
Me.BTNScan.Text = "Scan"
Me.ToolTip1.SetToolTip(Me.BTNScan, "Scan")
Me.BTNScan.UseVisualStyleBackColor = True
'
'GroupBox1
'
Me.GroupBox1.Controls.Add(Me.BTNScan)
Me.GroupBox1.Controls.Add(Me.BTNHapus)
Me.GroupBox1.Controls.Add(Me.LBLNama)
Me.GroupBox1.Controls.Add(Me.BTNSImpan)
Me.GroupBox1.Controls.Add(Me.Label1)
Me.GroupBox1.Font = New System.Drawing.Font("Microsoft Sans Serif", 9.0!, System.Drawing.FontStyle.Regular, System.Drawing.GraphicsUnit.Point, CType(0, Byte))
Me.GroupBox1.Location = New System.Drawing.Point(347, 11)
Me.GroupBox1.Name = "GroupBox1"
Me.GroupBox1.Size = New System.Drawing.Size(236, 184)
Me.GroupBox1.TabIndex = 169
Me.GroupBox1.TabStop = False
Me.GroupBox1.Text = "Scanning"
'
'BTNHapus
'
Me.BTNHapus.Font = New System.Drawing.Font("Microsoft Sans Serif", 9.75!, System.Drawing.FontStyle.Regular, System.Drawing.GraphicsUnit.Point, CType(0, Byte))
Me.BTNHapus.Location = New System.Drawing.Point(128, 136)
Me.BTNHapus.Name = "BTNHapus"
Me.BTNHapus.Size = New System.Drawing.Size(99, 31)
Me.BTNHapus.TabIndex = 7
Me.BTNHapus.Text = "Hapus"
Me.ToolTip1.SetToolTip(Me.BTNHapus, "Hapus Hasil Scan")
Me.BTNHapus.UseVisualStyleBackColor = True
'
'BTNSImpan
'
Me.BTNSImpan.Font = New System.Drawing.Font("Microsoft Sans Serif", 9.75!, System.Drawing.FontStyle.Regular, System.Drawing.GraphicsUnit.Point, CType(0, Byte))
Me.BTNSImpan.Location = New System.Drawing.Point(21, 136)
Me.BTNSImpan.Name = "BTNSImpan"
Me.BTNSImpan.Size = New System.Drawing.Size(93, 31)
Me.BTNSImpan.TabIndex = 6
Me.BTNSImpan.Text = "Simpan"
Me.ToolTip1.SetToolTip(Me.BTNSImpan, "Simpan Hasil Scan")
Me.BTNSImpan.UseVisualStyleBackColor = True
'
'Label1
'
Me.Label1.AutoSize = True
Me.Label1.Font = New System.Drawing.Font("Microsoft Sans Serif", 9.75!, System.Drawing.FontStyle.Regular, System.Drawing.GraphicsUnit.Point, CType(0, Byte))
Me.Label1.Location = New System.Drawing.Point(18, 38)
Me.Label1.Name = "Label1"
Me.Label1.Size = New System.Drawing.Size(70, 16)
Me.Label1.TabIndex = 4
Me.Label1.Text = "Nama File"
'
'_twain32
'
Me._twain32.AppProductName = "Saraff.Twain"
Me._twain32.Parent = Nothing
'
'Label12
'
Me.Label12.AutoSize = True
Me.Label12.BackColor = System.Drawing.Color.WhiteSmoke
Me.Label12.Font = New System.Drawing.Font("Segoe UI", 9.0!, System.Drawing.FontStyle.Regular, System.Drawing.GraphicsUnit.Point, CType(0, Byte))
Me.Label12.Location = New System.Drawing.Point(363, 387)
Me.Label12.Margin = New System.Windows.Forms.Padding(4, 0, 4, 0)
Me.Label12.Name = "Label12"
Me.Label12.Size = New System.Drawing.Size(106, 15)
Me.Label12.TabIndex = 171
Me.Label12.Text = "Delete && Delete All"
'
'Label23
'
Me.Label23.AutoSize = True
Me.Label23.Location = New System.Drawing.Point(472, 198)
Me.Label23.Name = "Label23"
Me.Label23.Size = New System.Drawing.Size(45, 13)
Me.Label23.TabIndex = 174
Me.Label23.Text = "Label23"
Me.Label23.Visible = False
'
'GroupBox3
'
Me.GroupBox3.Controls.Add(Me.ListView1)
Me.GroupBox3.Location = New System.Drawing.Point(347, 211)
Me.GroupBox3.Name = "GroupBox3"
Me.GroupBox3.Size = New System.Drawing.Size(236, 173)
Me.GroupBox3.TabIndex = 170
Me.GroupBox3.TabStop = False
Me.GroupBox3.Text = "Daftar Gambar"
'
'BTNTutup
'
Me.BTNTutup.DialogResult = System.Windows.Forms.DialogResult.Cancel
Me.BTNTutup.Font = New System.Drawing.Font("Microsoft Sans Serif", 9.75!, System.Drawing.FontStyle.Regular, System.Drawing.GraphicsUnit.Point, CType(0, Byte))
Me.BTNTutup.Location = New System.Drawing.Point(489, 405)
Me.BTNTutup.Name = "BTNTutup"
Me.BTNTutup.Size = New System.Drawing.Size(91, 34)
Me.BTNTutup.TabIndex = 168
Me.BTNTutup.Text = "Selesai"
Me.ToolTip1.SetToolTip(Me.BTNTutup, "Selesai")
Me.BTNTutup.UseVisualStyleBackColor = True
'
'picboxDeleteAll
'
Me.picboxDeleteAll.Image = Global.SIMARSIP.My.Resources.Resources.picboxDeleteAll_Leave
Me.picboxDeleteAll.Location = New System.Drawing.Point(412, 405)
Me.picboxDeleteAll.Name = "picboxDeleteAll"
Me.picboxDeleteAll.Size = New System.Drawing.Size(61, 36)
Me.picboxDeleteAll.TabIndex = 173
Me.picboxDeleteAll.TabStop = False
Me.picboxDeleteAll.Tag = "Delete All"
Me.ToolTip1.SetToolTip(Me.picboxDeleteAll, "Hapus Semua")
'
'picboxDelete
'
Me.picboxDelete.Image = Global.SIMARSIP.My.Resources.Resources.picboxDelete_Leave
Me.picboxDelete.Location = New System.Drawing.Point(353, 405)
Me.picboxDelete.Name = "picboxDelete"
Me.picboxDelete.Size = New System.Drawing.Size(60, 36)
Me.picboxDelete.TabIndex = 172
Me.picboxDelete.TabStop = False
Me.picboxDelete.Tag = "Delete Current Image"
Me.ToolTip1.SetToolTip(Me.picboxDelete, "Hapus")
'
'PictureBox1
'
Me.PictureBox1.BackColor = System.Drawing.SystemColors.ControlDark
Me.PictureBox1.BorderStyle = System.Windows.Forms.BorderStyle.FixedSingle
Me.PictureBox1.ImageLocation = ""
Me.PictureBox1.Location = New System.Drawing.Point(13, 13)
Me.PictureBox1.Name = "PictureBox1"
Me.PictureBox1.Size = New System.Drawing.Size(316, 429)
Me.PictureBox1.SizeMode = System.Windows.Forms.PictureBoxSizeMode.Zoom
Me.PictureBox1.TabIndex = 167
Me.PictureBox1.TabStop = False
'
'Scanning_keluar_Edit
'
Me.AutoScaleDimensions = New System.Drawing.SizeF(6.0!, 13.0!)
Me.AutoScaleMode = System.Windows.Forms.AutoScaleMode.Font
Me.AutoSizeMode = System.Windows.Forms.AutoSizeMode.GrowAndShrink
Me.CancelButton = Me.BTNTutup
Me.ClientSize = New System.Drawing.Size(596, 452)
Me.ControlBox = False
Me.Controls.Add(Me.picboxDeleteAll)
Me.Controls.Add(Me.picboxDelete)
Me.Controls.Add(Me.PictureBox1)
Me.Controls.Add(Me.GroupBox1)
Me.Controls.Add(Me.Label12)
Me.Controls.Add(Me.Label23)
Me.Controls.Add(Me.GroupBox3)
Me.Controls.Add(Me.BTNTutup)
Me.Icon = CType(resources.GetObject("$this.Icon"), System.Drawing.Icon)
Me.Name = "Scanning_keluar_Edit"
Me.StartPosition = System.Windows.Forms.FormStartPosition.CenterScreen
Me.Text = "Scanning"
Me.GroupBox1.ResumeLayout(False)
Me.GroupBox1.PerformLayout()
Me.GroupBox3.ResumeLayout(False)
CType(Me.picboxDeleteAll, System.ComponentModel.ISupportInitialize).EndInit()
CType(Me.picboxDelete, System.ComponentModel.ISupportInitialize).EndInit()
CType(Me.PictureBox1, System.ComponentModel.ISupportInitialize).EndInit()
Me.ResumeLayout(False)
Me.PerformLayout()
End Sub
Friend WithEvents LBLNama As System.Windows.Forms.Label
Private WithEvents picboxDeleteAll As System.Windows.Forms.PictureBox
Private WithEvents picboxDelete As System.Windows.Forms.PictureBox
Friend WithEvents PictureBox1 As System.Windows.Forms.PictureBox
Friend WithEvents ListView1 As System.Windows.Forms.ListView
Friend WithEvents FileName As System.Windows.Forms.ColumnHeader
Friend WithEvents Lokasi As System.Windows.Forms.ColumnHeader
Friend WithEvents BTNScan As System.Windows.Forms.Button
Friend WithEvents GroupBox1 As System.Windows.Forms.GroupBox
Friend WithEvents BTNHapus As System.Windows.Forms.Button
Friend WithEvents BTNSImpan As System.Windows.Forms.Button
Friend WithEvents Label1 As System.Windows.Forms.Label
Friend WithEvents _twain32 As Saraff.Twain.Twain32
Private WithEvents Label12 As System.Windows.Forms.Label
Friend WithEvents Label23 As System.Windows.Forms.Label
Friend WithEvents GroupBox3 As System.Windows.Forms.GroupBox
Friend WithEvents BTNTutup As System.Windows.Forms.Button
Friend WithEvents ToolTip1 As System.Windows.Forms.ToolTip
End Class
|
Java
|
require_relative '../netapp_cmode'
Puppet::Type.type(:netapp_lun).provide(:cmode, :parent => Puppet::Provider::NetappCmode) do
@doc = "Manage Netapp Lun creation, modification and deletion. [Family: vserver]"
confine :feature => :posix
defaultfor :feature => :posix
netapp_commands :lunlist => {:api => 'lun-get-iter', :iter => true, :result_element => 'attributes-list'}
netapp_commands :luncreate => 'lun-create-by-size'
netapp_commands :lundestroy => 'lun-destroy'
netapp_commands :lunresize => 'lun-resize'
netapp_commands :lunonline => 'lun-online'
netapp_commands :lunoffline => 'lun-offline'
mk_resource_methods
def self.instances
Puppet.debug("Puppet::Provider::Netapp_lun.cmode: Got to self.instances.")
luns = []
# Get a list of all LUNs
results = lunlist() || []
# Iterate through the results
results.each do |lun|
lun_path = lun.child_get_string('path')
Puppet.debug("Puppet::Provider::Netapp_lun.cmode: Processing lun #{lun_path}.")
# Construct initial hash for lun
lun_hash = {
:name => lun_path,
:ensure => :present
}
# Grab additional elements
# Lun state - Need to map true/false to online/offline
lun_state = lun.child_get_string('online')
if lun_state == 'true'
lun_hash[:state] = 'online'
else
lun_hash[:state] = 'offline'
end
# Get size
lun_hash[:size] = lun.child_get_string('size')
# Create the instance and add to luns array
Puppet.debug("Puppet::Provider::Netapp_lun.cmode: Creating instance for #{lun_path}\n Contents = #{lun_hash.inspect}.")
luns << new(lun_hash)
end
# Return the final luns array
Puppet.debug("Puppet::Provider::Netapp_lun.cmode: Returning luns array.")
luns
end
def self.prefetch(resources)
Puppet.debug("Puppet::Provider::Netapp_lun.cmode: Got to self.prefetch.")
# Iterate over instances and match provider where relevant.
instances.each do |prov|
Puppet.debug("Prov.path = #{resources[prov.name]}. ")
if resource = resources[prov.name]
resource.provider = prov
end
end
end
def flush
Puppet.debug("Puppet::Provider::Netapp_lun.cmode: flushing Netapp Lun #{@resource[:path]}.")
# Are we updating or destroying?
Puppet.debug("Puppet::Provider::Netapp_lun.cmode: required resource state = #{@property_hash[:ensure]}")
if @property_hash[:ensure] == :absent
Puppet.debug("Puppet::Provider::Netapp_lun.cmode: Ensure is absent. Destroying...")
# Deleting the lun
lundestroy('path', @resource[:path])
Puppet.debug("Puppet::Provider::Netapp_lun.cmode: Lun #{@resource[:path]} has been destroyed successfully. ")
return true
end
end
# Set lun size
def size=(value)
Puppet.debug("Puppet::Provider::Netapp_lun.cmode size=: Setting lun size for #{@resource[:path]} to #{@resource[:size]}.")
if @resource[:force].nil?
force = false
else
force = @resource[:force]
end
# Resize the lun
result = lunresize('force', force, 'path', @resource[:path], 'size', @resource[:size])
if result.results_status() != "failed"
Puppet.debug("Puppet::Provider::Netapp_lun.cmode size=: Lun has been resized.")
return true
end
end
# Set lun state
def state=(value)
Puppet.debug("Puppet::Provider::Netapp_lun.cmode state=: Setting lun state for #{@resource[:path]} to #{@resource[:state]}.")
case @resource[:state]
when :online
Puppet.debug("Puppet::Provider::Netapp_lun.cmode state=: Onlineing lun.")
result = lunonline('path', @resource[:path])
Puppet.debug("Puppet::Provider::Netapp_lun.cmode state=: Lun has been onlined.")
return true
when :offline
Puppet.debug("Puppet::Provider::Netapp_lun.cmode state=: Offlining lun.")
result = lunoffline('path', @resource[:path])
Puppet.debug("Puppet::Provider::Netapp_lun.cmode state=: Lun has been offlined.")
return true
end
end
def create
Puppet.debug("Puppet::Provider::Netapp_lun.cmode: creating Netapp Lun #{@resource[:path]}.")
# Lun create args
luncreate_args = []
luncreate_args << 'path' << @resource[:path]
luncreate_args << 'size' << @resource[:size]
luncreate_args << 'class' << @resource[:lunclass]
luncreate_args << 'ostype' << @resource[:ostype]
luncreate_args << 'space-reservation-enabled' << @resource[:spaceresenabled]
# Optional fields
luncreate_args << 'prefix-size' << @resource[:prefixsize] unless @resource[:prefixsize].nil?
luncreate_args << 'qos-policy-group' << @resource[:qospolicygroup] unless @resource[:qospolicygroup].nil?
# Create the lun
result = luncreate(*luncreate_args)
# Lun created successfully
Puppet.debug("Puppet::Provider::Netapp_lun.cmode: Lun #{@resource[:path]} created successfully.")
return true
end
def destroy
Puppet.debug("Puppet::Provider::Netapp_lun.cmode: destroying Netapp Lun #{@resource[:path]}.")
@property_hash[:ensure] = :absent
end
def exists?
Puppet.debug("Puppet::Provider::Netapp_lun.cmode: checking existance of Netapp Lun #{@resource[:path]}.")
@property_hash[:ensure] == :present
end
end
|
Java
|
package org.commcare;
import org.commcare.models.database.UnencryptedHybridFileBackedSqlStorage;
import org.commcare.models.database.UnencryptedHybridFileBackedSqlStorageMock;
import org.javarosa.core.services.storage.Persistable;
/**
* Delegator around CommCareApp allowing the test suite to override logic.
*
* @author Phillip Mates (pmates@dimagi.com).
*/
public class CommCareTestApp extends CommCareApp {
private final CommCareApp app;
public CommCareTestApp(CommCareApp app) {
super(app.getAppRecord());
fileRoot = app.fileRoot;
setAppResourceState(app.getAppResourceState());
this.app = app;
}
@Override
public <T extends Persistable> UnencryptedHybridFileBackedSqlStorage<T> getFileBackedStorage(String name, Class<T> c) {
return new UnencryptedHybridFileBackedSqlStorageMock<>(name, c, app.buildAndroidDbHelper(), app);
}
}
|
Java
|
// Code generated by go-swagger; DO NOT EDIT.
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// SendPhotoLinkBody send photo link body
// swagger:model SendPhotoLinkBody
type SendPhotoLinkBody struct {
// caption
Caption string `json:"caption,omitempty"`
// chat id
// Required: true
ChatID interface{} `json:"chat_id"`
// disable notification
DisableNotification bool `json:"disable_notification,omitempty"`
// photo
// Required: true
Photo *string `json:"photo"`
// reply markup
ReplyMarkup interface{} `json:"reply_markup,omitempty"`
// reply to message id
ReplyToMessageID int64 `json:"reply_to_message_id,omitempty"`
}
// Validate validates this send photo link body
func (m *SendPhotoLinkBody) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateChatID(formats); err != nil {
// prop
res = append(res, err)
}
if err := m.validatePhoto(formats); err != nil {
// prop
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *SendPhotoLinkBody) validateChatID(formats strfmt.Registry) error {
return nil
}
func (m *SendPhotoLinkBody) validatePhoto(formats strfmt.Registry) error {
if err := validate.Required("photo", "body", m.Photo); err != nil {
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *SendPhotoLinkBody) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *SendPhotoLinkBody) UnmarshalBinary(b []byte) error {
var res SendPhotoLinkBody
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
|
Java
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="de">
<head>
<!-- Generated by javadoc (version 1.7.0_17) on Tue May 14 03:45:04 CEST 2013 -->
<title>Uses of Class com.badlogic.gdx.net.HttpStatus (libgdx API)</title>
<meta name="date" content="2013-05-14">
<link rel="stylesheet" type="text/css" href="../../../../../stylesheet.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class com.badlogic.gdx.net.HttpStatus (libgdx API)";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../com/badlogic/gdx/net/HttpStatus.html" title="class in com.badlogic.gdx.net">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../../../../../overview-tree.html">Tree</a></li>
<li><a href="../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../help-doc.html">Help</a></li>
</ul>
<div class="aboutLanguage"><em>
libgdx API
<style>
body, td, th { font-family:Helvetica, Tahoma, Arial, sans-serif; font-size:10pt }
pre, code, tt { font-size:9pt; font-family:Lucida Console, Courier New, sans-serif }
h1, h2, h3, .FrameTitleFont, .FrameHeadingFont, .TableHeadingColor font { font-size:105%; font-weight:bold }
.TableHeadingColor { background:#EEEEFF; }
a { text-decoration:none }
a:hover { text-decoration:underline }
a:link, a:visited { color:blue }
table { border:0px }
.TableRowColor td:first-child { border-left:1px solid black }
.TableRowColor td { border:0px; border-bottom:1px solid black; border-right:1px solid black }
hr { border:0px; border-bottom:1px solid #333366; }
</style>
</em></div>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../index.html?com/badlogic/gdx/net/class-use/HttpStatus.html" target="_top">Frames</a></li>
<li><a href="HttpStatus.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h2 title="Uses of Class com.badlogic.gdx.net.HttpStatus" class="title">Uses of Class<br>com.badlogic.gdx.net.HttpStatus</h2>
</div>
<div class="classUseContainer">
<ul class="blockList">
<li class="blockList">
<table border="0" cellpadding="3" cellspacing="0" summary="Use table, listing packages, and an explanation">
<caption><span>Packages that use <a href="../../../../../com/badlogic/gdx/net/HttpStatus.html" title="class in com.badlogic.gdx.net">HttpStatus</a></span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Package</th>
<th class="colLast" scope="col">Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><a href="#com.badlogic.gdx">com.badlogic.gdx</a></td>
<td class="colLast"> </td>
</tr>
</tbody>
</table>
</li>
<li class="blockList">
<ul class="blockList">
<li class="blockList"><a name="com.badlogic.gdx">
<!-- -->
</a>
<h3>Uses of <a href="../../../../../com/badlogic/gdx/net/HttpStatus.html" title="class in com.badlogic.gdx.net">HttpStatus</a> in <a href="../../../../../com/badlogic/gdx/package-summary.html">com.badlogic.gdx</a></h3>
<table border="0" cellpadding="3" cellspacing="0" summary="Use table, listing methods, and an explanation">
<caption><span>Methods in <a href="../../../../../com/badlogic/gdx/package-summary.html">com.badlogic.gdx</a> that return <a href="../../../../../com/badlogic/gdx/net/HttpStatus.html" title="class in com.badlogic.gdx.net">HttpStatus</a></span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Method and Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><code><a href="../../../../../com/badlogic/gdx/net/HttpStatus.html" title="class in com.badlogic.gdx.net">HttpStatus</a></code></td>
<td class="colLast"><span class="strong">Net.HttpResponse.</span><code><strong><a href="../../../../../com/badlogic/gdx/Net.HttpResponse.html#getStatus()">getStatus</a></strong>()</code>
<div class="block">Returns the <a href="../../../../../com/badlogic/gdx/net/HttpStatus.html" title="class in com.badlogic.gdx.net"><code>HttpStatus</code></a> containing the statusCode of the HTTP response.</div>
</td>
</tr>
</tbody>
</table>
</li>
</ul>
</li>
</ul>
</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../com/badlogic/gdx/net/HttpStatus.html" title="class in com.badlogic.gdx.net">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../../../../../overview-tree.html">Tree</a></li>
<li><a href="../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../help-doc.html">Help</a></li>
</ul>
<div class="aboutLanguage"><em>libgdx API</em></div>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../index.html?com/badlogic/gdx/net/class-use/HttpStatus.html" target="_top">Frames</a></li>
<li><a href="HttpStatus.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>
<div style="font-size:9pt"><i>
Copyright © 2010-2013 Mario Zechner (contact@badlogicgames.com), Nathan Sweet (admin@esotericsoftware.com)
</i></div>
</small></p>
</body>
</html>
|
Java
|
/**
* Copyright (C) 2014-2015 LinkedIn Corp. (pinot-core@linkedin.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.linkedin.pinot.core.startree;
import java.io.BufferedOutputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.joda.time.DateTime;
import org.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Objects;
import com.google.common.collect.BiMap;
import com.google.common.collect.HashBiMap;
import com.linkedin.pinot.common.data.DimensionFieldSpec;
import com.linkedin.pinot.common.data.MetricFieldSpec;
import com.linkedin.pinot.common.data.FieldSpec.DataType;
import com.linkedin.pinot.common.data.Schema;
import com.linkedin.pinot.common.utils.Pairs.IntPair;
import com.linkedin.pinot.core.data.GenericRow;
import com.linkedin.pinot.core.segment.creator.impl.V1Constants;
/**
* Uses a file to build the star tree. Each row is divided into dimensions and metrics; time is added to the dimension list.
* We use the split order to build the tree. In most cases, the split order is ranked by cardinality (descending order).
* The time column is either excluded from the split order or placed last, irrespective of its cardinality.
* This is a recursive algorithm where we branch on one dimension at every level.
*
* <b>Pseudo algo</b>
* <code>
*
* build(){
* let table(1,N) consists of N input rows
* table.sort(1,N) //sort the table on all dimensions, according to split order
* constructTree(table, 0, N, 0);
* }
* constructTree(table,start,end, level){
* splitDimensionName = dimensionsSplitOrder[level]
* groupByResult<dimName, length> = table.groupBy(dimensionsSplitOrder[level]); //returns the number of rows for each value in splitDimension
* int rangeStart = 0;
* for each ( entry<dimName,length> groupByResult){
* if(entry.length > minThreshold){
* constructTree(table, rangeStart, rangeStart + entry.length, level +1);
* }
* rangeStart = rangeStart + entry.length;
* updateStarTree() //add new child
* }
*
* //create a star tree node
*
* aggregatedRows = table.uniqueAfterRemovingAttributeAndAggregateMetrics(start,end, splitDimensionName);
* for (each row in aggregatedRows)
* table.add(row);
* if (aggregatedRows.size > minThreshold) {
* table.sort(end, end + aggregatedRows.size);
* constructStarTree(table, end, end + aggregatedRows.size, level +1);
* }
* }
* </code>
*/
public class OffHeapStarTreeBuilder implements StarTreeBuilder {
private static final Logger LOG = LoggerFactory.getLogger(OffHeapStarTreeBuilder.class);
File dataFile;
private DataOutputStream dataBuffer;
int rawRecordCount = 0;
int aggRecordCount = 0;
private List<String> dimensionsSplitOrder;
private Set<String> skipStarNodeCreationForDimensions;
private Set<String> skipMaterializationForDimensions;
private int maxLeafRecords;
private StarTree starTree;
private StarTreeIndexNode starTreeRootIndexNode;
private int numDimensions;
private int numMetrics;
private List<String> dimensionNames;
private List<String> metricNames;
private String timeColumnName;
private List<DataType> dimensionTypes;
private List<DataType> metricTypes;
private Map<String, Object> dimensionNameToStarValueMap;
private HashBiMap<String, Integer> dimensionNameToIndexMap;
private Map<String, Integer> metricNameToIndexMap;
private int dimensionSizeBytes;
private int metricSizeBytes;
private File outDir;
private Map<String, HashBiMap<Object, Integer>> dictionaryMap;
boolean debugMode = false;
private int[] sortOrder;
private int skipMaterializationCardinalityThreshold;
public void init(StarTreeBuilderConfig builderConfig) throws Exception {
Schema schema = builderConfig.schema;
timeColumnName = schema.getTimeColumnName();
this.dimensionsSplitOrder = builderConfig.dimensionsSplitOrder;
skipStarNodeCreationForDimensions = builderConfig.getSkipStarNodeCreationForDimensions();
skipMaterializationForDimensions = builderConfig.getSkipMaterializationForDimensions();
skipMaterializationCardinalityThreshold = builderConfig.getSkipMaterializationCardinalityThreshold();
this.maxLeafRecords = builderConfig.maxLeafRecords;
this.outDir = builderConfig.getOutDir();
if (outDir == null) {
outDir = new File(System.getProperty("java.io.tmpdir"), V1Constants.STAR_TREE_INDEX_DIR + "_" + DateTime.now());
}
LOG.debug("Index output directory:{}", outDir);
dimensionTypes = new ArrayList<>();
dimensionNames = new ArrayList<>();
dimensionNameToIndexMap = HashBiMap.create();
dimensionNameToStarValueMap = new HashMap<>();
dictionaryMap = new HashMap<>();
//READ DIMENSIONS COLUMNS
List<DimensionFieldSpec> dimensionFieldSpecs = schema.getDimensionFieldSpecs();
for (int index = 0; index < dimensionFieldSpecs.size(); index++) {
DimensionFieldSpec spec = dimensionFieldSpecs.get(index);
String dimensionName = spec.getName();
dimensionNames.add(dimensionName);
dimensionNameToIndexMap.put(dimensionName, index);
Object starValue;
starValue = getAllStarValue(spec);
dimensionNameToStarValueMap.put(dimensionName, starValue);
dimensionTypes.add(spec.getDataType());
HashBiMap<Object, Integer> dictionary = HashBiMap.create();
dictionaryMap.put(dimensionName, dictionary);
}
//treat time column as just another dimension, only difference is that we will never split on this dimension unless explicitly specified in split order
if (timeColumnName != null) {
dimensionNames.add(timeColumnName);
dimensionTypes.add(schema.getTimeFieldSpec().getDataType());
int index = dimensionNameToIndexMap.size();
dimensionNameToIndexMap.put(timeColumnName, index);
HashBiMap<Object, Integer> dictionary = HashBiMap.create();
dictionaryMap.put(schema.getTimeColumnName(), dictionary);
}
dimensionSizeBytes = dimensionNames.size() * Integer.SIZE / 8;
this.numDimensions = dimensionNames.size();
//READ METRIC COLUMNS
this.metricTypes = new ArrayList<>();
this.metricNames = new ArrayList<>();
this.metricNameToIndexMap = new HashMap<>();
this.metricSizeBytes = 0;
List<MetricFieldSpec> metricFieldSpecs = schema.getMetricFieldSpecs();
for (int index = 0; index < metricFieldSpecs.size(); index++) {
MetricFieldSpec spec = metricFieldSpecs.get(index);
String metricName = spec.getName();
metricNames.add(metricName);
metricNameToIndexMap.put(metricName, index);
DataType dataType = spec.getDataType();
metricTypes.add(dataType);
metricSizeBytes += dataType.size();
}
this.numMetrics = metricNames.size();
outDir.mkdirs();
dataFile = new File(outDir, "star-tree.buf");
dataBuffer = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(dataFile)));
//INITIALIZE THE ROOT NODE
this.starTreeRootIndexNode = new StarTreeIndexNode();
this.starTreeRootIndexNode.setDimensionName(StarTreeIndexNode.all());
this.starTreeRootIndexNode.setDimensionValue(StarTreeIndexNode.all());
this.starTreeRootIndexNode.setLevel(0);
LOG.debug("dimensionNames:{}", dimensionNames);
LOG.debug("metricNames:{}", metricNames);
}
/**
* Validate the split order by removing any dimensions that may be part of the skip materialization list.
* @param dimensionsSplitOrder
* @param skipMaterializationForDimensions
* @return
*/
private List<String> sanitizeSplitOrder(List<String> dimensionsSplitOrder,
Set<String> skipMaterializationForDimensions) {
List<String> validatedSplitOrder = new ArrayList<String>();
for (String dimension : dimensionsSplitOrder) {
if (skipMaterializationForDimensions == null || !skipMaterializationForDimensions.contains(dimension)) {
LOG.info("Adding dimension {} to split order", dimension);
validatedSplitOrder.add(dimension);
} else {
LOG.info(
"Dimension {} cannot be part of 'dimensionSplitOrder' and 'skipMaterializationForDimensions', removing it from split order",
dimension);
}
}
return validatedSplitOrder;
}
private Object getAllStarValue(DimensionFieldSpec spec) throws Exception {
switch (spec.getDataType()) {
case STRING:
return "ALL";
case BOOLEAN:
case BYTE:
case CHAR:
case DOUBLE:
case FLOAT:
case INT:
case LONG:
return spec.getDefaultNullValue();
case OBJECT:
case SHORT:
case DOUBLE_ARRAY:
case CHAR_ARRAY:
case FLOAT_ARRAY:
case INT_ARRAY:
case LONG_ARRAY:
case SHORT_ARRAY:
case STRING_ARRAY:
case BYTE_ARRAY:
default:
throw new Exception("Unsupported dimension data type" + spec);
}
}
public GenericRow toGenericRow(DimensionBuffer dimensionKey, MetricBuffer metricsHolder) {
GenericRow row = new GenericRow();
Map<String, Object> map = new HashMap<>();
for (int i = 0; i < dimensionNames.size(); i++) {
String dimName = dimensionNames.get(i);
BiMap<Integer, Object> inverseDictionary = dictionaryMap.get(dimName).inverse();
Object dimValue = inverseDictionary.get(dimensionKey.getDimension(i));
if (dimValue == null) {
dimValue = dimensionNameToStarValueMap.get(dimName);
}
map.put(dimName, dimValue);
}
for (int i = 0; i < numMetrics; i++) {
String metName = metricNames.get(i);
map.put(metName, metricsHolder.get(i));
}
row.init(map);
return row;
}
public void append(GenericRow row) throws Exception {
DimensionBuffer dimension = new DimensionBuffer(numDimensions);
for (int i = 0; i < dimensionNames.size(); i++) {
String dimName = dimensionNames.get(i);
Map<Object, Integer> dictionary = dictionaryMap.get(dimName);
Object dimValue = row.getValue(dimName);
if (dimValue == null) {
//TODO: Have another default value to represent STAR. Using default value to represent STAR as of now.
//It does not matter during query execution, since we know that the value is STAR from the star tree
dimValue = dimensionNameToStarValueMap.get(dimName);
}
if (!dictionary.containsKey(dimValue)) {
dictionary.put(dimValue, dictionary.size());
}
dimension.setDimension(i, dictionary.get(dimValue));
}
Number[] numbers = new Number[numMetrics];
for (int i = 0; i < numMetrics; i++) {
String metName = metricNames.get(i);
numbers[i] = (Number) row.getValue(metName);
}
MetricBuffer metrics = new MetricBuffer(numbers);
append(dimension, metrics);
}
public void append(DimensionBuffer dimension, MetricBuffer metrics) throws Exception {
appendToRawBuffer(dimension, metrics);
}
private void appendToRawBuffer(DimensionBuffer dimension, MetricBuffer metrics) throws IOException {
appendToBuffer(dataBuffer, dimension, metrics);
rawRecordCount++;
}
private void appendToAggBuffer(DimensionBuffer dimension, MetricBuffer metrics) throws IOException {
appendToBuffer(dataBuffer, dimension, metrics);
aggRecordCount++;
}
private void appendToBuffer(DataOutputStream dos, DimensionBuffer dimensions, MetricBuffer metricHolder)
throws IOException {
for (int i = 0; i < numDimensions; i++) {
dos.writeInt(dimensions.getDimension(i));
}
dos.write(metricHolder.toBytes(metricSizeBytes, metricTypes));
}
public void build() throws Exception {
if (skipMaterializationForDimensions == null || skipMaterializationForDimensions.isEmpty()) {
skipMaterializationForDimensions = computeDefaultDimensionsToSkipMaterialization();
}
if (dimensionsSplitOrder == null || dimensionsSplitOrder.isEmpty()) {
dimensionsSplitOrder = computeDefaultSplitOrder();
}
// Remove any dimensions from the split order that would not be materialized.
dimensionsSplitOrder = sanitizeSplitOrder(dimensionsSplitOrder, skipMaterializationForDimensions);
LOG.debug("Split order:{}", dimensionsSplitOrder);
long start = System.currentTimeMillis();
dataBuffer.flush();
sort(dataFile, 0, rawRecordCount);
constructStarTree(starTreeRootIndexNode, 0, rawRecordCount, 0, dataFile);
long end = System.currentTimeMillis();
LOG.debug("Took {} ms to build star tree index. Original records:{} Materialized record:{}", (end - start),
rawRecordCount, aggRecordCount);
starTree = new StarTree(starTreeRootIndexNode, dimensionNameToIndexMap);
File treeBinary = new File(outDir, "star-tree.bin");
LOG.debug("Saving tree binary at: {} ", treeBinary);
starTree.writeTree(new BufferedOutputStream(new FileOutputStream(treeBinary)));
printTree(starTreeRootIndexNode, 0);
LOG.debug("Finished build tree. out dir: {} ", outDir);
dataBuffer.close();
}
private void printTree(StarTreeIndexNode node, int level) {
for (int i = 0; i < level; i++) {
LOG.debug(" ");
}
BiMap<Integer, String> inverse = dimensionNameToIndexMap.inverse();
String dimName = "ALL";
Object dimValue = "ALL";
if (node.getDimensionName() != StarTreeIndexNode.all()) {
dimName = inverse.get(node.getDimensionName());
}
if (node.getDimensionValue() != StarTreeIndexNode.all()) {
dimValue = dictionaryMap.get(dimName).inverse().get(node.getDimensionValue());
}
String formattedOutput =
Objects.toStringHelper(node).add("nodeId", node.getNodeId()).add("level", level).add("dimensionName", dimName)
.add("dimensionValue", dimValue).add("childDimensionName", inverse.get(node.getChildDimensionName()))
.add("childCount", node.getChildren() == null ? 0 : node.getChildren().size())
.add("startDocumentId", node.getStartDocumentId()).add("endDocumentId", node.getEndDocumentId())
.add("documentCount", (node.getEndDocumentId() - node.getStartDocumentId())).toString();
LOG.debug(formattedOutput);
if (!node.isLeaf()) {
for (StarTreeIndexNode child : node.getChildren().values()) {
printTree(child, level + 1);
}
}
}
private List<String> computeDefaultSplitOrder() {
ArrayList<String> defaultSplitOrder = new ArrayList<>();
//include only the dimensions, not the time column. Also assumes that skipMaterializationForDimensions has been built.
for (String dimensionName : dimensionNames) {
if (skipMaterializationForDimensions != null && !skipMaterializationForDimensions.contains(dimensionName)) {
defaultSplitOrder.add(dimensionName);
}
}
if (timeColumnName != null) {
defaultSplitOrder.remove(timeColumnName);
}
Collections.sort(defaultSplitOrder, new Comparator<String>() {
@Override
public int compare(String o1, String o2) {
return dictionaryMap.get(o2).size() - dictionaryMap.get(o1).size(); //descending
}
});
return defaultSplitOrder;
}
private Set<String> computeDefaultDimensionsToSkipMaterialization() {
Set<String> skipDimensions = new HashSet<String>();
for (String dimensionName : dimensionNames) {
if (dictionaryMap.get(dimensionName).size() > skipMaterializationCardinalityThreshold) {
skipDimensions.add(dimensionName);
}
}
return skipDimensions;
}
/*
* Sorts the file on all dimensions
*/
private void sort(File file, int startDocId, int endDocId) throws IOException {
if (debugMode) {
LOG.info("BEFORE SORTING");
printFile(file, startDocId, endDocId);
}
StarTreeDataTable dataSorter = new StarTreeDataTable(file, dimensionSizeBytes, metricSizeBytes, getSortOrder());
dataSorter.sort(startDocId, endDocId, 0, dimensionSizeBytes);
if (debugMode) {
LOG.info("AFTER SORTING");
printFile(file, startDocId, endDocId);
}
}
private int[] getSortOrder() {
if (sortOrder == null) {
sortOrder = new int[dimensionNames.size()];
for (int i = 0; i < dimensionsSplitOrder.size(); i++) {
sortOrder[i] = dimensionNameToIndexMap.get(dimensionsSplitOrder.get(i));
}
//add remaining dimensions that were not part of dimensionsSplitOrder
int counter = 0;
for (String dimName : dimensionNames) {
if (!dimensionsSplitOrder.contains(dimName)) {
sortOrder[dimensionsSplitOrder.size() + counter] = dimensionNameToIndexMap.get(dimName);
counter = counter + 1;
}
}
}
return sortOrder;
}
private void printFile(File file, int startDocId, int endDocId) throws IOException {
LOG.info("Contents of file:{} from:{} to:{}", file.getName(), startDocId, endDocId);
StarTreeDataTable dataSorter = new StarTreeDataTable(file, dimensionSizeBytes, metricSizeBytes, getSortOrder());
Iterator<Pair<byte[], byte[]>> iterator = dataSorter.iterator(startDocId, endDocId);
int numRecordsToPrint = 100;
int counter = 0;
while (iterator.hasNext()) {
Pair<byte[], byte[]> next = iterator.next();
LOG.info("{}, {}", DimensionBuffer.fromBytes(next.getLeft()),
MetricBuffer.fromBytes(next.getRight(), metricTypes));
if (counter++ == numRecordsToPrint) {
break;
}
}
}
private int constructStarTree(StarTreeIndexNode node, int startDocId, int endDocId, int level, File file)
throws Exception {
//node.setStartDocumentId(startDocId);
int docsAdded = 0;
if (level == dimensionsSplitOrder.size() - 1) {
return 0;
}
String splitDimensionName = dimensionsSplitOrder.get(level);
Integer splitDimensionId = dimensionNameToIndexMap.get(splitDimensionName);
LOG.debug("Building tree at level:{} using file:{} from startDoc:{} endDocId:{} splitting on dimension:{}", level,
file.getName(), startDocId, endDocId, splitDimensionName);
Map<Integer, IntPair> sortGroupBy = groupBy(startDocId, endDocId, splitDimensionId, file);
LOG.debug("Group stats:{}", sortGroupBy);
node.setChildDimensionName(splitDimensionId);
node.setChildren(new HashMap<Integer, StarTreeIndexNode>());
for (int childDimensionValue : sortGroupBy.keySet()) {
StarTreeIndexNode child = new StarTreeIndexNode();
child.setDimensionName(splitDimensionId);
child.setDimensionValue(childDimensionValue);
child.setParent(node);
child.setLevel(node.getLevel() + 1);
// n.b. We will number the nodes later using BFS after fully split
// Add child to parent
node.getChildren().put(childDimensionValue, child);
int childDocs = 0;
IntPair range = sortGroupBy.get(childDimensionValue);
if (range.getRight() - range.getLeft() > maxLeafRecords) {
childDocs = constructStarTree(child, range.getLeft(), range.getRight(), level + 1, file);
docsAdded += childDocs;
}
// Either range <= maxLeafRecords, or we did not split further (last level).
if (childDocs == 0) {
child.setStartDocumentId(range.getLeft());
child.setEndDocumentId(range.getRight());
}
}
// Return if star node does not need to be created.
if (skipStarNodeCreationForDimensions != null && skipStarNodeCreationForDimensions.contains(splitDimensionName)) {
return docsAdded;
}
//create star node
StarTreeIndexNode starChild = new StarTreeIndexNode();
starChild.setDimensionName(splitDimensionId);
starChild.setDimensionValue(StarTreeIndexNode.all());
starChild.setParent(node);
starChild.setLevel(node.getLevel() + 1);
// n.b. We will number the nodes later using BFS after fully split
// Add child to parent
node.getChildren().put(StarTreeIndexNode.all(), starChild);
Iterator<Pair<DimensionBuffer, MetricBuffer>> iterator =
uniqueCombinations(startDocId, endDocId, file, splitDimensionId);
int rowsAdded = 0;
int startOffset = rawRecordCount + aggRecordCount;
while (iterator.hasNext()) {
Pair<DimensionBuffer, MetricBuffer> next = iterator.next();
DimensionBuffer dimension = next.getLeft();
MetricBuffer metricsHolder = next.getRight();
LOG.debug("Adding row:{}", dimension);
appendToAggBuffer(dimension, metricsHolder);
rowsAdded++;
}
docsAdded += rowsAdded;
LOG.debug("Added {} additional records at level {}", rowsAdded, level);
//flush
dataBuffer.flush();
int childDocs = 0;
if (rowsAdded >= maxLeafRecords) {
sort(dataFile, startOffset, startOffset + rowsAdded);
childDocs = constructStarTree(starChild, startOffset, startOffset + rowsAdded, level + 1, dataFile);
docsAdded += childDocs;
}
// Either rowsAdded < maxLeafRecords, or we did not split further (last level).
if (childDocs == 0) {
starChild.setStartDocumentId(startOffset);
starChild.setEndDocumentId(startOffset + rowsAdded);
}
//node.setEndDocumentId(endDocId + docsAdded);
return docsAdded;
}
/**
* Assumes the file is already sorted. Returns the unique dimension combinations after replacing the
* split dimension (and any dimensions marked to skip materialization) with the star value, and
* aggregates the metrics for each unique combination. Currently only sum aggregation is supported by default.
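* <p>Illustrative sketch (hypothetical dictionary ids): with split dimension d0, the sorted rows
* [d0=2, d1=5 | m=3] and [d0=7, d1=5 | m=4] both become [d0=*, d1=5] and are merged into a single
* aggregate record with m=7.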
* @param startDocId start document id (inclusive)
* @param endDocId end document id (exclusive)
* @param file data file holding the sorted records
* @param splitDimensionId index of the dimension being split on
* @return iterator over the aggregated (dimension, metric) pairs
* @throws Exception
*/
private Iterator<Pair<DimensionBuffer, MetricBuffer>> uniqueCombinations(int startDocId, int endDocId, File file,
int splitDimensionId) throws Exception {
StarTreeDataTable dataSorter = new StarTreeDataTable(file, dimensionSizeBytes, metricSizeBytes, getSortOrder());
Iterator<Pair<byte[], byte[]>> iterator1 = dataSorter.iterator(startDocId, endDocId);
File tempFile = new File(outDir, file.getName() + "_" + startDocId + "_" + endDocId + ".unique.tmp");
DataOutputStream dos = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(tempFile)));
while (iterator1.hasNext()) {
Pair<byte[], byte[]> next = iterator1.next();
byte[] dimensionBuffer = next.getLeft();
byte[] metricBuffer = next.getRight();
DimensionBuffer dimensions = DimensionBuffer.fromBytes(dimensionBuffer);
for (int i = 0; i < numDimensions; i++) {
String dimensionName = dimensionNameToIndexMap.inverse().get(i);
if (i == splitDimensionId || (skipMaterializationForDimensions != null &&
skipMaterializationForDimensions.contains(dimensionName))) {
dos.writeInt(StarTreeIndexNode.all());
} else {
dos.writeInt(dimensions.getDimension(i));
}
}
dos.write(metricBuffer);
}
dos.close();
dataSorter = new StarTreeDataTable(tempFile, dimensionSizeBytes, metricSizeBytes, getSortOrder());
dataSorter.sort(0, endDocId - startDocId);
if (debugMode) {
printFile(tempFile, 0, endDocId - startDocId);
}
final Iterator<Pair<byte[], byte[]>> iterator = dataSorter.iterator(0, endDocId - startDocId);
return new Iterator<Pair<DimensionBuffer, MetricBuffer>>() {
Pair<DimensionBuffer, MetricBuffer> prev = null;
boolean done = false;
@Override
public void remove() {
throw new UnsupportedOperationException();
}
@Override
public boolean hasNext() {
return !done;
}
@Override
public Pair<DimensionBuffer, MetricBuffer> next() {
while (iterator.hasNext()) {
Pair<byte[], byte[]> next = iterator.next();
byte[] dimBuffer = next.getLeft();
byte[] metricBuffer = next.getRight();
if (prev == null) {
prev = Pair.of(DimensionBuffer.fromBytes(dimBuffer), MetricBuffer.fromBytes(metricBuffer, metricTypes));
} else {
Pair<DimensionBuffer, MetricBuffer> current =
Pair.of(DimensionBuffer.fromBytes(dimBuffer), MetricBuffer.fromBytes(metricBuffer, metricTypes));
if (!current.getLeft().equals(prev.getLeft())) {
Pair<DimensionBuffer, MetricBuffer> ret = prev;
prev = current;
LOG.debug("Returning unique {}", prev.getLeft());
return ret;
} else {
prev.getRight().aggregate(current.getRight(), metricTypes);
}
}
}
done = true;
LOG.debug("Returning unique {}", prev.getLeft());
return prev;
}
};
}
/**
* Groups the records between startDocId and endDocId by the given dimension index and returns,
* for each dimension value, the document-id range it spans.
* @param startDocId start document id (inclusive)
* @param endDocId end document id (exclusive)
* @param dimension index of the dimension to group by
* @param file data file holding the records
* @return map from dimension value to its document-id range
*/
private Map<Integer, IntPair> groupBy(int startDocId, int endDocId, Integer dimension, File file) {
StarTreeDataTable dataSorter = new StarTreeDataTable(file, dimensionSizeBytes, metricSizeBytes, getSortOrder());
return dataSorter.groupByIntColumnCount(startDocId, endDocId, dimension);
}
/**
* Iterator to iterate over the records from startDocId to endDocId
*/
@Override
public Iterator<GenericRow> iterator(final int startDocId, final int endDocId) throws Exception {
StarTreeDataTable dataSorter = new StarTreeDataTable(dataFile, dimensionSizeBytes, metricSizeBytes, getSortOrder());
final Iterator<Pair<byte[], byte[]>> iterator = dataSorter.iterator(startDocId, endDocId);
return new Iterator<GenericRow>() {
@Override
public boolean hasNext() {
return iterator.hasNext();
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
@Override
public GenericRow next() {
Pair<byte[], byte[]> pair = iterator.next();
DimensionBuffer dimensionKey = DimensionBuffer.fromBytes(pair.getLeft());
MetricBuffer metricsHolder = MetricBuffer.fromBytes(pair.getRight(), metricTypes);
return toGenericRow(dimensionKey, metricsHolder);
}
};
}
public JSONObject getStarTreeAsJSON() throws Exception {
JSONObject json = new JSONObject();
toJson(json, starTreeRootIndexNode, dictionaryMap);
return json;
}
private void toJson(JSONObject json, StarTreeIndexNode node, Map<String, HashBiMap<Object, Integer>> dictionaryMap)
throws Exception {
String dimName = "ALL";
Object dimValue = "ALL";
if (node.getDimensionName() != StarTreeIndexNode.all()) {
dimName = dimensionNames.get(node.getDimensionName());
}
if (node.getDimensionValue() != StarTreeIndexNode.all()) {
dimValue = dictionaryMap.get(dimName).inverse().get(node.getDimensionValue());
}
json.put("title", dimName + ":" + dimValue);
if (node.getChildren() != null) {
JSONObject[] childJsons = new JSONObject[node.getChildren().size()];
int index = 0;
for (Integer child : node.getChildren().keySet()) {
StarTreeIndexNode childNode = node.getChildren().get(child);
JSONObject childJson = new JSONObject();
toJson(childJson, childNode, dictionaryMap);
childJsons[index++] = childJson;
}
json.put("nodes", childJsons);
}
}
@Override
public void cleanup() {
if (outDir != null) {
FileUtils.deleteQuietly(outDir);
}
}
@Override
public StarTree getTree() {
return starTree;
}
@Override
public int getTotalRawDocumentCount() {
return rawRecordCount;
}
@Override
public int getTotalAggregateDocumentCount() {
return aggRecordCount;
}
@Override
public int getMaxLeafRecords() {
return maxLeafRecords;
}
@Override
public List<String> getDimensionsSplitOrder() {
return dimensionsSplitOrder;
}
public Map<String, HashBiMap<Object, Integer>> getDictionaryMap() {
return dictionaryMap;
}
public HashBiMap<String, Integer> getDimensionNameToIndexMap() {
return dimensionNameToIndexMap;
}
@Override
public Set<String> getSkipMaterializationForDimensions() {
return skipMaterializationForDimensions;
}
}
|
Java
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (1.8.0_112) on Fri Jun 16 09:55:12 MST 2017 -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>org.wildfly.swarm.batch.jberet (Public javadocs 2017.6.1 API)</title>
<meta name="date" content="2017-06-16">
<link rel="stylesheet" type="text/css" href="../../../../../stylesheet.css" title="Style">
<script type="text/javascript" src="../../../../../script.js"></script>
</head>
<body>
<h1 class="bar"><a href="../../../../../org/wildfly/swarm/batch/jberet/package-summary.html" target="classFrame">org.wildfly.swarm.batch.jberet</a></h1>
<div class="indexContainer">
<h2 title="Classes">Classes</h2>
<ul title="Classes">
<li><a href="BatchFraction.html" title="class in org.wildfly.swarm.batch.jberet" target="classFrame">BatchFraction</a></li>
</ul>
</div>
</body>
</html>
|
Java
|
<table class="table table-bordered" st-pipe="load" st-table="displayed">
<thead>
<tr ng-if="globalFilter">
<th colspan="{{ columns.length + 1 }}">
<input st-search=""
class="form-control"
placeholder="global search ..."
type="text"/>
</th>
</tr>
<tr ng-if="columnFilter">
<th ng-repeat="col in columns">
<input st-search="{{col}}"
class="form-control"
placeholder="search by {{ col | translate }} ..."
type="text"/>
</th>
<th></th>
</tr>
<tr>
<th ng-repeat="col in columns" st-sort="{{col}}"> {{ col | translate }}</th>
<th>Edit</th>
</tr>
</thead>
<tbody ng-show="isLoading">
<tr>
<td colspan="{{ columns.length + 1 }}" class="text-center">Loading ...</td>
</tr>
</tbody>
<tbody ng-show="!isLoading">
<tr ng-repeat="row in displayed">
<td ng-repeat="col in columns">{{ row[col] }}</td>
<td>
<a ui-sref="{{edit}}">
{{ 'action.edit' | translate }}
</a>
</td>
</tr>
</tbody>
<tfoot>
<tr>
<td colspan="{{ columns.length + 1 }}" class="text-center">
<div st-pagination="" st-items-by-page="itemsPerPage" st-displayed-pages="7"></div>
</td>
</tr>
</tfoot>
</table>
|
Java
|
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.plugins.signing;
import com.google.common.base.Function;
import groovy.lang.Closure;
import org.gradle.api.artifacts.PublishArtifact;
import org.gradle.api.file.FileCollection;
import org.gradle.api.internal.file.collections.ImmutableFileCollection;
import org.gradle.plugins.signing.signatory.Signatory;
import org.gradle.plugins.signing.type.SignatureType;
import org.gradle.util.ConfigureUtil;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
/**
* A sign operation creates digital signatures for one or more files or {@link PublishArtifact publish artifacts}.
*
* <p>The external representation of the signature is specified by the {@link #getSignatureType() signature type property}, while the {@link #signatory} property specifies who is to sign. <p> A sign
* operation manages one or more {@link Signature} objects. The {@code sign} methods are used to register things to generate signatures for. The {@link #execute()} method generates the signatures for
* all of the registered items at that time.
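*
* <p>A minimal usage sketch (how the operation instance is obtained depends on the signing plugin;
* {@code type}, {@code signatory} and {@code artifact} are placeholders):
* <pre>
* operation.signatureType(type)
*          .signatory(signatory)
*          .sign(artifact)
*          .execute();
* </pre>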
*/
abstract public class SignOperation implements SignatureSpec {
/**
* The file representation of the signature(s).
*/
private SignatureType signatureType;
/**
* The signatory to the generated digital signatures.
*/
private Signatory signatory;
/**
* Whether or not it is required that this signature be generated.
*/
private boolean required;
private final List<Signature> signatures = new ArrayList<Signature>();
public String getDisplayName() {
return "SignOperation";
}
@Override
public String toString() {
return getDisplayName();
}
@Override
public void setSignatureType(SignatureType signatureType) {
this.signatureType = signatureType;
}
@Override
public SignatureType getSignatureType() {
return signatureType;
}
@Override
public void setSignatory(Signatory signatory) {
this.signatory = signatory;
}
@Override
public Signatory getSignatory() {
return signatory;
}
@Override
public void setRequired(boolean required) {
this.required = required;
}
@Override
public boolean isRequired() {
return required;
}
/**
* Registers signatures for the given artifacts.
*
* @return this
* @see Signature#Signature(PublishArtifact, SignatureSpec, Object...)
*/
public SignOperation sign(PublishArtifact... artifacts) {
for (PublishArtifact artifact : artifacts) {
signatures.add(new Signature(artifact, this));
}
return this;
}
/**
* Registers signatures for the given files.
*
* @return this
* @see Signature#Signature(File, SignatureSpec, Object...)
*/
public SignOperation sign(File... files) {
for (File file : files) {
signatures.add(new Signature(file, this));
}
return this;
}
/**
* Registers signatures (with the given classifier) for the given files.
*
* @return this
* @see Signature#Signature(File, String, SignatureSpec, Object...)
*/
public SignOperation sign(String classifier, File... files) {
for (File file : files) {
signatures.add(new Signature(file, classifier, this));
}
return this;
}
/**
* Change the signature type for signature generation.
*/
public SignOperation signatureType(SignatureType type) {
this.signatureType = type;
return this;
}
/**
* Change the signatory for signature generation.
*/
public SignOperation signatory(Signatory signatory) {
this.signatory = signatory;
return this;
}
/**
* Executes the given closure against this object.
*/
public SignOperation configure(Closure closure) {
ConfigureUtil.configureSelf(closure, this);
return this;
}
/**
* Generates actual signature files for all of the registered signatures.
*
* <p>The signatures are generated with the configuration they have at this time, which includes the signature type and signatory of this operation at this time. <p> This method can be called
* multiple times, with the signatures being generated with their current configuration each time.
*
* @return this
* @see Signature#generate()
*/
public SignOperation execute() {
for (Signature signature : signatures) {
signature.generate();
}
return this;
}
/**
* The registered signatures.
*/
public List<Signature> getSignatures() {
return new ArrayList<Signature>(signatures);
}
/**
* Returns the single registered signature.
*
* @return The signature.
* @throws IllegalStateException if there is not exactly one registered signature.
*/
public Signature getSingleSignature() {
final int size = signatures.size();
switch (size) {
case 1:
return signatures.get(0);
case 0:
throw new IllegalStateException("Expected operation to contain exactly one signature, however, it contains no signatures.");
default:
throw new IllegalStateException("Expected operation to contain exactly one signature, however, it contains " + String.valueOf(size) + " signatures.");
}
}
/**
* All of the files that will be signed by this operation.
*/
public FileCollection getFilesToSign() {
return newSignatureFileCollection(new Function<Signature, File>() {
@Override
public File apply(Signature input) {
return input.getToSign();
}
});
}
/**
* All of the signature files that will be generated by this operation.
*/
public FileCollection getSignatureFiles() {
return newSignatureFileCollection(new Function<Signature, File>() {
@Override
public File apply(Signature input) {
return input.getFile();
}
});
}
private FileCollection newSignatureFileCollection(Function<Signature, File> getFile) {
return ImmutableFileCollection.of(collectSignatureFiles(getFile));
}
private ArrayList<File> collectSignatureFiles(Function<Signature, File> getFile) {
ArrayList<File> files = new ArrayList<File>(signatures.size());
for (Signature signature : signatures) {
File file = getFile.apply(signature);
if (file != null) {
files.add(file);
}
}
return files;
}
}
|
Java
|
/*
* Copyright 2013-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.cloudfoundry.client.v2.spaces;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import org.cloudfoundry.Nullable;
import org.immutables.value.Value;
import java.util.List;
/**
* The request payload for the Update a Space operation
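*
* <p>A minimal builder sketch (assuming the Immutables-generated {@code UpdateSpaceRequest} class;
* all values are placeholders):
* <pre>
* UpdateSpaceRequest request = UpdateSpaceRequest.builder()
*     .spaceId("test-space-id")
*     .name("new-name")
*     .allowSsh(true)
*     .build();
* </pre>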
*/
@JsonSerialize
@Value.Immutable
abstract class _UpdateSpaceRequest {
/**
* Allow SSH
*/
@JsonProperty("allow_ssh")
@Nullable
abstract Boolean getAllowSsh();
/**
* The auditor ids
*/
@JsonProperty("auditor_guids")
@Nullable
abstract List<String> getAuditorIds();
/**
* The developer ids
*/
@JsonProperty("developer_guids")
@Nullable
abstract List<String> getDeveloperIds();
/**
* The domain ids
*/
@JsonProperty("domain_guids")
@Nullable
abstract List<String> getDomainIds();
/**
* The manager ids
*/
@JsonProperty("manager_guids")
@Nullable
abstract List<String> getManagerIds();
/**
* The name
*/
@JsonProperty("name")
@Nullable
abstract String getName();
/**
* The organization id
*/
@JsonProperty("organization_guid")
@Nullable
abstract String getOrganizationId();
/**
* The security group ids
*/
@JsonProperty("security_group_guids")
@Nullable
abstract List<String> getSecurityGroupIds();
/**
* The space id
*/
@JsonIgnore
abstract String getSpaceId();
}
|
Java
|
<!DOCTYPE html>
<!--
Copyright 2016 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License
-->
<html>
<head>
<meta charset="utf-8">
<title>MDC Toolbar Demo</title>
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="icon" type="image/png" href="/images/logo_components_color_2x_web_48dp.png" />
<script src="../assets/material-components-web.css.js" charset="utf-8"></script>
<link href="../demos.css" rel="stylesheet">
<link rel="stylesheet" href="https://fonts.googleapis.com/icon?family=Material+Icons">
<style>
.mdc-toolbar-demo {
margin: 0px;
}
.demo-paragraph {
margin: 0px;
padding: 20px 28px;
}
@media (max-width: 599px) {
.demo-paragraph {
padding: 16px;
}
}
</style>
</head>
<body class="mdc-typography mdc-toolbar-demo">
<header class="mdc-toolbar mdc-toolbar--fixed">
<div class="mdc-toolbar__row">
<section class="mdc-toolbar__section mdc-toolbar__section--align-start">
<a href="#" class="material-icons mdc-toolbar__icon--menu">menu</a>
<span class="mdc-toolbar__title">Title</span>
</section>
<section class="mdc-toolbar__section mdc-toolbar__section--align-end" role="toolbar">
<a href="#" class="material-icons mdc-toolbar__icon" aria-label="Download" alt="Download">file_download</a>
<a href="#" class="material-icons mdc-toolbar__icon" aria-label="Print this page" alt="Print this page">print</a>
<a href="#" class="material-icons mdc-toolbar__icon" aria-label="Bookmark this page" alt="Bookmark this page">more_vert</a>
</section>
</div>
</header>
<main>
<div class="mdc-toolbar-fixed-adjust">
<p class="demo-paragraph">
Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Vestibulum tortor quam, feugiat vitae, ultricies eget, tempor sit amet, ante. Donec eu libero sit amet quam egestas semper. Aenean ultricies mi vitae est. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Vestibulum tortor quam, feugiat vitae, ultricies eget, tempor sit amet, ante. Donec eu libero sit amet quam egestas semper. Aenean ultricies mi vitae est.
</p>
<p class="demo-paragraph">
Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Vestibulum tortor quam, feugiat vitae, ultricies eget, tempor sit amet, ante. Donec eu libero sit amet quam egestas semper. Aenean ultricies mi vitae est. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Vestibulum tortor quam, feugiat vitae, ultricies eget, tempor sit amet, ante. Donec eu libero sit amet quam egestas semper. Aenean ultricies mi vitae est.
</p>
<p class="demo-paragraph">
Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Vestibulum tortor quam, feugiat vitae, ultricies eget, tempor sit amet, ante. Donec eu libero sit amet quam egestas semper. Aenean ultricies mi vitae est. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Vestibulum tortor quam, feugiat vitae, ultricies eget, tempor sit amet, ante. Donec eu libero sit amet quam egestas semper. Aenean ultricies mi vitae est.
</p>
</div>
</main>
</body>
</html>
|
Java
|
# AUTOGENERATED FILE
FROM balenalib/orangepi-plus2-debian:bullseye-build
ENV NODE_VERSION 12.21.0
ENV YARN_VERSION 1.22.4
RUN for key in \
6A010C5166006599AA17F08146C2130DFD2497F5 \
; do \
gpg --batch --keyserver pgp.mit.edu --recv-keys "$key" || \
gpg --batch --keyserver keyserver.pgp.com --recv-keys "$key" || \
gpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys "$key" ; \
done \
&& curl -SLO "http://nodejs.org/dist/v$NODE_VERSION/node-v$NODE_VERSION-linux-armv7l.tar.gz" \
&& echo "6edc31a210e47eb72b0a2a150f7fe604539c1b2a45e8c81d378ac9315053a54f node-v$NODE_VERSION-linux-armv7l.tar.gz" | sha256sum -c - \
&& tar -xzf "node-v$NODE_VERSION-linux-armv7l.tar.gz" -C /usr/local --strip-components=1 \
&& rm "node-v$NODE_VERSION-linux-armv7l.tar.gz" \
&& curl -fSLO --compressed "https://yarnpkg.com/downloads/$YARN_VERSION/yarn-v$YARN_VERSION.tar.gz" \
&& curl -fSLO --compressed "https://yarnpkg.com/downloads/$YARN_VERSION/yarn-v$YARN_VERSION.tar.gz.asc" \
&& gpg --batch --verify yarn-v$YARN_VERSION.tar.gz.asc yarn-v$YARN_VERSION.tar.gz \
&& mkdir -p /opt/yarn \
&& tar -xzf yarn-v$YARN_VERSION.tar.gz -C /opt/yarn --strip-components=1 \
&& ln -s /opt/yarn/bin/yarn /usr/local/bin/yarn \
&& ln -s /opt/yarn/bin/yarn /usr/local/bin/yarnpkg \
&& rm yarn-v$YARN_VERSION.tar.gz.asc yarn-v$YARN_VERSION.tar.gz \
&& npm config set unsafe-perm true -g --unsafe-perm \
&& rm -rf /tmp/*
CMD ["echo","'No CMD command was set in Dockerfile! Details about CMD command could be found in Dockerfile Guide section in our Docs. Here's the link: https://balena.io/docs"]
RUN curl -SLO "https://raw.githubusercontent.com/balena-io-library/base-images/8accad6af708fca7271c5c65f18a86782e19f877/scripts/assets/tests/test-stack@node.sh" \
&& echo "Running test-stack@node" \
&& chmod +x test-stack@node.sh \
&& bash test-stack@node.sh \
&& rm -rf test-stack@node.sh
RUN [ ! -d /.balena/messages ] && mkdir -p /.balena/messages; echo 'Here are a few details about this Docker image (For more information please visit https://www.balena.io/docs/reference/base-images/base-images/): \nArchitecture: ARM v7 \nOS: Debian Bullseye \nVariant: build variant \nDefault variable(s): UDEV=off \nThe following software stack is preinstalled: \nNode.js v12.21.0, Yarn v1.22.4 \nExtra features: \n- Easy way to install packages with `install_packages <package-name>` command \n- Run anywhere with cross-build feature (for ARM only) \n- Keep the container idling with `balena-idle` command \n- Show base image details with `balena-info` command' > /.balena/messages/image-info
RUN echo '#!/bin/sh.real\nbalena-info\nrm -f /bin/sh\ncp /bin/sh.real /bin/sh\n/bin/sh "$@"' > /bin/sh-shim \
&& chmod +x /bin/sh-shim \
&& cp /bin/sh /bin/sh.real \
&& mv /bin/sh-shim /bin/sh
|
Java
|
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package agent.lldb.manager.evt;
import agent.lldb.lldb.DebugThreadInfo;
/**
* The event corresponding with SBThread.eBroadcastBitThreadResumed
*/
public class LldbThreadResumedEvent extends AbstractLldbEvent<DebugThreadInfo> {
public LldbThreadResumedEvent(DebugThreadInfo info) {
super(info);
}
}
|
Java
|
/**
* Copyright (C) 2013
* by 52 North Initiative for Geospatial Open Source Software GmbH
*
* Contact: Andreas Wytzisk
* 52 North Initiative for Geospatial Open Source Software GmbH
* Martin-Luther-King-Weg 24
* 48155 Muenster, Germany
* info@52north.org
*
* This program is free software; you can redistribute and/or modify it under
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*
* This program is distributed WITHOUT ANY WARRANTY; even without the implied
* WARRANTY OF MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with
* this program (see gnu-gpl v2.txt). If not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA or
* visit the Free Software Foundation web page, http://www.fsf.org.
*/
package org.n52.sos.binding.rest.resources;
import org.n52.sos.binding.rest.requests.RestRequest;
/**
* @author <a href="mailto:e.h.juerrens@52north.org">Eike Hinderk Jürrens</a>
*
*/
public class OptionsRestRequest implements RestRequest {
private String resourceType;
private boolean isGlobalResource;
private boolean isResourceCollection;
public OptionsRestRequest(String resourceType, boolean isGlobalResource, boolean isResourceCollection) {
this.resourceType = resourceType;
this.isGlobalResource = isGlobalResource;
this.isResourceCollection = isResourceCollection;
}
public String getResourceType()
{
return resourceType;
}
public boolean isGlobalResource()
{
return isGlobalResource;
}
public boolean isResourceCollection()
{
return isResourceCollection;
}
}
|
Java
|
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace Watcher.Model
{
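/// <summary>
/// Predicate that is satisfied when the first operand is strictly greater than the second.
/// </summary>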
public class GreatThan : IPredicate
{
public string Quantifier
{
get
{
return "great than";
}
}
public bool IsSatisfiedBy(int a, int b)
{
return a > b;
}
}
}
|
Java
|
package org.adligo.tests4j.system.shared.trials;
import org.adligo.tests4j.shared.common.ClassMethods;
import org.adligo.tests4j.shared.xml.I_XML_Builder;
public class TrialParamValue implements I_TrialParamValue {
public static final String TAG_NAME = "value";
public static final String CLASS_NAME = "class";
public static final String PARAMETER_VALUE_MUST_BE_A_NON_VOID_PRIMITIVE_OR_STRING =
"Parameter value must be a non Void primitive or String.";
private Object value_;
public TrialParamValue(Object value) {
if (value == null) {
throw new NullPointerException();
}
Class<?> c = value.getClass();
if ( (ClassMethods.isPrimitiveClass(c) && !ClassMethods.isClass(Void.class, c))
|| ClassMethods.isClass(String.class, c)) {
value_ = value;
} else {
throw new IllegalArgumentException(
PARAMETER_VALUE_MUST_BE_A_NON_VOID_PRIMITIVE_OR_STRING);
}
}
@Override
public String getClassName() {
return value_.getClass().getName();
}
@Override
public Object getValue() {
return value_;
}
@Override
public void toXml(I_XML_Builder builder) {
builder.addIndent();
builder.addStartTag(TAG_NAME);
String name = ClassMethods.getSimpleName(value_.getClass());
builder.addAttribute(CLASS_NAME, name);
builder.endHeader();
builder.addText(value_.toString());
builder.addEndTag(TAG_NAME);
builder.endLine();
}
}
|
Java
|
"""
Installs and configures MySQL
"""
import uuid
import logging
from packstack.installer import validators
from packstack.installer import utils
from packstack.installer.utils import split_hosts
from packstack.modules.ospluginutils import getManifestTemplate, appendManifestFile
# Controller object will be initialized from main flow
controller = None
# Plugin name
PLUGIN_NAME = "OS-MySQL"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
logging.debug("plugin %s loaded", __name__)
def initConfig(controllerObject):
global controller
controller = controllerObject
logging.debug("Adding MySQL OpenStack configuration")
paramsList = [
{"CMD_OPTION" : "mysql-host",
"USAGE" : "The IP address of the server on which to install MySQL",
"PROMPT" : "Enter the IP address of the MySQL server",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_ssh],
"DEFAULT_VALUE" : utils.get_localhost_ip(),
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_MYSQL_HOST",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "mysql-user",
"USAGE" : "Username for the MySQL admin user",
"PROMPT" : "Enter the username for the MySQL admin user",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : "root",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_MYSQL_USER",
"USE_DEFAULT" : True,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "mysql-pw",
"USAGE" : "Password for the MySQL admin user",
"PROMPT" : "Enter the password for the MySQL admin user",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : uuid.uuid4().hex[:16],
"MASK_INPUT" : True,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_MYSQL_PW",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : True,
"CONDITION" : False },
]
groupDict = { "GROUP_NAME" : "MYSQL",
"DESCRIPTION" : "MySQL Config parameters",
"PRE_CONDITION" : lambda x: 'yes',
"PRE_CONDITION_MATCH" : "yes",
"POST_CONDITION" : False,
"POST_CONDITION_MATCH" : True}
controller.addGroup(groupDict, paramsList)
def initSequences(controller):
mysqlsteps = [
{'title': 'Adding MySQL manifest entries',
'functions':[createmanifest]}
]
controller.addSequence("Installing MySQL", [], [], mysqlsteps)
def createmanifest(config):
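"""Builds the mysql_*.pp manifest for the target host: picks the install/noinstall
template, appends the per-service mysql templates for each enabled component and
opens the firewall for their hosts."""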
if config['CONFIG_MYSQL_INSTALL'] == 'y':
install = True
suffix = 'install'
else:
install = False
suffix = 'noinstall'
# In case we are not installing MySQL server, mysql* manifests have
# to be run from Keystone host
host = install and config['CONFIG_MYSQL_HOST'] \
or config['CONFIG_KEYSTONE_HOST']
manifestfile = "%s_mysql.pp" % host
manifestdata = [getManifestTemplate('mysql_%s.pp' % suffix)]
def append_for(module, suffix):
# Modules have to be appended to the existing mysql.pp
# otherwise pp will fail for some of them saying that
# Mysql::Config definition is missing.
template = "mysql_%s_%s.pp" % (module, suffix)
manifestdata.append(getManifestTemplate(template))
append_for("keystone", suffix)
hosts = set()
for mod in ['nova', 'cinder', 'glance', 'neutron', 'heat']:
if config['CONFIG_%s_INSTALL' % mod.upper()] == 'y':
append_for(mod, suffix)
# Check which modules are enabled so we can allow their
# hosts on the firewall
if mod != 'nova' and mod != 'neutron':
hosts.add(config.get('CONFIG_%s_HOST' % mod.upper()).strip())
elif mod == 'neutron':
hosts.add(config.get('CONFIG_NEUTRON_SERVER_HOST').strip())
elif config['CONFIG_NOVA_INSTALL'] != 'n':
# In the remote case that we have lots of nova hosts
hosts.add(config.get('CONFIG_NOVA_API_HOST').strip())
hosts.add(config.get('CONFIG_NOVA_CERT_HOST').strip())
hosts.add(config.get('CONFIG_NOVA_VNCPROXY_HOST').strip())
hosts.add(config.get('CONFIG_NOVA_CONDUCTOR_HOST').strip())
hosts.add(config.get('CONFIG_NOVA_SCHED_HOST').strip())
if config['CONFIG_NEUTRON_INSTALL'] != 'y':
dbhosts = split_hosts(config['CONFIG_NOVA_NETWORK_HOSTS'])
hosts |= dbhosts
for host in config.get('CONFIG_NOVA_COMPUTE_HOSTS').split(','):
hosts.add(host.strip())
config['FIREWALL_ALLOWED'] = ",".join(["'%s'" % i for i in hosts])
config['FIREWALL_SERVICE_NAME'] = "mysql"
config['FIREWALL_PORTS'] = "'3306'"
manifestdata.append(getManifestTemplate("firewall.pp"))
appendManifestFile(manifestfile, "\n".join(manifestdata), 'pre')
|
Java
|
```nginx
server {
listen 80;
charset utf-8;
client_max_body_size 128M;
server_name meotrics.com;
return 301 https://$host$request_uri;
}
server {
listen 80;
server_name www.meotrics.com;
return 301 http://meotrics.com$request_uri;
}
server {
listen 443;
server_name www.meotrics.com meotrics.com;
ssl on;
ssl_certificate /etc/ssl/certs/chained.pem;
ssl_certificate_key /etc/ssl/private/domain.key;
ssl_session_timeout 5m;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-G$
ssl_session_cache shared:SSL:50m;
ssl_dhparam /etc/ssl/certs/dhparam.pem;
ssl_prefer_server_ciphers on;
root /home/thanhpk/meotrics/landing/;
location ~* \.(js|css|png|jpg|jpeg|gif|ico)$ {
expires 1y;
log_not_found off;
}
server_tokens off;
#more_set_headers "Server: Meotrics";
index index.html;
location ~ /\.(ht|svn|git) {
deny all;
}
# Common bandwidth hoggers and hacking tools.
if ($http_user_agent ~ "libwww-perl") {
set $block_user_agents 1;
}
if ($http_user_agent ~ "GetRight") {
set $block_user_agents 1;
}
if ($http_user_agent ~ "GetWeb!") {
set $block_user_agents 1;
}
if ($http_user_agent ~ "Go!Zilla") {
set $block_user_agents 1;
}
if ($http_user_agent ~ "Download Demon") {
set $block_user_agents 1;
}
if ($http_user_agent ~ "Go-Ahead-Got-It") {
set $block_user_agents 1;
}
if ($http_user_agent ~ "TurnitinBot") {
set $block_user_agents 1;
}
if ($http_user_agent ~ "GrabNet") {
set $block_user_agents 1;
}
if ($block_user_agents = 1) {
return 403;
}
}
server {
listen 80;
server_name api.meotrics.com;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_pass http://127.0.0.1:1711/api;
}
}
server {
listen 443;
server_name api.meotrics.com;
ssl on;
ssl_certificate /etc/ssl/certs/chained.pem;
ssl_certificate_key /etc/ssl/private/domain.key;
ssl_session_timeout 5m;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA;
ssl_session_cache shared:SSL:50m;
ssl_dhparam /etc/ssl/certs/dhparam.pem;
ssl_prefer_server_ciphers on;
large_client_header_buffers 8 32k;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_pass http://127.0.0.1:1711/api;
}
}
server {
listen 80;
server_name app.meotrics.com;
return 301 https://$server_name$request_uri;
}
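# Map the client's Upgrade header so the /ws location below can proxy WebSocket
# connections; requests without an Upgrade header fall back to "Connection: close".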
map $http_upgrade $connection_upgrade{
default upgrade;
'' close;
}
upstream websocket {
server 127.0.0.1:2910;
}
server {
charset utf-8;
listen 443;
server_name app.meotrics.com;
root /home/thanhpk/meotrics/dashboard/public/;
index index.php;
ssl on;
ssl_certificate /etc/ssl/certs/chained.pem;
ssl_certificate_key /etc/ssl/private/domain.key;
ssl_session_timeout 5m;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA;
ssl_session_cache shared:SSL:50m;
ssl_dhparam /etc/ssl/certs/dhparam.pem;
ssl_prefer_server_ciphers on;
access_log /home/thanhpk/tmp/meotrics-access443.log;
error_log /home/thanhpk/tmp/meotrics-error443.log;
location /ws {
proxy_pass http://websocket;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
}
location / {
try_files $uri $uri/ /index.php?$args;
}
location ~ \.php$ {
include fastcgi_params;
fastcgi_param REMOTE_ADDR $http_x_real_ip;
fastcgi_param SCRIPT_FILENAME $document_root/$fastcgi_script_name;
fastcgi_pass unix:/var/run/php5-fpm.sock;
try_files $uri =404;
}
location ~ /\.(ht|svn|git) {
deny all;
}
}
```
|
Java
|
/*
Copyright 2018 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
ndmapis "github.com/openebs/maya/pkg/apis/openebs.io/ndm/v1alpha1"
apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
bd "github.com/openebs/maya/pkg/blockdevice/v1alpha2"
bdc "github.com/openebs/maya/pkg/blockdeviceclaim/v1alpha1"
cspc "github.com/openebs/maya/pkg/cstor/poolcluster/v1alpha1"
csp "github.com/openebs/maya/pkg/cstor/poolinstance/v1alpha3"
nodeapis "github.com/openebs/maya/pkg/kubernetes/node/v1alpha1"
"github.com/openebs/maya/pkg/volume"
"github.com/pkg/errors"
k8serror "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"
)
// SelectNode returns a node where a pool should be created.
func (ac *Config) SelectNode() (*apis.PoolSpec, string, error) {
usedNodes, err := ac.GetUsedNode()
if err != nil {
return nil, "", errors.Wrapf(err, "could not get used nodes list for pool creation")
}
for _, pool := range ac.CSPC.Spec.Pools {
// pin it
pool := pool
nodeName, err := GetNodeFromLabelSelector(pool.NodeSelector)
if err != nil || nodeName == "" {
klog.Errorf("could not use node for selectors {%v}", pool.NodeSelector)
continue
}
if ac.VisitedNodes[nodeName] {
continue
} else {
ac.VisitedNodes[nodeName] = true
if !usedNodes[nodeName] {
return &pool, nodeName, nil
}
}
}
return nil, "", errors.New("no node qualified for pool creation")
}
// GetNodeFromLabelSelector returns the node name selected by provided labels
// TODO : Move it to node package
func GetNodeFromLabelSelector(labels map[string]string) (string, error) {
nodeList, err := nodeapis.NewKubeClient().List(metav1.ListOptions{LabelSelector: getLabelSelectorString(labels)})
if err != nil {
return "", errors.Wrap(err, "failed to get node list from the node selector")
}
if len(nodeList.Items) != 1 {
return "", errors.Errorf("invalid no.of nodes %d from the given node selectors", len(nodeList.Items))
}
return nodeList.Items[0].Name, nil
}
// getLabelSelectorString returns a label selector string built from the given label map, to be
// used in list options.
// TODO : Move it to node package
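// For example, map[string]string{"kubernetes.io/hostname": "node-1"} yields
// "kubernetes.io/hostname=node-1" (key=value pairs joined by commas).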
func getLabelSelectorString(selector map[string]string) string {
var selectorString string
for key, value := range selector {
selectorString = selectorString + key + "=" + value + ","
}
selectorString = selectorString[:len(selectorString)-len(",")]
return selectorString
}
// GetUsedNode returns a map of nodes for which a pool has already been created.
// Note : Filter function is not used from the node builder package as it needs the
// CSP builder package, which can cause import loops.
func (ac *Config) GetUsedNode() (map[string]bool, error) {
usedNode := make(map[string]bool)
cspList, err := csp.
NewKubeClient().
WithNamespace(ac.Namespace).
List(
metav1.
ListOptions{LabelSelector: string(apis.CStorPoolClusterCPK) + "=" + ac.CSPC.Name},
)
if err != nil {
return nil, errors.Wrap(err, "could not list already created csp(s)")
}
for _, cspObj := range cspList.Items {
usedNode[cspObj.Labels[string(apis.HostNameCPK)]] = true
}
return usedNode, nil
}
// GetBDListForNode returns a list of BD from the pool spec.
// TODO : Move it to CStorPoolCluster package
func (ac *Config) GetBDListForNode(pool *apis.PoolSpec) []string {
var BDList []string
for _, group := range pool.RaidGroups {
for _, bd := range group.BlockDevices {
BDList = append(BDList, bd.BlockDeviceName)
}
}
return BDList
}
// ClaimBDsForNode claims the given block devices for the node.
// If a block device is already claimed for any other CSPC, an error is returned.
// If a block device is already claimed for the same CSPC, it is left as it is and can be used
// for pool provisioning.
// Unclaimed block devices are claimed.
func (ac *Config) ClaimBDsForNode(BD []string) error {
pendingClaim := 0
for _, bdName := range BD {
bdAPIObj, err := bd.NewKubeClient().WithNamespace(ac.Namespace).Get(bdName, metav1.GetOptions{})
if err != nil {
return errors.Wrapf(err, "error in getting details for BD {%s} whether it is claimed", bdName)
}
if bd.BuilderForAPIObject(bdAPIObj).BlockDevice.IsClaimed() {
IsClaimedBDUsable, errBD := ac.IsClaimedBDUsable(bdAPIObj)
if errBD != nil {
return errors.Wrapf(err, "error in getting details for BD {%s} for usability", bdName)
}
if !IsClaimedBDUsable {
return errors.Errorf("BD {%s} already in use", bdName)
}
continue
}
err = ac.ClaimBD(bdAPIObj)
if err != nil {
return errors.Wrapf(err, "Failed to claim BD {%s}", bdName)
}
pendingClaim++
}
if pendingClaim > 0 {
return errors.Errorf("%d block device claims are pending", pendingClaim)
}
return nil
}
// ClaimBD claims a given BlockDevice
func (ac *Config) ClaimBD(bdObj *ndmapis.BlockDevice) error {
newBDCObj, err := bdc.NewBuilder().
WithName("bdc-cstor-" + string(bdObj.UID)).
WithNamespace(ac.Namespace).
WithLabels(map[string]string{string(apis.CStorPoolClusterCPK): ac.CSPC.Name}).
WithBlockDeviceName(bdObj.Name).
WithHostName(bdObj.Labels[string(apis.HostNameCPK)]).
WithCapacity(volume.ByteCount(bdObj.Spec.Capacity.Storage)).
WithCSPCOwnerReference(ac.CSPC).
WithFinalizer(cspc.CSPCFinalizer).
Build()
if err != nil {
return errors.Wrapf(err, "failed to build block device claim for bd {%s}", bdObj.Name)
}
_, err = bdc.NewKubeClient().WithNamespace(ac.Namespace).Create(newBDCObj.Object)
if k8serror.IsAlreadyExists(err) {
klog.Infof("BDC for BD {%s} already created", bdObj.Name)
return nil
}
if err != nil {
return errors.Wrapf(err, "failed to create block device claim for bd {%s}", bdObj.Name)
}
return nil
}
// IsClaimedBDUsable returns true if the passed BD is already claimed and can be
// used for provisioning
func (ac *Config) IsClaimedBDUsable(bdAPIObj *ndmapis.BlockDevice) (bool, error) {
bdObj := bd.BuilderForAPIObject(bdAPIObj)
if bdObj.BlockDevice.IsClaimed() {
bdcName := bdObj.BlockDevice.Object.Spec.ClaimRef.Name
bdcAPIObject, err := bdc.NewKubeClient().WithNamespace(ac.Namespace).Get(bdcName, metav1.GetOptions{})
if err != nil {
return false, errors.Wrapf(err, "could not get block device claim for block device {%s}", bdAPIObj.Name)
}
bdcObj := bdc.BuilderForAPIObject(bdcAPIObject)
if bdcObj.BDC.HasLabel(string(apis.CStorPoolClusterCPK), ac.CSPC.Name) {
return true, nil
}
} else {
return false, errors.Errorf("block device {%s} is not claimed", bdAPIObj.Name)
}
return false, nil
}
// ValidatePoolSpec validates the pool spec.
// TODO: Fix the following function -- (currently a mock that always returns true)
func ValidatePoolSpec(pool *apis.PoolSpec) bool {
return true
}
|
Java
|
#ifndef __PropertyGroup_h__
#define __PropertyGroup_h__
/* -------------------------------------------------------------------------- *
* OpenSim: PropertyGroup.h *
* -------------------------------------------------------------------------- *
* The OpenSim API is a toolkit for musculoskeletal modeling and simulation. *
* See http://opensim.stanford.edu and the NOTICE file for more information. *
* OpenSim is developed at Stanford University and supported by the US *
* National Institutes of Health (U54 GM072970, R24 HD065690) and by DARPA *
* through the Warrior Web program. *
* *
* Copyright (c) 2005-2017 Stanford University and the Authors *
* Author(s): Peter Loan *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); you may *
* not use this file except in compliance with the License. You may obtain a *
* copy of the License at http://www.apache.org/licenses/LICENSE-2.0. *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* -------------------------------------------------------------------------- */
#ifdef _WIN32
#pragma warning( disable : 4251 )
#endif
// INCLUDE
#include "osimCommonDLL.h"
#include "Property_Deprecated.h"
#include "Array.h"
namespace OpenSim {
#ifdef SWIG
#ifdef OSIMCOMMON_API
#undef OSIMCOMMON_API
#define OSIMCOMMON_API
#endif
#endif
//=============================================================================
//=============================================================================
/**
* A class implementing a property group.
*
* @author Peter Loan
* @version 1.0
*/
class OSIMCOMMON_API PropertyGroup
{
//=============================================================================
// DATA
//=============================================================================
private:
/** Name of the group. */
std::string _name;
protected:
/** Pointers to the properties in the group. */
Array<Property_Deprecated*> _properties;
//=============================================================================
// METHODS
//=============================================================================
//--------------------------------------------------------------------------
// CONSTRUCTION
//--------------------------------------------------------------------------
public:
PropertyGroup();
PropertyGroup(std::string& aName);
PropertyGroup(const PropertyGroup &aGroup);
virtual ~PropertyGroup();
virtual PropertyGroup* clone() const;
#ifndef SWIG
PropertyGroup& operator=(const PropertyGroup &aGroup);
bool operator<(const PropertyGroup &aGroup) const;
bool operator==(const PropertyGroup& aGroup) const;
#endif
void copyData(const PropertyGroup &aGroup);
void clear();
bool contains(const std::string& aName) const;
void add(Property_Deprecated* aProperty);
void remove(Property_Deprecated* aProperty);
#ifndef SWIG
const Array<Property_Deprecated*>& getProperties() const { return _properties; }
#endif
Property_Deprecated* get(int aIndex);
int getPropertyIndex(Property_Deprecated* aProperty) const;
// NAME
void setName(const std::string &aName) { _name = aName; }
const std::string& getName() const { return _name; }
private:
void setNull();
//=============================================================================
}; // END of class PropertyGroup
//=============================================================================
//=============================================================================
} // end of namespace OpenSim
#endif // __PropertyGroup_h__
|
Java
|
define(
['app/models/proto_model'],
function(ProtoModel) {
var Model = ProtoModel.extend({
// matches first part of method name in @remote.method
urlRoot: '/cru_api.order_',
must_be_floats: ['sub_total', 'actual_total'],
});
return Model;
}
);
|
Java
|
#include <cassert>
#include <iostream>
#include <sstream>
#include <boost/program_options/variables_map.hpp>
#include "optimize.h"
#include "online_optimizer.h"
#include "sparse_vector.h"
#include "fdict.h"
using namespace std;
double TestOptimizer(BatchOptimizer* opt) {
cerr << "TESTING NON-PERSISTENT OPTIMIZER\n";
// f(x1,x2,x3) = 4*x1^2 + x1*x2 + x2^2 + x3^2 + 6*x3 + 5
// df/dx1 = 8*x1 + x2
// df/dx2 = 2*x2 + x1
// df/dx3 = 2*x3 + 6
vector<double> x(3);
vector<double> g(3);
x[0] = 8;
x[1] = 8;
x[2] = 8;
double obj = 0;
do {
g[0] = 8 * x[0] + x[1];
g[1] = 2 * x[1] + x[0];
g[2] = 2 * x[2] + 6;
obj = 4 * x[0]*x[0] + x[0] * x[1] + x[1]*x[1] + x[2]*x[2] + 6 * x[2] + 5;
opt->Optimize(obj, g, &x);
cerr << x[0] << " " << x[1] << " " << x[2] << endl;
cerr << " obj=" << obj << "\td/dx1=" << g[0] << " d/dx2=" << g[1] << " d/dx3=" << g[2] << endl;
} while (!opt->HasConverged());
return obj;
}
double TestPersistentOptimizer(BatchOptimizer* opt) {
cerr << "\nTESTING PERSISTENT OPTIMIZER\n";
// f(x1,x2,x3) = 4*x1^2 + x1*x2 + x2^2 + x3^2 + 6*x3 + 5
// df/dx1 = 8*x1 + x2
// df/dx2 = 2*x2 + x1
// df/dx3 = 2*x3 + 6
vector<double> x(3);
vector<double> g(3);
x[0] = 8;
x[1] = 8;
x[2] = 8;
double obj = 0;
string state;
bool converged = false;
while (!converged) {
g[0] = 8 * x[0] + x[1];
g[1] = 2 * x[1] + x[0];
g[2] = 2 * x[2] + 6;
obj = 4 * x[0]*x[0] + x[0] * x[1] + x[1]*x[1] + x[2]*x[2] + 6 * x[2] + 5;
{
if (state.size() > 0) {
istringstream is(state, ios::binary);
opt->Load(&is);
}
opt->Optimize(obj, g, &x);
ostringstream os(ios::binary); opt->Save(&os); state = os.str();
}
cerr << x[0] << " " << x[1] << " " << x[2] << endl;
cerr << " obj=" << obj << "\td/dx1=" << g[0] << " d/dx2=" << g[1] << " d/dx3=" << g[2] << endl;
converged = opt->HasConverged();
if (!converged) {
// now screw up the state (should be undone by Load)
obj += 2.0;
g[1] = -g[2];
vector<double> x2 = x;
try {
opt->Optimize(obj, g, &x2);
} catch (...) { }
}
}
return obj;
}
template <class O>
void TestOptimizerVariants(int num_vars) {
O oa(num_vars);
cerr << "-------------------------------------------------------------------------\n";
cerr << "TESTING: " << oa.Name() << endl;
double o1 = TestOptimizer(&oa);
O ob(num_vars);
double o2 = TestPersistentOptimizer(&ob);
if (o1 != o2) {
cerr << oa.Name() << " VARIANTS PERFORMED DIFFERENTLY!\n" << o1 << " vs. " << o2 << endl;
exit(1);
}
cerr << oa.Name() << " SUCCESS\n";
}
using namespace std::tr1;
void TestOnline() {
size_t N = 20;
double C = 1.0;
double eta0 = 0.2;
std::tr1::shared_ptr<LearningRateSchedule> r(new ExponentialDecayLearningRate(N, eta0, 0.85));
//shared_ptr<LearningRateSchedule> r(new StandardLearningRate(N, eta0));
CumulativeL1OnlineOptimizer opt(r, N, C, std::vector<int>());
assert(r->eta(10) < r->eta(1));
}
int main() {
int n = 3;
TestOptimizerVariants<LBFGSOptimizer>(n);
TestOptimizerVariants<RPropOptimizer>(n);
TestOnline();
return 0;
}
|
Java
|
package com.sequenceiq.freeipa.entity.util;
import com.sequenceiq.cloudbreak.converter.DefaultEnumConverter;
import com.sequenceiq.freeipa.api.v1.kerberos.model.KerberosType;
public class KerberosTypeConverter extends DefaultEnumConverter<KerberosType> {
@Override
public KerberosType getDefault() {
return KerberosType.FREEIPA;
}
}
|
Java
|
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="description" content="Ionic makes it incredibly easy to build beautiful and interactive mobile apps using HTML5 and AngularJS.">
<meta name="keywords" content="html5,javascript,mobile,drifty,ionic,hybrid,phonegap,cordova,native,ios,android,angularjs">
<meta name="author" content="Drifty">
<meta property="og:image" content="http://ionicframework.com/img/ionic-logo-blog.png"/>
<!-- version /docs/1.0.0-beta.10 should not be indexed -->
<meta name="robots" content="noindex">
<title>Services in module ionic - 11 services - Ionic Framework</title>
<link href="/css/site.css?12" rel="stylesheet">
<!--<script src="//cdn.optimizely.com/js/595530035.js"></script>-->
<script type="text/javascript">var _sf_startpt=(new Date()).getTime()</script>
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','//www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-44023830-1', 'ionicframework.com');
ga('send', 'pageview');
</script>
<script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script>
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
</head>
<body class="docs docs-page docs-api">
<nav class="navbar navbar-default horizontal-gradient" role="navigation">
<div class="container">
<div class="navbar-header">
<button type="button" class="navbar-toggle button ionic" data-toggle="collapse" data-target=".navbar-ex1-collapse">
<i class="icon ion-navicon"></i>
</button>
<a class="navbar-brand" href="/">
<img src="/img/ionic-logo-white.svg" width="123" height="43" alt="Ionic Framework">
</a>
<a href="http://blog.ionic.io/announcing-ionic-1-0/" target="_blank">
<img src="/img/ionic1-tag.png" alt="Ionic 1.0 is out!" width="28" height="32" style="margin-left: 140px; margin-top:22px; display:block">
</a>
</div>
<div class="collapse navbar-collapse navbar-ex1-collapse">
<ul class="nav navbar-nav navbar-right">
<li><a class="getting-started-nav nav-link" href="/getting-started/">Getting Started</a></li>
<li><a class="docs-nav nav-link" href="/docs/">Docs</a></li>
<li><a class="nav-link" href="http://ionic.io/support">Support</a></li>
<li><a class="blog-nav nav-link" href="http://blog.ionic.io/">Blog <span id="blog-badge">1</span></a></li>
<li><a class="nav-link" href="http://forum.ionicframework.com/">Forum</a></li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown" role="button" aria-expanded="false">More <span class="caret"></span></a>
<ul class="dropdown-menu" role="menu">
<div class="arrow-up"></div>
<li><a class="products-nav nav-link" href="http://ionic.io/">Ionic.io</a></li>
<li><a class="examples-nav nav-link" href="http://showcase.ionicframework.com/">Showcase</a></li>
<li><a class="nav-link" href="http://jobs.ionic.io/">Job Board</a></li>
<li><a class="nav-link" href="http://market.ionic.io/">Market</a></li>
<li><a class="nav-link" href="http://ionicworldwide.herokuapp.com/">Ionic Worldwide</a></li>
<li><a class="nav-link" href="http://play.ionic.io/">Playground</a></li>
<li><a class="nav-link" href="http://creator.ionic.io/">Creator</a></li>
<li><a class="nav-link" href="http://shop.ionic.io/">Shop</a></li>
<!--<li><a class="nav-link" href="http://ngcordova.com/">ngCordova</a></li>-->
</ul>
</li>
</ul>
</div>
</div>
</nav>
<div class="header horizontal-gradient">
<div class="container">
<h3>Services in module ionic</h3>
<h4>11 services</h4>
</div>
<div class="news">
<div class="container">
<div class="row">
<div class="col-sm-8 news-col">
<div class="picker">
<select onchange="window.location.href=this.options[this.selectedIndex].value">
<option
value="/docs/nightly/api/ionic/service"
>
nightly
</option>
<option
value="/docs/api/ionic/service"
>
1.1.0 (latest)
</option>
<option
value="/docs/1.0.1/api/ionic/service"
>
1.0.1
</option>
<option
value="/docs/1.0.0/api/ionic/service"
>
1.0.0
</option>
<option
value="/docs/1.0.0-rc.5/api/ionic/service"
>
1.0.0-rc.5
</option>
<option
value="/docs/1.0.0-rc.4/api/ionic/service"
>
1.0.0-rc.4
</option>
<option
value="/docs/1.0.0-rc.3/api/ionic/service"
>
1.0.0-rc.3
</option>
<option
value="/docs/1.0.0-rc.2/api/ionic/service"
>
1.0.0-rc.2
</option>
<option
value="/docs/1.0.0-rc.1/api/ionic/service"
>
1.0.0-rc.1
</option>
<option
value="/docs/1.0.0-rc.0/api/ionic/service"
>
1.0.0-rc.0
</option>
<option
value="/docs/1.0.0-beta.14/api/ionic/service"
>
1.0.0-beta.14
</option>
<option
value="/docs/1.0.0-beta.13/api/ionic/service"
>
1.0.0-beta.13
</option>
<option
value="/docs/1.0.0-beta.12/api/ionic/service"
>
1.0.0-beta.12
</option>
<option
value="/docs/1.0.0-beta.11/api/ionic/service"
>
1.0.0-beta.11
</option>
<option
value="/docs/1.0.0-beta.10/api/ionic/service"
>
1.0.0-beta.10
</option>
</select>
</div>
</div>
<div class="col-sm-4 search-col">
<div class="search-bar">
<span class="search-icon ionic"><i class="ion-ios7-search-strong"></i></span>
<input type="search" id="search-input" value="Search">
</div>
</div>
</div>
</div>
</div>
</div>
<div id="search-results" class="search-results" style="display:none">
<div class="container">
<div class="search-section search-api">
<h4>JavaScript</h4>
<ul id="results-api"></ul>
</div>
<div class="search-section search-css">
<h4>CSS</h4>
<ul id="results-css"></ul>
</div>
<div class="search-section search-content">
<h4>Resources</h4>
<ul id="results-content"></ul>
</div>
</div>
</div>
<div class="container content-container">
<div class="row">
<div class="col-md-2 col-sm-3 aside-menu">
<div>
<ul class="nav left-menu">
<li class="menu-title">
<a href="/docs/overview/">Overview</a>
</li>
</ul>
<ul class="nav left-menu">
<li class="menu-title">
<a href="/docs/components/">CSS</a>
</li>
</ul>
<!-- Docs: JavaScript -->
<ul class="nav left-menu active-menu">
<li class="menu-title">
<a href="/docs/1.0.0-beta.10/api/">
JavaScript
</a>
</li>
<!-- Tabs -->
<li class="menu-section">
<a href="#" class="api-section">
Tabs
</a>
<ul>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionTabs/">
ion-tabs
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionTab/">
ion-tab
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/service/$ionicTabsDelegate/">
$ionicTabsDelegate
</a>
</li>
</ul>
</li>
<!-- Side Menus -->
<li class="menu-section">
<a href="#" class="api-section">
Side Menus
</a>
<ul>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionSideMenus/">
ion-side-menus
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionSideMenuContent/">
ion-side-menu-content
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionSideMenu/">
ion-side-menu
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/menuToggle/">
menu-toggle
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/menuClose/">
menu-close
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/service/$ionicSideMenuDelegate/">
$ionicSideMenuDelegate
</a>
</li>
</ul>
</li>
<!-- Navigation -->
<li class="menu-section">
<a href="#" class="api-section">
Navigation
</a>
<ul>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionNavView/">
ion-nav-view
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionView/">
ion-view
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionNavBar/">
ion-nav-bar
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionNavButtons/">
ion-nav-buttons
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionNavBackButton/">
ion-nav-back-button
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/navClear/">
nav-clear
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/service/$ionicNavBarDelegate/">
$ionicNavBarDelegate
</a>
</li>
</ul>
</li>
<!-- Headers/Footers -->
<li class="menu-section">
<a href="#" class="api-section">
Headers/Footers
</a>
<ul>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionHeaderBar/">
ion-header-bar
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionFooterBar/">
ion-footer-bar
</a>
</li>
</ul>
</li>
<!-- Content -->
<li class="menu-section">
<a href="#" class="api-section">
Content
</a>
<ul>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionContent/">
ion-content
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionRefresher/">
ion-refresher
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionPane/">
ion-pane
</a>
</li>
</ul>
</li>
<!-- Scroll -->
<li class="menu-section">
<a href="#" class="api-section">
Scroll
</a>
<ul>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionScroll/">
ion-scroll
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionInfiniteScroll/">
ion-infinite-scroll
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/service/$ionicScrollDelegate/">
$ionicScrollDelegate
</a>
</li>
</ul>
</li>
<!-- Lists -->
<li class="menu-section">
<a href="#" class="api-section">
Lists
</a>
<ul>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionList/">
ion-list
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionItem/">
ion-item
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionDeleteButton/">
ion-delete-button
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionReorderButton/">
ion-reorder-button
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionOptionButton/">
ion-option-button
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/collectionRepeat/">
collection-repeat
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/service/$ionicListDelegate/">
$ionicListDelegate
</a>
</li>
</ul>
</li>
<!-- Form Inputs -->
<li class="menu-section">
<a href="#" class="api-section">
Form Inputs
</a>
<ul>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionCheckbox/">
ion-checkbox
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionRadio/">
ion-radio
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionToggle/">
ion-toggle
</a>
</li>
</ul>
</li>
<!-- Slide Box -->
<li class="menu-section">
<a href="#" class="api-section">
Slide Box
</a>
<ul>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/ionSlideBox/">
ion-slide-box
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/service/$ionicSlideBoxDelegate/">
$ionicSlideBoxDelegate
</a>
</li>
</ul>
</li>
<!-- Modal -->
<li class="menu-section">
<a href="#" class="api-section">
Modal
</a>
<ul>
<li>
<a href="/docs/1.0.0-beta.10/api/service/$ionicModal/">
$ionicModal
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/controller/ionicModal/">
ionicModal
</a>
</li>
</ul>
</li>
<!-- Action Sheet -->
<li class="menu-section">
<a href="#" class="api-section">
Action Sheet
</a>
<ul>
<li>
<a href="/docs/1.0.0-beta.10/api/service/$ionicActionSheet/">
$ionicActionSheet
</a>
</li>
</ul>
</li>
<!-- Popup -->
<li class="menu-section">
<a href="#" class="api-section">
Popup
</a>
<ul>
<li>
<a href="/docs/1.0.0-beta.10/api/service/$ionicPopup/">
$ionicPopup
</a>
</li>
</ul>
</li>
<!-- Loading -->
<li class="menu-section">
<a href="#" class="api-section">
Loading
</a>
<ul>
<li>
<a href="/docs/1.0.0-beta.10/api/service/$ionicLoading/">
$ionicLoading
</a>
</li>
</ul>
<ul>
<li>
<a href="/docs/1.0.0-beta.10/api/object/$ionicLoadingConfig/">
$ionicLoadingConfig
</a>
</li>
</ul>
</li>
<!-- Platform -->
<li class="menu-section">
<a href="#" class="api-section">
Platform
</a>
<ul>
<li>
<a href="/docs/1.0.0-beta.10/api/service/$ionicPlatform/">
$ionicPlatform
</a>
</li>
</ul>
</li>
<!-- Events -->
<li class="menu-section">
<a href="#" class="api-section">
Events
</a>
<ul>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/onHold/">
on-hold
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/onTap/">
on-tap
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/onTouch/">
on-touch
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/onRelease/">
on-release
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/onDrag/">
on-drag
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/onDragUp/">
on-drag-up
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/onDragRight/">
on-drag-right
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/onDragDown/">
on-drag-down
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/onDragLeft/">
on-drag-left
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/onSwipe/">
on-swipe
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/onSwipeUp/">
on-swipe-up
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/onSwipeRight/">
on-swipe-right
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/onSwipeDown/">
on-swipe-down
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/onSwipeLeft/">
on-swipe-left
</a>
</li>
</ul>
</li>
<!-- Gesture -->
<li class="menu-section">
<a href="#" class="api-section">
Gesture
</a>
<ul>
<li>
<a href="/docs/1.0.0-beta.10/api/service/$ionicGesture/">
$ionicGesture
</a>
</li>
</ul>
</li>
<!-- Backdrop -->
<li class="menu-section">
<a href="#" class="api-section">
Backdrop
</a>
<ul>
<li>
<a href="/docs/1.0.0-beta.10/api/service/$ionicBackdrop/">
$ionicBackdrop
</a>
</li>
</ul>
</li>
<!-- Utility -->
<li class="menu-section">
<a href="#" class="api-section">
Utility
</a>
<ul>
<li>
<a href="/docs/1.0.0-beta.10/api/utility/ionic.Platform/">
ionic.Platform
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/utility/ionic.DomUtil/">
ionic.DomUtil
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/utility/ionic.EventController/">
ionic.EventController
</a>
</li>
</ul>
</li>
<!-- Tap -->
<li class="menu-section">
<a href="/docs/1.0.0-beta.10/api/page/tap/" class="api-section">
Tap & Click
</a>
</li>
<!-- Keyboard -->
<li class="menu-section">
<a href="#" class="api-section">
Keyboard
</a>
<ul>
<li>
<a href="/docs/1.0.0-beta.10/api/page/keyboard/">
Keyboard
</a>
</li>
<li>
<a href="/docs/1.0.0-beta.10/api/directive/keyboardAttach/">
keyboard-attach
</a>
</li>
</ul>
</li>
</ul>
<ul class="nav left-menu">
<li class="menu-title">
<a href="/docs/cli/">CLI</a>
</li>
</ul>
<ul class="nav left-menu">
<li class="menu-title">
<a href="http://learn.ionicframework.com/">Learn Ionic</a>
</li>
</ul>
<ul class="nav left-menu">
<li class="menu-title">
<a href="/docs/guide/">Guide</a>
</li>
</ul>
<ul class="nav left-menu">
<li class="menu-title">
<a href="/docs/ionic-cli-faq/">FAQ</a>
</li>
</ul>
<ul class="nav left-menu">
<li class="menu-title">
<a href="/docs/getting-help/">Getting Help</a>
</li>
</ul>
<ul class="nav left-menu">
<li class="menu-title">
<a href="/docs/concepts/">Ionic Concepts</a>
</li>
</ul>
</div>
</div>
<div class="col-md-10 col-sm-9 main-content">
<table class="table">
<tr>
<th>Name</th>
<th>Description</th>
</tr>
<tr>
<td><a href="/docs/api/service/$ionicScrollDelegate/">$ionicScrollDelegate</a></td>
<td><p>Delegate for controlling scrollViews (created by
<a href="/docs/api/directive/ionContent/"><code>ionContent</code></a> and
<a href="/docs/api/directive/ionScroll/"><code>ionScroll</code></a> directives).</p>
</td>
</tr>
<tr>
<td><a href="/docs/api/service/$ionicNavBarDelegate/">$ionicNavBarDelegate</a></td>
<td><p>Delegate for controlling the <a href="/docs/api/directive/ionNavBar/"><code>ionNavBar</code></a> directive.</p>
</td>
</tr>
<tr>
<td><a href="/docs/api/service/$ionicSideMenuDelegate/">$ionicSideMenuDelegate</a></td>
<td><p>Delegate for controlling the <a href="/docs/api/directive/ionSideMenus/"><code>ionSideMenus</code></a> directive.</p>
</td>
</tr>
<tr>
<td><a href="/docs/api/service/$ionicSlideBoxDelegate/">$ionicSlideBoxDelegate</a></td>
<td><p>Delegate that controls the <a href="/docs/api/directive/ionSlideBox/"><code>ionSlideBox</code></a> directive.</p>
</td>
</tr>
<tr>
<td><a href="/docs/api/service/$ionicTabsDelegate/">$ionicTabsDelegate</a></td>
<td><p>Delegate for controlling the <a href="/docs/api/directive/ionTabs/"><code>ionTabs</code></a> directive.</p>
</td>
</tr>
<tr>
<td><a href="/docs/api/service/$ionicActionSheet/">$ionicActionSheet</a></td>
<td><p>The Action Sheet is a slide-up pane that lets the user choose from a set of options.
Dangerous options are highlighted in red and made obvious.</p>
</td>
</tr>
<tr>
<td><a href="/docs/api/service/$ionicGesture/">$ionicGesture</a></td>
          <td><p>An Angular service exposing
<a href="/docs/api/utility/ionic.EventController/"><code>ionic.EventController</code></a>'s gestures.</p>
</td>
</tr>
<tr>
<td><a href="/docs/api/service/$ionicLoading/">$ionicLoading</a></td>
<td><p>An overlay that can be used to indicate activity while blocking user
interaction.</p>
</td>
</tr>
<tr>
<td><a href="/docs/api/service/$ionicModal/">$ionicModal</a></td>
<td><p>The Modal is a content pane that can go over the user's main view
temporarily. Usually used for making a choice or editing an item.</p>
</td>
</tr>
<tr>
<td><a href="/docs/api/service/$ionicPlatform/">$ionicPlatform</a></td>
<td><p>An angular abstraction of <a href="/docs/api/utility/ionic.Platform/"><code>ionic.Platform</code></a>.</p>
</td>
</tr>
<tr>
<td><a href="/docs/api/service/$ionicPopup/">$ionicPopup</a></td>
          <td><p>The Ionic Popup service makes it easy to programmatically create and show popup
windows that require the user to respond in order to continue:</p>
</td>
</tr>
</table>
</div>
</div>
</div>
<div class="pre-footer">
<div class="row ionic">
<div class="col-sm-6 col-a">
<h4>
<a href="/getting-started/">Getting started <span class="icon ion-arrow-right-c"></span></a>
</h4>
<p>
Learn more about how Ionic was built, why you should use it, and what's included. We'll cover
the basics and help you get started from the ground up.
</p>
</div>
<div class="col-sm-6 col-b">
<h4>
<a href="/docs/">Documentation <span class="icon ion-arrow-right-c"></span></a>
</h4>
<p>
What are you waiting for? Take a look and get coding! Our documentation covers all you need to know
to get an app up and running in minutes.
</p>
</div>
</div>
</div>
<footer class="footer">
<nav class="base-links">
<dl>
<dt>Docs</dt>
<dd><a href="http://ionicframework.com/docs/">Documentation</a></dd>
<dd><a href="http://ionicframework.com/getting-started/">Getting Started</a></dd>
<dd><a href="http://ionicframework.com/docs/overview/">Overview</a></dd>
<dd><a href="http://ionicframework.com/docs/components/">Components</a></dd>
<dd><a href="http://ionicframework.com/docs/api/">JavaScript</a></dd>
<dd><a href="http://ionicframework.com/submit-issue/">Submit Issue</a></dd>
</dl>
<dl>
<dt>Resources</dt>
<dd><a href="http://learn.ionicframework.com/">Learn Ionic</a></dd>
<dd><a href="http://ngcordova.com/">ngCordova</a></dd>
<dd><a href="http://ionicons.com/">Ionicons</a></dd>
<dd><a href="http://creator.ionic.io/">Creator</a></dd>
<dd><a href="http://showcase.ionicframework.com/">Showcase</a></dd>
<dd><a href="http://manning.com/wilken/?a_aid=ionicinactionben&a_bid=1f0a0e1d">The Ionic Book</a></dd>
</dl>
<dl>
<dt>Contribute</dt>
<dd><a href="http://forum.ionicframework.com/">Community Forum</a></dd>
<dd><a href="http://webchat.freenode.net/?randomnick=1&channels=%23ionic&uio=d4">Ionic IRC</a></dd>
<dd><a href="http://ionicframework.com/present-ionic/">Present Ionic</a></dd>
<dd><a href="http://ionicframework.com/contribute/">Contribute</a></dd>
<dd><a href="https://github.com/driftyco/ionic-learn/issues/new">Write for us</a></dd>
<dd><a href="http://shop.ionic.io/">Ionic Shop</a></dd>
</dl>
<dl class="small-break">
<dt>About</dt>
<dd><a href="http://blog.ionic.io/">Blog</a></dd>
<dd><a href="http://ionic.io">Services</a></dd>
<dd><a href="http://drifty.com">Company</a></dd>
<dd><a href="https://s3.amazonaws.com/ionicframework.com/logo-pack.zip">Logo Pack</a></dd>
<dd><a href="mailto:hi@ionicframework.com">Contact</a></dd>
<dd><a href="http://ionicframework.com/jobs/">Jobs</a></dd>
</dl>
<dl>
<dt>Connect</dt>
<dd><a href="https://twitter.com/IonicFramework">Twitter</a></dd>
<dd><a href="https://github.com/driftyco/ionic">GitHub</a></dd>
<dd><a href="https://www.facebook.com/ionicframework">Facebook</a></dd>
<dd><a href="https://plus.google.com/b/112280728135675018538/+Ionicframework/posts">Google+</a></dd>
<dd><a href="https://www.youtube.com/channel/UChYheBnVeCfhCmqZfCUdJQw">YouTube</a></dd>
<dd><a href="https://twitter.com/ionitron">Ionitron</a></dd>
</dl>
</nav>
<div class="newsletter row">
<div class="newsletter-container">
<div class="col-sm-7">
<div class="newsletter-text">Stay in the loop</div>
<div class="sign-up">Sign up to receive emails for the latest updates, features, and news on the framework.</div>
</div>
<form action="http://codiqa.createsend.com/t/t/s/jytylh/" method="post" class="input-group col-sm-5">
<input id="fieldEmail" name="cm-jytylh-jytylh" class="form-control" type="email" placeholder="Email" required />
<span class="input-group-btn">
<button class="btn btn-default" type="submit">Subscribe</button>
</span>
</form>
</div>
</div>
<div class="copy">
<div class="copy-container">
<p class="authors">
Code licensed under <a href="/docs/#license">MIT</a>.
Docs under <a href="https://tldrlegal.com/license/apache-license-2.0-(apache-2.0)">Apache 2</a>
<span>|</span>
© 2013-2015 <a href="http://drifty.com/">Drifty Co</a>
</p>
</div>
</div>
</footer>
<script type="text/javascript">
var _sf_async_config = { uid: 54141, domain: 'ionicframework.com', useCanonical: true };
(function() {
function loadChartbeat() {
window._sf_endpt = (new Date()).getTime();
var e = document.createElement('script');
e.setAttribute('language', 'javascript');
e.setAttribute('type', 'text/javascript');
e.setAttribute('src','//static.chartbeat.com/js/chartbeat.js');
document.body.appendChild(e);
};
var oldonload = window.onload;
window.onload = (typeof window.onload != 'function') ?
loadChartbeat : function() { oldonload(); loadChartbeat(); };
})();
</script>
<script src="//netdna.bootstrapcdn.com/bootstrap/3.0.2/js/bootstrap.min.js"></script>
<script src="/js/site.js?1"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/Cookies.js/0.4.0/cookies.min.js"></script>
<script>
$('.navbar .dropdown').on('show.bs.dropdown', function(e){
//$(this).find('.dropdown-menu').addClass('animated fadeInDown');
});
// ADD SLIDEUP ANIMATION TO DROPDOWN //
$('.navbar .dropdown').on('hide.bs.dropdown', function(e){
//$(this).find('.dropdown-menu').first().stop(true, true).slideUp(200);
//$(this).find('.dropdown-menu').removeClass('animated fadeInDown');
});
try {
var d = new Date('2015-03-20 05:00:00 +0000');
var ts = d.getTime();
var cd = Cookies.get('_iondj');
if(cd) {
cd = JSON.parse(atob(cd));
if(parseInt(cd.lp) < ts) {
var bt = document.getElementById('blog-badge');
bt.style.display = 'block';
}
cd.lp = ts;
} else {
var bt = document.getElementById('blog-badge');
bt.style.display = 'block';
cd = {
lp: ts
}
}
Cookies.set('_iondj', btoa(JSON.stringify(cd)));
} catch(e) {
}
</script>
<div id="fb-root"></div>
<script>(function(d, s, id) {
var js, fjs = d.getElementsByTagName(s)[0];
if (d.getElementById(id)) return;
js = d.createElement(s); js.id = id;
js.src = "//connect.facebook.net/en_US/all.js#xfbml=1";
fjs.parentNode.insertBefore(js, fjs);
}(document, 'script', 'facebook-jssdk'));</script>
</body>
</html>
|
Java
|
#!/usr/bin/env bash
echo "Puppet6 Platform Detection and Installation"
/usr/bin/wget -O - https://raw.githubusercontent.com/petems/puppet-install-shell/master/install_puppet_6_agent.sh | /bin/sh
echo "Install R10k and Hiera-Eyaml"
/opt/puppetlabs/puppet/bin/gem install r10k hiera-eyaml
echo "Retrieve Puppetfile from puppet-maas repo"
#/usr/bin/wget -O /etc/puppetlabs/code/environments/production/Puppetfile https://raw.githubusercontent.com/ppouliot/Puppetfile/master/Puppetfile
/usr/bin/wget -O /etc/puppetlabs/code/environments/production/Puppetfile https://raw.githubusercontent.com/ppouliot/puppet-maas/master/Puppetfile
echo "Run R10k on downloaded Puppetfile"
cd /etc/puppetlabs/code/environments/production && /opt/puppetlabs/puppet/bin/r10k puppetfile install --verbose DEBUG2
/opt/puppetlabs/bin/puppet apply --debug --trace --verbose --modulepath=/etc/puppetlabs/code/environments/production/modules:/etc/puppetlabs/code/modules /etc/puppetlabs/code/environments/production/modules/maas/examples/init.pp
|
Java
|
namespace TrelloToExcel.Trello
{
public class TextData
{
public Emoji3 emoji { get; set; }
}
}
|
Java
|
def power_digit_sum(exponent):
    """Return the sum of the decimal digits of 2 ** exponent."""
    power_of_2 = str(2 ** exponent)
    return sum(int(digit) for digit in power_of_2)
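# Usage sketch (an addition, not part of the original snippet): 2 ** 15 = 32768,
# whose digits sum to 3 + 2 + 7 + 6 + 8 = 26.
if __name__ == '__main__':
    assert power_digit_sum(15) == 26
    print(power_digit_sum(1000))  # digit sum of 2 ** 1000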
|
Java
|
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants for music processing in Magenta."""
# Meter-related constants.
DEFAULT_QUARTERS_PER_MINUTE = 120.0
DEFAULT_STEPS_PER_BAR = 16 # 4/4 music sampled at 4 steps per quarter note.
DEFAULT_STEPS_PER_QUARTER = 4
# Default absolute quantization.
DEFAULT_STEPS_PER_SECOND = 100
# Standard pulses per quarter.
# https://en.wikipedia.org/wiki/Pulses_per_quarter_note
STANDARD_PPQ = 220
# Special melody events.
NUM_SPECIAL_MELODY_EVENTS = 2
MELODY_NOTE_OFF = -1
MELODY_NO_EVENT = -2
# Other melody-related constants.
MIN_MELODY_EVENT = -2
MAX_MELODY_EVENT = 127
MIN_MIDI_PITCH = 0 # Inclusive.
MAX_MIDI_PITCH = 127 # Inclusive.
NUM_MIDI_PITCHES = MAX_MIDI_PITCH - MIN_MIDI_PITCH + 1
NOTES_PER_OCTAVE = 12
# Velocity-related constants.
MIN_MIDI_VELOCITY = 1 # Inclusive.
MAX_MIDI_VELOCITY = 127 # Inclusive.
# Program-related constants.
MIN_MIDI_PROGRAM = 0
MAX_MIDI_PROGRAM = 127
# MIDI programs that typically sound unpitched.
UNPITCHED_PROGRAMS = (
list(range(96, 104)) + list(range(112, 120)) + list(range(120, 128)))
# Chord symbol for "no chord".
NO_CHORD = 'N.C.'
# The indices of the pitch classes in a major scale.
MAJOR_SCALE = [0, 2, 4, 5, 7, 9, 11]
# NOTE_KEYS[note] = The major keys that note belongs to.
# ex. NOTE_KEYS[0] lists all the major keys that contain the note C,
# which are:
# [0, 1, 3, 5, 7, 8, 10]
# [C, C#, D#, F, G, G#, A#]
#
# 0 = C
# 1 = C#
# 2 = D
# 3 = D#
# 4 = E
# 5 = F
# 6 = F#
# 7 = G
# 8 = G#
# 9 = A
# 10 = A#
# 11 = B
#
# NOTE_KEYS can be generated using the code below, but is explicitly declared
# for readability:
# NOTE_KEYS = [[j for j in range(12) if (i - j) % 12 in MAJOR_SCALE]
# for i in range(12)]
NOTE_KEYS = [
[0, 1, 3, 5, 7, 8, 10],
[1, 2, 4, 6, 8, 9, 11],
[0, 2, 3, 5, 7, 9, 10],
[1, 3, 4, 6, 8, 10, 11],
[0, 2, 4, 5, 7, 9, 11],
[0, 1, 3, 5, 6, 8, 10],
[1, 2, 4, 6, 7, 9, 11],
[0, 2, 3, 5, 7, 8, 10],
[1, 3, 4, 6, 8, 9, 11],
[0, 2, 4, 5, 7, 9, 10],
[1, 3, 5, 6, 8, 10, 11],
[0, 2, 4, 6, 7, 9, 11]
]
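# Verification sketch (an addition, not part of the original module): re-derive
# the table from MAJOR_SCALE exactly as the comment above describes, and confirm
# it matches the explicit declaration.
_derived_note_keys = [[j for j in range(12) if (i - j) % 12 in MAJOR_SCALE]
                      for i in range(12)]
assert _derived_note_keys == NOTE_KEYS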
|
Java
|
# Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing classes related to AWS CloudWatch Logs."""
import json
from perfkitbenchmarker import resource
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.aws import util
class LogGroup(resource.BaseResource):
"""Class representing a CloudWatch log group."""
def __init__(self, region, name, retention_in_days=7):
super(LogGroup, self).__init__()
self.region = region
self.name = name
self.retention_in_days = retention_in_days
def _Create(self):
"""Create the log group."""
create_cmd = util.AWS_PREFIX + [
'--region', self.region,
'logs', 'create-log-group',
'--log-group-name', self.name
]
vm_util.IssueCommand(create_cmd)
def _Delete(self):
"""Delete the log group."""
delete_cmd = util.AWS_PREFIX + [
'--region', self.region,
'logs', 'delete-log-group',
'--log-group-name', self.name
]
vm_util.IssueCommand(delete_cmd, raise_on_failure=False)
def Exists(self):
"""Returns True if the log group exists."""
describe_cmd = util.AWS_PREFIX + [
'--region', self.region,
'logs', 'describe-log-groups',
'--log-group-name-prefix', self.name,
'--no-paginate'
]
stdout, _, _ = vm_util.IssueCommand(describe_cmd)
log_groups = json.loads(stdout)['logGroups']
group = next((group for group in log_groups
if group['logGroupName'] == self.name), None)
return bool(group)
def _PostCreate(self):
"""Set the retention policy."""
put_cmd = util.AWS_PREFIX + [
'--region', self.region,
'logs', 'put-retention-policy',
'--log-group-name', self.name,
'--retention-in-days', str(self.retention_in_days)
]
vm_util.IssueCommand(put_cmd)
def GetLogs(region, stream_name, group_name, token=None):
"""Fetches the JSON formatted log stream starting at the token."""
get_cmd = util.AWS_PREFIX + [
'--region', region,
'logs', 'get-log-events',
'--start-from-head',
'--log-group-name', group_name,
'--log-stream-name', stream_name,
]
if token:
get_cmd.extend(['--next-token', token])
stdout, _, _ = vm_util.IssueCommand(get_cmd)
return json.loads(stdout)
def GetLogStreamAsString(region, stream_name, log_group):
"""Returns the messages of the log stream as a string."""
log_lines = []
token = None
events = []
while token is None or events:
response = GetLogs(region, stream_name, log_group, token)
events = response['events']
token = response['nextForwardToken']
for event in events:
log_lines.append(event['message'])
return '\n'.join(log_lines)
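# Usage sketch (an addition; the region, group, and stream names below are purely
# illustrative): create a log group, read one stream back as text, then clean up.
# BaseResource supplies the public Create()/Delete() wrappers around _Create()
# and _Delete().
if __name__ == '__main__':
    group = LogGroup(region='us-east-1', name='pkb-example-group')
    group.Create()
    try:
        print(GetLogStreamAsString('us-east-1', 'example-stream', 'pkb-example-group'))
    finally:
        group.Delete()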
|
Java
|
package com.lyubenblagoev.postfixrest.security;
import com.lyubenblagoev.postfixrest.entity.User;
import com.lyubenblagoev.postfixrest.repository.UserRepository;
import org.springframework.security.core.userdetails.UserDetails;
import org.springframework.security.core.userdetails.UserDetailsService;
import org.springframework.security.core.userdetails.UsernameNotFoundException;
import org.springframework.stereotype.Service;
import java.util.Optional;
@Service
public class CustomUserDetailsService implements UserDetailsService {
private final UserRepository userRepository;
public CustomUserDetailsService(UserRepository userRepository) {
this.userRepository = userRepository;
}
@Override
public UserDetails loadUserByUsername(String username) throws UsernameNotFoundException {
return userRepository.findByEmail(username)
.map(u -> new UserPrincipal(u))
.orElseThrow(() -> new UsernameNotFoundException("No user found for " + username));
}
}
|
Java
|
/*
* Copyright (c) 2021 Citrix Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package network
/**
* Binding object which returns the resources bound to vrid_binding.
*/
type Vridbinding struct {
/**
* Integer value that uniquely identifies the VMAC address.<br/>Minimum value = 1<br/>Maximum value = 255
*/
Id int `json:"id,omitempty"`
}
|
Java
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"io"
"os"
"strconv"
"strings"
"time"
"github.com/spf13/cobra"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/klog"
"k8s.io/kops/cmd/kops/util"
api "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/cloudinstances"
"k8s.io/kops/pkg/featureflag"
"k8s.io/kops/pkg/instancegroups"
"k8s.io/kops/pkg/pretty"
"k8s.io/kops/upup/pkg/fi/cloudup"
"k8s.io/kops/util/pkg/tables"
"k8s.io/kubernetes/pkg/kubectl/util/i18n"
"k8s.io/kubernetes/pkg/kubectl/util/templates"
)
var (
rollingupdateLong = pretty.LongDesc(i18n.T(`
This command updates a kubernetes cluster to match the cloud and kops specifications.
To perform a rolling update, you need to update the cloud resources first with the command
` + pretty.Bash("kops update cluster") + `.
If rolling-update does not report that the cluster needs to be rolled, you can force the cluster to be
rolled with the force flag. Rolling update drains and validates the cluster by default. A cluster is
deemed validated when all required nodes are running and all pods in the kube-system namespace are operational.
When a node is deleted, rolling-update sleeps the interval for the node type, and then tries for the same period
of time for the cluster to be validated. For instance, setting --master-interval=3m causes rolling-update
to wait for 3 minutes after a master is rolled, and another 3 minutes for the cluster to stabilize and pass
validation.
Note: terraform users will need to run all of the following commands from the same directory
` + pretty.Bash("kops update cluster --target=terraform") + ` then ` + pretty.Bash("terraform plan") + ` then
` + pretty.Bash("terraform apply") + ` prior to running ` + pretty.Bash("kops rolling-update cluster") + `.`))
rollingupdateExample = templates.Examples(i18n.T(`
# Preview a rolling-update.
kops rolling-update cluster
# Roll the currently selected kops cluster with defaults.
# Nodes will be drained and the cluster will be validated between node replacement.
kops rolling-update cluster --yes
# Roll the k8s-cluster.example.com kops cluster,
# do not fail if the cluster does not validate,
# wait 8 min to create new node, and wait at least
# 8 min to validate the cluster.
kops rolling-update cluster k8s-cluster.example.com --yes \
--fail-on-validate-error="false" \
--master-interval=8m \
--node-interval=8m
# Roll the k8s-cluster.example.com kops cluster,
# do not validate the cluster because of the cloudonly flag.
# Force the entire cluster to roll, even if rolling update
# reports that the cluster does not need to be rolled.
kops rolling-update cluster k8s-cluster.example.com --yes \
--cloudonly \
--force
# Roll the k8s-cluster.example.com kops cluster,
# only roll the node instancegroup,
# use the new drain and validate functionality.
kops rolling-update cluster k8s-cluster.example.com --yes \
--fail-on-validate-error="false" \
--node-interval 8m \
--instance-group nodes
`))
rollingupdateShort = i18n.T(`Rolling update a cluster.`)
)
// RollingUpdateOptions is the command Object for a Rolling Update.
type RollingUpdateOptions struct {
Yes bool
Force bool
CloudOnly bool
	// The following two variables control behavior while kops is validating a cluster
	// during a rolling update.
// FailOnDrainError fail rolling-update if drain errors.
FailOnDrainError bool
// FailOnValidate fail the cluster rolling-update when the cluster
// does not validate, after a validation period.
FailOnValidate bool
// PostDrainDelay is the duration of a pause after a drain operation
PostDrainDelay time.Duration
// ValidationTimeout is the timeout for validation to succeed after the drain and pause
ValidationTimeout time.Duration
// MasterInterval is the minimum time to wait after stopping a master node. This does not include drain and validate time.
MasterInterval time.Duration
// NodeInterval is the minimum time to wait after stopping a (non-master) node. This does not include drain and validate time.
NodeInterval time.Duration
// BastionInterval is the minimum time to wait after stopping a bastion. This does not include drain and validate time.
BastionInterval time.Duration
	// Interactive rolling-update prompts the user to continue after each instance is updated.
Interactive bool
ClusterName string
// InstanceGroups is the list of instance groups to rolling-update;
// if not specified, all instance groups will be updated
InstanceGroups []string
// InstanceGroupRoles is the list of roles we should rolling-update
// if not specified, all instance groups will be updated
InstanceGroupRoles []string
}
func (o *RollingUpdateOptions) InitDefaults() {
o.Yes = false
o.Force = false
o.CloudOnly = false
o.FailOnDrainError = false
o.FailOnValidate = true
o.MasterInterval = 15 * time.Second
o.NodeInterval = 15 * time.Second
o.BastionInterval = 15 * time.Second
o.Interactive = false
o.PostDrainDelay = 5 * time.Second
o.ValidationTimeout = 15 * time.Minute
}
func NewCmdRollingUpdateCluster(f *util.Factory, out io.Writer) *cobra.Command {
var options RollingUpdateOptions
options.InitDefaults()
cmd := &cobra.Command{
Use: "cluster",
Short: rollingupdateShort,
Long: rollingupdateLong,
Example: rollingupdateExample,
}
cmd.Flags().BoolVarP(&options.Yes, "yes", "y", options.Yes, "Perform rolling update immediately, without --yes rolling-update executes a dry-run")
cmd.Flags().BoolVar(&options.Force, "force", options.Force, "Force rolling update, even if no changes")
cmd.Flags().BoolVar(&options.CloudOnly, "cloudonly", options.CloudOnly, "Perform rolling update without confirming progress with k8s")
cmd.Flags().DurationVar(&options.ValidationTimeout, "validation-timeout", options.ValidationTimeout, "Maximum time to wait for a cluster to validate")
cmd.Flags().DurationVar(&options.MasterInterval, "master-interval", options.MasterInterval, "Time to wait between restarting masters")
cmd.Flags().DurationVar(&options.NodeInterval, "node-interval", options.NodeInterval, "Time to wait between restarting nodes")
cmd.Flags().DurationVar(&options.BastionInterval, "bastion-interval", options.BastionInterval, "Time to wait between restarting bastions")
cmd.Flags().DurationVar(&options.PostDrainDelay, "post-drain-delay", options.PostDrainDelay, "Time to wait after draining each node")
cmd.Flags().BoolVarP(&options.Interactive, "interactive", "i", options.Interactive, "Prompt to continue after each instance is updated")
cmd.Flags().StringSliceVar(&options.InstanceGroups, "instance-group", options.InstanceGroups, "List of instance groups to update (defaults to all if not specified)")
cmd.Flags().StringSliceVar(&options.InstanceGroupRoles, "instance-group-roles", options.InstanceGroupRoles, "If specified, only instance groups of the specified role will be updated (e.g. Master,Node,Bastion)")
if featureflag.DrainAndValidateRollingUpdate.Enabled() {
cmd.Flags().BoolVar(&options.FailOnDrainError, "fail-on-drain-error", true, "The rolling-update will fail if draining a node fails.")
cmd.Flags().BoolVar(&options.FailOnValidate, "fail-on-validate-error", true, "The rolling-update will fail if the cluster fails to validate.")
}
cmd.Run = func(cmd *cobra.Command, args []string) {
err := rootCommand.ProcessArgs(args)
if err != nil {
exitWithError(err)
return
}
clusterName := rootCommand.ClusterName()
if clusterName == "" {
exitWithError(fmt.Errorf("--name is required"))
return
}
options.ClusterName = clusterName
err = RunRollingUpdateCluster(f, os.Stdout, &options)
if err != nil {
exitWithError(err)
return
}
}
return cmd
}
func RunRollingUpdateCluster(f *util.Factory, out io.Writer, options *RollingUpdateOptions) error {
clientset, err := f.Clientset()
if err != nil {
return err
}
cluster, err := GetCluster(f, options.ClusterName)
if err != nil {
return err
}
contextName := cluster.ObjectMeta.Name
clientGetter := genericclioptions.NewConfigFlags()
clientGetter.Context = &contextName
config, err := clientGetter.ToRESTConfig()
if err != nil {
return fmt.Errorf("cannot load kubecfg settings for %q: %v", contextName, err)
}
var nodes []v1.Node
var k8sClient kubernetes.Interface
if !options.CloudOnly {
k8sClient, err = kubernetes.NewForConfig(config)
if err != nil {
return fmt.Errorf("cannot build kube client for %q: %v", contextName, err)
}
nodeList, err := k8sClient.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to reach the kubernetes API.\n")
fmt.Fprintf(os.Stderr, "Use --cloudonly to do a rolling-update without confirming progress with the k8s API\n\n")
return fmt.Errorf("error listing nodes in cluster: %v", err)
}
if nodeList != nil {
nodes = nodeList.Items
}
}
list, err := clientset.InstanceGroupsFor(cluster).List(metav1.ListOptions{})
if err != nil {
return err
}
var instanceGroups []*api.InstanceGroup
for i := range list.Items {
instanceGroups = append(instanceGroups, &list.Items[i])
}
warnUnmatched := true
if len(options.InstanceGroups) != 0 {
var filtered []*api.InstanceGroup
for _, instanceGroupName := range options.InstanceGroups {
var found *api.InstanceGroup
for _, ig := range instanceGroups {
if ig.ObjectMeta.Name == instanceGroupName {
found = ig
break
}
}
if found == nil {
return fmt.Errorf("InstanceGroup %q not found", instanceGroupName)
}
filtered = append(filtered, found)
}
instanceGroups = filtered
// Don't warn if we find more ASGs than IGs
warnUnmatched = false
}
if len(options.InstanceGroupRoles) != 0 {
var filtered []*api.InstanceGroup
for _, ig := range instanceGroups {
for _, role := range options.InstanceGroupRoles {
if ig.Spec.Role == api.InstanceGroupRole(strings.Title(strings.ToLower(role))) {
filtered = append(filtered, ig)
continue
}
}
}
instanceGroups = filtered
// Don't warn if we find more ASGs than IGs
warnUnmatched = false
}
cloud, err := cloudup.BuildCloud(cluster)
if err != nil {
return err
}
groups, err := cloud.GetCloudGroups(cluster, instanceGroups, warnUnmatched, nodes)
if err != nil {
return err
}
{
t := &tables.Table{}
t.AddColumn("NAME", func(r *cloudinstances.CloudInstanceGroup) string {
return r.InstanceGroup.ObjectMeta.Name
})
t.AddColumn("STATUS", func(r *cloudinstances.CloudInstanceGroup) string {
return r.Status()
})
t.AddColumn("NEEDUPDATE", func(r *cloudinstances.CloudInstanceGroup) string {
return strconv.Itoa(len(r.NeedUpdate))
})
t.AddColumn("READY", func(r *cloudinstances.CloudInstanceGroup) string {
return strconv.Itoa(len(r.Ready))
})
t.AddColumn("MIN", func(r *cloudinstances.CloudInstanceGroup) string {
return strconv.Itoa(r.MinSize)
})
t.AddColumn("MAX", func(r *cloudinstances.CloudInstanceGroup) string {
return strconv.Itoa(r.MaxSize)
})
t.AddColumn("NODES", func(r *cloudinstances.CloudInstanceGroup) string {
var nodes []*v1.Node
for _, i := range r.Ready {
if i.Node != nil {
nodes = append(nodes, i.Node)
}
}
for _, i := range r.NeedUpdate {
if i.Node != nil {
nodes = append(nodes, i.Node)
}
}
return strconv.Itoa(len(nodes))
})
var l []*cloudinstances.CloudInstanceGroup
for _, v := range groups {
l = append(l, v)
}
columns := []string{"NAME", "STATUS", "NEEDUPDATE", "READY", "MIN", "MAX"}
if !options.CloudOnly {
columns = append(columns, "NODES")
}
err := t.Render(l, out, columns...)
if err != nil {
return err
}
}
needUpdate := false
for _, group := range groups {
if len(group.NeedUpdate) != 0 {
needUpdate = true
}
}
if !needUpdate && !options.Force {
fmt.Printf("\nNo rolling-update required.\n")
return nil
}
if !options.Yes {
fmt.Printf("\nMust specify --yes to rolling-update.\n")
return nil
}
if featureflag.DrainAndValidateRollingUpdate.Enabled() {
klog.V(2).Infof("Rolling update with drain and validate enabled.")
}
d := &instancegroups.RollingUpdateCluster{
MasterInterval: options.MasterInterval,
NodeInterval: options.NodeInterval,
BastionInterval: options.BastionInterval,
Interactive: options.Interactive,
Force: options.Force,
Cloud: cloud,
K8sClient: k8sClient,
ClientGetter: clientGetter,
FailOnDrainError: options.FailOnDrainError,
FailOnValidate: options.FailOnValidate,
CloudOnly: options.CloudOnly,
ClusterName: options.ClusterName,
PostDrainDelay: options.PostDrainDelay,
ValidationTimeout: options.ValidationTimeout,
}
return d.RollingUpdate(groups, cluster, list)
}
|
Java
|
"""Auto-generated file, do not edit by hand. BM metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_BM = PhoneMetadata(id='BM', country_code=1, international_prefix='011',
general_desc=PhoneNumberDesc(national_number_pattern='(?:441|[58]\\d\\d|900)\\d{7}', possible_length=(10,), possible_length_local_only=(7,)),
fixed_line=PhoneNumberDesc(national_number_pattern='441(?:[46]\\d\\d|5(?:4\\d|60|89))\\d{4}', example_number='4414123456', possible_length=(10,), possible_length_local_only=(7,)),
mobile=PhoneNumberDesc(national_number_pattern='441(?:[2378]\\d|5[0-39])\\d{5}', example_number='4413701234', possible_length=(10,), possible_length_local_only=(7,)),
toll_free=PhoneNumberDesc(national_number_pattern='8(?:00|33|44|55|66|77|88)[2-9]\\d{6}', example_number='8002123456', possible_length=(10,)),
premium_rate=PhoneNumberDesc(national_number_pattern='900[2-9]\\d{6}', example_number='9002123456', possible_length=(10,)),
personal_number=PhoneNumberDesc(national_number_pattern='52(?:3(?:[2-46-9][02-9]\\d|5(?:[02-46-9]\\d|5[0-46-9]))|4(?:[2-478][02-9]\\d|5(?:[034]\\d|2[024-9]|5[0-46-9])|6(?:0[1-9]|[2-9]\\d)|9(?:[05-9]\\d|2[0-5]|49)))\\d{4}|52[34][2-9]1[02-9]\\d{4}|5(?:00|2[12]|33|44|66|77|88)[2-9]\\d{6}', example_number='5002345678', possible_length=(10,)),
national_prefix='1',
national_prefix_for_parsing='1|([2-8]\\d{6})$',
national_prefix_transform_rule='441\\1',
leading_digits='441',
mobile_number_portable_region=True)
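# Usage sketch (an addition; assumes the python-phonenumbers package that this
# auto-generated metadata belongs to): parse and inspect the fixed-line example
# number declared above.
import phonenumbers
_example = phonenumbers.parse('+1 441 412 3456', None)
print(phonenumbers.is_valid_number(_example))
print(phonenumbers.format_number(_example, phonenumbers.PhoneNumberFormat.NATIONAL))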
|
Java
|
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<title>{% if page.title %}{{ page.title }}{% else %}{{ site.title }}{% endif %}</title>
<meta name="viewport" content="width=device-width">
<meta name="description" content="{{ site.description }}">
<link rel="canonical" href="{{ page.url | replace:'index.html','' | prepend: site.baseurl | prepend: site.url }}">
<!-- Custom CSS & Bootstrap Core CSS - Uses Bootswatch Flatly Theme: http://bootswatch.com/flatly/ -->
<link rel="stylesheet" href="{{ "/style.css" | prepend: site.baseurl }}">
<!-- Custom Fonts -->
<link rel="stylesheet" href="{{ "/css/font-awesome/css/font-awesome.min.css" | prepend: site.baseurl }}">
<link href="//fonts.googleapis.com/css?family=Lora:400,700,400italic,700italic" rel="stylesheet" type="text/css">
<link href="//fonts.googleapis.com/css?family=Montserrat:400,700" rel="stylesheet" type="text/css">
<!-- HTML5 Shim and Respond.js IE8 support of HTML5 elements and media queries -->
<!-- WARNING: Respond.js doesn't work if you view the page via file:// -->
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/libs/html5shiv/3.7.0/html5shiv.js"></script>
<script src="https://oss.maxcdn.com/libs/respond.js/1.4.2/respond.min.js"></script>
<![endif]-->
</head>
|
Java
|
/*******************************************************************************
* Copyright (c) 2012, 2015 Pivotal Software, Inc.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License,
* Version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Contributors:
* Pivotal Software, Inc. - initial API and implementation
********************************************************************************/
package cn.dockerfoundry.ide.eclipse.server.core.internal;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import org.cloudfoundry.client.lib.domain.CloudService;
import org.eclipse.core.runtime.IStatus;
import org.eclipse.core.runtime.Status;
import org.eclipse.wst.server.core.IModule;
import cn.dockerfoundry.ide.eclipse.server.core.internal.application.ModuleChangeEvent;
import cn.dockerfoundry.ide.eclipse.server.core.internal.client.CloudRefreshEvent;
/**
* Fires server refresh events. Only one handler is active per workbench runtime
* session.
*
*/
public class ServerEventHandler {
private static ServerEventHandler handler;
public static ServerEventHandler getDefault() {
if (handler == null) {
handler = new ServerEventHandler();
}
return handler;
}
private final List<CloudServerListener> applicationListeners = new CopyOnWriteArrayList<CloudServerListener>();
public synchronized void addServerListener(CloudServerListener listener) {
if (listener != null && !applicationListeners.contains(listener)) {
applicationListeners.add(listener);
}
}
public synchronized void removeServerListener(CloudServerListener listener) {
applicationListeners.remove(listener);
}
public void fireServicesUpdated(DockerFoundryServer server, List<DockerApplicationService> services) {
fireServerEvent(new CloudRefreshEvent(server, null, CloudServerEvent.EVENT_UPDATE_SERVICES, services));
}
public void firePasswordUpdated(DockerFoundryServer server) {
fireServerEvent(new CloudServerEvent(server, CloudServerEvent.EVENT_UPDATE_PASSWORD));
}
public void fireServerRefreshed(DockerFoundryServer server) {
fireServerEvent(new CloudServerEvent(server, CloudServerEvent.EVENT_SERVER_REFRESHED));
}
public void fireAppInstancesChanged(DockerFoundryServer server, IModule module) {
fireServerEvent(new ModuleChangeEvent(server, CloudServerEvent.EVENT_INSTANCES_UPDATED, module,
Status.OK_STATUS));
}
public void fireApplicationRefreshed(DockerFoundryServer server, IModule module) {
fireServerEvent(new ModuleChangeEvent(server, CloudServerEvent.EVENT_APPLICATION_REFRESHED, module,
Status.OK_STATUS));
}
public void fireAppDeploymentChanged(DockerFoundryServer server, IModule module) {
fireServerEvent(new ModuleChangeEvent(server, CloudServerEvent.EVENT_APP_DEPLOYMENT_CHANGED, module,
Status.OK_STATUS));
}
public void fireError(DockerFoundryServer server, IModule module, IStatus status) {
fireServerEvent(new ModuleChangeEvent(server, CloudServerEvent.EVENT_CLOUD_OP_ERROR, module, status));
}
public synchronized void fireServerEvent(CloudServerEvent event) {
CloudServerListener[] listeners = applicationListeners.toArray(new CloudServerListener[0]);
for (CloudServerListener listener : listeners) {
listener.serverChanged(event);
}
}
}
|
Java
|
# AUTOGENERATED FILE
FROM balenalib/artik530-debian:buster-run
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
ca-certificates \
curl \
\
# .NET Core dependencies
libc6 \
libgcc1 \
libgssapi-krb5-2 \
libicu63 \
libssl1.1 \
libstdc++6 \
zlib1g \
&& rm -rf /var/lib/apt/lists/*
# Configure web servers to bind to port 80 when present
ENV ASPNETCORE_URLS=http://+:80 \
# Enable detection of running in a container
DOTNET_RUNNING_IN_CONTAINER=true
# Install .NET Core
ENV DOTNET_VERSION 6.0.0
RUN curl -SL --output dotnet.tar.gz "https://dotnetcli.blob.core.windows.net/dotnet/Runtime/$DOTNET_VERSION/dotnet-runtime-$DOTNET_VERSION-linux-arm.tar.gz" \
&& dotnet_sha512='575037f2e164deaf3bcdd82f7b3f2b5a5784547c5bad4070375c00373722265401b88a81695b919f92ca176f21c1bdf1716f8fce16ab3d301ae666daa8cae750' \
&& echo "$dotnet_sha512 dotnet.tar.gz" | sha512sum -c - \
&& mkdir -p /usr/share/dotnet \
&& tar -zxf dotnet.tar.gz -C /usr/share/dotnet \
&& rm dotnet.tar.gz \
&& ln -s /usr/share/dotnet/dotnet /usr/bin/dotnet
CMD ["echo","'No CMD command was set in Dockerfile! Details about CMD command could be found in Dockerfile Guide section in our Docs. Here's the link: https://balena.io/docs"]
RUN curl -SLO "https://raw.githubusercontent.com/balena-io-library/base-images/44e597e40f2010cdde15b3ba1e397aea3a5c5271/scripts/assets/tests/test-stack@dotnet.sh" \
&& echo "Running test-stack@dotnet" \
&& chmod +x test-stack@dotnet.sh \
&& bash test-stack@dotnet.sh \
&& rm -rf test-stack@dotnet.sh
RUN [ ! -d /.balena/messages ] && mkdir -p /.balena/messages; echo 'Here are a few details about this Docker image (For more information please visit https://www.balena.io/docs/reference/base-images/base-images/): \nArchitecture: ARM v7 \nOS: Debian Buster \nVariant: run variant \nDefault variable(s): UDEV=off \nThe following software stack is preinstalled: \ndotnet 6.0-runtime \nExtra features: \n- Easy way to install packages with `install_packages <package-name>` command \n- Run anywhere with cross-build feature (for ARM only) \n- Keep the container idling with `balena-idle` command \n- Show base image details with `balena-info` command' > /.balena/messages/image-info
RUN echo '#!/bin/sh.real\nbalena-info\nrm -f /bin/sh\ncp /bin/sh.real /bin/sh\n/bin/sh "$@"' > /bin/sh-shim \
&& chmod +x /bin/sh-shim \
&& cp /bin/sh /bin/sh.real \
&& mv /bin/sh-shim /bin/sh
|
Java
|
# AUTOGENERATED FILE
FROM balenalib/jetson-xavier-nx-devkit-seeed-2mic-hat-fedora:33-build
ENV NODE_VERSION 14.18.3
ENV YARN_VERSION 1.22.4
RUN for key in \
6A010C5166006599AA17F08146C2130DFD2497F5 \
; do \
gpg --keyserver pgp.mit.edu --recv-keys "$key" || \
gpg --keyserver keyserver.pgp.com --recv-keys "$key" || \
gpg --keyserver keyserver.ubuntu.com --recv-keys "$key" ; \
done \
&& curl -SLO "http://nodejs.org/dist/v$NODE_VERSION/node-v$NODE_VERSION-linux-arm64.tar.gz" \
&& echo "2d071ca1bc1d0ea1eb259e79b81ebb4387237b2f77b3cf616806534e0030eaa8 node-v$NODE_VERSION-linux-arm64.tar.gz" | sha256sum -c - \
&& tar -xzf "node-v$NODE_VERSION-linux-arm64.tar.gz" -C /usr/local --strip-components=1 \
&& rm "node-v$NODE_VERSION-linux-arm64.tar.gz" \
&& curl -fSLO --compressed "https://yarnpkg.com/downloads/$YARN_VERSION/yarn-v$YARN_VERSION.tar.gz" \
&& curl -fSLO --compressed "https://yarnpkg.com/downloads/$YARN_VERSION/yarn-v$YARN_VERSION.tar.gz.asc" \
&& gpg --batch --verify yarn-v$YARN_VERSION.tar.gz.asc yarn-v$YARN_VERSION.tar.gz \
&& mkdir -p /opt/yarn \
&& tar -xzf yarn-v$YARN_VERSION.tar.gz -C /opt/yarn --strip-components=1 \
&& ln -s /opt/yarn/bin/yarn /usr/local/bin/yarn \
&& ln -s /opt/yarn/bin/yarn /usr/local/bin/yarnpkg \
&& rm yarn-v$YARN_VERSION.tar.gz.asc yarn-v$YARN_VERSION.tar.gz \
&& npm config set unsafe-perm true -g --unsafe-perm \
&& rm -rf /tmp/*
CMD ["echo","'No CMD command was set in Dockerfile! Details about CMD command could be found in Dockerfile Guide section in our Docs. Here's the link: https://balena.io/docs"]
RUN curl -SLO "https://raw.githubusercontent.com/balena-io-library/base-images/8accad6af708fca7271c5c65f18a86782e19f877/scripts/assets/tests/test-stack@node.sh" \
&& echo "Running test-stack@node" \
&& chmod +x test-stack@node.sh \
&& bash test-stack@node.sh \
&& rm -rf test-stack@node.sh
RUN [ ! -d /.balena/messages ] && mkdir -p /.balena/messages; echo $'Here are a few details about this Docker image (For more information please visit https://www.balena.io/docs/reference/base-images/base-images/): \nArchitecture: ARM v8 \nOS: Fedora 33 \nVariant: build variant \nDefault variable(s): UDEV=off \nThe following software stack is preinstalled: \nNode.js v14.18.3, Yarn v1.22.4 \nExtra features: \n- Easy way to install packages with `install_packages <package-name>` command \n- Run anywhere with cross-build feature (for ARM only) \n- Keep the container idling with `balena-idle` command \n- Show base image details with `balena-info` command' > /.balena/messages/image-info
RUN echo $'#!/bin/sh.real\nbalena-info\nrm -f /bin/sh\ncp /bin/sh.real /bin/sh\n/bin/sh "$@"' > /bin/sh-shim \
&& chmod +x /bin/sh-shim \
&& cp /bin/sh /bin/sh.real \
&& mv /bin/sh-shim /bin/sh
|
Java
|
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#pragma once
#include <aws/codecommit/CodeCommit_EXPORTS.h>
#include <aws/codecommit/CodeCommitRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
namespace CodeCommit
{
namespace Model
{
/**
*/
class AWS_CODECOMMIT_API ListTagsForResourceRequest : public CodeCommitRequest
{
public:
ListTagsForResourceRequest();
// Service request name is the Operation name which will send this request out,
    // each operation should have a unique request name, so that we can get the operation's name from this request.
    // Note: this is not true for responses, multiple operations may have the same response name,
    // so we cannot get the operation's name from a response.
inline virtual const char* GetServiceRequestName() const override { return "ListTagsForResource"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>The Amazon Resource Name (ARN) of the resource for which you want to get
* information about tags, if any.</p>
*/
inline const Aws::String& GetResourceArn() const{ return m_resourceArn; }
/**
* <p>The Amazon Resource Name (ARN) of the resource for which you want to get
* information about tags, if any.</p>
*/
inline bool ResourceArnHasBeenSet() const { return m_resourceArnHasBeenSet; }
/**
* <p>The Amazon Resource Name (ARN) of the resource for which you want to get
* information about tags, if any.</p>
*/
inline void SetResourceArn(const Aws::String& value) { m_resourceArnHasBeenSet = true; m_resourceArn = value; }
/**
* <p>The Amazon Resource Name (ARN) of the resource for which you want to get
* information about tags, if any.</p>
*/
inline void SetResourceArn(Aws::String&& value) { m_resourceArnHasBeenSet = true; m_resourceArn = std::move(value); }
/**
* <p>The Amazon Resource Name (ARN) of the resource for which you want to get
* information about tags, if any.</p>
*/
inline void SetResourceArn(const char* value) { m_resourceArnHasBeenSet = true; m_resourceArn.assign(value); }
/**
* <p>The Amazon Resource Name (ARN) of the resource for which you want to get
* information about tags, if any.</p>
*/
inline ListTagsForResourceRequest& WithResourceArn(const Aws::String& value) { SetResourceArn(value); return *this;}
/**
* <p>The Amazon Resource Name (ARN) of the resource for which you want to get
* information about tags, if any.</p>
*/
inline ListTagsForResourceRequest& WithResourceArn(Aws::String&& value) { SetResourceArn(std::move(value)); return *this;}
/**
* <p>The Amazon Resource Name (ARN) of the resource for which you want to get
* information about tags, if any.</p>
*/
inline ListTagsForResourceRequest& WithResourceArn(const char* value) { SetResourceArn(value); return *this;}
/**
* <p>An enumeration token that, when provided in a request, returns the next batch
* of the results.</p>
*/
inline const Aws::String& GetNextToken() const{ return m_nextToken; }
/**
* <p>An enumeration token that, when provided in a request, returns the next batch
* of the results.</p>
*/
inline bool NextTokenHasBeenSet() const { return m_nextTokenHasBeenSet; }
/**
* <p>An enumeration token that, when provided in a request, returns the next batch
* of the results.</p>
*/
inline void SetNextToken(const Aws::String& value) { m_nextTokenHasBeenSet = true; m_nextToken = value; }
/**
* <p>An enumeration token that, when provided in a request, returns the next batch
* of the results.</p>
*/
inline void SetNextToken(Aws::String&& value) { m_nextTokenHasBeenSet = true; m_nextToken = std::move(value); }
/**
* <p>An enumeration token that, when provided in a request, returns the next batch
* of the results.</p>
*/
inline void SetNextToken(const char* value) { m_nextTokenHasBeenSet = true; m_nextToken.assign(value); }
/**
* <p>An enumeration token that, when provided in a request, returns the next batch
* of the results.</p>
*/
inline ListTagsForResourceRequest& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;}
/**
* <p>An enumeration token that, when provided in a request, returns the next batch
* of the results.</p>
*/
inline ListTagsForResourceRequest& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;}
/**
* <p>An enumeration token that, when provided in a request, returns the next batch
* of the results.</p>
*/
inline ListTagsForResourceRequest& WithNextToken(const char* value) { SetNextToken(value); return *this;}
private:
Aws::String m_resourceArn;
bool m_resourceArnHasBeenSet;
Aws::String m_nextToken;
bool m_nextTokenHasBeenSet;
};
} // namespace Model
} // namespace CodeCommit
} // namespace Aws
|
Java
|
<?php
/**
* MyBB 1.6 Spanish Language Pack
* Copyright 2010 MyBB Group, All Rights Reserved
*
* $Id: report.lang.php 5016 2010-08-10 12:32:33Z Anio_pke $
*/
$l['report_post'] = "Reportar mensaje";
$l['report_to_mod'] = "Reporta este mensaje a un moderador";
$l['only_report'] = "Solo debes reportar mensajes que sean spam, de publicidad, o abusivos.";
$l['report_reason'] = "Tu razón para reportar este mensaje:";
$l['thank_you'] = "Gracias.";
$l['post_reported'] = "El mensaje se ha reportado correctamente. Ya puedes cerrar la ventana.";
$l['report_error'] = "Error";
$l['no_reason'] = "No puedes reportar un mensaje sin especificar la razón del reporte.";
$l['go_back'] = "Volver";
$l['close_window'] = "Cerrar ventana";
?>
|
Java
|
# Laboulbenia neoguineensis Speg. SPECIES
#### Status
ACCEPTED
#### According to
Index Fungorum
#### Published in
null
#### Original name
Laboulbenia neoguineensis Speg.
### Remarks
null
|
Java
|
<html dir="LTR">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=Windows-1252" />
<meta name="vs_targetSchema" content="http://schemas.microsoft.com/intellisense/ie5" />
<title>BasicConfigurator.Configure Method ()</title>
<xml>
</xml>
<link rel="stylesheet" type="text/css" href="MSDN.css" />
</head>
<body id="bodyID" class="dtBODY">
<div id="nsbanner">
<div id="bannerrow1">
<table class="bannerparthead" cellspacing="0">
<tr id="hdr">
<td class="runninghead">Apache log4net SDK Documentation - Microsoft .NET Framework 4.0</td>
<td class="product">
</td>
</tr>
</table>
</div>
<div id="TitleRow">
<h1 class="dtH1">BasicConfigurator.Configure Method ()</h1>
</div>
</div>
<div id="nstext">
<p> Initializes the log4net system with a default configuration. </p>
<div class="syntax">
<span class="lang">[Visual Basic]</span>
<br />Overloads Public Shared Function Configure() As <a href="ms-help://MS.NETFrameworkSDKv1.1/cpref/html/frlrfSystemCollectionsICollectionClassTopic.htm">ICollection</a></div>
<div class="syntax">
<span class="lang">[C#]</span>
<br />public static <a href="ms-help://MS.NETFrameworkSDKv1.1/cpref/html/frlrfSystemCollectionsICollectionClassTopic.htm">ICollection</a> Configure();</div>
<h4 class="dtH4">Remarks</h4>
<p> Initializes the log4net logging system using a <a href="log4net.Appender.ConsoleAppender.html">ConsoleAppender</a> that will write to <code>Console.Out</code>. The log messages are formatted using the <a href="log4net.Layout.PatternLayout.html">PatternLayout</a> layout object with the <a href="log4net.Layout.PatternLayout.DetailConversionPattern.html">DetailConversionPattern</a> layout style. </p>
<h4 class="dtH4">See Also</h4><p><a href="log4net.Config.BasicConfigurator.html">BasicConfigurator Class</a> | <a href="log4net.Config.html">log4net.Config Namespace</a> | <a href="log4net.Config.BasicConfigurator.Configure_overloads.html">BasicConfigurator.Configure Overload List</a></p><object type="application/x-oleobject" classid="clsid:1e2a7bd0-dab9-11d0-b93a-00c04fc99f9e" viewastext="true" style="display: none;"><param name="Keyword" value="Configure method"></param><param name="Keyword" value="Configure method, BasicConfigurator class"></param><param name="Keyword" value="BasicConfigurator.Configure method"></param></object><hr /><div id="footer"><a href='http://logging.apache.org/log4net/'>Copyright 2004-2013 The Apache Software Foundation.</a><br></br>Apache log4net, Apache and log4net are trademarks of The Apache Software Foundation.</div></div>
</body>
</html>
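The Remarks above describe what the parameterless overload sets up; a minimal C# sketch of how a caller typically initializes logging with it (class and message names are illustrative):

using log4net;
using log4net.Config;

class Program
{
    static void Main()
    {
        // Configure() with no arguments attaches a ConsoleAppender writing to Console.Out.
        BasicConfigurator.Configure();

        ILog log = LogManager.GetLogger(typeof(Program));
        log.Info("log4net initialized with the default console configuration.");
    }
}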
|
Java
|
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
// This file can be replaced during build by using the `fileReplacements` array.
// `ng build --prod` replaces `environment.ts` with `environment.prod.ts`.
// The list of file replacements can be found in `angular.json`.
import { version } from '../../../../package.json';
export const environment = {
production: false,
interactiveVisualizerUrl: `https://storage.googleapis.com/interactive_visualizer/${version}/index.html`,
models: [
{
displayName: 'Birds V1',
description: 'AIY natural world birds quantized classification model',
type: 'image classification',
metadataUrl: 'https://storage.googleapis.com/tfhub-visualizers/google/aiy/vision/classifier/birds_V1/1/metadata.json',
},
{
displayName: 'Insects V1',
description: 'AIY natural world insects classification model',
type: 'image classification',
metadataUrl: 'https://storage.googleapis.com/tfhub-visualizers/google/aiy/vision/classifier/insects_V1/1/metadata.json',
},
{
displayName: 'Mobile Object Localizer V1',
description: 'Mobile model to localize objects in an image',
type: 'object detection',
metadataUrl: 'https://storage.googleapis.com/tfhub-visualizers/google/object_detection/mobile_object_localizer_v1/1/metadata.json',
},
],
};
/*
* For easier debugging in development mode, you can import the following file
* to ignore zone related error stack frames such as `zone.run`, `zoneDelegate.invokeTask`.
*
* This import should be commented out in production mode because it will have a negative impact
* on performance if an error is thrown.
*/
// import 'zone.js/dist/zone-error'; // Included with Angular CLI.
|
Java
|
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Diagnostics;
namespace MT5LiquidityIndicator.Net.Settings
{
[DisplayName("Line Settings")]
public class LineSettings
{
#region construction
public LineSettings()
{
this.Volume = 0;
this.m_bidColor = Color.Black;
this.m_askColor = Color.Black;
}
internal LineSettings(LineSettings settings)
{
this.Volume = settings.Volume;
this.m_bidColor = settings.m_bidColor;
this.m_askColor = settings.m_askColor;
}
internal LineSettings(double volume, Color bidColor, Color askColor)
{
this.Volume = volume;
this.m_bidColor = bidColor;
this.m_askColor = askColor;
}
#endregion
#region properties
[DefaultValue(1)]
public double Volume
{
get
{
return m_volume;
}
set
{
if ((value < m_minVolume) || (value > m_maxVolume))
{
string message = string.Format("Volum can be from {0} to {1}", m_minVolume, m_maxVolume);
throw new ArgumentOutOfRangeException("value", value, message);
}
m_volume = value;
}
}
[DisplayName("Bid Color")]
[DefaultValue(typeof(Color), "Black")]
public Color BidColor
{
get
{
return m_bidColor;
}
set
{
m_bidColor = NormalizeColor(value);
}
}
[DisplayName("Ask Color")]
[DefaultValue(typeof(Color), "Black")]
public Color AskColor
{
get
{
return m_askColor;
}
set
{
m_askColor = NormalizeColor(value);
}
}
#endregion
#region private members
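// Stored colors are kept fully opaque; any translucent value is rewritten with its alpha forced to 255.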
private static Color NormalizeColor(Color value)
{
if (255 == value.A)
{
return value;
}
Color result = Color.FromArgb(255, value.R, value.G, value.B);
return result;
}
#endregion
#region overridden methods
public override string ToString()
{
string result = string.Format("Volume = {0}", this.Volume);
return result;
}
#endregion
#region members
private double m_volume;
private const double m_minVolume = 0;
private const double m_maxVolume = 10000;
private Color m_bidColor;
private Color m_askColor;
#endregion
}
}
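A short usage sketch of the class above (values and the demo class name are illustrative); it exercises the Volume range check and the alpha normalization applied by the color setters:

using System;
using System.Drawing;
using MT5LiquidityIndicator.Net.Settings;

class LineSettingsDemo
{
    static void Main()
    {
        var line = new LineSettings();
        line.Volume = 50;                                 // accepted: within [0, 10000]
        line.BidColor = Color.FromArgb(128, 0, 128, 255); // stored with alpha forced to 255
        line.AskColor = Color.Red;

        Console.WriteLine(line);                          // prints "Volume = 50"
        Console.WriteLine(line.BidColor.A);               // prints 255

        try
        {
            line.Volume = 20000;                          // outside the allowed range
        }
        catch (ArgumentOutOfRangeException ex)
        {
            Console.WriteLine(ex.Message);
        }
    }
}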
|
Java
|