focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Builds a fresh {@link Configuration} containing only the resolved Python
 * dependency settings derived from the given config.
 */
public static Configuration configurePythonDependencies(ReadableConfig config) {
    final Configuration dependencyConfig = new Configuration();
    final PythonDependencyManager dependencyManager = new PythonDependencyManager(config);
    dependencyManager.applyToConfiguration(dependencyConfig);
    return dependencyConfig;
}
// Verifies that an explicitly configured PYTHON_PATH is carried over unchanged
// by configurePythonDependencies.
@Test
void testPythonPath() {
    String pyPath = "venv/bin/python3/lib64/python3.7/site-packages/:venv/bin/python3/lib/python3.7/site-packages/";
    Configuration config = new Configuration();
    config.set(PythonOptions.PYTHON_PATH, pyPath);
    Configuration actual = configurePythonDependencies(config);
    Configuration expectedConfiguration = new Configuration();
    expectedConfiguration.set(PythonOptions.PYTHON_PATH, pyPath);
    verifyConfiguration(expectedConfiguration, actual);
}
/**
 * Creates a {@link Materialization}: converts each transform described by
 * {@code info} through a {@link TransformVisitor} and hands the result to the
 * materialization factory together with the delegate and schema.
 */
public Materialization create(
    final StreamsMaterialization delegate,
    final MaterializationInfo info,
    final QueryId queryId,
    final QueryContext.Stacker contextStacker
) {
  final TransformVisitor visitor = new TransformVisitor(queryId, contextStacker);
  final List<Transform> converted = info.getTransforms().stream()
      .map(xform -> xform.visit(visitor))
      .collect(Collectors.toList());
  return materializationFactory.create(delegate, info.getSchema(), converted);
}
// Verifies that a predicate transform returning a non-empty Optional passes the
// row through unchanged.
@Test
public void shouldBuildMaterializationWithPositivePredicateTransform() {
    // Given:
    factory.create(materialization, info, queryId, contextStacker);
    when(predicate.transform(any(), any(), any()))
        .thenAnswer(inv -> Optional.of(inv.getArgument(1)));
    final Transform transform = getTransform(1);

    // When:
    final Optional<GenericRow> result = transform.apply(keyIn, rowIn, ctx);

    // Then:
    assertThat(result, is(Optional.of(rowIn)));
}
// Convenience overload: delegates to the two-argument variant with a hard-coded
// false second argument (the flag's semantics are defined by the overload, not
// visible here).
@Override
public SuspensionReasons verifyGroupGoingDownIsFine(ClusterApi clusterApi)
        throws HostStateChangeDeniedException {
    return verifyGroupGoingDownIsFine(clusterApi, false);
}
// Happy path: a group going down within the allowed percentage is accepted.
// Argument meanings are defined by the test helper elsewhere in the class.
@Test
public void verifyGroupGoingDownIsFine_percentageIsFine() throws HostStateChangeDeniedException {
    verifyGroupGoingDownIsFine(false, Optional.empty(), 9, true);
}
/**
 * Returns the {@code index}-th chunk of {@code message}, each chunk at most
 * {@code maxLength} bytes, or {@code null} when the message has already been
 * fully consumed by earlier chunks.
 */
@Nullable
@Override
public byte[] chunk(@NonNull final byte[] message,
                    @IntRange(from = 0) final int index,
                    @IntRange(from = 20) final int maxLength) {
    // Byte offset of this chunk within the full message.
    final int start = index * maxLength;
    final int size = Math.min(maxLength, message.length - start);
    if (size <= 0) {
        return null;
    }
    final byte[] chunkBytes = new byte[size];
    System.arraycopy(message, start, chunkBytes, 0, size);
    return chunkBytes;
}
// With MTU 43 the payload per chunk is MTU - 3 bytes; chunk index 2 must equal
// the corresponding substring of the source text.
@Test
public void chunk_43() {
    final int MTU = 43;
    final DefaultMtuSplitter splitter = new DefaultMtuSplitter();
    final byte[] result = splitter.chunk(text.getBytes(), 2, MTU - 3);
    assertArrayEquals(text.substring(2 * (MTU - 3), 3 * (MTU - 3)).getBytes(), result);
}
// Best-effort recursive delete of the temp directory; a missing directory is a
// no-op and IO failures are logged rather than propagated.
public void clean() {
    try {
        if (tempDir.exists()) {
            Files.walkFileTree(tempDir.toPath(), DeleteRecursivelyFileVisitor.INSTANCE);
        }
    } catch (IOException e) {
        LOG.error("Failed to delete temp folder", e);
    }
}
// A second clean() on an already-deleted directory must neither throw nor log
// at ERROR level.
@Test
public void clean_does_not_fail_if_directory_has_already_been_deleted() throws Exception {
    File dir = temp.newFolder();
    DefaultTempFolder underTest = new DefaultTempFolder(dir);
    underTest.clean();
    assertThat(dir).doesNotExist();

    // second call does not fail, nor log ERROR logs
    underTest.clean();
    assertThat(logTester.logs(Level.ERROR)).isEmpty();
}
// FEEL list contains(list, element): a null list is an invalid-parameter error;
// a null element falls back to plain List.contains (searching for a null entry);
// otherwise both sides are number-coerced so e.g. 1 matches BigDecimal.ONE.
public FEELFnResult<Boolean> invoke(@ParameterName("list") List list, @ParameterName("element") Object element) {
    if ( list == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null"));
    }
    if (element == null) {
        return FEELFnResult.ofResult(list.contains(element));
    }
    // Coerce once, then scan with semantic-comparison equality until found.
    Object e = NumberEvalHelper.coerceNumber(element);
    boolean found = false;
    ListIterator<?> it = list.listIterator();
    while (it.hasNext() && !found) {
        Object next = NumberEvalHelper.coerceNumber(it.next());
        found = itemEqualsSC(e, next);
    }
    return FEELFnResult.ofResult(found);
}
// Membership checks, including number coercion: BigDecimal.ONE matches Integer 1.
@Test
void invokeContains() {
    FunctionTestUtil.assertResult(listContainsFunction.invoke(Arrays.asList(1, 2, "test"), "test"), true);
    FunctionTestUtil.assertResult(listContainsFunction.invoke(Arrays.asList(1, 2, "test"), 1), true);
    FunctionTestUtil.assertResult(listContainsFunction.invoke(Arrays.asList(1, 2, "test"), BigDecimal.ONE), true);
}
// A half-precision value is NaN when, with the sign bit masked off, its
// exponent+significand bits exceed the positive-infinity bit pattern.
public static boolean isNaN(short h) {
    return (h & EXPONENT_SIGNIFICAND_MASK) > POSITIVE_INFINITY;
}
// NaN detection over the special values and both signed NaN payload ranges.
@Test
public void testIsNaN() {
    assertFalse(Float16.isNaN(POSITIVE_INFINITY));
    assertFalse(Float16.isNaN(NEGATIVE_INFINITY));
    assertFalse(Float16.isNaN(POSITIVE_ZERO));
    assertFalse(Float16.isNaN(NEGATIVE_ZERO));
    assertTrue(Float16.isNaN(NaN));
    assertTrue(Float16.isNaN((short) 0x7c01));
    assertTrue(Float16.isNaN((short) 0x7c18));
    assertTrue(Float16.isNaN((short) 0xfc01));
    assertTrue(Float16.isNaN((short) 0xfc98));
    assertFalse(Float16.isNaN(MAX_VALUE));
    assertFalse(Float16.isNaN(LOWEST_VALUE));
    assertFalse(Float16.isNaN(Float16.toFloat16(-128.3f)));
    assertFalse(Float16.isNaN(Float16.toFloat16(128.3f)));
}
// Returns the fully-qualified domain name as resolved by the injected supplier.
public String getFQDNHostname() {
    return hostNameSupplier.getFqdnHostName();
}
// TaskManagerLocation must always expose a non-null FQDN hostname, both for the
// loopback address and for an arbitrary routable address.
// Improvement: the original wrapped the body in try/catch, calling
// e.printStackTrace() and fail(e.getMessage()) — which swallows the stack trace
// from the test report. Declaring `throws Exception` lets the framework report
// the full failure cause.
@Test
void testGetFQDNHostname() throws Exception {
    TaskManagerLocation info1 =
            new TaskManagerLocation(
                    ResourceID.generate(), InetAddress.getByName("127.0.0.1"), 19871);
    assertThat(info1.getFQDNHostname()).isNotNull();

    TaskManagerLocation info2 =
            new TaskManagerLocation(
                    ResourceID.generate(), InetAddress.getByName("1.2.3.4"), 8888);
    assertThat(info2.getFQDNHostname()).isNotNull();
}
/**
 * Orders versions by major, then minor, then patch component; the first
 * non-zero component comparison decides.
 * Improvement: removed the redundant trailing
 * {@code if (result != 0) return result; return result;} — both branches
 * returned the same value.
 */
@Override
public int compareTo( MonetDbVersion mDbVersion ) {
    int result = majorVersion.compareTo( mDbVersion.majorVersion );
    if ( result != 0 ) {
        return result;
    }
    result = minorVersion.compareTo( mDbVersion.minorVersion );
    if ( result != 0 ) {
        return result;
    }
    // Major and minor are equal: the patch comparison is the final answer.
    return patchVersion.compareTo( mDbVersion.patchVersion );
}
// A higher major version must win regardless of minor/patch values.
@Test
public void testCompareVersions_DiffInMajor() throws Exception {
    String dbVersionBigger = "786.5.3";
    String dbVersion = "785.2.2";
    assertEquals( 1, new MonetDbVersion( dbVersionBigger ).compareTo( new MonetDbVersion( dbVersion ) ) );
}
// Credential check with positive/negative caches so repeated attempts skip the
// expensive PBKDF2 hash computation.
@Override
public boolean checkCredentials(String username, String password) {
    if (username == null || password == null) {
        return false;
    }
    Credentials credentials = new Credentials(username, password);
    if (validCredentialsCache.contains(credentials)) {
        return true;
    } else if (invalidCredentialsCache.contains(credentials)) {
        return false;
    }
    // Slow path: recompute the hash and compare with the stored one.
    boolean isValid =
            this.username.equals(username)
            && this.passwordHash.equals(
                    generatePasswordHash(algorithm, salt, iterations, keyLength, password));
    // Remember the outcome either way.
    if (isValid) {
        validCredentialsCache.add(credentials);
    } else {
        invalidCredentialsCache.add(credentials);
    }
    return isValid;
}
// The stored hash comparison must accept a lower-case hex digest.
// Improvement: renamed the local variable `PBKDF2Authenticator` (which shadowed
// its own class name and violated lowerCamelCase) to `authenticator`.
@Test
public void testPBKDF2WithHmacSHA1_lowerCase() throws Exception {
    String algorithm = "PBKDF2WithHmacSHA1";
    int iterations = 1000;
    int keyLength = 128;
    String hash = "17:87:CA:B9:14:73:60:36:8B:20:82:87:92:58:43:B8:A3:85:66:BC:C1:6D:C3:31:6C:1D:47:48:C7:F2:E4:1D:96"
            + ":00:11:F8:4D:94:63:2F:F2:7A:F0:3B:72:63:16:5D:EF:5C:97:CC:EC:59:CB:18:4A:AA:F5:23:63:0B:6E:3B:65"
            + ":E0:72:6E:69:7D:EB:83:05:05:E5:D6:F2:19:99:49:3F:89:DA:DE:83:D7:2B:5B:7D:C9:56:B4:F2:F6:A5:61:29"
            + ":29:ED:DF:4C:4E:8D:EA:DF:47:A2:B0:89:11:86:D4:77:A1:02:E9:0C:26:A4:1E:2A:C1:A8:71:E0:93:8F:A4";
    hash = hash.toLowerCase();
    PBKDF2Authenticator authenticator = new PBKDF2Authenticator(
            "/", VALID_USERNAME, hash, algorithm, SALT, iterations, keyLength);
    // Only the exact valid username/password pair may authenticate.
    for (String username : TEST_USERNAMES) {
        for (String password : TEST_PASSWORDS) {
            boolean expectedIsAuthenticated = VALID_USERNAME.equals(username) && VALID_PASSWORD.equals(password);
            boolean actualIsAuthenticated = authenticator.checkCredentials(username, password);
            assertEquals(expectedIsAuthenticated, actualIsAuthenticated);
        }
    }
}
// Routes an ingress extension message to the registered consensus-module
// extension; without a registered extension the schema id cannot be handled,
// so the message is rejected with a ClusterException.
public ControlledFragmentHandler.Action onExtensionMessage(
    final int actingBlockLength,
    final int templateId,
    final int schemaId,
    final int actingVersion,
    final DirectBuffer buffer,
    final int offset,
    final int length,
    final Header header) {
    if (null != consensusModuleExtension) {
        return consensusModuleExtension.onIngressExtensionMessage(
            actingBlockLength, templateId, schemaId, actingVersion, buffer, offset, length, header);
    }
    throw new ClusterException("expected schemaId=" + MessageHeaderDecoder.SCHEMA_ID + ", actual=" + schemaId);
}
// An extension registered for SCHEMA_ID must receive extension messages verbatim.
@Test
void shouldDelegateHandlingToRegisteredExtension() {
    final ConsensusModuleExtension consensusModuleExtension =
        mock(ConsensusModuleExtension.class, "used adapter");
    when(consensusModuleExtension.supportedSchemaId()).thenReturn(SCHEMA_ID);
    final TestClusterClock clock = new TestClusterClock(TimeUnit.MILLISECONDS);
    ctx.epochClock(clock)
        .clusterClock(clock)
        .consensusModuleExtension(consensusModuleExtension);
    final ConsensusModuleAgent agent = new ConsensusModuleAgent(ctx);

    agent.onExtensionMessage(0, 1, SCHEMA_ID, 0, null, 0, 0, null);

    verify(consensusModuleExtension)
        .onIngressExtensionMessage(0, 1, SCHEMA_ID, 0, null, 0, 0, null);
}
// Returns the suffix starting at the given logical index; 'splitted' (populated
// lazily by split()) maps logical unit indices to char offsets in 'input'.
public String substring(final int beginIndex) {
    split();
    final int beginChar = splitted.get(beginIndex);
    return input.substring(beginChar);
}
// Code-point mode: each surrogate pair and each combining mark counts as one unit.
@Test
public void testSubstringCPs2() {
    final UnicodeHelper lh = new UnicodeHelper("a", Method.CODEPOINTS);
    assertEquals("a", lh.substring(0, 1));
    final UnicodeHelper lh2 = new UnicodeHelper(new String(Character.toChars(0x1f600)), Method.CODEPOINTS);
    assertEquals(new String(Character.toChars(0x1f600)), lh2.substring(0, 1));
    final UnicodeHelper lh3 = new UnicodeHelper(UCSTR, Method.CODEPOINTS);
    assertEquals(new String(Character.toChars(0x1f645)), lh3.substring(0, 1));
    final UnicodeHelper lh4 = new UnicodeHelper("a" + UCSTR + "A", Method.CODEPOINTS);
    assertEquals("a", lh4.substring(0, 1));
    assertEquals(new String(Character.toChars(0x1f645)), lh4.substring(1, 2));
    assertEquals(new String(Character.toChars(0x1f3ff)), lh4.substring(2, 3));
    assertEquals("a" + new String(Character.toChars(0x1f645)), lh4.substring(0, 2));
    final UnicodeHelper lh5 = new UnicodeHelper("k\u035fh", Method.CODEPOINTS);
    assertEquals("k", lh5.substring(0, 1));
    assertEquals("\u035f", lh5.substring(1, 2));
}
// Creates a named FastThreadLocalThread in the configured group, applies the
// daemon flag, and normalizes priority to NORM_PRIORITY.
@Override
public Thread newThread(Runnable r) {
    String name = prefix + "_" + counter.incrementAndGet();
    if (totalSize > 1) {
        // NOTE(review): appends the pool size, not a per-thread index — confirm intended.
        name += "_" + totalSize;
    }
    Thread thread = new FastThreadLocalThread(group, r, name);
    thread.setDaemon(makeDaemons);
    // Inherited priority may differ from NORM_PRIORITY (e.g. from the creating thread).
    if (thread.getPriority() != Thread.NORM_PRIORITY) {
        thread.setPriority(Thread.NORM_PRIORITY);
    }
    return thread;
}
// Threads from a daemon factory must carry the prefix and the daemon flag.
@Test
public void testConstructorWithPrefixAndDaemons() {
    NamedThreadFactory factory = new NamedThreadFactory("prefix", true);
    Thread thread = factory.newThread(() -> {});
    assertThat(thread.getName()).startsWith("prefix");
    assertThat(thread.isDaemon()).isTrue();
}
// Binds this module to a non-null container, runs the subclass hook, and
// returns itself for chaining.
public Module configure(Container container) {
    this.container = checkNotNull(container);
    configureModule();
    return this;
}
// A null container must be rejected eagerly with a NullPointerException.
@Test(expected = NullPointerException.class)
public void configure_throws_NPE_if_container_is_empty() {
    new Module() {
        @Override
        protected void configureModule() {
            // empty
        }
    }.configure(null);
}
/**
 * Validates that {@code str} is neither null nor empty; otherwise throws
 * {@link IllegalArgumentException} carrying the caller-supplied message.
 */
public static void notEmptyString(String str, String message) {
    if (!StringUtils.isEmpty(str)) {
        return;
    }
    throw new IllegalArgumentException(message);
}
// An empty string must trigger IllegalArgumentException.
@Test
void testNotEmptyString() {
    Assertions.assertThrows(
        IllegalArgumentException.class,
        () -> notEmptyString("", "Message can't be null or empty"));
}
// Multiplies two floats exactly by routing through their decimal string forms
// (avoids binary-float rounding artifacts) and returns the double value.
public static double mul(float v1, float v2) {
    return mul(Float.toString(v1), Float.toString(v2)).doubleValue();
}
// Regression for issue I7R2B6: large-value multiplication must not lose precision.
// NOTE(review): this exercises the (Number, double) overload via the explicit
// cast, not the (float, float) overload — confirm the pairing is intended.
@Test
public void issueI7R2B6Test2() {
    final BigDecimal mul = NumberUtil.mul((Number) 15858155520D, 100.0);
    assertEquals("1585815552000", mul.toString());
}
// Serializes close() calls on the wrapped partition group.
@Override
synchronized void close() {
    wrapped.close();
}
// close() must delegate exactly once to the wrapped group.
@Test
public void testClose() {
    synchronizedPartitionGroup.close();
    verify(wrapped, times(1)).close();
}
// Registers the value in etcd under "<key>/<shortUuid>" bound to the service
// lease; any interruption, execution failure, or timeout is wrapped in a
// ShenyuException.
@Override
public void register(final String key, final String value) {
    try {
        KV kvClient = etcdClient.getKVClient();
        // Random suffix so multiple instances can register under the same key.
        String uuid = UUIDUtils.getInstance().generateShortUuid();
        PutOption putOption = PutOption.newBuilder().withPrevKV().withLeaseId(leaseId).build();
        kvClient.put(bytesOf(key + "/" + uuid), bytesOf(value), putOption).get(timeout, TimeUnit.MILLISECONDS);
        LOGGER.info("etcd client key: {} with value: {}", key, value);
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        LOGGER.error("etcd client register (key:{},value:{}) error.", key, value, e);
        throw new ShenyuException(e);
    }
}
// First register() succeeds against the mocked KV client; then the future is
// re-stubbed to throw, which must surface as ShenyuException.
@Test
void registerTest() throws ExecutionException, InterruptedException, TimeoutException {
    final String key = "key";
    final String value = "value";
    final KV kvClient = mock(KV.class);
    when(etcdClient.getKVClient()).thenReturn(kvClient);
    final PutResponse putResponse = mock(PutResponse.class);
    final CompletableFuture<PutResponse> completableFuture = mock(CompletableFuture.class);
    when(completableFuture.get(anyLong(), any(TimeUnit.class)))
        .thenReturn(putResponse);
    when(kvClient.put(any(ByteSequence.class), any(ByteSequence.class), any(PutOption.class)))
        .thenReturn(completableFuture);
    etcdDiscoveryServiceUnderTest.register(key, value);
    doThrow(new InterruptedException()).when(completableFuture).get(anyLong(), any(TimeUnit.class));
    assertThrows(ShenyuException.class, () -> etcdDiscoveryServiceUnderTest.register(key, value));
}
// Maps a Java type to its UDF parameter type using the varargs-specific mapping.
public static ParamType getVarArgsSchemaFromType(final Type type) {
    return getSchemaFromType(type, VARARGS_JAVA_TO_ARG_TYPE);
}
// Boxed Long must map to the LONG param type in variadic position.
@Test
public void shouldGetLongSchemaForLongClassVariadic() {
    assertThat(
        UdfUtil.getVarArgsSchemaFromType(Long.class),
        equalTo(ParamTypes.LONG)
    );
}
// Convenience overload: coerces with no lambda-type mapping (empty map).
static Result coerceUserList(
    final Collection<Expression> expressions,
    final ExpressionTypeManager typeManager
) {
    return coerceUserList(expressions, typeManager, Collections.emptyMap());
}
// Coercing a boolean alongside an integer has no common type and must fail
// with a descriptive KsqlException.
@Test
public void shouldThrowOnIncompatible() {
    // Given:
    final ImmutableList<Expression> expressions = ImmutableList.of(
        new BooleanLiteral(true),
        new IntegerLiteral(10) // <-- can not be coerced to boolean.
    );

    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> CoercionUtil.coerceUserList(expressions, typeManager)
    );

    // Then:
    assertThat(e.getMessage(),
        is("operator does not exist: BOOLEAN = INTEGER (10)" + System.lineSeparator()
            + "Hint: You might need to add explicit type casts.")
    );
}
// Validates the function's secrets map before admission: every value must be a
// map that carries both the id and key entries (names given by idKey/keyKey).
@Override
public void doAdmissionChecks(AppsV1Api appsV1Api, CoreV1Api coreV1Api, String jobNamespace,
                              String jobName, Function.FunctionDetails functionDetails) {
    if (!StringUtils.isEmpty(functionDetails.getSecretsMap())) {
        Type type = new TypeToken<Map<String, Object>>() {
        }.getType();
        Map<String, Object> secretsMap = new Gson().fromJson(functionDetails.getSecretsMap(), type);
        for (Object object : secretsMap.values()) {
            if (object instanceof Map) {
                Map<String, String> kubernetesSecret = (Map<String, String>) object;
                // Need at least the two mandatory entries.
                if (kubernetesSecret.size() < 2) {
                    throw new IllegalArgumentException("Kubernetes Secret should contain id and key");
                }
                if (!kubernetesSecret.containsKey(idKey)) {
                    throw new IllegalArgumentException("Kubernetes Secret should contain id information");
                }
                if (!kubernetesSecret.containsKey(keyKey)) {
                    throw new IllegalArgumentException("Kubernetes Secret should contain key information");
                }
            } else {
                throw new IllegalArgumentException("Kubernetes Secret should be a Map containing id/key pairs");
            }
        }
    }
}
// Admission checks: non-map secret values and maps missing id/key must be
// rejected; a map with both "path" and "key" entries must pass.
@Test
public void testConfigValidation() throws Exception {
    KubernetesSecretsProviderConfigurator provider = new KubernetesSecretsProviderConfigurator();
    // Case 1: secret value is a plain string, not a map — must fail.
    try {
        HashMap<String, Object> map = new HashMap<String, Object>();
        map.put("secretname", "randomsecret");
        Function.FunctionDetails functionDetails =
            Function.FunctionDetails.newBuilder().setSecretsMap(new Gson().toJson(map)).build();
        provider.doAdmissionChecks(null, null, null, null, functionDetails);
        Assert.fail("Non conforming secret object should not validate");
    } catch (Exception e) {
    }
    // Case 2: secret map lacks the mandatory id/key entries — must fail.
    try {
        HashMap<String, Object> map = new HashMap<String, Object>();
        HashMap<String, String> map1 = new HashMap<String, String>();
        map1.put("secretname", "secretvalue");
        map.put("secretname", map1);
        Function.FunctionDetails functionDetails =
            Function.FunctionDetails.newBuilder().setSecretsMap(new Gson().toJson(map)).build();
        provider.doAdmissionChecks(null, null, null, null, functionDetails);
        Assert.fail("Non conforming secret object should not validate");
    } catch (Exception e) {
    }
    // Case 3: well-formed secret map with path and key — must validate.
    try {
        HashMap<String, Object> map = new HashMap<String, Object>();
        HashMap<String, String> map1 = new HashMap<String, String>();
        map1.put("path", "secretvalue");
        map1.put("key", "secretvalue");
        map.put("secretname", map1);
        Function.FunctionDetails functionDetails =
            Function.FunctionDetails.newBuilder().setSecretsMap(new Gson().toJson(map)).build();
        provider.doAdmissionChecks(null, null, null, null, functionDetails);
    } catch (Exception e) {
        Assert.fail("Conforming secret object should validate");
    }
}
// Decodes a little-endian int port followed by an inet address from the buffer,
// appending "address:port" to the builder; returns the number of bytes consumed.
static int dissectSocketAddress(final MutableDirectBuffer buffer, final int offset, final StringBuilder builder) {
    int encodedLength = 0;
    final int port = buffer.getInt(offset + encodedLength, LITTLE_ENDIAN);
    encodedLength += SIZE_OF_INT;
    // Address dissection appends the textual address and reports its own length.
    encodedLength += dissectInetAddress(buffer, offset + encodedLength, builder);
    builder.append(':').append(port);
    return encodedLength;
}
// IPv4 layout: 4-byte port + 4-byte address length + 4 address bytes = 12 bytes.
@Test
void dissectSocketAddressIpv4() {
    final int offset = 16;
    buffer.putInt(offset, 12121, LITTLE_ENDIAN);
    buffer.putInt(offset + SIZE_OF_INT, 4, LITTLE_ENDIAN);
    buffer.putBytes(offset + SIZE_OF_INT * 2, new byte[]{ 127, 0, 0, 1 });

    final int decodedLength = CommonEventDissector.dissectSocketAddress(buffer, offset, builder);

    assertEquals(12, decodedLength);
    assertEquals("127.0.0.1:12121", builder.toString());
}
// Renders the distributed plan as a Graphviz digraph: one subgraph per fragment
// plus edges discovered by walking each fragment's plan tree.
public static String printDistributedFromFragments(List<PlanFragment> allFragments,
        FunctionAndTypeManager functionAndTypeManager, Session session) {
    PlanNodeIdGenerator idGenerator = new PlanNodeIdGenerator();
    Map<PlanFragmentId, PlanFragment> fragmentsById = Maps.uniqueIndex(allFragments, PlanFragment::getId);
    StringBuilder output = new StringBuilder();
    output.append("digraph distributed_plan {\n");
    for (PlanFragment planFragment : allFragments) {
        printFragmentNodes(output, planFragment, idGenerator, functionAndTypeManager, session);
        planFragment.getRoot().accept(new EdgePrinter(output, fragmentsById, idGenerator), null);
    }
    output.append("}\n");
    return output.toString();
}
// Two single-scan fragments must render as two Graphviz subgraphs.
// NOTE(review): the expected string embeds an identity hash ("@1af56f7") of the
// table handle — this looks brittle across JVM runs; confirm it is stable here.
@Test
public void testPrintDistributedFromFragments() {
    List<PlanFragment> allFragments = new ArrayList<>();
    allFragments.add(createTestPlanFragment(0, TEST_TABLE_SCAN_NODE));
    allFragments.add(createTestPlanFragment(1, TEST_TABLE_SCAN_NODE));
    String actual = printDistributedFromFragments(
        allFragments, FUNCTION_AND_TYPE_MANAGER, testSessionBuilder().build());
    String expected = "digraph distributed_plan {\n"
        + "subgraph cluster_0 {\n"
        + "label = \"SOURCE\"\n"
        + "plannode_1[label=\"{TableScan | [TableHandle \\{connectorId='connector_id', connectorHandle='com.facebook.presto.testing.TestingMetadata$TestingTableHandle@1af56f7', layout='Optional.empty'\\}]|Estimates: \\{rows: ? (0B), cpu: ?, memory: ?, network: ?\\}\n"
        + "}\", style=\"rounded, filled\", shape=record, fillcolor=deepskyblue];\n"
        + "}\n"
        + "subgraph cluster_1 {\n"
        + "label = \"SOURCE\"\n"
        + "plannode_1[label=\"{TableScan | [TableHandle \\{connectorId='connector_id', connectorHandle='com.facebook.presto.testing.TestingMetadata$TestingTableHandle@1af56f7', layout='Optional.empty'\\}]|Estimates: \\{rows: ? (0B), cpu: ?, memory: ?, network: ?\\}\n"
        + "}\", style=\"rounded, filled\", shape=record, fillcolor=deepskyblue];\n"
        + "}\n"
        + "}\n";
    assertEquals(actual, expected);
}
// Resolves the plugins directory from the FLINK_PLUGINS_DIR environment
// variable (falling back to the default); empty Optional when it is not an
// existing directory, with a warning logged.
public static Optional<File> getPluginsDir() {
    String pluginsDir =
        System.getenv()
            .getOrDefault(
                ConfigConstants.ENV_FLINK_PLUGINS_DIR,
                ConfigConstants.DEFAULT_FLINK_PLUGINS_DIRS);
    File pluginsDirFile = new File(pluginsDir);
    if (!pluginsDirFile.isDirectory()) {
        LOG.warn("The plugins directory [{}] does not exist.", pluginsDirFile);
        return Optional.empty();
    }
    return Optional.of(pluginsDirFile);
}
// Pointing FLINK_PLUGINS_DIR at a non-existent path must yield an empty Optional.
@Test
void getPluginsDir_nonExistingDirectory_returnsEmpty() throws IOException {
    final Map<String, String> envVariables =
        ImmutableMap.of(
            ConfigConstants.ENV_FLINK_PLUGINS_DIR,
            new File(
                TempDirUtils.newFolder(tempFolder).getAbsoluteFile(),
                "should_not_exist")
                .getAbsolutePath());
    CommonTestUtils.setEnv(envVariables);
    assertThat(PluginConfig.getPluginsDir()).isNotPresent();
}
// Factory for a provider backed by a user-supplied jar; jobClassName may be
// null (entry class derived from the jar manifest by the provider).
public static FromJarEntryClassInformationProvider createFromCustomJar(
    File jarFile, @Nullable String jobClassName) {
    return new FromJarEntryClassInformationProvider(jarFile, jobClassName);
}
// A null jar file must be rejected with NullPointerException.
@Test
void testMissingJar() {
    assertThatThrownBy(
        () -> FromJarEntryClassInformationProvider.createFromCustomJar(
            null, "JobClassName"))
        .isInstanceOf(NullPointerException.class);
}
// Writes a dex encoded value in smali textual form, dispatching on value type:
// booleans/floats/doubles as decimal text, integral types as hex, and
// structured values (annotations, arrays, descriptors) via dedicated writers.
public void writeEncodedValue(EncodedValue encodedValue) throws IOException {
    switch (encodedValue.getValueType()) {
        case ValueType.BOOLEAN:
            writer.write(Boolean.toString(((BooleanEncodedValue) encodedValue).getValue()));
            break;
        case ValueType.BYTE:
            writer.write(String.format("0x%x", ((ByteEncodedValue)encodedValue).getValue()));
            break;
        case ValueType.CHAR:
            // Chars are printed by numeric code point, hence the int cast.
            writer.write(String.format("0x%x", (int)((CharEncodedValue)encodedValue).getValue()));
            break;
        case ValueType.SHORT:
            writer.write(String.format("0x%x", ((ShortEncodedValue)encodedValue).getValue()));
            break;
        case ValueType.INT:
            writer.write(String.format("0x%x", ((IntEncodedValue)encodedValue).getValue()));
            break;
        case ValueType.LONG:
            writer.write(String.format("0x%x", ((LongEncodedValue)encodedValue).getValue()));
            break;
        case ValueType.FLOAT:
            writer.write(Float.toString(((FloatEncodedValue)encodedValue).getValue()));
            break;
        case ValueType.DOUBLE:
            writer.write(Double.toString(((DoubleEncodedValue)encodedValue).getValue()));
            break;
        case ValueType.ANNOTATION:
            writeAnnotation((AnnotationEncodedValue)encodedValue);
            break;
        case ValueType.ARRAY:
            writeArray((ArrayEncodedValue)encodedValue);
            break;
        case ValueType.STRING:
            writeQuotedString(((StringEncodedValue)encodedValue).getValue());
            break;
        case ValueType.FIELD:
            writeFieldDescriptor(((FieldEncodedValue)encodedValue).getValue());
            break;
        case ValueType.ENUM:
            writeFieldDescriptor(((EnumEncodedValue)encodedValue).getValue());
            break;
        case ValueType.METHOD:
            writeMethodDescriptor(((MethodEncodedValue)encodedValue).getValue());
            break;
        case ValueType.TYPE:
            writeType(((TypeEncodedValue)encodedValue).getValue());
            break;
        case ValueType.METHOD_TYPE:
            writeMethodProtoDescriptor(((MethodTypeEncodedValue)encodedValue).getValue());
            break;
        case ValueType.METHOD_HANDLE:
            writeMethodHandle(((MethodHandleEncodedValue)encodedValue).getValue());
            break;
        case ValueType.NULL:
            writer.write("null");
            break;
        default:
            throw new IllegalArgumentException("Unknown encoded value type");
    }
}
// Float values must be written in decimal notation, not hex.
@Test
public void testWriteEncodedValue_float() throws IOException {
    DexFormattedWriter writer = new DexFormattedWriter(output);
    writer.writeEncodedValue(new ImmutableFloatEncodedValue(12.34f));
    Assert.assertEquals("12.34", output.toString());
}
// Produces the dialect-specific ALTER statements; requires at least one column
// definition. Postgres and Oracle have dedicated syntax; MsSql and H2 share one.
public List<String> build() {
    if (columnDefs.isEmpty()) {
        throw new IllegalStateException("No column has been defined");
    }
    switch (dialect.getId()) {
        case PostgreSql.ID:
            return createPostgresQuery();
        case Oracle.ID:
            return createOracleQuery();
        default:
            return createMsSqlAndH2Queries();
    }
}
// MsSql dialect must emit one ALTER COLUMN statement per column definition.
@Test
public void update_columns_on_mssql() {
    assertThat(createSampleBuilder(new MsSql()).build())
        .containsOnly(
            "ALTER TABLE issues ALTER COLUMN value DECIMAL (30,20) NULL",
            "ALTER TABLE issues ALTER COLUMN name NVARCHAR (10) NULL");
}
// Appends a slice of the source array at the writer index, growing capacity as
// needed and advancing the writer index past the written bytes.
@Override
public ByteBuf writeBytes(byte[] src, int srcIndex, int length) {
    ensureWritable(length);
    setBytes(writerIndex, src, srcIndex, length);
    writerIndex += length;
    return this;
}
// Concurrent reads of a sliced buffer into output streams must observe the
// bytes written here; the buffer is released even on failure.
@Test
public void testSliceReadOutputStreamMultipleThreads() throws Exception {
    final byte[] bytes = new byte[8];
    random.nextBytes(bytes);
    final ByteBuf buffer = newBuffer(8);
    buffer.writeBytes(bytes);
    try {
        testReadOutputStreamMultipleThreads(buffer, bytes, true);
    } finally {
        buffer.release();
    }
}
// Returns the text between the outermost 'before'/'after' delimiter pair,
// ignoring delimiters that appear inside single- or double-quoted sections.
// Returns null when the input is null, no pair is found, or the delimiter
// counts are unbalanced.
public static String betweenOuterPair(String text, char before, char after) {
    if (text == null) {
        return null;
    }
    int pos = -1;       // index of the first opening delimiter
    int pos2 = -1;      // index of the last closing delimiter
    int count = 0;      // total opening delimiters seen (outside quotes)
    int count2 = 0;     // total closing delimiters seen (outside quotes)
    boolean singleQuoted = false;
    boolean doubleQuoted = false;
    for (int i = 0; i < text.length(); i++) {
        char ch = text.charAt(i);
        // Track quote state; a quote char of one kind inside the other is literal.
        if (!doubleQuoted && ch == '\'') {
            singleQuoted = !singleQuoted;
        } else if (!singleQuoted && ch == '\"') {
            doubleQuoted = !doubleQuoted;
        }
        if (singleQuoted || doubleQuoted) {
            continue;
        }
        if (ch == before) {
            count++;
        } else if (ch == after) {
            count2++;
        }
        if (ch == before && pos == -1) {
            pos = i;
        } else if (ch == after) {
            pos2 = i;
        }
    }
    if (pos == -1 || pos2 == -1) {
        return null;
    }
    // must be even pairs
    if (count != count2) {
        return null;
    }
    return text.substring(pos + 1, pos2);
}
// Outer-pair extraction: nesting, unbalanced delimiters, quoting, and the
// predicate-based overload.
@Test
public void testBetweenOuterPair() {
    assertEquals("bar(baz)123", StringHelper.betweenOuterPair("foo(bar(baz)123)", '(', ')'));
    assertNull(StringHelper.betweenOuterPair("foo(bar(baz)123))", '(', ')'));
    assertNull(StringHelper.betweenOuterPair("foo(bar(baz123", '(', ')'));
    assertNull(StringHelper.betweenOuterPair("foo)bar)baz123", '(', ')'));
    assertEquals("bar", StringHelper.betweenOuterPair("foo(bar)baz123", '(', ')'));
    assertEquals("'bar', 'baz()123', 123", StringHelper.betweenOuterPair("foo('bar', 'baz()123', 123)", '(', ')'));
    assertTrue(StringHelper.betweenOuterPair("foo(bar)baz123", '(', ')', "bar"::equals).orElse(false));
    assertFalse(StringHelper.betweenOuterPair("foo[bar)baz123", '(', ')', "bar"::equals).orElse(false));
}
// A configuration entry is valid when its conf is valid and the peer and
// learner sets are disjoint; any overlap is logged and rejected.
public boolean isValid() {
    if (!this.conf.isValid()) {
        return false;
    }

    // The peer set and learner set should not have intersection set.
    final Set<PeerId> intersection = listPeers();
    intersection.retainAll(listLearners());
    if (intersection.isEmpty()) {
        return true;
    }
    LOG.error("Invalid conf entry {}, peers and learners have intersection: {}.", this, intersection);
    return false;
}
// Entries with disjoint peers/learners are valid; adding a learner that is
// also a peer invalidates the entry.
@Test
public void testIsValid() {
    ConfigurationEntry entry = TestUtils.getConfEntry("localhost:8081,localhost:8082,localhost:8083", null);
    assertTrue(entry.isValid());
    entry = TestUtils.getConfEntry("localhost:8081,localhost:8082,localhost:8083",
        "localhost:8081,localhost:8082,localhost:8084");
    assertTrue(entry.isValid());
    entry.getConf().addLearner(new PeerId("localhost", 8084));
    assertFalse(entry.isValid());
    entry.getConf().addLearner(new PeerId("localhost", 8081));
    assertFalse(entry.isValid());
}
// Private constructor: instances are obtained via the static factory,
// delegating the numeric id to the identifier base class.
private ClusterId(int id) {
    super(id);
}
// Equality contract: same-id instances are equal; different ids are not.
@Test
public void testEquals() {
    new EqualsTester()
        .addEqualityGroup(clusterId(1), clusterId(1))
        .addEqualityGroup(clusterId(3), clusterId(3)).testEquals();
}
// Persists a locally produced event to the cluster-event collection (marking
// this node as a consumer so the DB cursor skips it) and posts it to the local
// bus immediately; DeadEvents are ignored and Mongo failures only logged.
@Subscribe
public void publishClusterEvent(Object event) {
    if (event instanceof DeadEvent) {
        LOG.debug("Skipping DeadEvent on cluster event bus");
        return;
    }

    final String className = AutoValueUtils.getCanonicalName(event.getClass());
    final ClusterEvent clusterEvent =
        ClusterEvent.create(nodeId.getNodeId(), className, Collections.singleton(nodeId.getNodeId()), event);

    try {
        final String id = dbCollection.save(clusterEvent, WriteConcern.JOURNALED).getSavedId();
        // We are handling a locally generated event, so we can speed up processing by posting it to the local event
        // bus immediately. Due to having added the local node id to its list of consumers, it will not be picked up
        // by the db cursor again, avoiding double processing of the event. See #11263 for details.
        serverEventBus.post(event);
        LOG.debug("Published cluster event with ID <{}> and type <{}>", id, className);
    } catch (MongoException e) {
        LOG.error("Couldn't publish cluster event of type <" + className + ">", e);
    }
}
// AutoValue events must be persisted under their canonical (non-generated)
// class name with the producing node id.
@Test
public void publishClusterEventHandlesAutoValueCorrectly() throws Exception {
    @SuppressWarnings("deprecation")
    DBCollection collection = mongoConnection.getDatabase().getCollection(ClusterEventPeriodical.COLLECTION_NAME);
    DebugEvent event = DebugEvent.create("Node ID", "Test");
    assertThat(collection.count()).isEqualTo(0L);

    clusterEventPeriodical.publishClusterEvent(event);

    verify(clusterEventBus, never()).post(any());
    assertThat(collection.count()).isEqualTo(1L);
    DBObject dbObject = collection.findOne();
    assertThat((String) dbObject.get("producer")).isEqualTo(nodeId.getNodeId());
    assertThat((String) dbObject.get("event_class")).isEqualTo(DebugEvent.class.getCanonicalName());
}
// Dispatches an application event to its type-specific handler; each case
// downcasts to the concrete event type. Unknown types are logged, not thrown.
@SuppressWarnings({"CyclomaticComplexity"})
@Override
public void process(ApplicationEvent event) {
    switch (event.type()) {
        case COMMIT_ASYNC:
            process((AsyncCommitEvent) event);
            return;
        case COMMIT_SYNC:
            process((SyncCommitEvent) event);
            return;
        case POLL:
            process((PollEvent) event);
            return;
        case FETCH_COMMITTED_OFFSETS:
            process((FetchCommittedOffsetsEvent) event);
            return;
        case NEW_TOPICS_METADATA_UPDATE:
            process((NewTopicsMetadataUpdateRequestEvent) event);
            return;
        case ASSIGNMENT_CHANGE:
            process((AssignmentChangeEvent) event);
            return;
        case TOPIC_METADATA:
            process((TopicMetadataEvent) event);
            return;
        case ALL_TOPICS_METADATA:
            process((AllTopicsMetadataEvent) event);
            return;
        case LIST_OFFSETS:
            process((ListOffsetsEvent) event);
            return;
        case RESET_POSITIONS:
            process((ResetPositionsEvent) event);
            return;
        case VALIDATE_POSITIONS:
            process((ValidatePositionsEvent) event);
            return;
        case SUBSCRIPTION_CHANGE:
            process((SubscriptionChangeEvent) event);
            return;
        case UNSUBSCRIBE:
            process((UnsubscribeEvent) event);
            return;
        case CONSUMER_REBALANCE_LISTENER_CALLBACK_COMPLETED:
            process((ConsumerRebalanceListenerCallbackCompletedEvent) event);
            return;
        case COMMIT_ON_CLOSE:
            process((CommitOnCloseEvent) event);
            return;
        case SHARE_FETCH:
            process((ShareFetchEvent) event);
            return;
        case SHARE_ACKNOWLEDGE_SYNC:
            process((ShareAcknowledgeSyncEvent) event);
            return;
        case SHARE_ACKNOWLEDGE_ASYNC:
            process((ShareAcknowledgeAsyncEvent) event);
            return;
        case SHARE_SUBSCRIPTION_CHANGE:
            process((ShareSubscriptionChangeEvent) event);
            return;
        case SHARE_UNSUBSCRIBE:
            process((ShareUnsubscribeEvent) event);
            return;
        case SHARE_ACKNOWLEDGE_ON_CLOSE:
            process((ShareAcknowledgeOnCloseEvent) event);
            return;
        default:
            log.warn("Application event type {} was not expected", event.type());
    }
}
// NOTE(review): this verifies a call made directly on a mock — it exercises the
// mock, not the real dispatch logic; consider using a real processor instance.
@Test
public void testResetPositionsProcess() {
    ApplicationEventProcessor applicationEventProcessor = mock(ApplicationEventProcessor.class);
    ResetPositionsEvent event = new ResetPositionsEvent(calculateDeadlineMs(time, 100));
    applicationEventProcessor.process(event);
    verify(applicationEventProcessor).process(any(ResetPositionsEvent.class));
}
// Derives the schema used when producing output: with no declared schema the
// result is derived entirely from the input (erasing time attributes); a
// declared schema without physical columns is merged on top of the input's
// physical type; otherwise the declared schema is used as-is.
public static ProducingResult createProducingResult(
        ResolvedSchema inputSchema, @Nullable Schema declaredSchema) {
    // no schema has been declared by the user,
    // the schema will be entirely derived from the input
    if (declaredSchema == null) {
        // go through data type to erase time attributes
        final DataType physicalDataType = inputSchema.toSourceRowDataType();
        final Schema schema = Schema.newBuilder().fromRowDataType(physicalDataType).build();
        return new ProducingResult(null, schema, null);
    }

    final List<UnresolvedColumn> declaredColumns = declaredSchema.getColumns();

    // the declared schema does not contain physical information,
    // thus, it only replaces physical columns with metadata rowtime or adds a primary key
    if (declaredColumns.stream().noneMatch(SchemaTranslator::isPhysical)) {
        // go through data type to erase time attributes
        final DataType sourceDataType = inputSchema.toSourceRowDataType();
        final DataType physicalDataType =
            patchDataTypeWithoutMetadataRowtime(sourceDataType, declaredColumns);
        final Schema.Builder builder = Schema.newBuilder();
        builder.fromRowDataType(physicalDataType);
        builder.fromSchema(declaredSchema);
        return new ProducingResult(null, builder.build(), null);
    }

    return new ProducingResult(null, declaredSchema, null);
}
// A derived (empty) declared schema must turn metadata rowtime into a physical
// column with no projections and no explicit physical data type.
@Test
void testOutputToEmptySchema() {
    final ResolvedSchema tableSchema =
        ResolvedSchema.of(
            Column.physical("id", BIGINT()),
            Column.metadata("rowtime", TIMESTAMP_LTZ(3), null, false),
            Column.physical("name", STRING()));

    final ProducingResult result =
        SchemaTranslator.createProducingResult(tableSchema, Schema.derived());

    assertThat(result.getProjections()).isEmpty();
    assertThat(result.getSchema())
        .isEqualTo(
            Schema.newBuilder()
                .column("id", BIGINT())
                .column("rowtime", TIMESTAMP_LTZ(3)) // becomes physical
                .column("name", STRING())
                .build());
    assertThat(result.getPhysicalDataType()).isEmpty();
}
/**
 * Formats the given logging event by running it through the converter chain.
 * A layout that has not been started yields an empty string.
 */
public String doLayout(ILoggingEvent event) {
    return isStarted() ? writeLoopOnConverters(event) : CoreConstants.EMPTY_STRING;
}
// The %contextName conversion word must expand to the logger context's configured name.
@Test
public void contextNameTest() {
    pl.setPattern("%contextName");
    lc.setName("aValue");
    pl.start();
    String val = pl.doLayout(getEventObject());
    assertEquals("aValue", val);
}
/**
 * Builds (and memoizes) the string form of this URI: scheme://authority/path?query.
 *
 * <p>A leading slash in front of a Windows drive specifier (e.g. "/C:/foo") is dropped when
 * there is no scheme and no real authority, so the result reads "C:/foo".
 */
@Override
public String toString() {
    // Return the cached form when it was computed before.
    if (mUriString != null) {
        return mUriString;
    }
    StringBuilder sb = new StringBuilder();
    if (mUri.getScheme() != null) {
        sb.append(mUri.getScheme());
        sb.append("://");
    }
    if (hasAuthority()) {
        // A scheme-less URI with an authority still needs the leading "//".
        if (mUri.getScheme() == null) {
            sb.append("//");
        }
        sb.append(mUri.getAuthority().toString());
    }
    if (mUri.getPath() != null) {
        String path = mUri.getPath();
        if (path.indexOf('/') == 0 && hasWindowsDrive(path, true) // has windows drive
            && mUri.getScheme() == null // but no scheme
            && (mUri.getAuthority() == null
                || mUri.getAuthority() instanceof NoAuthority)) { // or authority
            path = path.substring(1); // remove slash before drive
        }
        sb.append(path);
    }
    if (mUri.getQuery() != null) {
        sb.append("?");
        sb.append(mUri.getQuery());
    }
    // Memoize. NOTE(review): assumes this object is effectively immutable; a concurrent call
    // may compute the string twice, which appears benign — confirm.
    mUriString = sb.toString();
    return mUriString;
}
// Windows-only: drive-letter paths must round-trip through toString(), and backslash input
// must be normalized to forward slashes.
@Test
public void toStringWindowsTests() {
    assumeTrue(WINDOWS);

    String[] uris = new String[] {
        "c:/",
        "c:/foo/bar",
        "C:/foo/bar#boo",
        "C:/foo/ bar"
    };
    for (String uri : uris) {
        AlluxioURI turi = new AlluxioURI(uri);
        assertEquals(uri, turi.toString());
    }

    assertEquals("C:/", new AlluxioURI("C:\\\\").toString());
    assertEquals("C:/a/b.txt", new AlluxioURI("C:\\\\a\\b.txt").toString());
}
/**
 * Routes a getApplicationReport call to the sub-cluster that owns the application.
 *
 * <p>Flow: validate the request, resolve the home sub-cluster from the federation state
 * store, proxy the call to that sub-cluster's RM, and record success metrics/audit entries.
 * Each failure path increments the failure metric and logs before rethrowing.
 *
 * @param request the report request; must carry an application id
 * @throws YarnException on validation or routing failure
 * @throws IOException on communication failure
 */
@Override
public GetApplicationReportResponse getApplicationReport(
    GetApplicationReportRequest request) throws YarnException, IOException {
  // Reject requests that carry no application id.
  if (request == null || request.getApplicationId() == null) {
    routerMetrics.incrAppsFailedRetrieved();
    String errMsg = "Missing getApplicationReport request or applicationId information.";
    RouterAuditLogger.logFailure(user.getShortUserName(), GET_APP_REPORT, UNKNOWN,
        TARGET_CLIENT_RM_SERVICE, errMsg);
    RouterServerUtil.logAndThrowException(errMsg, null);
  }
  long startTime = clock.getTime();
  SubClusterId subClusterId = null;
  try {
    // Resolve which sub-cluster is the application's home.
    subClusterId = federationFacade
        .getApplicationHomeSubCluster(request.getApplicationId());
  } catch (YarnException e) {
    routerMetrics.incrAppsFailedRetrieved();
    String errMsg = String.format("Application %s does not exist in FederationStateStore.",
        request.getApplicationId());
    RouterAuditLogger.logFailure(user.getShortUserName(), GET_APP_REPORT, UNKNOWN,
        TARGET_CLIENT_RM_SERVICE, errMsg, request.getApplicationId());
    RouterServerUtil.logAndThrowException(errMsg, e);
  }
  ApplicationClientProtocol clientRMProxy = getClientRMProxyForSubCluster(subClusterId);
  GetApplicationReportResponse response = null;
  try {
    response = clientRMProxy.getApplicationReport(request);
  } catch (Exception e) {
    routerMetrics.incrAppsFailedRetrieved();
    String errMsg =
        String.format("Unable to get the application report for %s to SubCluster %s.",
        request.getApplicationId(), subClusterId.getId());
    RouterAuditLogger.logFailure(user.getShortUserName(), GET_APP_REPORT, UNKNOWN,
        TARGET_CLIENT_RM_SERVICE, errMsg, request.getApplicationId(), subClusterId);
    RouterServerUtil.logAndThrowException(errMsg, e);
  }
  // logAndThrowException above should have thrown on every failure path, so a null response
  // here is unexpected; it is logged and still returned. NOTE(review): confirm callers
  // tolerate a null return.
  if (response == null) {
    LOG.error("No response when attempting to retrieve the report of "
        + "the application {} to SubCluster {}.",
        request.getApplicationId(), subClusterId.getId());
  }
  long stopTime = clock.getTime();
  routerMetrics.succeededAppsRetrieved(stopTime - startTime);
  RouterAuditLogger.logSuccess(user.getShortUserName(),
      GET_APP_REPORT, TARGET_CLIENT_RM_SERVICE, request.getApplicationId());
  return response;
}
// Both a null request and a request with a null application id must be rejected
// with the same validation error.
@Test
public void testGetApplicationEmptyRequest() throws Exception {
    LOG.info("Test FederationClientInterceptor: Get Application Report - Empty.");

    // null request1
    LambdaTestUtils.intercept(YarnException.class,
        "Missing getApplicationReport request or applicationId information.",
        () -> interceptor.getApplicationReport(null));

    // null request2
    GetApplicationReportRequest reportRequest = GetApplicationReportRequest.newInstance(null);
    LambdaTestUtils.intercept(YarnException.class,
        "Missing getApplicationReport request or applicationId information.",
        () -> interceptor.getApplicationReport(reportRequest));
}
/**
 * Extracts the issue key embedded between the SonarQube prefix and suffix markers.
 * Returns an empty Optional when either marker is absent.
 */
public static Optional<String> deserialize(String messageText) {
    // substringBetween yields null when a marker is missing; map that to empty.
    final String issueKey =
        substringBetween(messageText, SONAR_ISSUE_KEY_PREFIX, SONAR_ISSUE_KEY_SUFFIX);
    return Optional.ofNullable(issueKey);
}
// A key is only recognized when both prefix and suffix markers are present, in order.
@Test
public void should_notDeserializeIssueKey_when_messageHasWrongFormat() {
    String issueKey = RandomStringUtils.randomAlphanumeric(20);
    String messageWithoutSuffix = join("", SONAR_ISSUE_KEY_PREFIX, issueKey, "a message");
    String messageWithoutPrefix = join("", issueKey, SONAR_ISSUE_KEY_SUFFIX, "a message");
    String messageWithPrefixSuffixReversed =
        join("", SONAR_ISSUE_KEY_SUFFIX, issueKey, SONAR_ISSUE_KEY_PREFIX, "a message");
    String messageWithNoPrefixSuffix = join("", issueKey, "a message");

    assertThat(SonarQubeIssueKeyFormatter.deserialize(messageWithoutSuffix)).isEmpty();
    assertThat(SonarQubeIssueKeyFormatter.deserialize(messageWithoutPrefix)).isEmpty();
    assertThat(SonarQubeIssueKeyFormatter.deserialize(messageWithPrefixSuffixReversed)).isEmpty();
    assertThat(SonarQubeIssueKeyFormatter.deserialize(messageWithNoPrefixSuffix)).isEmpty();
}
/**
 * Runs the path search after validating arguments, substituting the default hop-count
 * weigher when the caller supplies none.
 */
@Override
public Result<V, E> search(Graph<V, E> graph, V src, V dst, EdgeWeigher<V, E> weigher, int maxPaths) {
    checkArguments(graph, src, dst);
    final EdgeWeigher<V, E> effectiveWeigher =
            (weigher == null) ? new DefaultEdgeWeigher<>() : weigher;
    return internalSearch(graph, src, dst, effectiveWeigher, maxPaths);
}
// search() must reject a null source vertex with a NullPointerException.
@Test(expected = NullPointerException.class)
public void nullSourceArgument() {
    graphSearch().search(new AdjacencyListsGraph<>(of(B, C), of(new TestEdge(B, C))),
                         null, H, weigher, 1);
}
/**
 * Converts the YAML advisors configuration into advisor configurations for the given
 * plugin type. Advisors that declare no target class are skipped, since they cannot apply.
 */
public static Collection<AdvisorConfiguration> swap(final YamlAdvisorsConfiguration yamlConfig, final String pluginType) {
    return yamlConfig.getAdvisors().stream()
            .filter(advisor -> advisor.getTarget() != null)
            .map(advisor -> YamlAdvisorConfigurationSwapper.swap(advisor, pluginType))
            .collect(Collectors.toList());
}
// Loads the fixture advisors YAML and checks that exactly one advisor configuration
// survives the swap and has the expected contents.
@Test
void assertSwapToObject() {
    Collection<AdvisorConfiguration> actual = YamlAdvisorsConfigurationSwapper.swap(
        AgentYamlEngine.unmarshalYamlAdvisorsConfiguration(
            getClass().getResourceAsStream("/META-INF/conf/advisors.yaml")), "FIXTURE");
    assertThat(actual.size(), is(1));
    assertAdvisorConfiguration(actual.iterator().next());
}
/**
 * Returns a new array of {@code length} random bytes drawn from the shared Random source.
 */
public static byte[] randomBytes(final int length) {
    // Allocate the buffer, then let the generator fill it in a single call.
    final byte[] result = new byte[length];
    getRandom().nextBytes(result);
    return result;
}
// Smoke test: randomBytes must return a non-null array for a positive length.
// NOTE(review): length (10) is not asserted — consider also checking c.length == 10.
@Test
public void randomBytesTest() {
    final byte[] c = RandomUtil.randomBytes(10);
    assertNotNull(c);
}
/**
 * Atomically updates the value with the result of {@code f(current, v)} and returns the
 * value that was current before the update (CAS retry loop).
 */
public long getAndAccumulate(long v, LongBinaryOperator f) {
    for (;;) {
        final long current = lvVal();
        final long updated = f.applyAsLong(current, v);
        // Recompute from a fresh read on every failed swap attempt.
        if (casVal(current, updated)) {
            return current;
        }
    }
}
// getAndAccumulate must return the previous value and leave the accumulated value stored.
@Test
public void testGetAndAccumulate() {
    PaddedAtomicLong counter = new PaddedAtomicLong(10);
    long value = counter.getAndAccumulate(1, Long::sum);
    // Fix: JUnit's assertEquals takes (expected, actual); the original had them swapped,
    // which produces misleading failure messages.
    assertEquals(10, value);
    assertEquals(11, counter.get());
}
/**
 * Configures S3 client credentials and verifies them by probing the bucket region.
 *
 * <p>Credential selection: anonymous → no credentials; valid session tokens → temporary
 * AWS session credentials; otherwise basic access key/secret. If the host already carries a
 * successful authentication event, verification is skipped entirely.
 *
 * @throws BackgroundException on login or region lookup failure
 */
@Override
public void login(final LoginCallback prompt, final CancelCallback cancel) throws BackgroundException {
    final Credentials credentials = authentication.get();
    if(credentials.isAnonymousLogin()) {
        if(log.isDebugEnabled()) {
            log.debug(String.format("Connect with no credentials to %s", host));
        }
        client.setProviderCredentials(null);
    }
    else {
        if(credentials.getTokens().validate()) {
            if(log.isDebugEnabled()) {
                log.debug(String.format("Connect with session credentials to %s", host));
            }
            client.setProviderCredentials(new AWSSessionCredentials(
                credentials.getTokens().getAccessKeyId(),
                credentials.getTokens().getSecretAccessKey(),
                credentials.getTokens().getSessionToken()));
        }
        else {
            if(log.isDebugEnabled()) {
                log.debug(String.format("Connect with basic credentials to %s", host));
            }
            client.setProviderCredentials(new AWSCredentials(credentials.getUsername(), credentials.getPassword()));
        }
    }
    // Skip the verification round-trip if credentials were already validated earlier.
    if(host.getCredentials().isPassed()) {
        log.warn(String.format("Skip verifying credentials with previous successful authentication event for %s", this));
        return;
    }
    try {
        // Probing the location both verifies the credentials and reveals the bucket region.
        final Path home = new DelegatingHomeFeature(new DefaultPathHomeFeature(host)).find();
        final Location.Name location = new S3LocationFeature(S3Session.this, regions).getLocation(home);
        if(log.isDebugEnabled()) {
            log.debug(String.format("Retrieved region %s", location));
        }
        if(!Location.unknown.equals(location)) {
            if(log.isDebugEnabled()) {
                log.debug(String.format("Set default region to %s determined from %s", location, home));
            }
            // host.setProperty("s3.location", location.getIdentifier());
        }
    }
    catch(AccessDeniedException | InteroperabilityException e) {
        // Region lookup may be forbidden; fall back to a plain home lookup so the
        // credentials are still exercised.
        log.warn(String.format("Failure %s querying region", e));
        final Path home = new DefaultHomeFinderService(this).find();
        if(log.isDebugEnabled()) {
            log.debug(String.format("Retrieved %s", home));
        }
    }
}
// End-to-end connect/login/close against AWS using an AWS2-signature connection profile.
// Requires live credentials from the test properties.
@Test
public void testAWS2SignatureVersion() throws Exception {
    final ProtocolFactory factory = new ProtocolFactory(new HashSet<>(Collections.singleton(new S3Protocol())));
    final Profile profile = new ProfilePlistReader(factory).read(
        this.getClass().getResourceAsStream("/S3 AWS2 Signature Version (HTTPS).cyberduckprofile"));
    final Host host = new Host(profile, profile.getDefaultHostname(), new Credentials(
        PROPERTIES.get("s3.key"), PROPERTIES.get("s3.secret")
    ));
    final S3Session session = new S3Session(host, new DisabledX509TrustManager(), new DefaultX509KeyManager());
    assertNotNull(session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback()));
    assertTrue(session.isConnected());
    session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
    session.close();
    assertFalse(session.isConnected());
}
/**
 * Programs INT (In-band Network Telemetry) behavior on one device.
 *
 * <p>Devices with hosts attached act as SOURCE_SINK, others as TRANSIT. Returns true when
 * configuration succeeded or was legitimately a no-op (device not INT-capable, INT not
 * configured/started); false when programming failed and a clean-up is needed.
 *
 * @param deviceId device to configure
 * @return true if config was successful or not needed, false if a clean-up is required
 */
protected boolean configDevice(DeviceId deviceId) {
    // Returns true if config was successful, false if not and a clean up is
    // needed.
    final Device device = deviceService.getDevice(deviceId);
    if (device == null || !device.is(IntProgrammable.class)) {
        return true;
    }

    if (isNotIntConfigured()) {
        log.warn("Missing INT config, aborting programming of INT device {}", deviceId);
        return true;
    }

    // A device with at least one connected host takes the edge (source/sink) role.
    final boolean isEdge = !hostService.getConnectedHosts(deviceId).isEmpty();
    final IntDeviceRole intDeviceRole = isEdge
            ? IntDeviceRole.SOURCE_SINK
            : IntDeviceRole.TRANSIT;

    log.info("Started programming of INT device {} with role {}...",
             deviceId, intDeviceRole);

    final IntProgrammable intProg = device.as(IntProgrammable.class);

    if (!isIntStarted()) {
        // Leave device with no INT configuration.
        return true;
    }

    if (!intProg.init()) {
        log.warn("Unable to init INT pipeline on {}", deviceId);
        return false;
    }

    boolean supportSource = intProg.supportsFunctionality(IntProgrammable.IntFunctionality.SOURCE);
    boolean supportSink = intProg.supportsFunctionality(IntProgrammable.IntFunctionality.SINK);
    boolean supportPostcard = intProg.supportsFunctionality(IntProgrammable.IntFunctionality.POSTCARD);

    if (intDeviceRole != IntDeviceRole.SOURCE_SINK && !supportPostcard) {
        // Stop here, no more configuration needed for transit devices unless it support postcard.
        return true;
    }

    if (supportSink || supportPostcard) {
        if (!intProg.setupIntConfig(intConfig.get())) {
            log.warn("Unable to apply INT report config on {}", deviceId);
            return false;
        }
    }

    // Port configuration.
    // Ports facing hosts are configured as INT source/sink ports.
    final Set<PortNumber> hostPorts = deviceService.getPorts(deviceId)
            .stream()
            .map(port -> new ConnectPoint(deviceId, port.number()))
            .filter(cp -> !hostService.getConnectedHosts(cp).isEmpty())
            .map(ConnectPoint::port)
            .collect(Collectors.toSet());

    for (PortNumber port : hostPorts) {
        if (supportSource) {
            log.info("Setting port {}/{} as INT source port...", deviceId, port);
            if (!intProg.setSourcePort(port)) {
                log.warn("Unable to set INT source port {} on {}", port, deviceId);
                return false;
            }
        }
        if (supportSink) {
            log.info("Setting port {}/{} as INT sink port...", deviceId, port);
            if (!intProg.setSinkPort(port)) {
                log.warn("Unable to set INT sink port {} on {}", port, deviceId);
                return false;
            }
        }
    }

    if (!supportSource && !supportPostcard) {
        // Stop here, no more configuration needed for sink devices unless
        // it supports postcard mode.
        return true;
    }

    // Apply intents.
    // This is a trivial implementation where we simply get the
    // corresponding INT objective from an intent and we apply to all
    // device which support reporting.
    int appliedCount = 0;
    for (Versioned<IntIntent> versionedIntent : intentMap.values()) {
        IntIntent intent = versionedIntent.value();
        IntObjective intObjective = getIntObjective(intent);
        if (intent.telemetryMode() == IntIntent.TelemetryMode.INBAND_TELEMETRY && supportSource) {
            intProg.addIntObjective(intObjective);
            appliedCount++;
        } else if (intent.telemetryMode() == IntIntent.TelemetryMode.POSTCARD && supportPostcard) {
            intProg.addIntObjective(intObjective);
            appliedCount++;
        } else {
            log.warn("Device {} does not support intent {}.", deviceId, intent);
        }
    }
    log.info("Completed programming of {}, applied {} INT objectives of {} total",
             deviceId, appliedCount, intentMap.size());
    return true;
}
// A device that is not IntProgrammable must be treated as "configured" (no-op success).
@Test
public void testConfigNonIntDevice() {
    reset(deviceService);
    Device device = getMockDevice(false, DEVICE_ID);
    expect(deviceService.getDevice(DEVICE_ID))
        .andReturn(device)
        .anyTimes();
    expect(deviceService.getDevices())
        .andReturn(ImmutableSet.of(device))
        .anyTimes();
    replay(deviceService, device);

    assertTrue(manager.configDevice(DEVICE_ID));

    // Fix: EasyMock's verify() must be passed the mocks to check; a bare verify() call
    // verifies nothing at all.
    verify(deviceService, device);
}
/**
 * Runs one iteration of the send/poll loop: drain generated requests, send what is due,
 * poll the network, then handle disconnects and expirations.
 *
 * <p>Error policy: FatalExitError propagates as-is; DisconnectException is swallowed when
 * the client is shutting down; InterruptedException propagates during shutdown; any other
 * throwable is escalated to FatalExitError because the thread may have dropped requests
 * and cannot safely continue.
 *
 * @param maxTimeoutMs upper bound for the network poll timeout
 */
protected void pollOnce(long maxTimeoutMs) {
    try {
        drainGeneratedRequests();
        long now = time.milliseconds();
        final long timeout = sendRequests(now, maxTimeoutMs);
        networkClient.poll(timeout, now);
        now = time.milliseconds();

        checkDisconnects(now);
        failExpiredRequests(now);
        unsentRequests.clean();
    } catch (FatalExitError fee) {
        throw fee;
    } catch (Throwable t) {
        if (t instanceof DisconnectException && !networkClient.active()) {
            // DisconnectException is expected when NetworkClient#initiateClose is called
            return;
        }

        if (t instanceof InterruptedException && !isRunning()) {
            // InterruptedException is expected when shutting down. Throw the error to ShutdownableThread to handle
            throw t;
        }

        log.error("unhandled exception caught in InterBrokerSendThread", t);

        // rethrow any unhandled exceptions as FatalExitError so the JVM will be terminated
        // as we will be in an unknown state with potentially some requests dropped and not
        // being able to make progress. Known and expected Errors should have been appropriately
        // dealt with already.
        throw new FatalExitError();
    }
}
// A DisconnectException while the network client is still active is NOT expected, so
// pollOnce must escalate it to a FatalExitError.
@Test
public void testDisconnectWithoutShutdownShouldCauseException() {
    DisconnectException de = new DisconnectException();
    when(networkClient.poll(anyLong(), anyLong())).thenThrow(de);
    when(networkClient.active()).thenReturn(true);

    AtomicReference<Throwable> throwable = new AtomicReference<>();
    final InterBrokerSendThread thread =
        new TestInterBrokerSendThread(networkClient, throwable::getAndSet);
    thread.pollOnce(100);

    verify(networkClient).poll(anyLong(), anyLong());
    verify(networkClient).active();
    verifyNoMoreInteractions(networkClient);

    Throwable thrown = throwable.get();
    assertNotNull(thrown);
    assertInstanceOf(FatalExitError.class, thrown);
}
/**
 * Creates an unconfigured {@link Write} transform; callers configure it through the
 * fluent {@code with*} methods.
 */
public static <K, V> Write<K, V> write() {
    final AutoValue_CdapIO_Write.Builder<K, V> builder = new AutoValue_CdapIO_Write.Builder<>();
    return builder.build();
}
// withValueClass must reject a null value class eagerly.
@Test
public void testWriteObjectCreationFailsIfValueClassIsNull() {
    assertThrows(
        IllegalArgumentException.class,
        () -> CdapIO.<String, String>write().withValueClass(null));
}
/**
 * Subscribes to the given topic names. Delegates to the List-based {@code topics} overload
 * so all per-topic validation lives in one place.
 */
@Override
public ConsumerBuilder<T> topic(String... topicNames) {
    checkArgument(topicNames != null && topicNames.length > 0,
            "Passed in topicNames should not be null or empty.");
    final List<String> topicList = Arrays.stream(topicNames).collect(Collectors.toList());
    return topics(topicList);
}
// A blank topic name among the varargs must be rejected (by the downstream validation,
// not the null/empty varargs check).
@Test(expectedExceptions = IllegalArgumentException.class)
public void testConsumerBuilderImplWhenTopicNamesVarargsHasBlankTopic() {
    consumerBuilderImpl.topic("my-topic", " ");
}
/**
 * Returns the current GTID recorded in the header's GTID map, or {@code null} when no
 * GTID has been recorded yet.
 */
public String getCurrentGtid() {
    return gtidMap.get(CURRENT_GTID_STRING);
}
// A freshly constructed header has no GTID recorded, so getCurrentGtid() returns null.
@Test
public void getCurrentGtidOutputNull() {
    // Arrange
    final LogHeader objectUnderTest = new LogHeader(0);

    // Act
    final String actual = objectUnderTest.getCurrentGtid();

    // Assert result
    Assert.assertNull(actual);
}
/**
 * Persists an uploaded blob: first to the (optional) HA blob store, then moves the staging
 * file into its local storage location.
 *
 * <p>Failure handling: if anything fails, the blob-store copy is deleted and the partially
 * written storage file removed. The staging file is always deleted unless the move consumed
 * it. Concurrent moves of the same key are tolerated (FileAlreadyExistsException is only
 * logged).
 *
 * @param incomingFile staging file holding the uploaded bytes; consumed or deleted
 * @param jobId owning job, or {@code null} for job-independent blobs
 * @param blobKey key under which the blob is stored
 * @param storageFile final local location for the blob
 * @param moveFileOperation the move strategy (injectable for testing failures)
 * @throws IOException if persisting or moving fails
 */
@VisibleForTesting
static void internalMoveTempFileToStore(
        File incomingFile,
        @Nullable JobID jobId,
        BlobKey blobKey,
        File storageFile,
        Logger log,
        @Nullable BlobStore blobStore,
        MoveFileOperation moveFileOperation)
        throws IOException {

    boolean success = false;
    try {
        // first check whether the file already exists
        if (!storageFile.exists()) {
            // persist the blob via the blob store
            if (blobStore != null) {
                blobStore.put(incomingFile, jobId, blobKey);
            }

            try {
                moveFileOperation.moveFile(incomingFile, storageFile);
                // Move succeeded: the staging file no longer exists, skip its cleanup below.
                incomingFile = null;
            } catch (FileAlreadyExistsException ignored) {
                log.warn(
                        "Detected concurrent file modifications. This should only happen if multiple"
                                + "BlobServer use the same storage directory.");
            }
        } else {
            log.warn(
                    "File upload for an existing file with key {} for job {}. This may indicate a duplicate upload or a hash collision. Ignoring newest upload.",
                    blobKey,
                    jobId);
        }
        success = true;
    } finally {
        if (!success) {
            // Roll back: remove the HA copy and the (possibly partial) storage file.
            if (blobStore != null) {
                blobStore.delete(jobId, blobKey);
            }

            if (!storageFile.delete() && storageFile.exists()) {
                log.warn("Could not delete the storage file {}.", storageFile);
            }
        }

        // Always clean up the staging file unless the move consumed it.
        if (incomingFile != null && !incomingFile.delete() && incomingFile.exists()) {
            log.warn(
                    "Could not delete the staging file {} for blob key {} and job {}.",
                    incomingFile,
                    blobKey,
                    jobId);
        }
    }
}
// When the move operation fails, the blob-store copy, the staging file and the storage
// file must all be cleaned up and the IOException propagated.
@Test
void testCleanupIfMoveTempFileToStoreFails() throws IOException {
    final File storageFile = tempDir.resolve(UUID.randomUUID().toString()).toFile();
    final File incomingFile = TempDirUtils.newFile(tempDir);
    Files.write(incomingFile.toPath(), new byte[] {1, 2, 3, 4});
    final FileSystemBlobStore blobStore =
            new FileSystemBlobStore(
                    new LocalFileSystem(), TempDirUtils.newFolder(tempDir).toString());
    final JobID jobId = new JobID();
    final BlobKey blobKey = BlobKey.createKey(BlobKey.BlobType.PERMANENT_BLOB);

    assertThatThrownBy(
                    () ->
                            BlobUtils.internalMoveTempFileToStore(
                                    incomingFile,
                                    jobId,
                                    blobKey,
                                    storageFile,
                                    LOG,
                                    blobStore,
                                    (source, target) -> {
                                        throw new IOException("Test Failure");
                                    }))
            .isInstanceOf(IOException.class);

    // The blob-store copy must have been rolled back as well.
    assertThatThrownBy(
                    () ->
                            blobStore.get(
                                    jobId,
                                    blobKey,
                                    tempDir.resolve(UUID.randomUUID().toString()).toFile()))
            .isInstanceOf(FileNotFoundException.class);
    assertThat(incomingFile).doesNotExist();
    assertThat(storageFile).doesNotExist();
}
/**
 * Returns the page of users that can be assigned to the given alarm.
 *
 * <p>Tenant admins see tenant-level users (customer id NULL) plus — when the alarm has a
 * customer — the users of the alarm originator's customer. Customer users see only the
 * users of the alarm's customer.
 */
@ApiOperation(value = "Get usersForAssign (getUsersForAssign)", notes = "Returns page of user data objects that can be assigned to provided alarmId. " +
        "Search is been executed by email, firstName and lastName fields. " +
        PAGE_DATA_PARAMETERS + TENANT_OR_CUSTOMER_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAnyAuthority('TENANT_ADMIN', 'CUSTOMER_USER')")
@RequestMapping(value = "/users/assign/{alarmId}", params = {"pageSize", "page"}, method = RequestMethod.GET)
@ResponseBody
public PageData<UserEmailInfo> getUsersForAssign(
        @Parameter(description = ALARM_ID_PARAM_DESCRIPTION, required = true)
        @PathVariable("alarmId") String strAlarmId,
        @Parameter(description = PAGE_SIZE_DESCRIPTION, required = true)
        @RequestParam int pageSize,
        @Parameter(description = PAGE_NUMBER_DESCRIPTION, required = true)
        @RequestParam int page,
        @Parameter(description = USER_TEXT_SEARCH_DESCRIPTION)
        @RequestParam(required = false) String textSearch,
        @Parameter(description = SORT_PROPERTY_DESCRIPTION, schema = @Schema(allowableValues = {"createdTime", "firstName", "lastName", "email"}))
        @RequestParam(required = false) String sortProperty,
        @Parameter(description = SORT_ORDER_DESCRIPTION, schema = @Schema(allowableValues = {"ASC", "DESC"}))
        @RequestParam(required = false) String sortOrder) throws ThingsboardException {
    checkParameter("alarmId", strAlarmId);
    AlarmId alarmEntityId = new AlarmId(toUUID(strAlarmId));
    // Also enforces READ permission on the alarm.
    Alarm alarm = checkAlarmId(alarmEntityId, Operation.READ);
    SecurityUser currentUser = getCurrentUser();
    TenantId tenantId = currentUser.getTenantId();
    // Customer owning the alarm's originator entity; NULL_CUSTOMER_ID when unassigned.
    CustomerId originatorCustomerId = entityService.fetchEntityCustomerId(tenantId, alarm.getOriginator()).orElse(NULL_CUSTOMER_ID);
    PageLink pageLink = createPageLink(pageSize, page, textSearch, sortProperty, sortOrder);
    PageData<User> pageData;
    if (Authority.TENANT_ADMIN.equals(currentUser.getAuthority())) {
        if (alarm.getCustomerId() == null) {
            pageData = userService.findTenantAdmins(tenantId, pageLink);
        } else {
            // Tenant-level users (NULL customer) plus the originator's customer users,
            // if the originator belongs to a customer.
            ArrayList<CustomerId> customerIds = new ArrayList<>(Collections.singletonList(NULL_CUSTOMER_ID));
            if (!CustomerId.NULL_UUID.equals(originatorCustomerId.getId())) {
                customerIds.add(originatorCustomerId);
            }
            pageData = userService.findUsersByCustomerIds(tenantId, customerIds, pageLink);
        }
    } else {
        pageData = userService.findCustomerUsers(tenantId, alarm.getCustomerId(), pageLink);
    }
    // Strip the full User entity down to id/email/name for the response.
    return pageData.mapData(user -> new UserEmailInfo(user.getId(), user.getEmail(), user.getFirstName(), user.getLastName()));
}
// Covers the three visibility cases of /api/users/assign/{alarmId}:
//  1) tenant admin + device without customer -> only tenant admins are returned;
//  2) tenant admin + device assigned to a customer -> tenant admins plus customer users;
//  3) customer user -> only that customer's users.
@Test
public void testGetUsersForAssign() throws Exception {
    loginTenantAdmin();

    // Create a pool of customer users to page over.
    String email = "testEmail1";
    List<UserId> expectedCustomerUserIds = new ArrayList<>();
    expectedCustomerUserIds.add(customerUserId);
    for (int i = 0; i < 45; i++) {
        User customerUser = createCustomerUser(customerId);
        customerUser.setEmail(email + StringUtils.randomAlphanumeric((int) (5 + Math.random() * 10)) + "@thingsboard.org");
        User user = doPost("/api/user", customerUser, User.class);
        expectedCustomerUserIds.add(user.getId());
    }
    List<UserId> expectedTenantUserIds = new ArrayList<>(List.copyOf(expectedCustomerUserIds));
    expectedTenantUserIds.add(tenantAdminUserId);

    Device device = new Device();
    device.setName("testDevice");
    Device savedDevice = doPost("/api/device", device, Device.class);
    Alarm alarm = createTestAlarm(savedDevice);

    // Case 1: device has no customer -> only the tenant admin may be assigned.
    List<UserId> loadedTenantUserIds = new ArrayList<>();
    PageLink pageLink = new PageLink(33, 0);
    PageData<UserEmailInfo> pageData;
    do {
        pageData = doGetTypedWithPageLink("/api/users/assign/" + alarm.getId().getId().toString() + "?",
                new TypeReference<>() {}, pageLink);
        loadedTenantUserIds.addAll(pageData.getData().stream().map(UserEmailInfo::getId)
                .collect(Collectors.toList()));
        if (pageData.hasNext()) {
            pageLink = pageLink.nextPageLink();
        }
    } while (pageData.hasNext());

    Assert.assertEquals(1, loadedTenantUserIds.size());
    Assert.assertEquals(tenantAdminUserId, loadedTenantUserIds.get(0));

    doDelete("/api/alarm/" + alarm.getId().getId().toString());

    // Case 2: assign the device to the customer and recreate the alarm.
    savedDevice.setCustomerId(customerId);
    savedDevice = doPost("/api/customer/" + customerId.getId() + "/device/" + savedDevice.getId().getId(), Device.class);
    alarm = createTestAlarm(savedDevice);

    List<UserId> loadedUserIds = new ArrayList<>();
    pageLink = new PageLink(16, 0);
    do {
        pageData = doGetTypedWithPageLink("/api/users/assign/" + alarm.getId().getId().toString() + "?",
                new TypeReference<>() {}, pageLink);
        loadedUserIds.addAll(pageData.getData().stream().map(UserEmailInfo::getId)
                .collect(Collectors.toList()));
        if (pageData.hasNext()) {
            pageLink = pageLink.nextPageLink();
        }
    } while (pageData.hasNext());

    expectedTenantUserIds.sort(userIdComparator);
    loadedUserIds.sort(userIdComparator);
    Assert.assertEquals(expectedTenantUserIds, loadedUserIds);

    // Case 3: a customer user only sees the customer's users.
    loginCustomerUser();
    loadedUserIds = new ArrayList<>();
    pageLink = new PageLink(16, 0);
    do {
        pageData = doGetTypedWithPageLink("/api/users/assign/" + alarm.getId().getId().toString() + "?",
                new TypeReference<>() {}, pageLink);
        loadedUserIds.addAll(pageData.getData().stream().map(UserEmailInfo::getId)
                .collect(Collectors.toList()));
        if (pageData.hasNext()) {
            pageLink = pageLink.nextPageLink();
        }
    } while (pageData.hasNext());
    expectedCustomerUserIds.sort(userIdComparator);
    loadedUserIds.sort(userIdComparator);
    Assert.assertEquals(expectedCustomerUserIds, loadedUserIds);
}
/**
 * Builds runtime options from the {@code @CucumberOptions} annotations on the given class
 * and its superclasses.
 *
 * <p>The class hierarchy is walked from subclass to superclass, so options declared lower
 * in the hierarchy are applied first. Default feature path and glue are filled in only if
 * no annotation specified them.
 *
 * @param clazz annotated runner class
 * @return builder populated from the annotations
 */
public RuntimeOptionsBuilder parse(Class<?> clazz) {
    RuntimeOptionsBuilder args = new RuntimeOptionsBuilder();

    for (Class<?> classWithOptions = clazz; hasSuperClass(
        classWithOptions); classWithOptions = classWithOptions.getSuperclass()) {
        CucumberOptions options = requireNonNull(optionsProvider).getOptions(classWithOptions);

        if (options != null) {
            addDryRun(options, args);
            addMonochrome(options, args);
            addTags(classWithOptions, options, args);
            addPlugins(options, args);
            addPublish(options, args);
            addName(options, args);
            addSnippets(options, args);
            addGlue(options, args);
            addFeatures(options, args);
            addObjectFactory(options, args);
            addUuidGenerator(options, args);
        }
    }
    addDefaultFeaturePathIfNoFeaturePathIsSpecified(args, clazz);
    addDefaultGlueIfNoOverridingGlueIsSpecified(args, clazz);
    return args;
}
// An invalid tag expression must fail with a message naming the declaring class and a
// TagExpressionException as the cause.
@Test
void throws_runtime_exception_on_invalid_tag_with_class_location() {
    RuntimeException actual = assertThrows(RuntimeException.class,
        () -> parser().parse(ClassWithInvalidTagExpression.class).build());
    assertAll(
        () -> assertThat(actual.getMessage(), is(
            "Invalid tag expression at 'io.cucumber.core.options.CucumberOptionsAnnotationParserTest$ClassWithInvalidTagExpression'")),
        () -> assertThat(actual.getCause(), isA(TagExpressionException.class)));
}
/**
 * Determines the region of the given instance.
 *
 * <p>Returns the local region when data-center info is missing; maps the availability zone
 * to a region for Amazon instances; returns {@code null} when the instance is an Amazon
 * instance without an availability zone, or a non-Amazon instance with data-center info.
 *
 * @param instanceInfo the instance to inspect
 * @return the region name, or {@code null} when it cannot be determined
 */
@Nullable
public String getInstanceRegion(InstanceInfo instanceInfo) {
    if (instanceInfo.getDataCenterInfo() == null || instanceInfo.getDataCenterInfo().getName() == null) {
        logger.warn("Cannot get region for instance id:{}, app:{} as dataCenterInfo is null. Returning local:{} by default",
                instanceInfo.getId(), instanceInfo.getAppName(), localRegion);
        return localRegion;
    }
    if (DataCenterInfo.Name.Amazon.equals(instanceInfo.getDataCenterInfo().getName())) {
        AmazonInfo amazonInfo = (AmazonInfo) instanceInfo.getDataCenterInfo();
        Map<String, String> metadata = amazonInfo.getMetadata();
        String availabilityZone = metadata.get(AmazonInfo.MetaDataKey.availabilityZone.getName());
        if (null != availabilityZone) {
            return azToRegionMapper.getRegionForAvailabilityZone(availabilityZone);
        }
    }

    return null;
}
// An Amazon instance whose availability-zone metadata is blank maps to no region (null).
// NOTE(review): an empty-string AZ is present in the metadata map, so this exercises the
// mapper's handling of "" rather than the missing-key branch — confirm intent.
@Test
public void testInstanceWithNoAZ() throws Exception {
    ConfigurationManager.getConfigInstance().setProperty("eureka.us-east-1.availabilityZones", "abc,def");
    PropertyBasedAzToRegionMapper azToRegionMapper = new PropertyBasedAzToRegionMapper(new DefaultEurekaClientConfig());
    InstanceRegionChecker checker = new InstanceRegionChecker(azToRegionMapper, "us-east-1");
    azToRegionMapper.setRegionsToFetch(new String[]{"us-east-1"});
    AmazonInfo dcInfo = AmazonInfo.Builder.newBuilder().addMetadata(AmazonInfo.MetaDataKey.availabilityZone, "").build();
    InstanceInfo instanceInfo = InstanceInfo.Builder.newBuilder().setAppName("app").setDataCenterInfo(
            dcInfo).build();
    String instanceRegion = checker.getInstanceRegion(instanceInfo);
    Assert.assertNull("Invalid instance region.", instanceRegion);
}
/**
 * Loads this step's metadata from the given XML node.
 *
 * <p>{@code databases} and {@code metaStore} are part of the StepMetaInterface contract
 * but are unused here — all state is read from the step node.
 *
 * @throws KettleXMLException if the XML cannot be parsed
 */
public void loadXML( Node stepnode, List<DatabaseMeta> databases, IMetaStore metaStore ) throws KettleXMLException {
    readData( stepnode );
}
// Round-trip: loading the fixture XML node must populate all XMLOutputMeta fields.
@Test
public void testLoadAndGetXml() throws Exception {
    XMLOutputMeta xmlOutputMeta = new XMLOutputMeta();
    Node stepnode = getTestNode();
    DatabaseMeta dbMeta = mock( DatabaseMeta.class );
    IMetaStore metaStore = mock( IMetaStore.class );
    xmlOutputMeta.loadXML( stepnode, Collections.singletonList( dbMeta ), metaStore );
    assertXmlOutputMeta( xmlOutputMeta );
}
/**
 * Parses the optional timeout query parameter (seconds, fractional allowed).
 *
 * <p>null maps to empty; non-positive values clamp to zero; values above the maximum are
 * rejected; everything else converts to whole milliseconds.
 *
 * @throws InvalidContentException when the value is not a float or exceeds the maximum
 */
static Optional<Duration> parseTimeout(String timeoutOption) throws InvalidContentException {
    if (timeoutOption == null) {
        return Optional.empty();
    }

    final float seconds;
    try {
        seconds = Float.parseFloat(timeoutOption);
    } catch (NumberFormatException e) {
        throw new InvalidContentException("value of timeout->" + timeoutOption + " is not a float");
    }

    if (seconds > MAX_TIMEOUT.getSeconds()) {
        throw new InvalidContentException("value of timeout->" + timeoutOption + " exceeds max timeout " + MAX_TIMEOUT);
    }
    return seconds <= 0.0
            ? Optional.of(Duration.ZERO)
            : Optional.of(Duration.ofMillis(Math.round(seconds * 1000)));
}
// null -> empty; fractional seconds -> rounded milliseconds; negative -> zero;
// a value rounding below 1 ms -> zero.
@Test
void testParsingOfTimeout() throws InvalidContentException {
    assertEquals(Optional.empty(), RestApiHandler.parseTimeout(null));
    assertEquals(Optional.of(Duration.ofMillis(12500)), RestApiHandler.parseTimeout("12.5"));
    assertEquals(Optional.of(Duration.ofMillis(0)), RestApiHandler.parseTimeout("-1"));
    assertEquals(Optional.of(Duration.ofMillis(0)), RestApiHandler.parseTimeout("0.0001"));
}
/**
 * Looks up a previously created RPC client by name.
 *
 * @return the registered client, or {@code null} if no client with that name exists
 */
public static RpcClient getClient(String clientName) {
    return CLIENT_MAP.get(clientName);
}
// getClient returns null for unknown names and the registered instance for known ones.
@Test
void testGetClient() throws IllegalAccessException {
    // may be null
    assertNull(RpcClientFactory.getClient("notExistClientName"));
    // Inject a known entry via reflection, then look it up.
    clientMapField.set(null, new ConcurrentHashMap<>(Collections.singletonMap("testClient", rpcClient)));
    assertEquals(rpcClient, RpcClientFactory.getClient("testClient"));
}
/**
 * Handles a jar upload: validates that exactly one {@code .jar} file was sent and moves it
 * into the jar directory under a UUID-prefixed name (to avoid collisions).
 *
 * <p>The move runs asynchronously on the handler executor; validation of the file count
 * happens synchronously, extension/move failures complete the future exceptionally.
 *
 * @throws RestHandlerException if not exactly one file was uploaded
 */
@Override
@VisibleForTesting
public CompletableFuture<JarUploadResponseBody> handleRequest(
        @Nonnull final HandlerRequest<EmptyRequestBody> request,
        @Nonnull final RestfulGateway gateway)
        throws RestHandlerException {
    Collection<File> uploadedFiles = request.getUploadedFiles();
    if (uploadedFiles.size() != 1) {
        throw new RestHandlerException(
                "Exactly 1 file must be sent, received " + uploadedFiles.size() + '.',
                HttpResponseStatus.BAD_REQUEST);
    }
    final Path fileUpload = uploadedFiles.iterator().next().toPath();
    return CompletableFuture.supplyAsync(
            () -> {
                if (!fileUpload.getFileName().toString().endsWith(".jar")) {
                    throw new CompletionException(
                            new RestHandlerException(
                                    "Only Jar files are allowed.",
                                    HttpResponseStatus.BAD_REQUEST));
                } else {
                    // UUID prefix keeps repeated uploads of the same jar name distinct.
                    final Path destination =
                            jarDir.resolve(UUID.randomUUID() + "_" + fileUpload.getFileName());
                    try {
                        Files.move(fileUpload, destination);
                    } catch (IOException e) {
                        throw new CompletionException(
                                new RestHandlerException(
                                        String.format(
                                                "Could not move uploaded jar file [%s] to [%s].",
                                                fileUpload, destination),
                                        HttpResponseStatus.INTERNAL_SERVER_ERROR,
                                        e));
                    }
                    return new JarUploadResponseBody(destination.normalize().toString());
                }
            },
            executor);
}
// A valid .jar upload succeeds and is stored under "<uuid>_<original-name>".
@Test
void testUploadJar() throws Exception {
    final Path uploadedFile = Files.createFile(jarDir.resolve("FooBazzleExample.jar"));
    final HandlerRequest<EmptyRequestBody> request = createRequest(uploadedFile);
    final JarUploadResponseBody jarUploadResponseBody =
            jarUploadHandler.handleRequest(request, mockDispatcherGateway).get();
    assertThat(jarUploadResponseBody.getStatus())
            .isEqualTo(JarUploadResponseBody.UploadStatus.success);
    final String returnedFileNameWithUUID = jarUploadResponseBody.getFilename();
    assertThat(returnedFileNameWithUUID).contains("_");
    // Strip the UUID prefix and verify the original name was preserved.
    final String returnedFileName =
            returnedFileNameWithUUID.substring(returnedFileNameWithUUID.lastIndexOf("_") + 1);
    assertThat(returnedFileName).isEqualTo(uploadedFile.getFileName().toString());
}
/**
 * Factory for a transform that subtracts the given operand from each input value.
 */
public static SimpleTransform sub(double operand) {
    final SimpleTransform transform = new SimpleTransform(Operation.sub, operand);
    return transform;
}
// A sub(100) transform must behave exactly like (a -> a - 100) on every tested input.
@Test
public void testSub() {
    TransformationMap t = new TransformationMap(Collections.singletonList(SimpleTransform.sub(100)), new HashMap<>());
    testSimple(t, (double a) -> a - 100);
}
/**
 * Closes this consumer, waiting up to the default close timeout. Delegates to the
 * timed {@code close(Duration)} variant.
 */
@Override
public void close() {
    close(Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS));
}
// Closing the consumer must trigger an unsubscribe against the subscription state,
// even with a zero timeout.
@Test
public void testUnsubscribeOnClose() {
    SubscriptionState subscriptions = mock(SubscriptionState.class);
    consumer = spy(newConsumer(
        mock(FetchBuffer.class),
        mock(ConsumerInterceptors.class),
        mock(ConsumerRebalanceListenerInvoker.class),
        subscriptions,
        "group-id",
        "client-id"));
    completeUnsubscribeApplicationEventSuccessfully();
    consumer.close(Duration.ZERO);
    verifyUnsubscribeEvent(subscriptions);
}
/**
 * Returns true when the input consists of one or more whitespace characters and
 * nothing else.
 */
public static boolean isBlankSpace(String blankSpace) {
    return isMatch("\\s+", blankSpace);
}
// Whitespace-only input is blank space; any non-whitespace character is not.
@Test
public void testBlankSpace() {
    // Use assertTrue/assertFalse instead of assertEquals(true/false, ...) — clearer intent
    // and better failure messages for boolean checks.
    Assert.assertTrue(PatternKit.isBlankSpace(" "));
    Assert.assertFalse(PatternKit.isBlankSpace("a"));
}
/**
 * Resolves the SQL type of the given expression with no lambda type mappings in scope.
 * Convenience overload of the two-argument variant.
 */
public SqlType getExpressionSqlType(final Expression expression) {
    return getExpressionSqlType(expression, Collections.emptyMap());
}
// BIGINT + DOUBLE must widen to DOUBLE.
@Test
public void shouldResolveTypeForAddBigIntDouble() {
    final Expression expression = new ArithmeticBinaryExpression(Operator.ADD, TestExpressions.COL0,
        COL3
    );

    final SqlType type = expressionTypeManager.getExpressionSqlType(expression);

    assertThat(type, is(SqlTypes.DOUBLE));
}
/**
 * Returns a new map holding the entries of {@code minuend} whose keys do not appear in
 * {@code subtrahend}. Neither input map is modified.
 *
 * @param minuend map to take entries from
 * @param subtrahend map whose keys are excluded
 * @return the set difference of the two maps, keyed on the minuend's entries
 */
public static <K, V> Map<K, V> subtractMap(Map<? extends K, ? extends V> minuend,
                                           Map<? extends K, ? extends V> subtrahend) {
    return minuend.entrySet().stream()
        .filter(entry -> !subtrahend.containsKey(entry.getKey()))
        .collect(Collectors.toMap(entry -> entry.getKey(), entry -> entry.getValue()));
}
// subtractMap removes entries whose keys appear in the second map (regardless of
// that map's values) and must not mutate the source map.
@Test
public void testSubtractMapRemovesSecondMapsKeys() {
    Map<String, String> mainMap = new HashMap<>();
    mainMap.put("one", "1");
    mainMap.put("two", "2");
    mainMap.put("three", "3");
    Map<String, String> secondaryMap = new HashMap<>();
    secondaryMap.put("one", "4");
    secondaryMap.put("two", "5");
    Map<String, String> newMap = subtractMap(mainMap, secondaryMap);
    assertEquals(3, mainMap.size()); // original map should not be modified
    assertEquals(1, newMap.size());
    assertTrue(newMap.containsKey("three"));
    assertEquals("3", newMap.get("three"));
}
/**
 * Validates that a file name has the form "name.ext" with at least two non-empty
 * dot-separated segments (so ".json", "a.", and "a..b" are all rejected).
 *
 * @param originalFilename the file name to validate
 * @throws BadRequestException if the name has no extension or any empty segment
 */
static void checkFormat(final String originalFilename) {
    final List<String> segments = Splitter.on(".").splitToList(originalFilename);
    final boolean hasExtension = segments.size() > 1;
    if (!hasExtension || segments.stream().anyMatch(StringUtils::isEmpty)) {
        throw new BadRequestException("The file format is invalid.");
    }
}
// A name starting with '.' has an empty base segment and must be rejected.
@Test(expected = BadRequestException.class)
public void checkFormatWithException1() {
    ConfigFileUtils.checkFormat(".json");
}
/**
 * Decodes the entire byte array using the primary (first) codec.
 *
 * @param val the encoded bytes
 * @return the decoded string
 */
public String decode(byte[] val) {
    return codecs[0].decode(val, 0, val.length);
}
// ISO-8859-8 encoded bytes must decode to the expected Hebrew person name.
@Test
public void testDecodeHebrewPersonName() {
    assertEquals(HEBREW_PERSON_NAME, iso8859_8().decode(HEBREW_PERSON_NAME_BYTE));
}
/**
 * Appends the given elements to the tail of the deque, blocking until the
 * asynchronous operation completes.
 *
 * @param elements elements to append in order
 * @return the result of the async add (the deque size after the add, per the
 *         async variant's contract — confirm against addLastAsync)
 */
@Override
public int addLast(V... elements) {
    return get(addLastAsync(elements));
}
// Baseline JDK ArrayDeque behavior: addLast preserves insertion order.
@Test
public void testAddLastOrigin() {
    Deque<Integer> queue = new ArrayDeque<Integer>();
    queue.addLast(1);
    queue.addLast(2);
    queue.addLast(3);
    assertThat(queue).containsExactly(1, 2, 3);
}
/**
 * Fails if the subject iterable contains the given element.
 *
 * @param element the element that must be absent (may be null)
 */
public final void doesNotContain(@Nullable Object element) {
    if (Iterables.contains(checkNotNull(actual), element)) {
        failWithActual("expected not to contain", element);
    }
}
// doesNotContain passes when the element is absent, even with null members present.
@Test
public void iterableDoesNotContain() {
    assertThat(asList(1, null, 3)).doesNotContain(5);
}
/**
 * Resolves the cache key for this exchange: the EhcacheConstants.KEY header
 * (converted to the configured key type when one is set) takes precedence,
 * falling back to the key configured on the endpoint.
 *
 * @param message the in-message carrying the optional key header
 * @return the resolved key, never null
 * @throws CamelExchangeException if neither the header nor the configuration provides a key
 */
private Object getKey(final Message message) throws Exception {
    Object value;
    if (configuration.getKeyType() != null) {
        // A key type is configured: convert the header value to that class.
        Class<?> clazz = getEndpoint().getCamelContext().getClassResolver().resolveClass(configuration.getKeyType());
        value = message.getHeader(EhcacheConstants.KEY, clazz);
    } else {
        value = message.getHeader(EhcacheConstants.KEY);
    }
    if (value == null) {
        // Fall back to the statically configured key.
        value = configuration.getKey();
    }
    if (value == null) {
        throw new CamelExchangeException(
                "No value provided in header or as default value (" + EhcacheConstants.KEY + ")", message.getExchange());
    }
    return value;
}
// REMOVE_ALL with an explicit KEYS header must delete only those keys from the cache
// and report success with no result payload.
@Test
void testCacheRemoveAll() throws Exception {
    final Cache<Object, Object> cache = getTestCache();
    final Map<String, String> map = generateRandomMapOfString(3);
    // Remove two of the three inserted keys.
    final Set<String> keys = map.keySet().stream().limit(2).collect(Collectors.toSet());
    cache.putAll(map);
    MockEndpoint mock = getMockEndpoint("mock:result");
    mock.expectedMinimumMessageCount(1);
    mock.expectedHeaderReceived(EhcacheConstants.ACTION_HAS_RESULT, false);
    mock.expectedHeaderReceived(EhcacheConstants.ACTION_SUCCEEDED, true);
    fluentTemplate()
            .withHeader(EhcacheConstants.ACTION, EhcacheConstants.ACTION_REMOVE_ALL)
            .withHeader(EhcacheConstants.KEYS, keys)
            .to("direct://start")
            .send();
    MockEndpoint.assertIsSatisfied(context);
    // None of the removed keys may remain in the cache.
    cache.forEach(e -> assertFalse(keys.contains(e.getKey())));
}
/**
 * Resolves repository references for every job entry that declares them.
 * Entries whose referenced objects cannot be found are collected (keyed by
 * "path/name", or just "name" when the path is empty or the literal "null"),
 * and a single LookupReferencesException wrapping the last failure is thrown
 * once all entries have been visited.
 *
 * @param repository the repository to resolve references against
 * @throws KettleException if one or more referenced objects could not be found
 */
public void lookupRepositoryReferences( Repository repository ) throws KettleException {
  KettleException lastError = null;
  Map<String, RepositoryObjectType> unresolvedReferences = new HashMap<>();
  for ( JobEntryCopy copy : jobcopies ) {
    if ( !copy.getEntry().hasRepositoryReferences() ) {
      continue;
    }
    try {
      copy.getEntry().lookupRepositoryReferences( repository );
    } catch ( IdNotFoundException e ) {
      lastError = e;
      String path = e.getPathToObject();
      String name = e.getObjectName();
      boolean pathMissing = StringUtils.isEmpty( path ) || "null".equals( path );
      unresolvedReferences.put( pathMissing ? name : path + "/" + name, e.getObjectType() );
    }
  }
  if ( lastError != null && !unresolvedReferences.isEmpty() ) {
    throw new LookupReferencesException( lastError, unresolvedReferences );
  }
}
// One broken entry among three: lookup must still visit the healthy entries and then
// fail with LookupReferencesException.
@Test
public void testLookupRepositoryReferences() throws Exception {
    jobMeta.clear();
    JobEntryTrans jobEntryMock = mock( JobEntryTrans.class );
    when( jobEntryMock.hasRepositoryReferences() ).thenReturn( true );
    JobEntryTrans brokenJobEntryMock = mock( JobEntryTrans.class );
    when( brokenJobEntryMock.hasRepositoryReferences() ).thenReturn( true );
    doThrow( mock( IdNotFoundException.class ) ).when( brokenJobEntryMock ).lookupRepositoryReferences( any( Repository.class ) );
    JobEntryCopy jobEntryCopy1 = mock( JobEntryCopy.class );
    when( jobEntryCopy1.getEntry() ).thenReturn( jobEntryMock );
    jobMeta.addJobEntry( 0, jobEntryCopy1 );
    JobEntryCopy jobEntryCopy2 = mock( JobEntryCopy.class );
    when( jobEntryCopy2.getEntry() ).thenReturn( brokenJobEntryMock );
    jobMeta.addJobEntry( 1, jobEntryCopy2 );
    JobEntryCopy jobEntryCopy3 = mock( JobEntryCopy.class );
    when( jobEntryCopy3.getEntry() ).thenReturn( jobEntryMock );
    jobMeta.addJobEntry( 2, jobEntryCopy3 );
    try {
        jobMeta.lookupRepositoryReferences( mock( Repository.class ) );
        fail( "no exception for broken entry" );
    } catch ( LookupReferencesException e ) {
        // ok — expected for the broken entry
    }
    // Both healthy entries must still have been looked up.
    verify( jobEntryMock, times( 2 ) ).lookupRepositoryReferences( any( Repository.class ) );
}
/**
 * Splits [start, end) into up to desiredNumBundles contiguous offset ranges whose
 * lengths are proportional to the sampled relative bundle sizes. Consecutive ranges
 * share an endpoint; zero-length ranges are dropped, so fewer ranges than requested
 * may be returned.
 *
 * @param desiredNumBundles target number of bundles
 * @param start inclusive start offset
 * @param end exclusive end offset
 * @return contiguous, non-empty offset ranges covering [start, end)
 */
List<OffsetRange> getBundleSizes(int desiredNumBundles, long start, long end) {
    List<OffsetRange> result = new ArrayList<>();
    double[] relativeSizes = getRelativeBundleSizes(desiredNumBundles);
    // Generate offset ranges proportional to the relative sizes.
    double s = sum(relativeSizes);
    long startOffset = start;
    double sizeSoFar = 0;
    for (int i = 0; i < relativeSizes.length; ++i) {
        sizeSoFar += relativeSizes[i];
        // The last bundle ends exactly at 'end' so rounding cannot leave a gap.
        long endOffset =
            (i == relativeSizes.length - 1)
                ? end
                : (long) (start + sizeSoFar * (end - start) / s);
        if (startOffset != endOffset) {
            result.add(new OffsetRange(startOffset, endOffset));
        }
        startOffset = endOffset;
    }
    return result;
}
// With a constant size distribution, consecutive bundles must be contiguous
// (each range starts exactly where the previous one ends).
@Test
public void consequentBundlesShouldHaveTheSameRangeEndAndStart() {
    int desiredNumberOfBundles = 2;
    options.bundleSizeDistribution = fromRealDistribution(new ConstantRealDistribution(2));
    splitter = new BundleSplitter(options);
    List<OffsetRange> bundleSizes = splitter.getBundleSizes(desiredNumberOfBundles, 0, options.numRecords);
    assertEquals(bundleSizes.get(0).getTo(), bundleSizes.get(1).getFrom());
    assertEquals(bundleSizes.get(0).getTo(), bundleSizes.get(1).getFrom());
    assertEquals(desiredNumberOfBundles, bundleSizes.size());
}
/**
 * Finds elementary cycles in this graph via Johnson's algorithm, also reporting
 * whether the configured maximum number of cycles was reached (i.e. the result
 * may be truncated).
 */
Cycles<EDGE> findCycles() {
    JohnsonCycleFinder johnsonCycleFinder = new JohnsonCycleFinder(createPrimitiveGraph());
    JohnsonCycleFinder.Result rawCycles = johnsonCycleFinder.findCycles();
    return new CyclesInternal<>(mapToCycles(rawCycles), rawCycles.maxNumberOfCyclesReached());
}
// Cycle detection must stop exactly at the configured cap and flag the truncation.
@Test
public void finds_cycles_in_real_life_graph() {
    Graph<Integer, Edge<Integer>> graph = RealLifeGraph.get();
    int expectedNumberOfCycles = 10000;
    ArchConfiguration.get().setProperty(MAX_NUMBER_OF_CYCLES_TO_DETECT_PROPERTY_NAME, String.valueOf(expectedNumberOfCycles));
    Cycles<Edge<Integer>> cycles = graph.findCycles();
    assertThatCycles(cycles).hasSize(expectedNumberOfCycles);
    assertThat(cycles.maxNumberOfCyclesReached()).as("maximum number of cycles reached").isTrue();
}
/**
 * Prefixes the given client tag key with the client-tag namespace used for
 * StreamsConfig property names.
 *
 * @param clientTagKey the raw tag key
 * @return the namespaced config property name
 */
public static String clientTagPrefix(final String clientTagKey) {
    return CLIENT_TAG_PREFIX + clientTagKey;
}
// Configuring one more rack-aware assignment tag than the maximum allowed must be
// rejected with a ConfigException naming the offending list.
@Test
public void shouldThrowExceptionWhenRackAwareAssignmentTagsExceedsMaxListSize() {
    final int limit = StreamsConfig.MAX_RACK_AWARE_ASSIGNMENT_TAG_LIST_SIZE + 1;
    final List<String> rackAwareAssignmentTags = new ArrayList<>();
    for (int i = 0; i < limit; i++) {
        final String clientTagKey = "k" + i;
        rackAwareAssignmentTags.add(clientTagKey);
        props.put(StreamsConfig.clientTagPrefix(clientTagKey), "v" + i);
    }
    props.put(StreamsConfig.RACK_AWARE_ASSIGNMENT_TAGS_CONFIG, String.join(",", rackAwareAssignmentTags));
    final ConfigException exception = assertThrows(ConfigException.class, () -> new StreamsConfig(props));
    assertEquals(
            String.format("Invalid value %s for configuration %s: exceeds maximum list size of [%s].",
                    rackAwareAssignmentTags,
                    StreamsConfig.RACK_AWARE_ASSIGNMENT_TAGS_CONFIG,
                    StreamsConfig.MAX_RACK_AWARE_ASSIGNMENT_TAG_LIST_SIZE),
            exception.getMessage()
    );
}
/**
 * Creates a processing span for the given record, continuing any trace context
 * extracted from (and cleared out of) the record's headers.
 *
 * @param record the record being processed
 * @return the next span (a child of the extracted context, or a new trace)
 */
public Span nextSpan(ConsumerRecord<?, ?> record) {
    // Even though the type is ConsumerRecord, this is not a (remote) consumer span. Only "poll"
    // events create consumer spans. Since this is a processor span, we use the normal sampler.
    TraceContextOrSamplingFlags extracted =
            extractAndClearTraceIdHeaders(processorExtractor, record.headers(), record.headers());
    Span result = tracer.nextSpan(extracted);
    if (extracted.context() == null && !result.isNoop()) {
        // No incoming context: this span starts a new trace, so add the record tags here.
        addTags(record, result);
    }
    return result;
}
// Baggage propagated via record headers must be visible on the created span's context.
@Test
void nextSpan_should_create_span_with_baggage() {
    addB3MultiHeaders(parent, consumerRecord);
    consumerRecord.headers().add(BAGGAGE_FIELD_KEY, "user1".getBytes());
    Span span = kafkaTracing.nextSpan(consumerRecord);
    assertThat(BAGGAGE_FIELD.getValue(span.context())).contains("user1");
}
/**
 * Executes an issue search against Elasticsearch: builds the request, applies
 * sorting, pagination, routing, filters and aggregations derived from the query
 * and options, then runs it.
 *
 * @param query the issue query (filters, sort)
 * @param options paging and facet options
 * @return the raw Elasticsearch response
 */
public SearchResponse search(IssueQuery query, SearchOptions options) {
    SearchRequest requestBuilder = EsClient.prepareSearch(TYPE_ISSUE.getMainType());
    SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
    requestBuilder.source(sourceBuilder);
    configureSorting(query, sourceBuilder);
    configurePagination(options, sourceBuilder);
    configureRouting(query, options, requestBuilder);
    AllFilters allFilters = createAllFilters(query);
    RequestFiltersComputer filterComputer = newFilterComputer(options, allFilters);
    configureTopAggregations(query, options, sourceBuilder, allFilters, filterComputer);
    configureQuery(sourceBuilder, filterComputer);
    configureTopFilters(sourceBuilder, filterComputer);
    // Document sources are not needed (only ids/aggregations); always track exact hit counts.
    sourceBuilder.fetchSource(false)
        .trackTotalHits(true);
    return client.search(requestBuilder);
}
// Sorting by creation date must be stable across repeated searches: nine issues with
// the same creation date are queried five times and must come back in the same order.
@Test
void search_nine_issues_5_times_with_same_creation_date_sorted_by_creation_date_returned_issues_same_order() {
    ComponentDto project = newPrivateProjectDto();
    ComponentDto file = newFileDto(project);
    List<IssueDoc> issues = new ArrayList<>();
    // we are adding issues in reverse order to see if the sort is actually doing anything
    for (int i = 9; i >= 1; i--) {
        String key = "I" + i;
        issues.add(newDoc(key, project.uuid(), file));
    }
    indexIssues(issues.toArray(new IssueDoc[]{}));
    IssueQuery.Builder query = IssueQuery.builder().asc(true);
    SearchResponse result = underTest.search(query.sort(IssueQuery.SORT_BY_CREATION_DATE).build(), new SearchOptions());
    SearchHit[] originalHits = result.getHits().getHits();
    // Re-run the same query and compare hit order against the first run.
    for (int i = 0; i < 4; i++) {
        result = underTest.search(query.sort(IssueQuery.SORT_BY_CREATION_DATE).build(), new SearchOptions());
        for (int j = 0; j < originalHits.length; j++) {
            SearchHit[] hits = result.getHits().getHits();
            assertThat(originalHits[j].getId()).isEqualTo(hits[j].getId());
        }
    }
}
/**
 * Extracts the JSON payload from a dynamic message by locating the field whose
 * fully-qualified name matches the synthetic JSON wrapper field.
 *
 * @param message the dynamic message to inspect
 * @return the JSON string carried by the wrapper field, or an empty string if absent
 */
public static String getDataFromDynamicMessage(final DynamicMessage message) {
    // Hoisted out of the loop: the expected field name is loop-invariant.
    final String jsonMessageFullName = GrpcConstants.JSON_DESCRIPTOR_PROTO_NAME
            + "." + GrpcConstants.JSON_DESCRIPTOR_PROTO_FIELD_NAME;
    for (Map.Entry<Descriptors.FieldDescriptor, Object> entry : message.getAllFields().entrySet()) {
        if (jsonMessageFullName.equals(entry.getKey().getFullName())) {
            return (String) entry.getValue();
        }
    }
    return "";
}
// Covers: payload round-trip through the JSON wrapper message, a message with a
// non-matching field (empty result), an empty wrapper (empty result), and the
// RuntimeException path when descriptor building fails.
@Test
public void testGetDataFromDynamicMessage() {
    String jsonParam = "{\"text\":\"hello world\"}";
    DynamicMessage jsonMessage = JsonMessage.buildJsonMessage(jsonParam);
    String data = JsonMessage.getDataFromDynamicMessage(jsonMessage);
    assertEquals(jsonParam, data);
    final DynamicMessage dynamicMessageMock = mock(DynamicMessage.class);
    final Map<Descriptors.FieldDescriptor, Object> hashMap = new HashMap<>(1);
    hashMap.put(mock(Descriptors.FieldDescriptor.class), "data");
    when(dynamicMessageMock.getAllFields()).thenReturn(hashMap);
    final String dataFromDynamicMessage = JsonMessage.getDataFromDynamicMessage(dynamicMessageMock);
    assertEquals(dataFromDynamicMessage, "");
    DynamicMessage jsonMessage2 = JsonMessage.buildJsonMessage();
    String data2 = JsonMessage.getDataFromDynamicMessage(jsonMessage2);
    assertEquals(data2, "");
    try (MockedStatic<Descriptors.FileDescriptor> descriptorMockedStatic = mockStatic(Descriptors.FileDescriptor.class)) {
        descriptorMockedStatic.when(() -> Descriptors.FileDescriptor.buildFrom(any(), any(Descriptors.FileDescriptor[].class))).thenThrow(Descriptors.DescriptorValidationException.class);
        assertThrows(RuntimeException.class, JsonMessage::buildJsonMessage);
    }
}
/**
 * Runs the scanning workflow synchronously by blocking on the asynchronous variant.
 *
 * @param scanTarget the target to scan
 * @return the completed scan results
 * @throws ExecutionException if the async scan failed
 * @throws InterruptedException if interrupted while waiting
 */
public ScanResults run(ScanTarget scanTarget) throws ExecutionException, InterruptedException {
    return runAsync(scanTarget).get();
}
// A null scan target must be rejected eagerly with NullPointerException.
@Test
public void run_whenNullScanTarget_throwsNullPointerException() {
    Injector injector = Guice.createInjector(
            new FakeUtcClockModule(),
            new FakePluginExecutionModule(),
            new FakeServiceFingerprinterBootstrapModule(),
            new FakeVulnDetectorBootstrapModule());
    scanningWorkflow = injector.getInstance(DefaultScanningWorkflow.class);
    assertThrows(NullPointerException.class, () -> scanningWorkflow.run(null));
}
/**
 * Matches a message against all stream rules, honoring each stream's matching type:
 * AND streams are removed (and blacklisted) as soon as one rule fails; OR streams are
 * blacklisted as soon as one rule matches. Regex rules are evaluated with a timeout.
 * Finally, streams flagged to do so remove the default stream from the message.
 *
 * @param message the message to route
 * @return the streams the message belongs to
 */
public List<Stream> match(Message message) {
    final Set<Stream> result = Sets.newHashSet();
    // Stream ids whose remaining rules need not be evaluated (decided either way).
    final Set<String> blackList = Sets.newHashSet();
    for (final Rule rule : rulesList) {
        if (blackList.contains(rule.getStreamId())) {
            continue;
        }
        final StreamRule streamRule = rule.getStreamRule();
        final StreamRuleType streamRuleType = streamRule.getType();
        final Stream.MatchingType matchingType = rule.getMatchingType();
        // A missing field fails the rule unless the rule type doesn't need the field.
        if (!ruleTypesNotNeedingFieldPresence.contains(streamRuleType)
                && !message.hasField(streamRule.getField())) {
            if (matchingType == Stream.MatchingType.AND) {
                result.remove(rule.getStream());
                // blacklist stream because it can't match anymore
                blackList.add(rule.getStreamId());
            }
            continue;
        }
        final Stream stream;
        if (streamRuleType != StreamRuleType.REGEX) {
            stream = rule.match(message);
        } else {
            // Regex evaluation is bounded to protect against catastrophic backtracking.
            stream = rule.matchWithTimeOut(message, streamProcessingTimeout, TimeUnit.MILLISECONDS);
        }
        if (stream == null) {
            if (matchingType == Stream.MatchingType.AND) {
                result.remove(rule.getStream());
                // blacklist stream because it can't match anymore
                blackList.add(rule.getStreamId());
            }
        } else {
            result.add(stream);
            if (matchingType == Stream.MatchingType.OR) {
                // blacklist stream because it is already matched
                blackList.add(rule.getStreamId());
            }
        }
    }
    final Stream defaultStream = defaultStreamProvider.get();
    boolean alreadyRemovedDefaultStream = false;
    for (Stream stream : result) {
        if (stream.getRemoveMatchesFromDefaultStream()) {
            if (alreadyRemovedDefaultStream || message.removeStream(defaultStream)) {
                alreadyRemovedDefaultStream = true;
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Successfully removed default stream <{}> from message <{}>", defaultStream.getId(), message.getId());
                }
            } else {
                // A previously executed message processor (or Illuminate) has likely already removed the
                // default stream from the message. Now, the message has matched a stream in the Graylog
                // MessageFilterChain, and the matching stream is also set to remove the default stream.
                // This is usually from user-defined stream rules, and is generally not a problem.
                cannotRemoveDefaultMeter.inc();
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Couldn't remove default stream <{}> from message <{}>", defaultStream.getId(), message.getId());
                }
            }
        }
    }
    return ImmutableList.copyOf(result);
}
// A message may match several streams at once; each stream's rule set (presence,
// regex, exact) is evaluated independently.
@Test
public void testMultipleStreamsMatch() throws Exception {
    final StreamMock stream1 = getStreamMock("test1");
    final StreamMock stream2 = getStreamMock("test2");
    final StreamRuleMock rule1 = new StreamRuleMock(ImmutableMap.of(
            "_id", new ObjectId(),
            "field", "testfield1",
            "type", StreamRuleType.PRESENCE.toInteger(),
            "stream_id", stream1.getId()
    ));
    final StreamRuleMock rule2 = new StreamRuleMock(ImmutableMap.of(
            "_id", new ObjectId(),
            "field", "testfield2",
            "value", "^test",
            "type", StreamRuleType.REGEX.toInteger(),
            "stream_id", stream1.getId()
    ));
    final StreamRuleMock rule3 = new StreamRuleMock(ImmutableMap.of(
            "_id", new ObjectId(),
            "field", "testfield3",
            "value", "testvalue3",
            "type", StreamRuleType.EXACT.toInteger(),
            "stream_id", stream2.getId()
    ));
    stream1.setStreamRules(Lists.newArrayList(rule1, rule2));
    stream2.setStreamRules(Lists.newArrayList(rule3));
    final StreamRouterEngine engine = newEngine(Lists.newArrayList(stream1, stream2));
    // Without testfield1 and testfield2 in the message.
    final Message message1 = getMessage();
    assertTrue(engine.match(message1).isEmpty());
    // With testfield1 and matching testfield2 in the message.
    final Message message2 = getMessage();
    message2.addField("testfield1", "testvalue");
    message2.addField("testfield2", "testvalue2");
    assertEquals(Lists.newArrayList(stream1), engine.match(message2));
    // With testfield1, matching testfield2 and matching testfield3 in the message.
    final Message message3 = getMessage();
    message3.addField("testfield1", "testvalue");
    message3.addField("testfield2", "testvalue2");
    message3.addField("testfield3", "testvalue3");
    final List<Stream> match = engine.match(message3);
    assertTrue(match.contains(stream1));
    assertTrue(match.contains(stream2));
    assertEquals(2, match.size());
    // With matching testfield3 in the message.
    final Message message4 = getMessage();
    message4.addField("testfield3", "testvalue3");
    assertEquals(Lists.newArrayList(stream2), engine.match(message4));
}
/**
 * Removes the Resilience4J registry entry and any cached handle for the given rule.
 *
 * @param ruleData the rule being removed
 */
@Override
public void removeRule(final RuleData ruleData) {
    String key = CacheKeyUtils.INST.getKey(ruleData);
    Resilience4JRegistryFactory.remove(key);
    // Only drop the cached handle if the rule actually carried a handle payload.
    Optional.ofNullable(ruleData.getHandle()).ifPresent(s -> CACHED_HANDLE.get().removeHandle(key));
}
// removeRule must evict the handle cached under "<selectorId>_<ruleId>".
@Test
public void testRemoveRule() {
    ruleData.setSelectorId("1");
    ruleData.setHandle("{\"urlPath\":\"test\"}");
    ruleData.setId("test");
    Supplier<CommonHandleCache<String, Resilience4JHandle>> cache = Resilience4JHandler.CACHED_HANDLE;
    cache.get().cachedHandle("1_test", new Resilience4JHandle());
    Assertions.assertNotNull(cache.get().obtainHandle("1_test"));
    resilience4JHandler.removeRule(ruleData);
    Assertions.assertNull(cache.get().obtainHandle("1_test"));
}
/**
 * Maps a TPC-H schema name to its scale factor: the tiny schema maps to the tiny
 * scale factor, "sf&lt;number&gt;" maps to the parsed number, and anything else
 * (including an unparsable suffix) maps to -1.
 *
 * @param schemaName the schema name, e.g. "tiny" or "sf1.5"
 * @return the scale factor, or -1 if the name is not recognized
 */
public static double schemaNameToScaleFactor(String schemaName) {
    if (TINY_SCHEMA_NAME.equals(schemaName)) {
        return TINY_SCALE_FACTOR;
    }
    if (!schemaName.startsWith("sf")) {
        return -1;
    }
    try {
        return Double.parseDouble(schemaName.substring(2));
    }
    catch (NumberFormatException ignored) {
        // Narrowed from catch (Exception): parseDouble on a non-null suffix can
        // only fail with NumberFormatException, e.g. for "sfabc".
        return -1;
    }
}
// Row-count estimates for every TPC-H table must scale linearly with the schema's
// scale factor (REGION and NATION are fixed-size).
@Test
public void testTableStats() {
    SUPPORTED_SCHEMAS.forEach(schema -> {
        double scaleFactor = TpchMetadata.schemaNameToScaleFactor(schema);
        testTableStats(schema, REGION, 5);
        testTableStats(schema, NATION, 25);
        testTableStats(schema, SUPPLIER, 10_000 * scaleFactor);
        testTableStats(schema, CUSTOMER, 150_000 * scaleFactor);
        testTableStats(schema, PART, 200_000 * scaleFactor);
        testTableStats(schema, PART_SUPPLIER, 800_000 * scaleFactor);
        testTableStats(schema, ORDERS, 1_500_000 * scaleFactor);
        testTableStats(schema, LINE_ITEM, 6_000_000 * scaleFactor);
    });
}
/**
 * Decides whether a sharded SELECT must be executed through SQL federation instead
 * of being pushed down to the sharding executor, collecting the involved data nodes
 * as a side effect.
 *
 * @return true if federation is required
 */
@Override
public boolean decide(final SelectStatementContext selectStatementContext, final List<Object> parameters,
                      final RuleMetaData globalRuleMetaData, final ShardingSphereDatabase database, final ShardingRule rule, final Collection<DataNode> includedDataNodes) {
    Collection<String> tableNames = rule.getShardingLogicTableNames(selectStatementContext.getTablesContext().getTableNames());
    if (tableNames.isEmpty()) {
        // No sharded tables involved: federation is never needed.
        return false;
    }
    includedDataNodes.addAll(getTableDataNodes(rule, tableNames, database));
    // Features the sharding executor cannot push down always require federation.
    if (selectStatementContext.isContainsSubquery() || selectStatementContext.isContainsHaving()
            || selectStatementContext.isContainsCombine() || selectStatementContext.isContainsPartialDistinctAggregation()) {
        return true;
    }
    // No join, or all tables colocated in one data source: push-down is safe.
    if (!selectStatementContext.isContainsJoinQuery() || rule.isAllTablesInSameDataSource(tableNames)) {
        return false;
    }
    // Self-join on a single sharded table that is not binding-safe.
    if (1 == tableNames.size() && selectStatementContext.isContainsJoinQuery() && !rule.isAllBindingTables(database, selectStatementContext, tableNames)) {
        return true;
    }
    // Multi-table join requires federation unless all tables are binding tables.
    return tableNames.size() > 1 && !rule.isAllBindingTables(database, selectStatementContext, tableNames);
}
// A HAVING clause cannot be pushed down, so decide() must return true and still
// collect all involved data nodes.
@Test
void assertDecideWhenContainsHaving() {
    SelectStatementContext select = createStatementContext();
    when(select.isContainsHaving()).thenReturn(true);
    Collection<DataNode> includedDataNodes = new HashSet<>();
    ShardingRule shardingRule = createShardingRule();
    assertTrue(new ShardingSQLFederationDecider().decide(select, Collections.emptyList(), mock(RuleMetaData.class), createDatabase(shardingRule), shardingRule, includedDataNodes));
    assertThat(includedDataNodes.size(), is(4));
}
/**
 * Deletes the given files and buckets. Regular objects (or versions) are deleted
 * first; in-progress multipart uploads are aborted instead. Buckets are deferred
 * until all contained objects have been handled, then deleted and evicted from the
 * region cache.
 *
 * @param files files to delete mapped to their transfer status
 * @param prompt password callback (unused here)
 * @param callback notified before each deletion
 * @throws BackgroundException on service failures
 */
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
    final List<Path> containers = new ArrayList<>();
    for(Path file : files.keySet()) {
        if(containerService.isContainer(file)) {
            // Buckets must be emptied first; delete them after the object loop.
            containers.add(file);
        }
        else {
            callback.delete(file);
            final Path bucket = containerService.getContainer(file);
            if(file.getType().contains(Path.Type.upload)) {
                // In-progress multipart upload
                try {
                    multipartService.delete(new MultipartUpload(file.attributes().getVersionId(),
                            bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file)));
                }
                catch(NotfoundException ignored) {
                    log.warn(String.format("Ignore failure deleting multipart upload %s", file));
                }
            }
            else {
                try {
                    // Always returning 204 even if the key does not exist. Does not return 404 for non-existing keys
                    session.getClient().deleteVersionedObject(
                            file.attributes().getVersionId(),
                            bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file));
                }
                catch(ServiceException e) {
                    throw new S3ExceptionMappingService().map("Cannot delete {0}", e, file);
                }
            }
        }
    }
    for(Path file : containers) {
        callback.delete(file);
        try {
            final String bucket = containerService.getContainer(file).getName();
            session.getClient().deleteBucket(bucket);
            // Drop the cached region mapping for the deleted bucket.
            session.getClient().getRegionEndpointCache().removeRegionForBucketName(bucket);
        }
        catch(ServiceException e) {
            throw new S3ExceptionMappingService().map("Cannot delete {0}", e, file);
        }
    }
}
// Deleting a directory placeholder object must make it invisible to both the S3 and
// default find features, and leave no trace in the versioned listing.
@Test
public void testDeletePlaceholder() throws Exception {
    final Path container = new Path("versioning-test-eu-central-1-cyberduck", EnumSet.of(Path.Type.volume, Path.Type.directory));
    final S3AccessControlListFeature acl = new S3AccessControlListFeature(session);
    final Path test = new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl).mkdir(new Path(container,
            String.format("%s %s", new AlphanumericRandomStringService().random(), new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.directory)), new TransferStatus());
    assertTrue(new S3FindFeature(session, acl).find(test));
    assertTrue(new DefaultFindFeature(session).find(test));
    new S3DefaultDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
    assertFalse(new S3FindFeature(session, acl).find(test));
    assertFalse(new DefaultFindFeature(session).find(test));
    assertNull(new S3VersionedObjectListService(session, acl).list(container, new DisabledListProgressListener()).find(new SimplePathPredicate(test)));
}
/**
 * Deletes each file via the Storegate REST API (DELETE /v4.2/files/{id}),
 * forwarding any lock token and evicting the cached file id on success.
 *
 * @param files files to delete mapped to their transfer status (may carry a lock id)
 * @param prompt password callback (unused here)
 * @param callback notified before each deletion
 * @throws BackgroundException if the API rejects the deletion or I/O fails
 */
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
    for(Map.Entry<Path, TransferStatus> file : files.entrySet()) {
        try {
            callback.delete(file.getKey());
            final StoregateApiClient client = session.getClient();
            final HttpRequestBase request;
            request = new HttpDelete(String.format("%s/v4.2/files/%s", client.getBasePath(), fileid.getFileId(file.getKey())));
            if(file.getValue().getLockId() != null) {
                // Pass the lock token so locked files can still be deleted by the lock owner.
                request.addHeader("X-Lock-Id", file.getValue().getLockId().toString());
            }
            request.addHeader(HTTP.CONTENT_TYPE, MEDIA_TYPE);
            final HttpResponse response = client.getClient().execute(request);
            try {
                switch(response.getStatusLine().getStatusCode()) {
                    case HttpStatus.SC_NO_CONTENT:
                        // Expected success status.
                        break;
                    default:
                        throw new StoregateExceptionMappingService(fileid).map("Cannot delete {0}",
                                new ApiException(response.getStatusLine().getStatusCode(), response.getStatusLine().getReasonPhrase()), file.getKey());
                }
            }
            finally {
                EntityUtils.consume(response.getEntity());
            }
            // Invalidate the cached id for the deleted path.
            fileid.cache(file.getKey(), null);
        }
        catch(IOException e) {
            throw new DefaultIOExceptionMappingService().map("Cannot delete {0}", e, file.getKey());
        }
    }
}
// Deleting a non-empty folder (and then its enclosing room) must succeed and leave
// neither findable afterwards.
@Test
public void testDeleteFolderRoomWithContent() throws Exception {
    final StoregateIdProvider nodeid = new StoregateIdProvider(session);
    final Path room = new StoregateDirectoryFeature(session, nodeid).mkdir(new Path(
            String.format("/My files/%s", new AlphanumericRandomStringService().random()),
            EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final Path folder = new StoregateDirectoryFeature(session, nodeid).mkdir(new Path(room,
            new AlphanumericRandomStringService().random().toLowerCase(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    assertTrue(new DefaultFindFeature(session).find(folder));
    final Path file = new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    new StoregateTouchFeature(session, nodeid).touch(file, new TransferStatus());
    assertTrue(new DefaultFindFeature(session).find(file));
    new StoregateDeleteFeature(session, nodeid).delete(Collections.singletonList(folder), new DisabledLoginCallback(), new Delete.DisabledCallback());
    assertFalse(new DefaultFindFeature(session).find(folder));
    new StoregateDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
    assertFalse(new DefaultFindFeature(session).find(room));
}
/**
 * Queries a page of operation logs matching the given filter, delegating to the mapper.
 *
 * @param pageReqVO paging and filter parameters
 * @return the matching page of operation logs
 */
@Override
public PageResult<OperateLogDO> getOperateLogPage(OperateLogPageReqVO pageReqVO) {
    return operateLogMapper.selectPage(pageReqVO);
}
// Paged query must match on userId, bizId and type simultaneously; records differing
// in any one of them are excluded.
@Test
public void testGetOperateLogPage_dto() {
    // Build a matching operation log record.
    OperateLogDO operateLogDO = RandomUtils.randomPojo(OperateLogDO.class, o -> {
        o.setUserId(2048L);
        o.setBizId(999L);
        o.setType("订单");
    });
    operateLogMapper.insert(operateLogDO);
    // Record whose userId does not match.
    operateLogMapper.insert(cloneIgnoreId(operateLogDO, o -> o.setUserId(1024L)));
    // Record whose bizId does not match.
    operateLogMapper.insert(cloneIgnoreId(operateLogDO, o -> o.setBizId(888L)));
    // Record whose type does not match.
    operateLogMapper.insert(cloneIgnoreId(operateLogDO, o -> o.setType("退款")));
    // Build the query parameters.
    OperateLogPageReqDTO reqDTO = new OperateLogPageReqDTO();
    reqDTO.setUserId(2048L);
    reqDTO.setBizId(999L);
    reqDTO.setType("订单");
    // Invoke.
    PageResult<OperateLogDO> pageResult = operateLogServiceImpl.getOperateLogPage(reqDTO);
    // Assert: only the single matching record is returned.
    assertEquals(1, pageResult.getTotal());
    assertEquals(1, pageResult.getList().size());
    assertPojoEquals(operateLogDO, pageResult.getList().get(0));
}
@Override public byte[] serialize(final String topic, final List<?> data) { if (data == null) { return null; } try { final StringWriter stringWriter = new StringWriter(); final CSVPrinter csvPrinter = new CSVPrinter(stringWriter, csvFormat); csvPrinter.printRecord(() -> new FieldIterator(data, schema)); final String result = stringWriter.toString(); return result.substring(0, result.length() - 2).getBytes(StandardCharsets.UTF_8); } catch (final Exception e) { throw new SerializationException("Error serializing CSV message", e); } }
// A decimal value must serialize with its declared scale preserved ("1.12").
@Test
public void shouldSerializeDecimalWithPaddedZeros() {
    // Given:
    givenSingleColumnSerializer(SqlTypes.decimal(4, 2));
    final List<?> values = Collections.singletonList(new BigDecimal("1.12"));
    // When:
    final byte[] bytes = serializer.serialize("", values);
    // Then:
    assertThat(new String(bytes, StandardCharsets.UTF_8), is("1.12"));
}
@SuppressWarnings("unchecked") @Override public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) throws YarnException, IOException { NodeStatus remoteNodeStatus = request.getNodeStatus(); /** * Here is the node heartbeat sequence... * 1. Check if it's a valid (i.e. not excluded) node * 2. Check if it's a registered node * 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat * 4. Send healthStatus to RMNode * 5. Update node's labels if distributed Node Labels configuration is enabled */ NodeId nodeId = remoteNodeStatus.getNodeId(); // 1. Check if it's a valid (i.e. not excluded) node, if not, see if it is // in decommissioning. if (!this.nodesListManager.isValidNode(nodeId.getHost()) && !isNodeInDecommissioning(nodeId)) { String message = "Disallowed NodeManager nodeId: " + nodeId + " hostname: " + nodeId.getHost(); LOG.info(message); return YarnServerBuilderUtils.newNodeHeartbeatResponse( NodeAction.SHUTDOWN, message); } // 2. Check if it's a registered node RMNode rmNode = this.rmContext.getRMNodes().get(nodeId); if (rmNode == null) { /* node does not exist */ String message = "Node not found resyncing " + remoteNodeStatus.getNodeId(); LOG.info(message); return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC, message); } // Send ping this.nmLivelinessMonitor.receivedPing(nodeId); this.decommissioningWatcher.update(rmNode, remoteNodeStatus); // 3. Check if it's a 'fresh' heartbeat i.e. 
not duplicate heartbeat NodeHeartbeatResponse lastNodeHeartbeatResponse = rmNode.getLastNodeHeartBeatResponse(); if (getNextResponseId( remoteNodeStatus.getResponseId()) == lastNodeHeartbeatResponse .getResponseId()) { LOG.info("Received duplicate heartbeat from node " + rmNode.getNodeAddress()+ " responseId=" + remoteNodeStatus.getResponseId()); return lastNodeHeartbeatResponse; } else if (remoteNodeStatus.getResponseId() != lastNodeHeartbeatResponse .getResponseId()) { String message = "Too far behind rm response id:" + lastNodeHeartbeatResponse.getResponseId() + " nm response id:" + remoteNodeStatus.getResponseId(); LOG.info(message); // TODO: Just sending reboot is not enough. Think more. this.rmContext.getDispatcher().getEventHandler().handle( new RMNodeEvent(nodeId, RMNodeEventType.REBOOTING)); return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC, message); } // Evaluate whether a DECOMMISSIONING node is ready to be DECOMMISSIONED. if (rmNode.getState() == NodeState.DECOMMISSIONING && decommissioningWatcher.checkReadyToBeDecommissioned( rmNode.getNodeID())) { String message = "DECOMMISSIONING " + nodeId + " is ready to be decommissioned"; LOG.info(message); this.rmContext.getDispatcher().getEventHandler().handle( new RMNodeEvent(nodeId, RMNodeEventType.DECOMMISSION)); this.nmLivelinessMonitor.unregister(nodeId); return YarnServerBuilderUtils.newNodeHeartbeatResponse( NodeAction.SHUTDOWN, message); } if (timelineServiceV2Enabled) { // Check & update collectors info from request. 
updateAppCollectorsMap(request); } // Heartbeat response long newInterval = nextHeartBeatInterval; if (heartBeatIntervalScalingEnable) { newInterval = rmNode.calculateHeartBeatInterval( nextHeartBeatInterval, heartBeatIntervalMin, heartBeatIntervalMax, heartBeatIntervalSpeedupFactor, heartBeatIntervalSlowdownFactor); } NodeHeartbeatResponse nodeHeartBeatResponse = YarnServerBuilderUtils.newNodeHeartbeatResponse( getNextResponseId(lastNodeHeartbeatResponse.getResponseId()), NodeAction.NORMAL, null, null, null, null, newInterval); rmNode.setAndUpdateNodeHeartbeatResponse(nodeHeartBeatResponse); populateKeys(request, nodeHeartBeatResponse); populateTokenSequenceNo(request, nodeHeartBeatResponse); if (timelineServiceV2Enabled) { // Return collectors' map that NM needs to know setAppCollectorsMapToResponse(rmNode.getRunningApps(), nodeHeartBeatResponse); } // 4. Send status to RMNode, saving the latest response. RMNodeStatusEvent nodeStatusEvent = new RMNodeStatusEvent(nodeId, remoteNodeStatus); if (request.getLogAggregationReportsForApps() != null && !request.getLogAggregationReportsForApps().isEmpty()) { nodeStatusEvent.setLogAggregationReportsForApps(request .getLogAggregationReportsForApps()); } this.rmContext.getDispatcher().getEventHandler().handle(nodeStatusEvent); // 5. Update node's labels to RM's NodeLabelManager. if (isDistributedNodeLabelsConf && request.getNodeLabels() != null) { try { updateNodeLabelsFromNMReport( NodeLabelsUtils.convertToStringSet(request.getNodeLabels()), nodeId); nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(true); } catch (IOException ex) { //ensure the error message is captured and sent across in response nodeHeartBeatResponse.setDiagnosticsMessage(ex.getMessage()); nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(false); } } // 6. check if node's capacity is load from dynamic-resources.xml // if so, send updated resource back to NM. 
String nid = nodeId.toString(); Resource capability = loadNodeResourceFromDRConfiguration(nid); // sync back with new resource if not null. if (capability != null) { nodeHeartBeatResponse.setResource(capability); } // Check if we got an event (AdminService) that updated the resources if (rmNode.isUpdatedCapability()) { nodeHeartBeatResponse.setResource(rmNode.getTotalCapability()); rmNode.resetUpdatedCapability(); } // 7. Send Container Queuing Limits back to the Node. This will be used by // the node to truncate the number of Containers queued for execution. if (this.rmContext.getNodeManagerQueueLimitCalculator() != null) { nodeHeartBeatResponse.setContainerQueuingLimit( this.rmContext.getNodeManagerQueueLimitCalculator() .createContainerQueuingLimit()); } // 8. Get node's attributes and update node-to-attributes mapping // in RMNodeAttributeManager. if (request.getNodeAttributes() != null) { try { // update node attributes if necessary then update heartbeat response updateNodeAttributesIfNecessary(nodeId, request.getNodeAttributes()); nodeHeartBeatResponse.setAreNodeAttributesAcceptedByRM(true); } catch (IOException ex) { //ensure the error message is captured and sent across in response String errorMsg = nodeHeartBeatResponse.getDiagnosticsMessage() == null ? ex.getMessage() : nodeHeartBeatResponse.getDiagnosticsMessage() + "\n" + ex .getMessage(); nodeHeartBeatResponse.setDiagnosticsMessage(errorMsg); nodeHeartBeatResponse.setAreNodeAttributesAcceptedByRM(false); } } return nodeHeartBeatResponse; }
/**
 * Verifies that a node heartbeat response carries the collector-address map only for
 * applications currently running on that node, and that for an app whose collector data
 * was updated several times, the entry with the highest (rmIdentifier, version) wins.
 */
@Test
public void testNodeHeartbeatForAppCollectorsMap() throws Exception {
    Configuration conf = new Configuration();
    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
    // set version to 2
    conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
    // enable aux-service based timeline collectors
    conf.set(YarnConfiguration.NM_AUX_SERVICES, "timeline_collector");
    conf.set(YarnConfiguration.NM_AUX_SERVICES + "." + "timeline_collector" + ".class",
        PerNodeTimelineCollectorsAuxService.class.getName());
    conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
        FileSystemTimelineWriterImpl.class, TimelineWriter.class);
    rm = new MockRM(conf);
    rm.start();

    // Two NMs, two apps; each app gets its collector address registered on the RM side.
    MockNM nm1 = rm.registerNode("host1:1234", 5120);
    MockNM nm2 = rm.registerNode("host2:1234", 2048);
    NodeHeartbeatResponse nodeHeartbeat1 = nm1.nodeHeartbeat(true);
    NodeHeartbeatResponse nodeHeartbeat2 = nm2.nodeHeartbeat(true);
    RMNodeImpl node1 =
        (RMNodeImpl) rm.getRMContext().getRMNodes().get(nm1.getNodeId());
    RMNodeImpl node2 =
        (RMNodeImpl) rm.getRMContext().getRMNodes().get(nm2.getNodeId());
    RMAppImpl app1 = (RMAppImpl) MockRMAppSubmitter.submitWithMemory(1024, rm);
    String collectorAddr1 = "1.2.3.4:5";
    app1.setCollectorData(AppCollectorData.newInstance(
        app1.getApplicationId(), collectorAddr1));

    // app2's collector data is set three times; the last call carries rmIdentifier=1,
    // which should supersede the earlier entries in the heartbeat response.
    String collectorAddr2 = "5.4.3.2:1";
    RMAppImpl app2 = (RMAppImpl) MockRMAppSubmitter.submitWithMemory(1024, rm);
    app2.setCollectorData(AppCollectorData.newInstance(
        app2.getApplicationId(), collectorAddr2));
    String collectorAddr3 = "5.4.3.2:2";
    app2.setCollectorData(AppCollectorData.newInstance(
        app2.getApplicationId(), collectorAddr3, 0, 1));
    String collectorAddr4 = "5.4.3.2:3";
    app2.setCollectorData(AppCollectorData.newInstance(
        app2.getApplicationId(), collectorAddr4, 1, 0));

    // Create a running container for app1 running on nm1
    ContainerId runningContainerId1 = BuilderUtils.newContainerId(
        BuilderUtils.newApplicationAttemptId(
        app1.getApplicationId(), 0), 0);
    ContainerStatus status1 =
        ContainerStatus.newInstance(runningContainerId1, ContainerState.RUNNING, "", 0);
    List<ContainerStatus> statusList = new ArrayList<ContainerStatus>();
    statusList.add(status1);
    NodeHealthStatus nodeHealth = NodeHealthStatus.newInstance(true, "",
        System.currentTimeMillis());
    NodeStatus nodeStatus = NodeStatus.newInstance(nm1.getNodeId(), 0, statusList, null,
        nodeHealth, null, null, null);
    node1.handle(new RMNodeStatusEvent(nm1.getNodeId(), nodeStatus));
    Assert.assertEquals(1, node1.getRunningApps().size());
    Assert.assertEquals(app1.getApplicationId(), node1.getRunningApps().get(0));

    // Create a running container for app2 running on nm2
    ContainerId runningContainerId2 = BuilderUtils.newContainerId(
        BuilderUtils.newApplicationAttemptId(
        app2.getApplicationId(), 0), 0);
    ContainerStatus status2 =
        ContainerStatus.newInstance(runningContainerId2, ContainerState.RUNNING, "", 0);
    statusList = new ArrayList<ContainerStatus>();
    statusList.add(status2);
    // NOTE(review): the NodeStatus below is built with nm1.getNodeId() although it is
    // delivered to node2 with nm2's id — looks like a copy-paste slip; the event's id
    // appears to be what matters. Confirm before changing.
    nodeStatus = NodeStatus.newInstance(nm1.getNodeId(), 0, statusList, null,
        nodeHealth, null, null, null);
    node2.handle(new RMNodeStatusEvent(nm2.getNodeId(), nodeStatus));
    Assert.assertEquals(1, node2.getRunningApps().size());
    Assert.assertEquals(app2.getApplicationId(), node2.getRunningApps().get(0));

    // nm1's heartbeat reports only app1's collector.
    nodeHeartbeat1 = nm1.nodeHeartbeat(true);
    Map<ApplicationId, AppCollectorData> map1 = nodeHeartbeat1.getAppCollectors();
    Assert.assertEquals(1, map1.size());
    Assert.assertEquals(collectorAddr1,
        map1.get(app1.getApplicationId()).getCollectorAddr());

    // nm2's heartbeat reports app2's latest-winning collector address (addr4).
    nodeHeartbeat2 = nm2.nodeHeartbeat(true);
    Map<ApplicationId, AppCollectorData> map2 = nodeHeartbeat2.getAppCollectors();
    Assert.assertEquals(1, map2.size());
    Assert.assertEquals(collectorAddr4,
        map2.get(app2.getApplicationId()).getCollectorAddr());
}
@Subscribe public synchronized void renew(final SchemaAddedEvent event) { contextManager.getMetaDataContextManager().getSchemaMetaDataManager().addSchema(event.getDatabaseName(), event.getSchemaName()); refreshShardingSphereStatisticsData(); }
@Test void assertRenewForSchemaAdded() { subscriber.renew(new SchemaAddedEvent("db", "foo_schema")); verify(contextManager.getMetaDataContexts().getMetaData().getDatabase("db")).addSchema(argThat("foo_schema"::equals), any(ShardingSphereSchema.class)); }
@Override public Map<String, String> generationCodes(Long tableId) { // 校验是否已经存在 CodegenTableDO table = codegenTableMapper.selectById(tableId); if (table == null) { throw exception(CODEGEN_TABLE_NOT_EXISTS); } List<CodegenColumnDO> columns = codegenColumnMapper.selectListByTableId(tableId); if (CollUtil.isEmpty(columns)) { throw exception(CODEGEN_COLUMN_NOT_EXISTS); } // 如果是主子表,则加载对应的子表信息 List<CodegenTableDO> subTables = null; List<List<CodegenColumnDO>> subColumnsList = null; if (CodegenTemplateTypeEnum.isMaster(table.getTemplateType())) { // 校验子表存在 subTables = codegenTableMapper.selectListByTemplateTypeAndMasterTableId( CodegenTemplateTypeEnum.SUB.getType(), tableId); if (CollUtil.isEmpty(subTables)) { throw exception(CODEGEN_MASTER_GENERATION_FAIL_NO_SUB_TABLE); } // 校验子表的关联字段存在 subColumnsList = new ArrayList<>(); for (CodegenTableDO subTable : subTables) { List<CodegenColumnDO> subColumns = codegenColumnMapper.selectListByTableId(subTable.getId()); if (CollUtil.findOne(subColumns, column -> column.getId().equals(subTable.getSubJoinColumnId())) == null) { throw exception(CODEGEN_SUB_COLUMN_NOT_EXISTS, subTable.getId()); } subColumnsList.add(subColumns); } } // 执行生成 return codegenEngine.execute(table, columns, subTables, subColumnsList); }
@Test public void testGenerationCodes_master_success() { // mock 数据(CodegenTableDO) CodegenTableDO table = randomPojo(CodegenTableDO.class, o -> o.setScene(CodegenSceneEnum.ADMIN.getScene()) .setTemplateType(CodegenTemplateTypeEnum.MASTER_NORMAL.getType())); codegenTableMapper.insert(table); // mock 数据(CodegenColumnDO) CodegenColumnDO column01 = randomPojo(CodegenColumnDO.class, o -> o.setTableId(table.getId())); codegenColumnMapper.insert(column01); CodegenColumnDO column02 = randomPojo(CodegenColumnDO.class, o -> o.setTableId(table.getId())); codegenColumnMapper.insert(column02); // mock 数据(sub CodegenTableDO) CodegenTableDO subTable = randomPojo(CodegenTableDO.class, o -> o.setScene(CodegenSceneEnum.ADMIN.getScene()) .setTemplateType(CodegenTemplateTypeEnum.SUB.getType()) .setMasterTableId(table.getId()) .setSubJoinColumnId(1024L)); codegenTableMapper.insert(subTable); // mock 数据(sub CodegenColumnDO) CodegenColumnDO subColumn01 = randomPojo(CodegenColumnDO.class, o -> o.setId(1024L).setTableId(subTable.getId())); codegenColumnMapper.insert(subColumn01); // mock 执行生成 Map<String, String> codes = MapUtil.of(randomString(), randomString()); when(codegenEngine.execute(eq(table), argThat(columns -> { assertEquals(2, columns.size()); assertEquals(column01, columns.get(0)); assertEquals(column02, columns.get(1)); return true; }), argThat(tables -> { assertEquals(1, tables.size()); assertPojoEquals(subTable, tables.get(0)); return true; }), argThat(columns -> { assertEquals(1, columns.size()); assertPojoEquals(subColumn01, columns.size()); return true; }))).thenReturn(codes); // 准备参数 Long tableId = table.getId(); // 调用 Map<String, String> result = codegenService.generationCodes(tableId); // 断言 assertSame(codes, result); }
public static TableElements of(final TableElement... elements) { return new TableElements(ImmutableList.copyOf(elements)); }
@Test public void shouldThrowOnDuplicateKeyColumns() { // Given: final List<TableElement> elements = ImmutableList.of( tableElement( "k0", STRING_TYPE, KEY_CONSTRAINT), tableElement("k0", STRING_TYPE, KEY_CONSTRAINT), tableElement("k1", STRING_TYPE, KEY_CONSTRAINT), tableElement("k1", STRING_TYPE, PRIMARY_KEY_CONSTRAINT) ); // When: final Exception e = assertThrows( KsqlException.class, () -> TableElements.of(elements) ); // Then: assertThat(e.getMessage(), containsString( "Duplicate column names:")); assertThat(e.getMessage(), containsString( "k0")); assertThat(e.getMessage(), containsString( "k1")); }
@Override public void checkAuthorization( final KsqlSecurityContext securityContext, final MetaStore metaStore, final Statement statement ) { if (statement instanceof Query) { validateQuery(securityContext, metaStore, (Query)statement); } else if (statement instanceof InsertInto) { validateInsertInto(securityContext, metaStore, (InsertInto)statement); } else if (statement instanceof CreateAsSelect) { validateCreateAsSelect(securityContext, metaStore, (CreateAsSelect)statement); } else if (statement instanceof PrintTopic) { validatePrintTopic(securityContext, (PrintTopic)statement); } else if (statement instanceof CreateSource) { validateCreateSource(securityContext, (CreateSource)statement); } }
@Test public void shouldThrowWhenPrintTopicWithoutReadPermissionsDenied() { // Given: givenTopicAccessDenied(KAFKA_TOPIC, AclOperation.READ); final Statement statement = givenStatement(String.format("Print '%s';", KAFKA_TOPIC)); // When: final Exception e = assertThrows( KsqlTopicAuthorizationException.class, () -> authorizationValidator.checkAuthorization(securityContext, metaStore, statement) ); // Then: assertThat(e.getMessage(), containsString(String.format( "Authorization denied to Read on topic(s): [%s]", KAFKA_TOPIC ))); }
public static Map<String, ShardingSphereSchema> build(final String databaseName, final DatabaseType databaseType, final ConfigurationProperties props) { SystemDatabase systemDatabase = new SystemDatabase(databaseType); Map<String, ShardingSphereSchema> result = new LinkedHashMap<>(systemDatabase.getSystemSchemas().size(), 1F); boolean isSystemSchemaMetaDataEnabled = isSystemSchemaMetaDataEnabled(props.getProps()); YamlTableSwapper swapper = new YamlTableSwapper(); for (String each : getSystemSchemas(databaseName, databaseType, systemDatabase)) { result.put(each.toLowerCase(), createSchema(each, SystemSchemaManager.getAllInputStreams(databaseType.getType(), each), swapper, isSystemSchemaMetaDataEnabled)); } return result; }
@Test void assertBuildForPostgreSQL() { DatabaseType databaseType = TypedSPILoader.getService(DatabaseType.class, "PostgreSQL"); Map<String, ShardingSphereSchema> actual = SystemSchemaBuilder.build("sharding_db", databaseType, new ConfigurationProperties(new Properties())); assertThat(actual.size(), is(3)); assertTrue(actual.containsKey("information_schema")); assertTrue(actual.containsKey("pg_catalog")); assertTrue(actual.containsKey("shardingsphere")); assertThat(actual.get("information_schema").getTables().size(), is(69)); assertThat(actual.get("pg_catalog").getTables().size(), is(134)); assertThat(actual.get("shardingsphere").getTables().size(), is(2)); }
@Override public MaterializedWindowedTable windowed() { return new KsqlMaterializedWindowedTable(inner.windowed()); }
@Test public void shouldCallTransformsInOrderForWindowed() { // Given: final MaterializedWindowedTable table = materialization.windowed(); givenNoopFilter(); when(project.apply(any(), any(), any())).thenReturn(Optional.of(transformed)); // When: table.get(aKey, partition, windowStartBounds, windowEndBounds); // Then: final InOrder inOrder = inOrder(project, filter); inOrder.verify(filter).apply(any(), any(), any()); inOrder.verify(project).apply(any(), any(), any()); }
public static GeoPoint fromInvertedDoubleString(final String s, final char spacer) { final int spacerPos1 = s.indexOf(spacer); final int spacerPos2 = s.indexOf(spacer, spacerPos1 + 1); if (spacerPos2 == -1) { return new GeoPoint( Double.parseDouble(s.substring(spacerPos1 + 1, s.length())), Double.parseDouble(s.substring(0, spacerPos1))); } else { return new GeoPoint( Double.parseDouble(s.substring(spacerPos1 + 1, spacerPos2)), Double.parseDouble(s.substring(0, spacerPos1)), Double.parseDouble(s.substring(spacerPos2 + 1, s.length()))); } }
@Test public void test_toFromInvertedDoubleString_withoutAltitude() { final GeoPoint in = new GeoPoint(-117.123, 33.123); final GeoPoint out = GeoPoint.fromInvertedDoubleString("33.123,-117.123", ','); assertEquals("toFromString without altitude", in, out); }
static void start(Keys key, String value, StringBuilder b) { b.append(key.name()).append(AuditConstants.KEY_VAL_SEPARATOR).append(value); }
@Test public void testNMAuditLoggerWithIP() throws Exception { Configuration conf = new Configuration(); RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine2.class); // Create server side implementation MyTestRPCServer serverImpl = new MyTestRPCServer(); BlockingService service = TestRpcServiceProtos.TestProtobufRpcProto .newReflectiveBlockingService(serverImpl); // start the IPC server Server server = new RPC.Builder(conf) .setProtocol(TestRpcBase.TestRpcService.class) .setInstance(service).setBindAddress("0.0.0.0") .setPort(0).setNumHandlers(5).setVerbose(true).build(); server.start(); InetSocketAddress addr = NetUtils.getConnectAddress(server); // Make a client connection and test the audit log TestRpcService proxy = RPC.getProxy(TestRpcService.class, TestProtocol.versionID, addr, conf); // Start the testcase TestProtos.EmptyRequestProto pingRequest = TestProtos.EmptyRequestProto.newBuilder().build(); proxy.ping(null, pingRequest); server.stop(); RPC.stopProxy(proxy); }
protected Invoker<T> select( LoadBalance loadbalance, Invocation invocation, List<Invoker<T>> invokers, List<Invoker<T>> selected) throws RpcException { if (CollectionUtils.isEmpty(invokers)) { return null; } String methodName = invocation == null ? StringUtils.EMPTY_STRING : RpcUtils.getMethodName(invocation); boolean sticky = invokers.get(0).getUrl().getMethodParameter(methodName, CLUSTER_STICKY_KEY, DEFAULT_CLUSTER_STICKY); // ignore overloaded method if (stickyInvoker != null && !invokers.contains(stickyInvoker)) { stickyInvoker = null; } // ignore concurrency problem if (sticky && stickyInvoker != null && (selected == null || !selected.contains(stickyInvoker))) { if (availableCheck && stickyInvoker.isAvailable()) { return stickyInvoker; } } Invoker<T> invoker = doSelect(loadbalance, invocation, invokers, selected); if (sticky) { stickyInvoker = invoker; } return invoker; }
@Test void testDonotSelectAgainAndNoCheckAvailable() { LoadBalance lb = ExtensionLoader.getExtensionLoader(LoadBalance.class).getExtension(RoundRobinLoadBalance.NAME); initlistsize5(); { // Boundary condition test . selectedInvokers.clear(); selectedInvokers.add(invoker2); selectedInvokers.add(invoker3); selectedInvokers.add(invoker4); selectedInvokers.add(invoker5); Invoker sinvoker = cluster_nocheck.select(lb, invocation, invokers, selectedInvokers); Assertions.assertSame(invoker1, sinvoker); } { // Boundary condition test . selectedInvokers.clear(); selectedInvokers.add(invoker1); selectedInvokers.add(invoker3); selectedInvokers.add(invoker4); selectedInvokers.add(invoker5); Invoker sinvoker = cluster_nocheck.select(lb, invocation, invokers, selectedInvokers); Assertions.assertSame(invoker2, sinvoker); } { // Boundary condition test . selectedInvokers.clear(); selectedInvokers.add(invoker1); selectedInvokers.add(invoker2); selectedInvokers.add(invoker4); selectedInvokers.add(invoker5); Invoker sinvoker = cluster_nocheck.select(lb, invocation, invokers, selectedInvokers); Assertions.assertSame(invoker3, sinvoker); } { // Boundary condition test . selectedInvokers.clear(); selectedInvokers.add(invoker1); selectedInvokers.add(invoker2); selectedInvokers.add(invoker3); selectedInvokers.add(invoker4); Invoker sinvoker = cluster_nocheck.select(lb, invocation, invokers, selectedInvokers); Assertions.assertSame(invoker5, sinvoker); } { // Boundary condition test . selectedInvokers.clear(); selectedInvokers.add(invoker1); selectedInvokers.add(invoker2); selectedInvokers.add(invoker3); selectedInvokers.add(invoker4); selectedInvokers.add(invoker5); Invoker sinvoker = cluster_nocheck.select(lb, invocation, invokers, selectedInvokers); Assertions.assertTrue(invokers.contains(sinvoker)); } }
@Override public void updateApiErrorLogProcess(Long id, Integer processStatus, Long processUserId) { ApiErrorLogDO errorLog = apiErrorLogMapper.selectById(id); if (errorLog == null) { throw exception(API_ERROR_LOG_NOT_FOUND); } if (!ApiErrorLogProcessStatusEnum.INIT.getStatus().equals(errorLog.getProcessStatus())) { throw exception(API_ERROR_LOG_PROCESSED); } // 标记处理 apiErrorLogMapper.updateById(ApiErrorLogDO.builder().id(id).processStatus(processStatus) .processUserId(processUserId).processTime(LocalDateTime.now()).build()); }
@Test public void testUpdateApiErrorLogProcess_notFound() { // 准备参数 Long id = randomLongId(); Integer processStatus = randomEle(ApiErrorLogProcessStatusEnum.values()).getStatus(); Long processUserId = randomLongId(); // 调用,并断言异常 assertServiceException(() -> apiErrorLogService.updateApiErrorLogProcess(id, processStatus, processUserId), API_ERROR_LOG_NOT_FOUND); }
@Override public <I, O> List<O> flatMap(List<I> data, SerializableFunction<I, Stream<O>> func, int parallelism) { return data.stream().parallel().flatMap(throwingFlatMapWrapper(func)).collect(toList()); }
@Test public void testFlatMap() { List<String> list1 = Arrays.asList("a", "b", "c"); List<String> list2 = Arrays.asList("d", "e", "f"); List<String> list3 = Arrays.asList("g", "h", "i"); List<List<String>> inputList = new ArrayList<>(); inputList.add(list1); inputList.add(list2); inputList.add(list3); List<String> result = context.flatMap(inputList, Collection::stream, 2); Assertions.assertEquals(9, result.size()); }
@Override public void importData(JsonReader reader) throws IOException { logger.info("Reading configuration for 1.3"); // this *HAS* to start as an object reader.beginObject(); while (reader.hasNext()) { JsonToken tok = reader.peek(); switch (tok) { case NAME: String name = reader.nextName(); // find out which member it is if (name.equals(CLIENTS)) { readClients(reader); } else if (name.equals(GRANTS)) { readGrants(reader); } else if (name.equals(WHITELISTEDSITES)) { readWhitelistedSites(reader); } else if (name.equals(BLACKLISTEDSITES)) { readBlacklistedSites(reader); } else if (name.equals(AUTHENTICATIONHOLDERS)) { readAuthenticationHolders(reader); } else if (name.equals(ACCESSTOKENS)) { readAccessTokens(reader); } else if (name.equals(REFRESHTOKENS)) { readRefreshTokens(reader); } else if (name.equals(SYSTEMSCOPES)) { readSystemScopes(reader); } else { boolean processed = false; for (MITREidDataServiceExtension extension : extensions) { if (extension.supportsVersion(THIS_VERSION)) { processed = extension.importExtensionData(name, reader); if (processed) { // if the extension processed data, break out of this inner loop // (only the first extension to claim an extension point gets it) break; } } } if (!processed) { // unknown token, skip it reader.skipValue(); } } break; case END_OBJECT: // the object ended, we're done here reader.endObject(); continue; default: logger.debug("Found unexpected entry"); reader.skipValue(); continue; } } fixObjectReferences(); for (MITREidDataServiceExtension extension : extensions) { if (extension.supportsVersion(THIS_VERSION)) { extension.fixExtensionObjectReferences(maps); break; } } maps.clearAll(); }
/**
 * Imports two refresh tokens from a 1.3-format JSON dump and verifies that the saved
 * entities carry the expected client, expiration and JWT value.
 *
 * <p>The repositories are mocked with small in-memory fakes: saved tokens get fresh ids
 * and land in {@code fakeDb}; client/auth-holder lookups fabricate matching mocks so the
 * reference-fixing pass after import can rewire the token associations.
 */
@Test
public void testImportRefreshTokens() throws IOException, ParseException {
    // Expected token #1 (only used as the source of expected values below).
    String expiration1 = "2014-09-10T22:49:44.090+00:00";
    Date expirationDate1 = formatter.parse(expiration1, Locale.ENGLISH);
    ClientDetailsEntity mockedClient1 = mock(ClientDetailsEntity.class);
    when(mockedClient1.getClientId()).thenReturn("mocked_client_1");
    AuthenticationHolderEntity mockedAuthHolder1 = mock(AuthenticationHolderEntity.class);
    when(mockedAuthHolder1.getId()).thenReturn(1L);
    OAuth2RefreshTokenEntity token1 = new OAuth2RefreshTokenEntity();
    token1.setId(1L);
    token1.setClient(mockedClient1);
    token1.setExpiration(expirationDate1);
    token1.setJwt(JWTParser.parse("eyJhbGciOiJub25lIn0.eyJqdGkiOiJmOTg4OWQyOS0xMTk1LTQ4ODEtODgwZC1lZjVlYzAwY2Y4NDIifQ."));
    token1.setAuthenticationHolder(mockedAuthHolder1);
    // Expected token #2.
    String expiration2 = "2015-01-07T18:31:50.079+00:00";
    Date expirationDate2 = formatter.parse(expiration2, Locale.ENGLISH);
    ClientDetailsEntity mockedClient2 = mock(ClientDetailsEntity.class);
    when(mockedClient2.getClientId()).thenReturn("mocked_client_2");
    AuthenticationHolderEntity mockedAuthHolder2 = mock(AuthenticationHolderEntity.class);
    when(mockedAuthHolder2.getId()).thenReturn(2L);
    OAuth2RefreshTokenEntity token2 = new OAuth2RefreshTokenEntity();
    token2.setId(2L);
    token2.setClient(mockedClient2);
    token2.setExpiration(expirationDate2);
    token2.setJwt(JWTParser.parse("eyJhbGciOiJub25lIn0.eyJqdGkiOiJlYmEyYjc3My0xNjAzLTRmNDAtOWQ3MS1hMGIxZDg1OWE2MDAifQ."));
    token2.setAuthenticationHolder(mockedAuthHolder2);
    // JSON dump with all sections empty except the two refresh tokens.
    String configJson = "{" +
        "\"" + MITREidDataService.SYSTEMSCOPES + "\": [], " +
        "\"" + MITREidDataService.ACCESSTOKENS + "\": [], " +
        "\"" + MITREidDataService.CLIENTS + "\": [], " +
        "\"" + MITREidDataService.GRANTS + "\": [], " +
        "\"" + MITREidDataService.WHITELISTEDSITES + "\": [], " +
        "\"" + MITREidDataService.BLACKLISTEDSITES + "\": [], " +
        "\"" + MITREidDataService.AUTHENTICATIONHOLDERS + "\": [], " +
        "\"" + MITREidDataService.REFRESHTOKENS + "\": [" +
        "{\"id\":1,\"clientId\":\"mocked_client_1\",\"expiration\":\"2014-09-10T22:49:44.090+00:00\"," +
        "\"authenticationHolderId\":1,\"value\":\"eyJhbGciOiJub25lIn0.eyJqdGkiOiJmOTg4OWQyOS0xMTk1LTQ4ODEtODgwZC1lZjVlYzAwY2Y4NDIifQ.\"}," +
        "{\"id\":2,\"clientId\":\"mocked_client_2\",\"expiration\":\"2015-01-07T18:31:50.079+00:00\"," +
        "\"authenticationHolderId\":2,\"value\":\"eyJhbGciOiJub25lIn0.eyJqdGkiOiJlYmEyYjc3My0xNjAzLTRmNDAtOWQ3MS1hMGIxZDg1OWE2MDAifQ.\"}" +
        "  ]" +
        "}";
    logger.debug(configJson);
    JsonReader reader = new JsonReader(new StringReader(configJson));
    // In-memory fake for the token repository: save assigns fresh ids, get reads back.
    final Map<Long, OAuth2RefreshTokenEntity> fakeDb = new HashMap<>();
    when(tokenRepository.saveRefreshToken(isA(OAuth2RefreshTokenEntity.class))).thenAnswer(new Answer<OAuth2RefreshTokenEntity>() {
        Long id = 332L;
        @Override
        public OAuth2RefreshTokenEntity answer(InvocationOnMock invocation) throws Throwable {
            OAuth2RefreshTokenEntity _token = (OAuth2RefreshTokenEntity) invocation.getArguments()[0];
            if(_token.getId() == null) {
                _token.setId(id++);
            }
            fakeDb.put(_token.getId(), _token);
            return _token;
        }
    });
    when(tokenRepository.getRefreshTokenById(anyLong())).thenAnswer(new Answer<OAuth2RefreshTokenEntity>() {
        @Override
        public OAuth2RefreshTokenEntity answer(InvocationOnMock invocation) throws Throwable {
            Long _id = (Long) invocation.getArguments()[0];
            return fakeDb.get(_id);
        }
    });
    // Client lookups fabricate a mock echoing the requested client id.
    when(clientRepository.getClientByClientId(anyString())).thenAnswer(new Answer<ClientDetailsEntity>() {
        @Override
        public ClientDetailsEntity answer(InvocationOnMock invocation) throws Throwable {
            String _clientId = (String) invocation.getArguments()[0];
            ClientDetailsEntity _client = mock(ClientDetailsEntity.class);
            when(_client.getClientId()).thenReturn(_clientId);
            return _client;
        }
    });
    // Auth-holder lookups with a null id fabricate mocks with increasing ids.
    when(authHolderRepository.getById(isNull(Long.class))).thenAnswer(new Answer<AuthenticationHolderEntity>() {
        Long id = 131L;
        @Override
        public AuthenticationHolderEntity answer(InvocationOnMock invocation) throws Throwable {
            AuthenticationHolderEntity _auth = mock(AuthenticationHolderEntity.class);
            when(_auth.getId()).thenReturn(id);
            id++;
            return _auth;
        }
    });
    dataService.importData(reader);
    //2 times for token, 2 times to update client, 2 times to update authHolder
    verify(tokenRepository, times(6)).saveRefreshToken(capturedRefreshTokens.capture());
    List<OAuth2RefreshTokenEntity> savedRefreshTokens = new ArrayList(fakeDb.values()); //capturedRefreshTokens.getAllValues();
    Collections.sort(savedRefreshTokens, new refreshTokenIdComparator());
    // Both tokens imported with the expected client, expiration and JWT value.
    assertThat(savedRefreshTokens.size(), is(2));
    assertThat(savedRefreshTokens.get(0).getClient().getClientId(), equalTo(token1.getClient().getClientId()));
    assertThat(savedRefreshTokens.get(0).getExpiration(), equalTo(token1.getExpiration()));
    assertThat(savedRefreshTokens.get(0).getValue(), equalTo(token1.getValue()));
    assertThat(savedRefreshTokens.get(1).getClient().getClientId(), equalTo(token2.getClient().getClientId()));
    assertThat(savedRefreshTokens.get(1).getExpiration(), equalTo(token2.getExpiration()));
    assertThat(savedRefreshTokens.get(1).getValue(), equalTo(token2.getValue()));
}
@Override public Integer convertStringToValue(final String value) { return Integer.valueOf(value); }
@Test void testConvertValueFromString() { assertThat((int) parallelismQueryParameter.convertStringToValue("42")).isEqualTo(42); }
@Override public boolean isEmpty() { lock.lock(); try { return tasks.isEmpty(); } finally { lock.unlock(); } }
@Test void testIsEmpty() { assertTrue(nacosDelayTaskExecuteEngine.isEmpty()); nacosDelayTaskExecuteEngine.addTask("test", abstractTask); assertFalse(nacosDelayTaskExecuteEngine.isEmpty()); nacosDelayTaskExecuteEngine.removeTask("test"); assertTrue(nacosDelayTaskExecuteEngine.isEmpty()); }
static void checkValidTableId(String idToCheck) { if (idToCheck.length() < MIN_TABLE_ID_LENGTH) { throw new IllegalArgumentException("Table ID cannot be empty. "); } if (idToCheck.length() > MAX_TABLE_ID_LENGTH) { throw new IllegalArgumentException( "Table ID " + idToCheck + " cannot be longer than " + MAX_TABLE_ID_LENGTH + " characters."); } if (ILLEGAL_TABLE_CHARS.matcher(idToCheck).find()) { throw new IllegalArgumentException( "Table ID " + idToCheck + " is not a valid ID. Only letters, numbers, hyphens and underscores are allowed."); } }
@Test public void testCheckValidTableIdShouldWorkWhenGivenCorrectId() { char[] chars = new char[1024]; Arrays.fill(chars, 'a'); String s = new String(chars); checkValidTableId(s); checkValidTableId("a"); checkValidTableId("this-is_a_valid-id-1"); }
@Override public void updateApiErrorLogProcess(Long id, Integer processStatus, Long processUserId) { ApiErrorLogDO errorLog = apiErrorLogMapper.selectById(id); if (errorLog == null) { throw exception(API_ERROR_LOG_NOT_FOUND); } if (!ApiErrorLogProcessStatusEnum.INIT.getStatus().equals(errorLog.getProcessStatus())) { throw exception(API_ERROR_LOG_PROCESSED); } // 标记处理 apiErrorLogMapper.updateById(ApiErrorLogDO.builder().id(id).processStatus(processStatus) .processUserId(processUserId).processTime(LocalDateTime.now()).build()); }
@Test public void testUpdateApiErrorLogProcess_success() { // 准备参数 ApiErrorLogDO apiErrorLogDO = randomPojo(ApiErrorLogDO.class, o -> o.setProcessStatus(ApiErrorLogProcessStatusEnum.INIT.getStatus())); apiErrorLogMapper.insert(apiErrorLogDO); // 准备参数 Long id = apiErrorLogDO.getId(); Integer processStatus = randomEle(ApiErrorLogProcessStatusEnum.values()).getStatus(); Long processUserId = randomLongId(); // 调用 apiErrorLogService.updateApiErrorLogProcess(id, processStatus, processUserId); // 断言 ApiErrorLogDO dbApiErrorLogDO = apiErrorLogMapper.selectById(apiErrorLogDO.getId()); assertEquals(processStatus, dbApiErrorLogDO.getProcessStatus()); assertEquals(processUserId, dbApiErrorLogDO.getProcessUserId()); assertNotNull(dbApiErrorLogDO.getProcessTime()); }
@Override public String name() { return name; }
@Test public void testSetSnapshotSummary() throws Exception { Configuration conf = new Configuration(); conf.set("iceberg.hive.table-property-max-size", "4000"); HiveTableOperations ops = new HiveTableOperations(conf, null, null, catalog.name(), DB_NAME, "tbl"); Snapshot snapshot = mock(Snapshot.class); Map<String, String> summary = Maps.newHashMap(); when(snapshot.summary()).thenReturn(summary); // create a snapshot summary whose json string size is less than the limit for (int i = 0; i < 100; i++) { summary.put(String.valueOf(i), "value"); } assertThat(JsonUtil.mapper().writeValueAsString(summary).length()).isLessThan(4000); Map<String, String> parameters = Maps.newHashMap(); ops.setSnapshotSummary(parameters, snapshot); assertThat(parameters).as("The snapshot summary must be in parameters").hasSize(1); // create a snapshot summary whose json string size exceeds the limit for (int i = 0; i < 1000; i++) { summary.put(String.valueOf(i), "value"); } long summarySize = JsonUtil.mapper().writeValueAsString(summary).length(); // the limit has been updated to 4000 instead of the default value(32672) assertThat(summarySize).isGreaterThan(4000).isLessThan(32672); parameters.remove(CURRENT_SNAPSHOT_SUMMARY); ops.setSnapshotSummary(parameters, snapshot); assertThat(parameters) .as("The snapshot summary must not be in parameters due to the size limit") .isEmpty(); }