focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
public static ShardingRouteEngine newInstance(final ShardingRule shardingRule, final ShardingSphereDatabase database, final QueryContext queryContext, final ShardingConditions shardingConditions, final ConfigurationProperties props, final ConnectionContext connectionContext) { SQLStatementContext sqlStatementContext = queryContext.getSqlStatementContext(); SQLStatement sqlStatement = sqlStatementContext.getSqlStatement(); if (sqlStatement instanceof TCLStatement) { return new ShardingDatabaseBroadcastRoutingEngine(); } if (sqlStatement instanceof DDLStatement) { if (sqlStatementContext instanceof CursorAvailable) { return getCursorRouteEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props); } return getDDLRoutingEngine(shardingRule, database, sqlStatementContext); } if (sqlStatement instanceof DALStatement) { return getDALRoutingEngine(shardingRule, database, sqlStatementContext, connectionContext); } if (sqlStatement instanceof DCLStatement) { return getDCLRoutingEngine(shardingRule, database, sqlStatementContext); } return getDQLRoutingEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props, connectionContext); }
@Test void assertNewInstanceForCloseAllStatement() { CloseStatementContext closeStatementContext = mock(CloseStatementContext.class, RETURNS_DEEP_STUBS); OpenGaussCloseStatement closeStatement = mock(OpenGaussCloseStatement.class); when(closeStatement.isCloseAll()).thenReturn(true); tableNames.add("t_order"); when(closeStatementContext.getTablesContext().getTableNames()).thenReturn(tableNames); when(closeStatementContext.getTablesContext().getDatabaseName()).thenReturn(Optional.empty()); when(closeStatementContext.getSqlStatement()).thenReturn(closeStatement); when(shardingRule.getShardingRuleTableNames(tableNames)).thenReturn(tableNames); QueryContext queryContext = new QueryContext(closeStatementContext, "", Collections.emptyList(), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class)); ShardingRouteEngine actual = ShardingRouteEngineFactory.newInstance(shardingRule, database, queryContext, shardingConditions, props, new ConnectionContext(Collections::emptySet)); assertThat(actual, instanceOf(ShardingDatabaseBroadcastRoutingEngine.class)); }
@Override public void register(@NonNull Scheme scheme) { if (!schemes.contains(scheme)) { indexSpecRegistry.indexFor(scheme); schemes.add(scheme); getWatchers().forEach(watcher -> watcher.onChange(new SchemeRegistered(scheme))); } }
@Test void shouldTriggerOnChangeOnlyOnceWhenRegisterTwice() { final var watcher = mock(SchemeWatcher.class); when(watcherManager.watchers()).thenReturn(List.of(watcher)); schemeManager.register(FakeExtension.class); verify(watcherManager, times(1)).watchers(); verify(watcher, times(1)).onChange(isA(SchemeRegistered.class)); schemeManager.register(FakeExtension.class); verify(watcherManager, times(1)).watchers(); verify(watcher, times(1)).onChange(isA(SchemeRegistered.class)); verify(indexSpecRegistry).indexFor(any(Scheme.class)); }
@Override public void startScheduling() { Set<ExecutionVertexID> sourceVertices = IterableUtils.toStream(schedulingTopology.getVertices()) .filter(vertex -> vertex.getConsumedPartitionGroups().isEmpty()) .map(SchedulingExecutionVertex::getId) .collect(Collectors.toSet()); maybeScheduleVertices(sourceVertices); }
@Test void testScheduleDownstreamOfHybridEdge() { final TestingSchedulingTopology topology = new TestingSchedulingTopology(); final List<TestingSchedulingExecutionVertex> producers = topology.addExecutionVertices().withParallelism(2).finish(); final List<TestingSchedulingExecutionVertex> consumers = topology.addExecutionVertices().withParallelism(2).finish(); // add consumers to scheduling strategy. topology.connectAllToAll(producers, consumers) .withResultPartitionType(ResultPartitionType.HYBRID_FULL) .finish(); final VertexwiseSchedulingStrategy schedulingStrategy = createSchedulingStrategy(topology); inputConsumableDecider.addSourceVertices(new HashSet<>(producers)); inputConsumableDecider.setInputConsumable(consumers.get(0)); inputConsumableDecider.setInputConsumable(consumers.get(1)); schedulingStrategy.startScheduling(); // consumers are properly scheduled indicates that the consuming relationship and // correlation are successfully built assertLatestScheduledVerticesAreEqualTo( Arrays.asList( Collections.singletonList(producers.get(0)), Collections.singletonList(producers.get(1)), Collections.singletonList(consumers.get(0)), Collections.singletonList(consumers.get(1))), testingSchedulerOperation); }
public long getReportIntervalMs() { String intervalString = properties.getProperty( CONFLUENT_SUPPORT_METRICS_REPORT_INTERVAL_HOURS_CONFIG ); if (intervalString == null || intervalString.isEmpty()) { intervalString = CONFLUENT_SUPPORT_METRICS_REPORT_INTERVAL_HOURS_DEFAULT; } try { final long intervalHours = Long.parseLong(intervalString); if (intervalHours < 1) { throw new ConfigException( CONFLUENT_SUPPORT_METRICS_REPORT_INTERVAL_HOURS_CONFIG, intervalString, "Interval must be >= 1" ); } return intervalHours * 60 * 60 * 1000; } catch (NumberFormatException e) { throw new ConfigException( CONFLUENT_SUPPORT_METRICS_REPORT_INTERVAL_HOURS_CONFIG, intervalString, "Interval is not an integer number" ); } }
@Test public void testOverrideReportInterval() { // Given Properties overrideProps = new Properties(); int reportIntervalHours = 1; overrideProps.setProperty( BaseSupportConfig.CONFLUENT_SUPPORT_METRICS_REPORT_INTERVAL_HOURS_CONFIG, String.valueOf(reportIntervalHours) ); // When BaseSupportConfig supportConfig = new TestSupportConfig(overrideProps); // Then assertEquals((long) reportIntervalHours * 60 * 60 * 1000, supportConfig.getReportIntervalMs()); }
@Override public void reset() { // reset all offsets this.numRecords = 0; this.currentSortIndexOffset = 0; this.currentDataBufferOffset = 0; this.sortIndexBytes = 0; // return all memory this.freeMemory.addAll(this.sortIndex); this.freeMemory.addAll(this.recordBufferSegments); this.sortIndex.clear(); this.recordBufferSegments.clear(); // grab first buffers this.currentSortIndexSegment = nextMemorySegment(); this.sortIndex.add(this.currentSortIndexSegment); this.recordCollector.reset(); }
@Test void testReset() throws Exception { final int numSegments = MEMORY_SIZE / MEMORY_PAGE_SIZE; final List<MemorySegment> memory = this.memoryManager.allocatePages(new DummyInvokable(), numSegments); NormalizedKeySorter<Tuple2<Integer, String>> sorter = newSortBuffer(memory); TestData.TupleGenerator generator = new TestData.TupleGenerator( SEED, KEY_MAX, VALUE_LENGTH, KeyMode.RANDOM, ValueMode.FIX_LENGTH); // write the buffer full with the first set of records Tuple2<Integer, String> record = new Tuple2<>(); int num = -1; do { generator.next(record); num++; } while (sorter.write(record)); sorter.reset(); // write a second sequence of records. since the values are of fixed length, we must be able // to write an equal number generator = new TestData.TupleGenerator( SEED2, KEY_MAX, VALUE_LENGTH, KeyMode.RANDOM, ValueMode.FIX_LENGTH); // write the buffer full with the first set of records int num2 = -1; do { generator.next(record); num2++; } while (sorter.write(record)); assertThat(num2) .withFailMessage( "The number of records written after the reset was not the same as before.") .isEqualTo(num); // re-read the records generator.reset(); Tuple2<Integer, String> readTarget = new Tuple2<>(); int i = 0; while (i < num) { generator.next(record); readTarget = sorter.getRecord(readTarget, i++); int rk = readTarget.f0; int gk = record.f0; String rv = readTarget.f1; String gv = record.f1; assertThat(rk).withFailMessage("The re-read key is wrong").isEqualTo(gk); assertThat(rv).withFailMessage("The re-read value is wrong").isEqualTo(gv); } // release the memory occupied by the buffers sorter.dispose(); this.memoryManager.release(memory); }
@Override public void run() { if (processedEvents.get() > 0) { LOG.debug("checkpointing offset after reaching timeout, with a batch of {}", processedEvents.get()); eventContext.updateCheckpointAsync() .subscribe(unused -> LOG.debug("Processed one event..."), error -> LOG.debug("Error when updating Checkpoint: {}", error.getMessage()), () -> { LOG.debug("Checkpoint updated."); processedEvents.set(0); }); } else { LOG.debug("skip checkpointing offset even if timeout is reached. No events processed"); } }
@Test void testProcessedEventsNotResetWhenCheckpointUpdateFails() { var processedEvents = new AtomicInteger(1); var eventContext = Mockito.mock(EventContext.class); Mockito.when(eventContext.updateCheckpointAsync()) .thenReturn(Mono.error(new RuntimeException())); var timerTask = new EventHubsCheckpointUpdaterTimerTask(eventContext, processedEvents); timerTask.run(); assertEquals(1, processedEvents.get()); }
@Override public <T> AsyncResult<T> startProcess(Callable<T> task) { return startProcess(task, null); }
@Test void testNullTaskWithNullCallback() { assertTimeout(ofMillis(3000), () -> { // Instantiate a new executor and start a new 'null' task ... final var executor = new ThreadAsyncExecutor(); final var asyncResult = executor.startProcess(null, null); assertNotNull( asyncResult, "The AsyncResult should not be 'null', even though the task and callback were 'null'." ); asyncResult.await(); // Prevent timing issues, and wait until the result is available assertTrue(asyncResult.isCompleted()); try { asyncResult.getValue(); fail("Expected ExecutionException with NPE as cause"); } catch (final ExecutionException e) { assertNotNull(e.getMessage()); assertNotNull(e.getCause()); assertEquals(NullPointerException.class, e.getCause().getClass()); } }); }
public synchronized void lockMQPeriodically() { if (!this.stopped) { this.defaultMQPushConsumerImpl.getRebalanceImpl().lockAll(); } }
@Test public void testLockMQPeriodically() { popService.lockMQPeriodically(); verify(defaultMQPushConsumerImpl, times(1)).getRebalanceImpl(); verify(rebalanceImpl, times(1)).lockAll(); }
static Entry<ScramMechanism, String> parsePerMechanismArgument(String input) { input = input.trim(); int equalsIndex = input.indexOf('='); if (equalsIndex < 0) { throw new FormatterException("Failed to find equals sign in SCRAM " + "argument '" + input + "'"); } String mechanismString = input.substring(0, equalsIndex); String configString = input.substring(equalsIndex + 1); ScramMechanism mechanism = ScramMechanism.forMechanismName(mechanismString); if (mechanism == null) { throw new FormatterException("The add-scram mechanism " + mechanismString + " is not supported."); } if (!configString.startsWith("[")) { throw new FormatterException("Expected configuration string to start with ["); } if (!configString.endsWith("]")) { throw new FormatterException("Expected configuration string to end with ]"); } return new AbstractMap.SimpleImmutableEntry<>(mechanism, configString.substring(1, configString.length() - 1)); }
@Test public void testParsePerMechanismArgumentWithConfigStringWithoutEndBrace() { assertEquals("Expected configuration string to end with ]", assertThrows(FormatterException.class, () -> ScramParser.parsePerMechanismArgument( "SCRAM-SHA-256=[name=scram-admin,password=scram-user-secret")).getMessage()); }
@Nullable @Override public Message decode(@Nonnull RawMessage rawMessage) { final byte[] payload = rawMessage.getPayload(); final JsonNode event; try { event = objectMapper.readTree(payload); if (event == null || event.isMissingNode()) { throw new IOException("null result"); } } catch (IOException e) { LOG.error("Couldn't decode raw message {}", rawMessage); return null; } return parseEvent(event); }
@Test public void decodeMessagesHandlesGenericBeatWithCloudGCE() throws Exception { final Message message = codec.decode(messageFromJson("generic-with-cloud-gce.json")); assertThat(message).isNotNull(); assertThat(message.getMessage()).isEqualTo("-"); assertThat(message.getSource()).isEqualTo("unknown"); assertThat(message.getTimestamp()).isEqualTo(new DateTime(2016, 4, 1, 0, 0, DateTimeZone.UTC)); assertThat(message.getField("beats_type")).isEqualTo("beat"); assertThat(message.getField("beat_foo")).isEqualTo("bar"); assertThat(message.getField("beat_meta_cloud_provider")).isEqualTo("gce"); assertThat(message.getField("beat_meta_cloud_machine_type")).isEqualTo("projects/1234567890/machineTypes/f1-micro"); assertThat(message.getField("beat_meta_cloud_instance_id")).isEqualTo("1234556778987654321"); assertThat(message.getField("beat_meta_cloud_project_id")).isEqualTo("my-dev"); assertThat(message.getField("beat_meta_cloud_availability_zone")).isEqualTo("projects/1234567890/zones/us-east1-b"); }
@Override protected ExecuteContext doAfter(ExecuteContext context) { final Class<?> type = (Class<?>) context.getMemberFieldValue("type"); if (type == null) { return context; } if (canInjectClusterInvoker(type.getName()) && isDefined.compareAndSet(false, true)) { if (!(context.getResult() instanceof Map)) { return context; } final Map<String, Class<?>> classes = (Map<String, Class<?>>) context.getResult(); final String retryClusterInvoker = flowControlConfig.getRetryClusterInvoker(); if (classes.get(retryClusterInvoker) != null) { return context; } final Optional<Class<?>> retryInvokerClass; final ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); if (APACHE_DUBBO_CLUSTER_CLASS_NAME.equals(type.getName())) { ClassUtils.defineClass( "io.sermant.flowcontrol.retry.cluster.ApacheDubboClusterInvoker", contextClassLoader); retryInvokerClass = ClassUtils.defineClass( "io.sermant.flowcontrol.retry.cluster.ApacheDubboCluster", contextClassLoader); } else if (ALIBABA_DUBBO_CLUSTER_CLASS_NAME.equals(type.getName())) { ClassUtils.defineClass( "io.sermant.flowcontrol.retry.cluster.AlibabaDubboClusterInvoker", contextClassLoader); retryInvokerClass = ClassUtils.defineClass( "io.sermant.flowcontrol.retry.cluster.AlibabaDubboCluster", contextClassLoader); } else { return context; } retryInvokerClass.ifPresent(invokerClass -> classes.put(retryClusterInvoker, invokerClass)); ClusterInvokerCreator.INSTANCE.getClusterInvokerMap().putAll(classes); } return context; }
@Test public void testNoExecute() throws NoSuchMethodException { final ExtensionLoaderInterceptor interceptor = new ExtensionLoaderInterceptor(); final HashMap<String, Class<?>> result = new HashMap<>(); interceptor.doAfter(buildContext(null, result)); Assert.assertTrue(result.isEmpty()); }
public static File getPath() { if (PATH == null) { PATH = findPath(); } return PATH; }
@Test public void testPath() { Assertions.assertTrue(WorkPath.getPath().exists()); }
Converter<E> compile() { head = tail = null; for (Node n = top; n != null; n = n.next) { switch (n.type) { case Node.LITERAL: addToList(new LiteralConverter<E>((String) n.getValue())); break; case Node.COMPOSITE_KEYWORD: CompositeNode cn = (CompositeNode) n; CompositeConverter<E> compositeConverter = createCompositeConverter(cn); if(compositeConverter == null) { addError("Failed to create converter for [%"+cn.getValue()+"] keyword"); addToList(new LiteralConverter<E>("%PARSER_ERROR["+cn.getValue()+"]")); break; } compositeConverter.setFormattingInfo(cn.getFormatInfo()); compositeConverter.setOptionList(cn.getOptions()); Compiler<E> childCompiler = new Compiler<E>(cn.getChildNode(), converterMap); childCompiler.setContext(context); Converter<E> childConverter = childCompiler.compile(); compositeConverter.setChildConverter(childConverter); addToList(compositeConverter); break; case Node.SIMPLE_KEYWORD: SimpleKeywordNode kn = (SimpleKeywordNode) n; DynamicConverter<E> dynaConverter = createConverter(kn); if (dynaConverter != null) { dynaConverter.setFormattingInfo(kn.getFormatInfo()); dynaConverter.setOptionList(kn.getOptions()); addToList(dynaConverter); } else { // if the appropriate dynaconverter cannot be found, then replace // it with a dummy LiteralConverter indicating an error. Converter<E> errConveter = new LiteralConverter<E>("%PARSER_ERROR[" + kn.getValue() + "]"); addStatus(new ErrorStatus("[" + kn.getValue() + "] is not a valid conversion word", this)); addToList(errConveter); } } } return head; }
@Test public void testComposite() throws Exception { // { // Parser<Object> p = new Parser<Object>("%(ABC)"); // p.setContext(context); // Node t = p.parse(); // Converter<Object> head = p.compile(t, converterMap); // String result = write(head, new Object()); // assertEquals("ABC", result); // } { Context c = new ContextBase(); Parser<Object> p = new Parser<Object>("%(ABC %hello)"); p.setContext(c); Node t = p.parse(); Converter<Object> head = p.compile(t, converterMap); String result = write(head, new Object()); StatusPrinter.print(c); assertEquals("ABC Hello", result); } { Parser<Object> p = new Parser<Object>("%(ABC %hello)"); p.setContext(context); Node t = p.parse(); Converter<Object> head = p.compile(t, converterMap); String result = write(head, new Object()); assertEquals("ABC Hello", result); } }
protected boolean checkTabletReportCacheUp(long timeMs) { for (Backend backend : GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getBackends()) { if (TimeUtils.timeStringToLong(backend.getBackendStatus().lastSuccessReportTabletsTime) < timeMs) { LOG.warn("last tablet report time of backend {}:{} is {}, should wait it to report tablets", backend.getHost(), backend.getHeartbeatPort(), backend.getBackendStatus().lastSuccessReportTabletsTime); return false; } } return true; }
@Test public void testCheckTabletReportCacheUp() { long timeMs = System.currentTimeMillis(); MetaRecoveryDaemon metaRecoveryDaemon = new MetaRecoveryDaemon(); for (Backend backend : GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getBackends()) { backend.getBackendStatus().lastSuccessReportTabletsTime = TimeUtils .longToTimeString(timeMs); } Assert.assertFalse(metaRecoveryDaemon.checkTabletReportCacheUp(timeMs + 1000L)); Assert.assertTrue(metaRecoveryDaemon.checkTabletReportCacheUp(timeMs - 1000L)); }
@Override public void reserveSegments(int numberOfSegmentsToReserve) throws IOException { checkArgument( numberOfSegmentsToReserve <= numberOfRequiredMemorySegments, "Can not reserve more segments than number of required segments."); CompletableFuture<?> toNotify = null; synchronized (availableMemorySegments) { checkDestroyed(); if (numberOfRequestedMemorySegments < numberOfSegmentsToReserve) { availableMemorySegments.addAll( networkBufferPool.requestPooledMemorySegmentsBlocking( numberOfSegmentsToReserve - numberOfRequestedMemorySegments)); toNotify = availabilityHelper.getUnavailableToResetAvailable(); } } mayNotifyAvailable(toNotify); }
@Test void testReserveSegments() throws Exception { NetworkBufferPool networkBufferPool = new NetworkBufferPool(2, memorySegmentSize, Duration.ofSeconds(2)); try { BufferPool bufferPool1 = networkBufferPool.createBufferPool(1, 2); assertThatThrownBy(() -> bufferPool1.reserveSegments(2)) .isInstanceOf(IllegalArgumentException.class); // request all buffers ArrayList<Buffer> buffers = new ArrayList<>(2); buffers.add(bufferPool1.requestBuffer()); buffers.add(bufferPool1.requestBuffer()); assertThat(buffers).hasSize(2); BufferPool bufferPool2 = networkBufferPool.createBufferPool(1, 10); assertThatThrownBy(() -> bufferPool2.reserveSegments(1)) .isInstanceOf(IOException.class); assertThat(bufferPool2.isAvailable()).isFalse(); buffers.forEach(Buffer::recycleBuffer); bufferPool1.lazyDestroy(); bufferPool2.lazyDestroy(); BufferPool bufferPool3 = networkBufferPool.createBufferPool(2, 10); assertThat(bufferPool3.getNumberOfAvailableMemorySegments()).isOne(); bufferPool3.reserveSegments(2); assertThat(bufferPool3.getNumberOfAvailableMemorySegments()).isEqualTo(2); bufferPool3.lazyDestroy(); assertThatThrownBy(() -> bufferPool3.reserveSegments(1)) .isInstanceOf(CancelTaskException.class); } finally { networkBufferPool.destroy(); } }
public static boolean isTrackInstallation() { try { return SAStoreManager.getInstance().isExists(SHARED_PREF_CORRECT_TRACK_INSTALLATION); } catch (Exception e) { SALog.printStackTrace(e); } return false; }
@Test public void isTrackInstallation() { }
@Override public Optional<DatabaseAdminExecutor> create(final SQLStatementContext sqlStatementContext) { SQLStatement sqlStatement = sqlStatementContext.getSqlStatement(); if (sqlStatement instanceof ShowFunctionStatusStatement) { return Optional.of(new ShowFunctionStatusExecutor((ShowFunctionStatusStatement) sqlStatement)); } if (sqlStatement instanceof ShowProcedureStatusStatement) { return Optional.of(new ShowProcedureStatusExecutor((ShowProcedureStatusStatement) sqlStatement)); } if (sqlStatement instanceof ShowTablesStatement) { return Optional.of(new ShowTablesExecutor((ShowTablesStatement) sqlStatement, sqlStatementContext.getDatabaseType())); } return Optional.empty(); }
@Test void assertCreateWithSetStatement() { when(sqlStatementContext.getSqlStatement()).thenReturn(new MySQLSetStatement()); Optional<DatabaseAdminExecutor> actual = new MySQLAdminExecutorCreator().create(sqlStatementContext, "", "", Collections.emptyList()); assertTrue(actual.isPresent()); assertThat(actual.get(), instanceOf(MySQLSetVariableAdminExecutor.class)); }
public void next() { index++; if (index > lineLength) { lineIndex++; if (lineIndex < lines.size()) { setLine(lines.get(lineIndex)); } else { setLine(SourceLine.of("", null)); } index = 0; } }
@Test public void testNext() { Scanner scanner = new Scanner(List.of( SourceLine.of("foo bar", null)), 0, 4); assertEquals('b', scanner.peek()); scanner.next(); assertEquals('a', scanner.peek()); scanner.next(); assertEquals('r', scanner.peek()); scanner.next(); assertEquals('\0', scanner.peek()); }
@Override public void run() { try { PushDataWrapper wrapper = generatePushData(); ClientManager clientManager = delayTaskEngine.getClientManager(); for (String each : getTargetClientIds()) { Client client = clientManager.getClient(each); if (null == client) { // means this client has disconnect continue; } Subscriber subscriber = client.getSubscriber(service); // skip if null if (subscriber == null) { continue; } delayTaskEngine.getPushExecutor().doPushWithCallback(each, subscriber, wrapper, new ServicePushCallback(each, subscriber, wrapper.getOriginalData(), delayTask.isPushToAll())); } } catch (Exception e) { Loggers.PUSH.error("Push task for service" + service.getGroupedServiceName() + " execute failed ", e); delayTaskEngine.addTask(service, new PushDelayTask(service, 1000L)); } }
@Test void testRunFailedWithNoRetry() { PushDelayTask delayTask = new PushDelayTask(service, 0L); PushExecuteTask executeTask = new PushExecuteTask(service, delayTaskExecuteEngine, delayTask); pushExecutor.setShouldSuccess(false); pushExecutor.setFailedException(new NoRequiredRetryException()); executeTask.run(); assertEquals(1, MetricsMonitor.getFailedPushMonitor().get()); verify(delayTaskExecuteEngine, never()).addTask(eq(service), any(PushDelayTask.class)); }
public void handleLoss(String loss) { lossHandler.accept(new UnwritableMetadataException(requestedMetadataVersion, loss)); }
@Test public void testDefaultLossHandler() { ImageWriterOptions options = new ImageWriterOptions.Builder().build(); assertEquals("stuff", assertThrows(UnwritableMetadataException.class, () -> options.handleLoss("stuff")).loss()); }
@Override public UnregisterBrokerResult unregisterBroker(int brokerId, UnregisterBrokerOptions options) { final KafkaFutureImpl<Void> future = new KafkaFutureImpl<>(); final long now = time.milliseconds(); final Call call = new Call("unregisterBroker", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) { @Override UnregisterBrokerRequest.Builder createRequest(int timeoutMs) { UnregisterBrokerRequestData data = new UnregisterBrokerRequestData().setBrokerId(brokerId); return new UnregisterBrokerRequest.Builder(data); } @Override void handleResponse(AbstractResponse abstractResponse) { final UnregisterBrokerResponse response = (UnregisterBrokerResponse) abstractResponse; Errors error = Errors.forCode(response.data().errorCode()); switch (error) { case NONE: future.complete(null); break; case REQUEST_TIMED_OUT: throw error.exception(); default: log.error("Unregister broker request for broker ID {} failed: {}", brokerId, error.message()); future.completeExceptionally(error.exception()); break; } } @Override void handleFailure(Throwable throwable) { future.completeExceptionally(throwable); } }; runnable.call(call, now); return new UnregisterBrokerResult(future); }
@Test public void testUnregisterBrokerTimeoutAndFailureRetry() { int nodeId = 1; try (final AdminClientUnitTestEnv env = mockClientEnv()) { env.kafkaClient().setNodeApiVersions( NodeApiVersions.create(ApiKeys.UNREGISTER_BROKER.id, (short) 0, (short) 0)); env.kafkaClient().prepareResponse(prepareUnregisterBrokerResponse(Errors.REQUEST_TIMED_OUT, 0)); env.kafkaClient().prepareResponse(prepareUnregisterBrokerResponse(Errors.UNKNOWN_SERVER_ERROR, 0)); UnregisterBrokerResult result = env.adminClient().unregisterBroker(nodeId); // Validate response assertNotNull(result.all()); TestUtils.assertFutureThrows(result.all(), Errors.UNKNOWN_SERVER_ERROR.exception().getClass()); } }
public String getSymbol() { final StringBuilder symbolic = new StringBuilder(); symbolic.append(setuid ? user.implies(Action.execute) ? StringUtils.substring(user.symbolic, 0, 2) + "s" : StringUtils.substring(user.symbolic, 0, 2) + "S" : user.symbolic); symbolic.append(setgid ? group.implies(Action.execute) ? StringUtils.substring(group.symbolic, 0, 2) + "s" : StringUtils.substring(group.symbolic, 0, 2) + "S" : group.symbolic); symbolic.append(sticky ? other.implies(Action.execute) ? StringUtils.substring(other.symbolic, 0, 2) + "t" : StringUtils.substring(other.symbolic, 0, 2) + "T" : other.symbolic); return symbolic.toString(); }
@Test public void testSymbol() { Permission p1 = new Permission(777); assertEquals("rwxrwxrwx", p1.getSymbol()); Permission p2 = new Permission(666); assertEquals("rw-rw-rw-", p2.getSymbol()); }
private ListenableFuture<TbAlarmResult> clearAlarm(TbContext ctx, TbMsg msg, Alarm alarm) { ctx.logJsEvalRequest(); ListenableFuture<JsonNode> asyncDetails = buildAlarmDetails(msg, alarm.getDetails()); return Futures.transform(asyncDetails, details -> { ctx.logJsEvalResponse(); AlarmApiCallResult result = ctx.getAlarmService().clearAlarm(ctx.getTenantId(), alarm.getId(), System.currentTimeMillis(), details); if (result.isSuccessful()) { return new TbAlarmResult(false, false, result.isCleared(), result.getAlarm()); } else { return new TbAlarmResult(false, false, false, alarm); } }, ctx.getDbCallbackExecutor()); }
@Test void alarmCanBeCleared() { initWithClearAlarmScript(); metadata.putValue("key", "value"); TbMsg msg = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, msgOriginator, metadata, "{\"temperature\": 50}"); long oldEndDate = System.currentTimeMillis(); Alarm activeAlarm = Alarm.builder().type("SomeType").tenantId(tenantId).originator(msgOriginator).severity(AlarmSeverity.WARNING).endTs(oldEndDate).build(); Alarm expectedAlarm = Alarm.builder() .tenantId(tenantId) .originator(msgOriginator) .cleared(true) .severity(AlarmSeverity.WARNING) .propagate(false) .type("SomeType") .details(null) .endTs(oldEndDate) .build(); when(alarmDetailsScriptMock.executeJsonAsync(msg)).thenReturn(Futures.immediateFuture(null)); when(alarmServiceMock.findLatestActiveByOriginatorAndType(tenantId, msgOriginator, "SomeType")).thenReturn(activeAlarm); when(alarmServiceMock.clearAlarm(eq(activeAlarm.getTenantId()), eq(activeAlarm.getId()), anyLong(), nullable(JsonNode.class))) .thenReturn(AlarmApiCallResult.builder() .successful(true) .cleared(true) .alarm(new AlarmInfo(expectedAlarm)) .build()); node.onMsg(ctxMock, msg); verify(ctxMock).enqueue(any(), successCaptor.capture(), failureCaptor.capture()); successCaptor.getValue().run(); verify(ctxMock).tellNext(any(), eq("Cleared")); ArgumentCaptor<TbMsg> msgCaptor = ArgumentCaptor.forClass(TbMsg.class); ArgumentCaptor<TbMsgType> typeCaptor = ArgumentCaptor.forClass(TbMsgType.class); ArgumentCaptor<EntityId> originatorCaptor = ArgumentCaptor.forClass(EntityId.class); ArgumentCaptor<TbMsgMetaData> metadataCaptor = ArgumentCaptor.forClass(TbMsgMetaData.class); ArgumentCaptor<String> dataCaptor = ArgumentCaptor.forClass(String.class); verify(ctxMock).transformMsg(msgCaptor.capture(), typeCaptor.capture(), originatorCaptor.capture(), metadataCaptor.capture(), dataCaptor.capture()); assertThat(TbMsgType.ALARM).isEqualTo(typeCaptor.getValue()); assertThat(msgOriginator).isEqualTo(originatorCaptor.getValue()); 
assertThat("value").isEqualTo(metadataCaptor.getValue().getValue("key")); assertThat(Boolean.TRUE.toString()).isEqualTo(metadataCaptor.getValue().getValue(DataConstants.IS_CLEARED_ALARM)); assertThat(metadata).isNotSameAs(metadataCaptor.getValue()); Alarm actualAlarm = JacksonUtil.fromBytes(dataCaptor.getValue().getBytes(), Alarm.class); assertThat(actualAlarm).isEqualTo(expectedAlarm); }
@Override protected void write(final PostgreSQLPacketPayload payload) { ByteBuf byteBuf = payload.getByteBuf(); for (DatabasePacket each : packets) { if (!(each instanceof PostgreSQLIdentifierPacket)) { each.write(payload); continue; } PostgreSQLIdentifierPacket eachPacket = (PostgreSQLIdentifierPacket) each; byteBuf.writeByte(eachPacket.getIdentifier().getValue()); int from = byteBuf.writerIndex(); byteBuf.writeInt(0); eachPacket.write(payload); int length = byteBuf.writerIndex() - from; byteBuf.setInt(from, length); } }
@Test void assertWrite() { PostgreSQLIdentifierPacket identifierPacket = mock(PostgreSQLIdentifierPacket.class); when(identifierPacket.getIdentifier()).thenReturn(PostgreSQLMessagePacketType.READY_FOR_QUERY); PostgreSQLPacket nonIdentifierPacket = mock(PostgreSQLPacket.class); PostgreSQLAggregatedResponsesPacket packet = new PostgreSQLAggregatedResponsesPacket(Arrays.asList(nonIdentifierPacket, identifierPacket)); PostgreSQLPacketPayload payload = mock(PostgreSQLPacketPayload.class); ByteBuf byteBuf = mock(ByteBuf.class); when(byteBuf.writerIndex()).thenReturn(1, 10); when(payload.getByteBuf()).thenReturn(byteBuf); packet.write(payload); verify(nonIdentifierPacket).write(payload); verify(byteBuf).writeByte(PostgreSQLMessagePacketType.READY_FOR_QUERY.getValue()); verify(byteBuf).writeInt(0); verify(identifierPacket).write(payload); verify(byteBuf).setInt(1, 10 - 1); }
public static int dayOfWeek(Date date) { return DateTime.of(date).dayOfWeek(); }
@Test public void dayOfWeekTest() { final int dayOfWeek = DateUtil.dayOfWeek(DateUtil.parse("2018-03-07")); assertEquals(Calendar.WEDNESDAY, dayOfWeek); final Week week = DateUtil.dayOfWeekEnum(DateUtil.parse("2018-03-07")); assertEquals(Week.WEDNESDAY, week); }
@Override public RetryStrategy getNextRetryStrategy() { int nextRemainingRetries = remainingRetries - 1; Preconditions.checkState( nextRemainingRetries >= 0, "The number of remaining retries must not be negative"); long nextRetryDelayMillis = Math.min(2 * currentRetryDelay.toMillis(), maxRetryDelay.toMillis()); return new ExponentialBackoffRetryStrategy( nextRemainingRetries, Duration.ofMillis(nextRetryDelayMillis), maxRetryDelay); }
@Test void testRetryFailure() { assertThatThrownBy( () -> new ExponentialBackoffRetryStrategy( 0, Duration.ofMillis(20L), Duration.ofMillis(20L)) .getNextRetryStrategy()) .isInstanceOf(IllegalStateException.class); }
public void unloadPlugin(GoPluginBundleDescriptor descriptorOfRemovedPlugin) { Bundle bundle = descriptorOfRemovedPlugin.bundle(); if (bundle == null) { return; } for (GoPluginDescriptor pluginDescriptor : descriptorOfRemovedPlugin.descriptors()) { for (PluginChangeListener listener : pluginChangeListeners) { try { listener.pluginUnLoaded(pluginDescriptor); } catch (Exception e) { LOGGER.warn("A plugin unload listener ({}) failed: {}", listener.toString(), pluginDescriptor, e); } } } pluginOSGiFramework.unloadPlugin(descriptorOfRemovedPlugin); }
/**
 * A descriptor with a null bundle must be ignored: the OSGi framework must never be
 * asked to unload it.
 */
@Test
void shouldNotUnloadAPluginIfItsBundleIsNull() {
    GoPluginBundleDescriptor pluginDescriptor = mock(GoPluginBundleDescriptor.class);
    when(pluginDescriptor.bundle()).thenReturn(null);
    pluginLoader.unloadPlugin(pluginDescriptor);
    verifyNoMoreInteractions(pluginOSGiFramework);
}
/**
 * Ensures the context path carries the admin URI suffix: paths that already contain it
 * are returned unchanged, otherwise the suffix is appended.
 *
 * @param contextPath the raw context path
 * @return the decorated path
 */
public static String decoratorPath(final String contextPath) {
    if (StringUtils.contains(contextPath, AdminConstants.URI_SUFFIX)) {
        return contextPath;
    }
    return contextPath + AdminConstants.URI_SUFFIX;
}
/**
 * The suffix is appended to a bare URI, and a URI that already carries it is unchanged.
 */
@Test
public void testDecoratorPath() {
    String uri = PathUtils.decoratorPath(URI);
    assertThat(uri, is(URI + AdminConstants.URI_SUFFIX));
    uri = PathUtils.decoratorPath(URI_WRAPPER);
    assertThat(uri, is(URI + AdminConstants.URI_SUFFIX));
}
/**
 * Creates the app namespace in the local database, delegating to the two-argument
 * overload with its boolean flag fixed to {@code true}.
 *
 * @param appNamespace the namespace to create
 * @return the created namespace
 */
@Transactional
public AppNamespace createAppNamespaceInLocal(AppNamespace appNamespace) {
    final AppNamespace created = createAppNamespaceInLocal(appNamespace, true);
    return created;
}
/**
 * Creating a public namespace whose name ("old") already exists in the fixture data
 * must be rejected with BadRequestException.
 */
@Test(expected = BadRequestException.class)
@Sql(scripts = "/sql/appnamespaceservice/init-appnamespace.sql", executionPhase = Sql.ExecutionPhase.BEFORE_TEST_METHOD)
@Sql(scripts = "/sql/cleanup.sql", executionPhase = Sql.ExecutionPhase.AFTER_TEST_METHOD)
public void testCreatePublicAppNamespaceExisted() {
    AppNamespace appNamespace = assembleBaseAppNamespace();
    appNamespace.setPublic(true);
    appNamespace.setName("old");
    appNamespace.setFormat(ConfigFileFormat.Properties.getValue());
    appNamespaceService.createAppNamespaceInLocal(appNamespace);
}
/**
 * Starts a find-files query scoped to the given table.
 *
 * @param table the table to search
 * @return a builder for the query
 */
public static Builder in(Table table) {
    final Builder builder = new Builder(table);
    return builder;
}
/**
 * After committing four files, filtering on file_path metadata with a startsWith
 * expression must return only the matching file (FILE_A).
 */
@TestTemplate
public void testWithMetadataMatching() {
    table
        .newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .appendFile(FILE_C)
        .appendFile(FILE_D)
        .commit();
    Iterable<DataFile> files =
        FindFiles.in(table)
            .withMetadataMatching(Expressions.startsWith("file_path", "/path/to/data-a"))
            .collect();
    assertThat(pathSet(files)).isEqualTo(pathSet(FILE_A));
}
/**
 * Requests a metadata update for the given topic. A topic still tracked as "new" only
 * needs the new-topics (partial) update path; any other topic triggers a regular
 * update request.
 *
 * @param topic the topic to refresh
 * @return the value produced by the underlying update request
 */
public synchronized int requestUpdateForTopic(String topic) {
    if (!newTopics.contains(topic)) {
        return requestUpdate(false);
    }
    return requestUpdateForNewTopics();
}
/**
 * Exercises the new-topic vs. known-topic update paths: a new topic's request is
 * satisfied by a partial update, while a request for an already-known topic demands a
 * full update that a partial update must not clear.
 */
@Test
public void testRequestUpdateForTopic() {
    long now = 10000;
    final String topic1 = "topic-1";
    final String topic2 = "topic-2";
    // Add the topics to the metadata.
    metadata.add(topic1, now);
    metadata.add(topic2, now);
    assertTrue(metadata.updateRequested());
    // Request an update for topic1. Since the topic is considered new, it should not trigger
    // the metadata to require a full update.
    metadata.requestUpdateForTopic(topic1);
    assertTrue(metadata.updateRequested());
    // Perform the partial update. Verify no additional (full) updates are requested.
    now += 1000;
    metadata.updateWithCurrentRequestVersion(responseWithTopics(Collections.singleton(topic1)), true, now);
    assertFalse(metadata.updateRequested());
    // Request an update for topic1 again. Such a request may occur when the leader
    // changes, which may affect many topics, and should therefore request a full update.
    metadata.requestUpdateForTopic(topic1);
    assertTrue(metadata.updateRequested());
    // Perform a partial update for the topic. This should not clear the full update.
    now += 1000;
    metadata.updateWithCurrentRequestVersion(responseWithTopics(Collections.singleton(topic1)), true, now);
    assertTrue(metadata.updateRequested());
    // Perform the full update. This should clear the update request.
    now += 1000;
    metadata.updateWithCurrentRequestVersion(responseWithTopics(new HashSet<>(Arrays.asList(topic1, topic2))), false, now);
    assertFalse(metadata.updateRequested());
}
/**
 * Exports the lookup data adapter identified by the descriptor as a content-pack
 * entity; empty when no adapter with that id exists.
 *
 * @param entityDescriptor     descriptor whose id names the data adapter
 * @param entityDescriptorIds  id mapping used while building the exported entity
 * @return the exported entity, or empty if the adapter is unknown
 */
@Override
public Optional<Entity> exportEntity(EntityDescriptor entityDescriptor, EntityDescriptorIds entityDescriptorIds) {
    final String dataAdapterId = entityDescriptor.id().id();
    return dataAdapterService.get(dataAdapterId)
            .map(dataAdapter -> exportNativeEntity(dataAdapter, entityDescriptorIds));
}
/**
 * Exports the fixture data adapter and checks the resulting entity's id, type, and the
 * decoded adapter fields (name/title/description) against the fixture values.
 */
@Test
@MongoDBFixtures("LookupDataAdapterFacadeTest.json")
public void collectEntity() {
    final EntityDescriptor descriptor = EntityDescriptor.create("5adf24a04b900a0fdb4e52c8", ModelTypes.LOOKUP_ADAPTER_V1);
    final EntityDescriptorIds entityDescriptorIds = EntityDescriptorIds.of(descriptor);
    final Optional<Entity> collectedEntity = facade.exportEntity(descriptor, entityDescriptorIds);
    assertThat(collectedEntity)
            .isPresent()
            .containsInstanceOf(EntityV1.class);
    final EntityV1 entity = (EntityV1) collectedEntity.orElseThrow(AssertionError::new);
    assertThat(entity.id()).isEqualTo(ModelId.of(entityDescriptorIds.get(descriptor).orElse(null)));
    assertThat(entity.type()).isEqualTo(ModelTypes.LOOKUP_ADAPTER_V1);
    final LookupDataAdapterEntity lookupDataAdapterEntity = objectMapper.convertValue(entity.data(), LookupDataAdapterEntity.class);
    assertThat(lookupDataAdapterEntity.name()).isEqualTo(ValueReference.of("http-dsv"));
    assertThat(lookupDataAdapterEntity.title()).isEqualTo(ValueReference.of("HTTP DSV"));
    assertThat(lookupDataAdapterEntity.description()).isEqualTo(ValueReference.of("HTTP DSV"));
}
/**
 * Executes a ProcessBundleRequest end to end and builds its response.
 *
 * <p>Flow: fetch (or create) a cached {@code BundleProcessor} for the descriptor; run
 * start functions; feed input elements (either embedded in the request or streamed via
 * the data client); run finish functions in reverse; then attach outbound elements,
 * residual roots, monitoring data, and finalization flags to the response. On success
 * the processor is released back to the cache for reuse; on any exception it is
 * discarded so a half-processed bundle is never reused.
 *
 * @param request the instruction carrying the ProcessBundleRequest
 * @return the response builder with the ProcessBundleResponse populated
 * @throws Exception if any start/process/finish step fails
 */
public BeamFnApi.InstructionResponse.Builder processBundle(BeamFnApi.InstructionRequest request) throws Exception {
    BeamFnApi.ProcessBundleResponse.Builder response = BeamFnApi.ProcessBundleResponse.newBuilder();
    BundleProcessor bundleProcessor =
        bundleProcessorCache.get(
            request,
            () -> {
                try {
                    return createBundleProcessor(
                        request.getProcessBundle().getProcessBundleDescriptorId(),
                        request.getProcessBundle());
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            });
    try {
        PTransformFunctionRegistry startFunctionRegistry = bundleProcessor.getStartFunctionRegistry();
        PTransformFunctionRegistry finishFunctionRegistry = bundleProcessor.getFinishFunctionRegistry();
        ExecutionStateTracker stateTracker = bundleProcessor.getStateTracker();
        try (HandleStateCallsForBundle beamFnStateClient = bundleProcessor.getBeamFnStateClient()) {
            stateTracker.start(request.getInstructionId());
            try {
                // Already in reverse topological order so we don't need to do anything.
                for (ThrowingRunnable startFunction : startFunctionRegistry.getFunctions()) {
                    LOG.debug("Starting function {}", startFunction);
                    startFunction.run();
                }
                if (request.getProcessBundle().hasElements()) {
                    boolean inputFinished =
                        bundleProcessor
                            .getInboundObserver()
                            .multiplexElements(request.getProcessBundle().getElements());
                    if (!inputFinished) {
                        throw new RuntimeException(
                            "Elements embedded in ProcessBundleRequest do not contain stream terminators for "
                                + "all data and timer inputs. Unterminated endpoints: "
                                + bundleProcessor.getInboundObserver().getUnfinishedEndpoints());
                    }
                } else if (!bundleProcessor.getInboundEndpointApiServiceDescriptors().isEmpty()) {
                    BeamFnDataInboundObserver observer = bundleProcessor.getInboundObserver();
                    beamFnDataClient.registerReceiver(
                        request.getInstructionId(),
                        bundleProcessor.getInboundEndpointApiServiceDescriptors(),
                        observer);
                    observer.awaitCompletion();
                    beamFnDataClient.unregisterReceiver(
                        request.getInstructionId(),
                        bundleProcessor.getInboundEndpointApiServiceDescriptors());
                }
                // Need to reverse this since we want to call finish in topological order.
                for (ThrowingRunnable finishFunction :
                    Lists.reverse(finishFunctionRegistry.getFunctions())) {
                    LOG.debug("Finishing function {}", finishFunction);
                    finishFunction.run();
                }
                // If bundleProcessor has not flushed any elements, embed them in response.
                embedOutboundElementsIfApplicable(response, bundleProcessor);
                // Add all checkpointed residuals to the response.
                response.addAllResidualRoots(bundleProcessor.getSplitListener().getResidualRoots());
                // Add all metrics to the response.
                bundleProcessor.getProgressRequestLock().lock();
                Map<String, ByteString> monitoringData = finalMonitoringData(bundleProcessor);
                if (runnerAcceptsShortIds) {
                    response.putAllMonitoringData(monitoringData);
                } else {
                    for (Map.Entry<String, ByteString> metric : monitoringData.entrySet()) {
                        response.addMonitoringInfos(
                            shortIds.get(metric.getKey()).toBuilder().setPayload(metric.getValue()));
                    }
                }
                if (!bundleProcessor.getBundleFinalizationCallbackRegistrations().isEmpty()) {
                    finalizeBundleHandler.registerCallbacks(
                        bundleProcessor.getInstructionId(),
                        ImmutableList.copyOf(bundleProcessor.getBundleFinalizationCallbackRegistrations()));
                    response.setRequiresFinalization(true);
                }
            } finally {
                // We specifically deactivate state tracking while we are holding the progress request and
                // sampling locks.
                stateTracker.reset();
            }
        }
        // Mark the bundle processor as re-usable.
        bundleProcessorCache.release(
            request.getProcessBundle().getProcessBundleDescriptorId(), bundleProcessor);
        return BeamFnApi.InstructionResponse.newBuilder().setProcessBundle(response);
    } catch (Exception e) {
        // Make sure we clean-up from the active set of bundle processors.
        bundleProcessorCache.discard(bundleProcessor);
        throw e;
    }
}
/**
 * State calls issued during startBundle must be awaited before processBundle returns:
 * a mocked state client completes each request on a delayed background thread, and
 * both the successful and the failing futures must be done once processBundle exits.
 */
@Test
public void testPendingStateCallsBlockTillCompletion() throws Exception {
    BeamFnApi.ProcessBundleDescriptor processBundleDescriptor =
        BeamFnApi.ProcessBundleDescriptor.newBuilder()
            .putTransforms(
                "2L",
                RunnerApi.PTransform.newBuilder()
                    .setSpec(RunnerApi.FunctionSpec.newBuilder().setUrn(DATA_INPUT_URN).build())
                    .build())
            .setStateApiServiceDescriptor(ApiServiceDescriptor.getDefaultInstance())
            .build();
    Map<String, BeamFnApi.ProcessBundleDescriptor> fnApiRegistry =
        ImmutableMap.of("1L", processBundleDescriptor);
    CompletableFuture<StateResponse>[] successfulResponse = new CompletableFuture[1];
    CompletableFuture<StateResponse>[] unsuccessfulResponse = new CompletableFuture[1];
    BeamFnStateGrpcClientCache mockBeamFnStateGrpcClient =
        Mockito.mock(BeamFnStateGrpcClientCache.class);
    BeamFnStateClient mockBeamFnStateClient = Mockito.mock(BeamFnStateClient.class);
    when(mockBeamFnStateGrpcClient.forApiServiceDescriptor(any()))
        .thenReturn(mockBeamFnStateClient);
    doAnswer(
            invocation -> {
                StateRequest.Builder stateRequestBuilder =
                    (StateRequest.Builder) invocation.getArguments()[0];
                CompletableFuture<StateResponse> completableFuture = new CompletableFuture<>();
                new Thread(
                        () -> {
                            // Simulate sleeping which introduces a race which most of the time requires
                            // the ProcessBundleHandler to block.
                            Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
                            switch (stateRequestBuilder.getInstructionId()) {
                                case "SUCCESS":
                                    completableFuture.complete(StateResponse.getDefaultInstance());
                                    break;
                                case "FAIL":
                                    completableFuture.completeExceptionally(
                                        new RuntimeException("TEST ERROR"));
                            }
                        })
                    .start();
                return completableFuture;
            })
        .when(mockBeamFnStateClient)
        .handle(any());
    ProcessBundleHandler handler =
        new ProcessBundleHandler(
            PipelineOptionsFactory.create(),
            Collections.emptySet(),
            fnApiRegistry::get,
            beamFnDataClient,
            mockBeamFnStateGrpcClient,
            null /* finalizeBundleHandler */,
            new ShortIdMap(),
            executionStateSampler,
            ImmutableMap.of(
                DATA_INPUT_URN,
                new PTransformRunnerFactory<Object>() {
                    @Override
                    public Object createRunnerForPTransform(Context context) throws IOException {
                        BeamFnStateClient beamFnStateClient = context.getBeamFnStateClient();
                        context.addStartBundleFunction(() -> doStateCalls(beamFnStateClient));
                        return null;
                    }

                    private void doStateCalls(BeamFnStateClient beamFnStateClient) {
                        successfulResponse[0] =
                            beamFnStateClient.handle(
                                StateRequest.newBuilder().setInstructionId("SUCCESS"));
                        unsuccessfulResponse[0] =
                            beamFnStateClient.handle(
                                StateRequest.newBuilder().setInstructionId("FAIL"));
                    }
                }),
            Caches.noop(),
            new BundleProcessorCache(),
            null /* dataSampler */);
    handler.processBundle(
        BeamFnApi.InstructionRequest.newBuilder()
            .setProcessBundle(
                BeamFnApi.ProcessBundleRequest.newBuilder().setProcessBundleDescriptorId("1L"))
            .build());
    assertTrue(successfulResponse[0].isDone());
    assertTrue(unsuccessfulResponse[0].isDone());
}
/**
 * Supplier for a persistent, timestamped window store; delegates to
 * {@code persistentWindowStore} with the timestamped flag set.
 *
 * @param name             store name
 * @param retentionPeriod  how long records are retained
 * @param windowSize       size of each window
 * @param retainDuplicates whether duplicate keys are kept
 * @return the store supplier
 * @throws IllegalArgumentException if the delegate rejects the arguments
 */
public static WindowBytesStoreSupplier persistentTimestampedWindowStore(final String name,
                                                                        final Duration retentionPeriod,
                                                                        final Duration windowSize,
                                                                        final boolean retainDuplicates) throws IllegalArgumentException {
    final boolean timestampedStore = true;
    return persistentWindowStore(name, retentionPeriod, windowSize, retainDuplicates, timestampedStore);
}
/**
 * A negative window size must be rejected with IllegalArgumentException and the
 * expected message.
 */
@Test
public void shouldThrowIfIPersistentTimestampedWindowStoreIfWindowSizeIsNegative() {
    final Exception e = assertThrows(IllegalArgumentException.class,
        () -> Stores.persistentTimestampedWindowStore("anyName", ofMillis(0L), ofMillis(-1L), false));
    assertEquals("windowSize cannot be negative", e.getMessage());
}
@Override public Long createGroup(MemberGroupCreateReqVO createReqVO) { // 插入 MemberGroupDO group = MemberGroupConvert.INSTANCE.convert(createReqVO); memberGroupMapper.insert(group); // 返回 return group.getId(); }
/**
 * Creates a group from a random request and verifies the persisted row matches it.
 */
@Test
public void testCreateGroup_success() {
    // Prepare the request.
    MemberGroupCreateReqVO reqVO = randomPojo(MemberGroupCreateReqVO.class, o -> o.setStatus(randomCommonStatus()));
    // Invoke.
    Long groupId = groupService.createGroup(reqVO);
    // Assert an id was generated.
    assertNotNull(groupId);
    // Verify the persisted record's fields are correct.
    MemberGroupDO group = groupMapper.selectById(groupId);
    assertPojoEquals(reqVO, group);
}
/** Line numbers reported as changed by the underlying tracker. */
Set<Integer> changedLines() {
    final Set<Integer> lines = tracker.changedLines();
    return lines;
}
/**
 * A diff adding exactly one line must report line 1 as the only changed line.
 */
@Test
public void count_single_added_line() throws IOException {
    String example = "diff --git a/file-b1.xoo b/file-b1.xoo\n"
        + "index 0000000..c2a9048\n"
        + "--- a/foo\n"
        + "+++ b/bar\n"
        + "@@ -0,0 +1 @@\n"
        + "+added line\n";
    printDiff(example);
    assertThat(underTest.changedLines()).containsExactly(1);
}
public NodeState getWantedState() { NodeState retiredState = new NodeState(node.getType(), State.RETIRED); // Don't let configure retired state override explicitly set Down and Maintenance. if (configuredRetired && wantedState.above(retiredState)) { return retiredState; } return wantedState; }
/**
 * A config-retired node with the default (Up) wanted state must report RETIRED.
 */
@Test
void retired_state_overrides_default_up_wanted_state() {
    final ClusterFixture fixture = ClusterFixture.forFlatCluster(3).markNodeAsConfigRetired(1);
    NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 1));
    assertEquals(State.RETIRED, nodeInfo.getWantedState().getState());
}
/**
 * Updates a combination (group-buy) activity and its product list inside one
 * transaction. Validation order matters: existence, then status, then product
 * conflict, then product existence — only then are the rows written.
 *
 * @param updateReqVO the update request
 */
@Override
@Transactional(rollbackFor = Exception.class)
public void updateCombinationActivity(CombinationActivityUpdateReqVO updateReqVO) {
    // Ensure the activity exists.
    CombinationActivityDO activityDO = validateCombinationActivityExists(updateReqVO.getId());
    // A disabled activity must not be modified.
    if (ObjectUtil.equal(activityDO.getStatus(), CommonStatusEnum.DISABLE.getStatus())) {
        throw exception(COMBINATION_ACTIVITY_STATUS_DISABLE_NOT_UPDATE);
    }
    // Ensure the SPU is not already used by a conflicting activity.
    validateProductConflict(updateReqVO.getSpuId(), updateReqVO.getId());
    // Ensure the referenced products exist.
    validateProductExists(updateReqVO.getSpuId(), updateReqVO.getProducts());
    // Update the activity row.
    CombinationActivityDO updateObj = CombinationActivityConvert.INSTANCE.convert(updateReqVO);
    combinationActivityMapper.updateById(updateObj);
    // Update the activity's products.
    updateCombinationProduct(updateObj, updateReqVO.getProducts());
}
/**
 * Inserts an activity, updates it through the service, and verifies the stored row
 * matches the update request.
 */
@Test
public void testUpdateCombinationActivity_success() {
    // Mock data: insert an existing activity row first.
    CombinationActivityDO dbCombinationActivity = randomPojo(CombinationActivityDO.class);
    combinationActivityMapper.insert(dbCombinationActivity);// insert an existing record up front
    // Prepare the update request, targeting the inserted row.
    CombinationActivityUpdateReqVO reqVO = randomPojo(CombinationActivityUpdateReqVO.class, o -> {
        o.setId(dbCombinationActivity.getId()); // set the id being updated
    });
    // Invoke.
    combinationActivityService.updateCombinationActivity(reqVO);
    // Verify the update was applied correctly.
    CombinationActivityDO combinationActivity = combinationActivityMapper.selectById(reqVO.getId()); // fetch the latest row
    assertPojoEquals(reqVO, combinationActivity);
}
/**
 * Covariant override narrowing the statement type to {@link ShowIndexStatement}.
 */
@Override
public ShowIndexStatement getSqlStatement() {
    ShowIndexStatement statement = (ShowIndexStatement) super.getSqlStatement();
    return statement;
}
/**
 * A context built from a SHOW INDEX statement must expose the statement and extract
 * the single table name ("tbl_1") into its tables context.
 */
@Test
void assertNewInstance() {
    MySQLShowIndexStatement sqlStatement = new MySQLShowIndexStatement();
    sqlStatement.setTable(new SimpleTableSegment(new TableNameSegment(0, 0, new IdentifierValue("tbl_1"))));
    ShowIndexStatementContext actual = new ShowIndexStatementContext(sqlStatement, DefaultDatabase.LOGIC_NAME);
    assertThat(actual, instanceOf(CommonSQLStatementContext.class));
    assertThat(actual.getSqlStatement(), is(sqlStatement));
    assertThat(actual.getTablesContext().getSimpleTables().stream().map(each -> each.getTableName().getIdentifier().getValue()).collect(Collectors.toList()), is(Collections.singletonList("tbl_1")));
}
/**
 * Builds the ON DUPLICATE KEY UPDATE clause, assigning every non-unique-key column
 * from EXCLUDED.&lt;column&gt;, with identifiers escaped for this dialect.
 *
 * @param dataRecord record whose columns drive the assignments
 * @return the clause, always present
 */
@Override
public Optional<String> buildInsertOnDuplicateClause(final DataRecord dataRecord) {
    PipelineSQLSegmentBuilder segmentBuilder = new PipelineSQLSegmentBuilder(getType());
    String assignments = dataRecord.getColumns().stream()
            .filter(column -> !column.isUniqueKey())
            .map(column -> {
                String escapedName = segmentBuilder.getEscapedIdentifier(column.getName());
                return escapedName + "=EXCLUDED." + escapedName;
            })
            .collect(Collectors.joining(","));
    return Optional.of("ON DUPLICATE KEY UPDATE " + assignments);
}
/**
 * The clause must assign every non-unique-key column (c0..c3) from EXCLUDED, joined
 * with commas and no trailing separator.
 */
@Test
void assertBuildInsertOnDuplicateClause() {
    String actual = sqlBuilder.buildInsertOnDuplicateClause(mockDataRecord()).orElse(null);
    assertThat(actual, is("ON DUPLICATE KEY UPDATE c0=EXCLUDED.c0,c1=EXCLUDED.c1,c2=EXCLUDED.c2,c3=EXCLUDED.c3"));
}
/**
 * Reads timeline entity files under {@code dir}, keeps those matching the entity type
 * and every configured filter (created-time range, relatesTo, isRelatedTo, info,
 * config, metric, event), and returns up to {@code filters.getLimit()} entities
 * ordered by descending created time (the TreeMap comparator reverses Long order).
 *
 * @param dir            directory containing the entity files
 * @param entityType     required entity type
 * @param filters        filter set applied to each candidate entity
 * @param dataToRetrieve controls which fields are copied into the returned entities
 * @return the filtered, limit-capped entity set
 * @throws IOException on filesystem or parse errors
 */
private Set<TimelineEntity> getEntities(Path dir, String entityType,
    TimelineEntityFilters filters, TimelineDataToRetrieve dataToRetrieve)
    throws IOException {
  // First sort the selected entities based on created/start time.
  Map<Long, Set<TimelineEntity>> sortedEntities =
      new TreeMap<>(
          new Comparator<Long>() {
            @Override
            public int compare(Long l1, Long l2) {
              return l2.compareTo(l1);
            }
          }
      );
  dir = getNormalPath(dir);
  if (dir != null) {
    RemoteIterator<LocatedFileStatus> fileStatuses = fs.listFiles(dir, false);
    if (fileStatuses != null) {
      while (fileStatuses.hasNext()) {
        LocatedFileStatus locatedFileStatus = fileStatuses.next();
        Path entityFile = locatedFileStatus.getPath();
        // Skip files that are not timeline-service storage files.
        if (!entityFile.getName()
            .contains(TIMELINE_SERVICE_STORAGE_EXTENSION)) {
          continue;
        }
        try (BufferedReader reader = new BufferedReader(
            new InputStreamReader(fs.open(entityFile),
                StandardCharsets.UTF_8))) {
          TimelineEntity entity = readEntityFromFile(reader);
          if (!entity.getType().equals(entityType)) {
            continue;
          }
          if (!isTimeInRange(entity.getCreatedTime(),
              filters.getCreatedTimeBegin(),
              filters.getCreatedTimeEnd())) {
            continue;
          }
          if (filters.getRelatesTo() != null &&
              !filters.getRelatesTo().getFilterList().isEmpty() &&
              !TimelineStorageUtils.matchRelatesTo(entity,
                  filters.getRelatesTo())) {
            continue;
          }
          if (filters.getIsRelatedTo() != null &&
              !filters.getIsRelatedTo().getFilterList().isEmpty() &&
              !TimelineStorageUtils.matchIsRelatedTo(entity,
                  filters.getIsRelatedTo())) {
            continue;
          }
          if (filters.getInfoFilters() != null &&
              !filters.getInfoFilters().getFilterList().isEmpty() &&
              !TimelineStorageUtils.matchInfoFilters(entity,
                  filters.getInfoFilters())) {
            continue;
          }
          if (filters.getConfigFilters() != null &&
              !filters.getConfigFilters().getFilterList().isEmpty() &&
              !TimelineStorageUtils.matchConfigFilters(entity,
                  filters.getConfigFilters())) {
            continue;
          }
          if (filters.getMetricFilters() != null &&
              !filters.getMetricFilters().getFilterList().isEmpty() &&
              !TimelineStorageUtils.matchMetricFilters(entity,
                  filters.getMetricFilters())) {
            continue;
          }
          if (filters.getEventFilters() != null &&
              !filters.getEventFilters().getFilterList().isEmpty() &&
              !TimelineStorageUtils.matchEventFilters(entity,
                  filters.getEventFilters())) {
            continue;
          }
          // Trim the entity down to the requested fields before grouping by time.
          TimelineEntity entityToBeReturned = createEntityToBeReturned(
              entity, dataToRetrieve.getFieldsToRetrieve());
          Set<TimelineEntity> entitiesCreatedAtSameTime =
              sortedEntities.get(entityToBeReturned.getCreatedTime());
          if (entitiesCreatedAtSameTime == null) {
            entitiesCreatedAtSameTime = new HashSet<TimelineEntity>();
          }
          entitiesCreatedAtSameTime.add(entityToBeReturned);
          sortedEntities.put(entityToBeReturned.getCreatedTime(),
              entitiesCreatedAtSameTime);
        }
      }
    }
  }
  // Flatten the grouped entities in time order, stopping once the limit is reached.
  Set<TimelineEntity> entities = new HashSet<TimelineEntity>();
  long entitiesAdded = 0;
  for (Set<TimelineEntity> entitySet : sortedEntities.values()) {
    for (TimelineEntity entity : entitySet) {
      entities.add(entity);
      ++entitiesAdded;
      if (entitiesAdded >= filters.getLimit()) {
        return entities;
      }
    }
  }
  return entities;
}
/**
 * Filtering by relatesTo must return only id_1; filtering by isRelatedTo must return
 * exactly id_1 and id_3, per the fixture data.
 */
@Test
void testGetEntitiesByRelations() throws Exception {
    // Get entities based on relatesTo.
    TimelineFilterList relatesTo = new TimelineFilterList(Operator.OR);
    Set<Object> relatesToIds = new HashSet<Object>(Arrays.asList((Object) "flow1"));
    relatesTo.addFilter(new TimelineKeyValuesFilter(
        TimelineCompareOp.EQUAL, "flow", relatesToIds));
    Set<TimelineEntity> result = reader.getEntities(
        new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
        "app", null),
        new TimelineEntityFilters.Builder().relatesTo(relatesTo).build(),
        new TimelineDataToRetrieve());
    assertEquals(1, result.size());
    // Only one entity with ID id_1 should be returned.
    for (TimelineEntity entity : result) {
      if (!entity.getId().equals("id_1")) {
        fail("Incorrect filtering based on relatesTo");
      }
    }
    // Get entities based on isRelatedTo.
    TimelineFilterList isRelatedTo = new TimelineFilterList(Operator.OR);
    Set<Object> isRelatedToIds = new HashSet<Object>(Arrays.asList((Object) "tid1_2"));
    isRelatedTo.addFilter(new TimelineKeyValuesFilter(
        TimelineCompareOp.EQUAL, "type1", isRelatedToIds));
    result = reader.getEntities(
        new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
        "app", null),
        new TimelineEntityFilters.Builder().isRelatedTo(isRelatedTo).build(),
        new TimelineDataToRetrieve());
    assertEquals(2, result.size());
    // Two entities with IDs' id_1 and id_3 should be returned.
    for (TimelineEntity entity : result) {
      if (!entity.getId().equals("id_1") && !entity.getId().equals("id_3")) {
        fail("Incorrect filtering based on isRelatedTo");
      }
    }
}
/**
 * Extracts the Subject Alternative Name entries from the certificate's SAN extension;
 * empty when the extension is absent. The raw extension value may be wrapped in an
 * ASN.1 OCTET STRING, in which case the inner octets are parsed again before being
 * interpreted as GeneralNames.
 *
 * @param certificate the certificate to inspect
 * @return the SAN entries, possibly empty
 * @throws UncheckedIOException if the extension bytes cannot be parsed
 */
public static List<SubjectAlternativeName> getSubjectAlternativeNames(X509Certificate certificate) {
    byte[] extensionValue = certificate.getExtensionValue(SUBJECT_ALTERNATIVE_NAMES.getOId());
    if (extensionValue == null) {
        return List.of();
    }
    try {
        ASN1Encodable parsed = ASN1Primitive.fromByteArray(extensionValue);
        if (parsed instanceof ASN1OctetString) {
            byte[] innerOctets = ((ASN1OctetString) parsed).getOctets();
            parsed = ASN1Primitive.fromByteArray(innerOctets);
        }
        GeneralNames names = GeneralNames.getInstance(parsed);
        return SubjectAlternativeName.fromGeneralNames(names);
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
/**
 * A certificate built with one DNS SAN must round-trip: exactly that SAN is listed.
 */
@Test
void can_list_subject_alternative_names() {
    KeyPair keypair = KeyUtils.generateKeypair(KeyAlgorithm.EC, 256);
    X500Principal subject = new X500Principal("CN=myservice");
    SubjectAlternativeName san = new SubjectAlternativeName(DNS, "dns-san");
    X509Certificate cert = X509CertificateBuilder
            .fromKeypair(
                    keypair, subject, Instant.now(), Instant.now().plus(1, ChronoUnit.DAYS), SignatureAlgorithm.SHA512_WITH_ECDSA, BigInteger.valueOf(1))
            .addSubjectAlternativeName(san)
            .build();
    List<SubjectAlternativeName> sans = X509CertificateUtils.getSubjectAlternativeNames(cert);
    assertEquals(1, sans.size());
    assertEquals(san, sans.get(0));
}
public static String getChecksum(String algorithm, File file) throws NoSuchAlgorithmException, IOException { FileChecksums fileChecksums = CHECKSUM_CACHE.get(file); if (fileChecksums == null) { try (InputStream stream = Files.newInputStream(file.toPath())) { final MessageDigest md5Digest = getMessageDigest(MD5); final MessageDigest sha1Digest = getMessageDigest(SHA1); final MessageDigest sha256Digest = getMessageDigest(SHA256); final byte[] buffer = new byte[BUFFER_SIZE]; int read = stream.read(buffer, 0, BUFFER_SIZE); while (read > -1) { // update all checksums together instead of reading the file multiple times md5Digest.update(buffer, 0, read); sha1Digest.update(buffer, 0, read); sha256Digest.update(buffer, 0, read); read = stream.read(buffer, 0, BUFFER_SIZE); } fileChecksums = new FileChecksums( getHex(md5Digest.digest()), getHex(sha1Digest.digest()), getHex(sha256Digest.digest()) ); CHECKSUM_CACHE.put(file, fileChecksums); } } switch (algorithm.toUpperCase()) { case MD5: return fileChecksums.md5; case SHA1: return fileChecksums.sha1; case SHA256: return fileChecksums.sha256; default: throw new NoSuchAlgorithmException(algorithm); } }
/**
 * A nonexistent file must surface an IOException whose message names the file.
 */
@Test
public void testGetChecksum_FileNotFound() throws Exception {
    String algorithm = "MD5";
    File file = new File("not a valid file");
    Exception exception = Assert.assertThrows(IOException.class, () -> Checksum.getChecksum(algorithm, file));
    assertTrue(exception.getMessage().contains("not a valid file"));
}
/**
 * Drives the scenario: lazy beforeRun() on first entry, then steps are executed one by
 * one via nextStepIndex() (which supports debug step-back), each result appended to the
 * scenario result. Any crash is converted into a fake "scenario [run] failed" step
 * result. The finally block guarantees afterRun() for non-skipped scenarios (and the
 * suite abort on failure when configured) and closes the log appender for top-level
 * (caller-less) runs to reclaim memory.
 */
@Override
public void run() {
    try { // make sure we call afterRun() even on crashes
        // and operate countdown latches, else we may hang the parallel runner
        if (steps == null) {
            beforeRun();
        }
        if (skipped) {
            return;
        }
        int count = steps.size();
        int index = 0;
        while ((index = nextStepIndex()) < count) {
            currentStep = steps.get(index);
            execute(currentStep);
            if (currentStepResult != null) { // can be null if debug step-back or hook skip
                result.addStepResult(currentStepResult);
            }
        }
    } catch (Exception e) {
        if (currentStepResult != null) {
            result.addStepResult(currentStepResult);
        }
        logError("scenario [run] failed\n" + StringUtils.throwableToString(e));
        currentStepResult = result.addFakeStepResult("scenario [run] failed", e);
    } finally {
        if (!skipped) {
            afterRun();
            if (isFailed() && engine.getConfig().isAbortSuiteOnFailure()) {
                featureRuntime.suite.abort();
            }
        }
        if (caller.isNone()) {
            logAppender.close(); // reclaim memory
        }
    }
}
/**
 * Negative test ({@code fail = true}): the XPath references a non-existent root
 * element (myXml2), so the match is expected to fail.
 */
@Test
void testMatchXmlXpath() {
    fail = true;
    run(
        "xml myXml = <root><foo>bar</foo><hello><text>hello \"world\"</text></hello><hello><text>hello \"moon\"</text></hello></root>",
        "match myXml //myXml2/root/text == '#notnull'"
    );
}
/**
 * Obtains a reducing state from the keyed state store, first validating preconditions
 * and lazily initializing the descriptor's serializer from the execution config.
 *
 * @param stateProperties descriptor of the reducing state
 * @param <T>             state value type
 * @return the reducing state
 */
@Override
public <T> ReducingState<T> getReducingState(ReducingStateDescriptor<T> stateProperties) {
    // Precondition check must happen before the descriptor is touched further.
    final KeyedStateStore stateStore = checkPreconditionsAndGetKeyedStateStore(stateProperties);
    stateProperties.initializeSerializerUnlessSet(this::createSerializer);
    return stateStore.getReducingState(stateProperties);
}
/**
 * Getting reducing state must apply the execution config to the descriptor's
 * serializer: the Kryo-registered Path type must appear in the resulting serializer.
 */
@Test
void testReducingStateInstantiation() throws Exception {
    final ExecutionConfig config = new ExecutionConfig();
    config.getSerializerConfig().registerKryoType(Path.class);
    final AtomicReference<Object> descriptorCapture = new AtomicReference<>();
    StreamingRuntimeContext context = createRuntimeContext(descriptorCapture, config);
    @SuppressWarnings("unchecked")
    ReduceFunction<TaskInfo> reducer = (ReduceFunction<TaskInfo>) mock(ReduceFunction.class);
    ReducingStateDescriptor<TaskInfo> descr =
        new ReducingStateDescriptor<>("name", reducer, TaskInfo.class);
    context.getReducingState(descr);
    StateDescriptor<?, ?> descrIntercepted = (StateDescriptor<?, ?>) descriptorCapture.get();
    TypeSerializer<?> serializer = descrIntercepted.getSerializer();
    // check that the Path class is really registered, i.e., the execution config was applied
    assertThat(serializer).isInstanceOf(KryoSerializer.class);
    assertThat(((KryoSerializer<?>) serializer).getKryo().getRegistration(Path.class).getId())
        .isPositive();
}
@Description("Smallest IP address for a given IP prefix")
@ScalarFunction("ip_subnet_min")
@SqlType(StandardTypes.IPADDRESS)
public static Slice ipSubnetMin(@SqlType(StandardTypes.IPPREFIX) Slice value) {
    // The smallest address in a prefix is exactly the IPPREFIX -> IPADDRESS cast result.
    final Slice networkAddress = castFromIpPrefixToIpAddress(value);
    return networkAddress;
}
/**
 * IP_SUBNET_MIN must return the lowest address of each prefix for both IPv4 and IPv6,
 * including the /32, /127, /128 and /0 edge cases.
 */
@Test
public void testIpSubnetMin() {
    assertFunction("IP_SUBNET_MIN(IPPREFIX '1.2.3.4/24')", IPADDRESS, "1.2.3.0");
    assertFunction("IP_SUBNET_MIN(IPPREFIX '1.2.3.4/32')", IPADDRESS, "1.2.3.4");
    assertFunction("IP_SUBNET_MIN(IPPREFIX '64:ff9b::17/64')", IPADDRESS, "64:ff9b::");
    assertFunction("IP_SUBNET_MIN(IPPREFIX '64:ff9b::17/127')", IPADDRESS, "64:ff9b::16");
    assertFunction("IP_SUBNET_MIN(IPPREFIX '64:ff9b::17/128')", IPADDRESS, "64:ff9b::17");
    assertFunction("IP_SUBNET_MIN(IPPREFIX '64:ff9b::17/0')", IPADDRESS, "::");
}
/** The wrapped state store. */
public StateStore getStore() {
    final StateStore wrapped = store;
    return wrapped;
}
/**
 * A wrapper configured with a versioned store must hand back that same store.
 */
@Test
public void shouldGetVersionedStore() {
    givenWrapperWithVersionedStore();
    assertThat(wrapper.getStore(), equalTo(versionedStore));
}
/**
 * Decrypts every Jasypt-encrypted token embedded in the property value, replacing each
 * matched {@code ENC(...)}-style token with its decrypted text. The matcher iterates
 * over the original value, so decrypted output is never re-scanned.
 *
 * @param key        property key (used for trace logging only)
 * @param value      property value, possibly containing encrypted tokens; may be null
 * @param properties unused lookup context
 * @return the value with all encrypted parts decrypted, or null if the input was null
 */
@Override
public String parseProperty(String key, String value, PropertiesLookup properties) {
    log.trace("Parsing property '{}={}'", key, value);
    if (value == null) {
        return null;
    }
    initEncryptor();
    String result = value;
    Matcher matcher = PATTERN.matcher(value);
    while (matcher.find()) {
        if (log.isTraceEnabled()) {
            log.trace("Decrypting part '{}'", matcher.group(0));
        }
        String token = matcher.group(0);
        String decrypted = encryptor.decrypt(matcher.group(1));
        result = result.replace(token, decrypted);
    }
    return result;
}
/**
 * A value where only one query parameter is encrypted must come back with just that
 * token decrypted and the surrounding text untouched.
 */
@Test
public void testDecryptsPartiallyEncryptedProperty() {
    String parmValue = "tiger";
    String encParmValue = format("%s%s%s", JASYPT_PREFIX_TOKEN, encryptor.encrypt(parmValue), JASYPT_SUFFIX_TOKEN);
    String expected = format("http://somehost:port/?param1=%s&param2=somethingelse", parmValue);
    String propertyValue = format("http://somehost:port/?param1=%s&param2=somethingelse", encParmValue);
    String result = jasyptPropertiesParser.parseProperty(KEY, propertyValue, null);
    assertThat(result, is(expected));
}
/**
 * Output mapping is unsupported by this factory; always fails with the configured
 * message.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public Map<PCollection<?>, ReplacementOutput> mapOutputs(
        Map<TupleTag<?>, PCollection<?>> outputs, OutputT newOutput) {
    final UnsupportedOperationException failure = new UnsupportedOperationException(message);
    throw failure;
}
/**
 * mapOutputs must throw UnsupportedOperationException carrying the factory's message.
 */
@Test
public void mapOutputThrows() {
    thrown.expect(UnsupportedOperationException.class);
    thrown.expectMessage(message);
    factory.mapOutputs(Collections.emptyMap(), PDone.in(pipeline));
}
/**
 * Instantiates the plugin described by the option, wrapping any I/O or URI-syntax
 * failure in a CucumberException.
 *
 * @param plugin the parsed plugin option
 * @return the instantiated plugin
 */
Plugin create(Options.Plugin plugin) {
    try {
        return instantiate(plugin.pluginString(), plugin.pluginClass(), plugin.argument());
    } catch (IOException e) {
        throw new CucumberException(e);
    } catch (URISyntaxException e) {
        throw new CucumberException(e);
    }
}
/**
 * The "timeline:&lt;dir&gt;" option string must produce a TimelineFormatter instance.
 */
@Test
void instantiates_timeline_plugin_with_dir_arg() {
    PluginOption option = parse("timeline:" + tmp.toAbsolutePath());
    plugin = fc.create(option);
    assertThat(plugin.getClass(), is(equalTo(TimelineFormatter.class)));
}
/**
 * Serves a VIP/SVIP query from the response cache.
 *
 * <p>Returns 403 when registry access is disallowed; 200 with the cached payload when
 * present; 404 otherwise. The payload format is JSON only when the Accept header
 * contains "json", XML otherwise.
 *
 * <p>Fix: the {@code CurrentRequestVersion} thread-local is now cleared in a
 * {@code finally} block, so an exception thrown while building the cache key or
 * reading the cache can no longer leak the version into the next request handled by
 * the same pooled thread.
 *
 * @param version      API version string, converted via {@code Version.toEnum}
 * @param entityName   the VIP/SVIP name being queried
 * @param acceptHeader the request's Accept header; may be null
 * @param eurekaAccept full/compact payload selector
 * @param entityType   VIP or SVIP cache key type
 * @return the HTTP response (403, 200 or 404)
 */
protected Response getVipResponse(String version, String entityName, String acceptHeader,
                                  EurekaAccept eurekaAccept, Key.EntityType entityType) {
    if (!registry.shouldAllowAccess(false)) {
        return Response.status(Response.Status.FORBIDDEN).build();
    }
    CurrentRequestVersion.set(Version.toEnum(version));
    String payLoad;
    try {
        // JSON only when explicitly accepted; default to XML.
        Key.KeyType keyType = Key.KeyType.JSON;
        if (acceptHeader == null || !acceptHeader.contains("json")) {
            keyType = Key.KeyType.XML;
        }
        Key cacheKey = new Key(
                entityType,
                entityName,
                keyType,
                CurrentRequestVersion.get(),
                eurekaAccept
        );
        payLoad = responseCache.get(cacheKey);
    } finally {
        // Always clear the thread-local, even if the cache lookup throws, so the
        // version cannot leak into unrelated requests on this pooled thread.
        CurrentRequestVersion.remove();
    }
    if (payLoad != null) {
        logger.debug("Found: {}", entityName);
        return Response.ok(payLoad).build();
    } else {
        logger.debug("Not Found: {}", entityName);
        return Response.status(Response.Status.NOT_FOUND).build();
    }
}
/**
 * A full-accept JSON VIP query must return a payload that decodes back to the
 * registered test application.
 */
@Test
public void testFullVipGet() throws Exception {
    Response response = resource.getVipResponse(
            Version.V2.name(),
            vipName,
            MediaType.APPLICATION_JSON,
            EurekaAccept.full,
            Key.EntityType.VIP
    );
    String json = String.valueOf(response.getEntity());
    DecoderWrapper decoder = CodecWrappers.getDecoder(CodecWrappers.LegacyJacksonJson.class);
    Applications decodedApps = decoder.decode(json, Applications.class);
    Application decodedApp = decodedApps.getRegisteredApplications(testApplication.getName());
    assertThat(EurekaEntityComparators.equal(testApplication, decodedApp), is(true));
}
/**
 * Writes a compressed layer blob into the local cache under its diff ID.
 *
 * <p>The blob is first streamed to a temp file (computing its digest/size as it is
 * written), then atomically promoted by two renames: temp/temp -> temp/&lt;digest&gt;
 * and temp/&lt;digest&gt; -> &lt;diffId&gt;/&lt;digest&gt;. Each rename is skipped if
 * the destination already exists (another writer won the race).
 *
 * @param diffId         diff ID of the uncompressed layer contents
 * @param compressedBlob the compressed layer blob to persist
 * @return the cached layer (digest, diff ID, size, and a blob backed by the cache file)
 * @throws IOException on write or rename failure
 */
CachedLayer writeTarLayer(DescriptorDigest diffId, Blob compressedBlob) throws IOException {
    Files.createDirectories(cacheStorageFiles.getLocalDirectory());
    Files.createDirectories(cacheStorageFiles.getTemporaryDirectory());
    try (TempDirectoryProvider tempDirectoryProvider = new TempDirectoryProvider()) {
        Path temporaryLayerDirectory =
            tempDirectoryProvider.newDirectory(cacheStorageFiles.getTemporaryDirectory());
        Path temporaryLayerFile = cacheStorageFiles.getTemporaryLayerFile(temporaryLayerDirectory);
        BlobDescriptor layerBlobDescriptor;
        try (OutputStream fileOutputStream =
            new BufferedOutputStream(Files.newOutputStream(temporaryLayerFile))) {
            layerBlobDescriptor = compressedBlob.writeTo(fileOutputStream);
        }
        // Renames the temporary layer file to its digest
        // (temp/temp -> temp/<digest>)
        String fileName = layerBlobDescriptor.getDigest().getHash();
        Path digestLayerFile = temporaryLayerDirectory.resolve(fileName);
        moveIfDoesNotExist(temporaryLayerFile, digestLayerFile);
        // Moves the temporary directory to directory named with diff ID
        // (temp/<digest> -> <diffID>/<digest>)
        Path destination = cacheStorageFiles.getLocalDirectory().resolve(diffId.getHash());
        moveIfDoesNotExist(temporaryLayerDirectory, destination);
        return CachedLayer.builder()
            .setLayerDigest(layerBlobDescriptor.getDigest())
            .setLayerDiffId(diffId)
            .setLayerSize(layerBlobDescriptor.getSize())
            .setLayerBlob(Blobs.from(destination.resolve(fileName)))
            .build();
    }
}
// Writes a compressed layer, then checks the returned CachedLayer metadata (digest, diffId, size,
// decompressed content) and that the cache file landed at <localDir>/<diffId>/<digest>.
@Test public void testWriteTarLayer() throws IOException { Blob uncompressedLayerBlob = Blobs.from("uncompressedLayerBlob"); DescriptorDigest diffId = getDigest(uncompressedLayerBlob).getDigest(); CachedLayer cachedLayer = cacheStorageWriter.writeTarLayer(diffId, compress(uncompressedLayerBlob)); BlobDescriptor layerBlobDescriptor = getDigest(compress(uncompressedLayerBlob)); // Verifies cachedLayer is correct. Assert.assertEquals(layerBlobDescriptor.getDigest(), cachedLayer.getDigest()); Assert.assertEquals(diffId, cachedLayer.getDiffId()); Assert.assertEquals(layerBlobDescriptor.getSize(), cachedLayer.getSize()); Assert.assertArrayEquals( Blobs.writeToByteArray(uncompressedLayerBlob), Blobs.writeToByteArray(decompress(cachedLayer.getBlob()))); // Verifies that the files are present. Assert.assertTrue( Files.exists( cacheStorageFiles .getLocalDirectory() .resolve(cachedLayer.getDiffId().getHash()) .resolve(cachedLayer.getDigest().getHash()))); }
// Parses `find`-style options: link-follow flags first, then splits the remaining args into
// leading paths and a trailing expression (everything from the first "-"-prefixed token on).
// If no paths remain the current directory is used. Non-action expressions are implicitly
// wrapped as (Print AND expr) so matches are printed by default, mirroring POSIX find.
@Override protected void processOptions(LinkedList<String> args) throws IOException { CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE, OPTION_FOLLOW_LINK, OPTION_FOLLOW_ARG_LINK, null); cf.parse(args); if (cf.getOpt(OPTION_FOLLOW_LINK)) { getOptions().setFollowLink(true); } else if (cf.getOpt(OPTION_FOLLOW_ARG_LINK)) { getOptions().setFollowArgLink(true); } // search for first non-path argument (ie starts with a "-") and capture and // remove the remaining arguments as expressions LinkedList<String> expressionArgs = new LinkedList<String>(); Iterator<String> it = args.iterator(); boolean isPath = true; while (it.hasNext()) { String arg = it.next(); if (isPath) { if (arg.startsWith("-")) { isPath = false; } } if (!isPath) { expressionArgs.add(arg); it.remove(); } } if (args.isEmpty()) { args.add(Path.CUR_DIR); } Expression expression = parseExpression(expressionArgs); if (!expression.isAction()) { Expression and = getExpression(And.class); Deque<Expression> children = new LinkedList<Expression>(); children.add(getExpression(Print.class)); children.add(expression); and.addChildren(children); expression = and; } setRootExpression(expression); }
// Mixing a known expression (-print) with an unknown one (-unknown) must be rejected;
// the IOException is the expected outcome, hence the intentionally empty catch.
@Test public void processOptionsKnownUnknown() throws IOException { Find find = new Find(); find.setConf(conf); String args = "path -print -unknown -print"; try { find.processOptions(getArgs(args)); fail("Unknown expression not caught"); } catch (IOException e) { } }
public static InternalSchema reconcileSchema(Schema incomingSchema, InternalSchema oldTableSchema, boolean makeMissingFieldsNullable) { /* If incoming schema is null, we fall back on table schema. */ if (incomingSchema.getType() == Schema.Type.NULL) { return oldTableSchema; } InternalSchema inComingInternalSchema = convert(incomingSchema, oldTableSchema.getNameToPosition()); // check column add/missing List<String> colNamesFromIncoming = inComingInternalSchema.getAllColsFullName(); List<String> colNamesFromOldSchema = oldTableSchema.getAllColsFullName(); List<String> diffFromOldSchema = colNamesFromOldSchema.stream().filter(f -> !colNamesFromIncoming.contains(f)).collect(Collectors.toList()); List<String> diffFromEvolutionColumns = colNamesFromIncoming.stream().filter(f -> !colNamesFromOldSchema.contains(f)).collect(Collectors.toList()); // check type change. List<String> typeChangeColumns = colNamesFromIncoming .stream() .filter(f -> colNamesFromOldSchema.contains(f) && !inComingInternalSchema.findType(f).equals(oldTableSchema.findType(f))) .collect(Collectors.toList()); if (colNamesFromIncoming.size() == colNamesFromOldSchema.size() && diffFromOldSchema.size() == 0 && typeChangeColumns.isEmpty()) { return oldTableSchema; } // Remove redundancy from diffFromEvolutionSchema. // for example, now we add a struct col in evolvedSchema, the struct col is " user struct<name:string, age:int> " // when we do diff operation: user, user.name, user.age will appear in the resultSet which is redundancy, user.name and user.age should be excluded. // deal with add operation TreeMap<Integer, String> finalAddAction = new TreeMap<>(); for (int i = 0; i < diffFromEvolutionColumns.size(); i++) { String name = diffFromEvolutionColumns.get(i); int splitPoint = name.lastIndexOf("."); String parentName = splitPoint > 0 ? 
name.substring(0, splitPoint) : ""; if (!parentName.isEmpty() && diffFromEvolutionColumns.contains(parentName)) { // find redundancy, skip it continue; } finalAddAction.put(inComingInternalSchema.findIdByName(name), name); } TableChanges.ColumnAddChange addChange = TableChanges.ColumnAddChange.get(oldTableSchema); finalAddAction.entrySet().stream().forEach(f -> { String name = f.getValue(); int splitPoint = name.lastIndexOf("."); String parentName = splitPoint > 0 ? name.substring(0, splitPoint) : ""; String rawName = splitPoint > 0 ? name.substring(splitPoint + 1) : name; // try to infer add position. java.util.Optional<String> inferPosition = colNamesFromIncoming.stream().filter(c -> c.lastIndexOf(".") == splitPoint && c.startsWith(parentName) && inComingInternalSchema.findIdByName(c) > inComingInternalSchema.findIdByName(name) && oldTableSchema.findIdByName(c) > 0).sorted((s1, s2) -> oldTableSchema.findIdByName(s1) - oldTableSchema.findIdByName(s2)).findFirst(); addChange.addColumns(parentName, rawName, inComingInternalSchema.findType(name), null); inferPosition.map(i -> addChange.addPositionChange(name, i, "before")); }); // do type evolution. 
InternalSchema internalSchemaAfterAddColumns = SchemaChangeUtils.applyTableChanges2Schema(oldTableSchema, addChange); TableChanges.ColumnUpdateChange typeChange = TableChanges.ColumnUpdateChange.get(internalSchemaAfterAddColumns); typeChangeColumns.stream().filter(f -> !inComingInternalSchema.findType(f).isNestedType()).forEach(col -> { typeChange.updateColumnType(col, inComingInternalSchema.findType(col)); }); if (makeMissingFieldsNullable) { // mark columns missing from incoming schema as nullable Set<String> visited = new HashSet<>(); diffFromOldSchema.stream() // ignore meta fields .filter(col -> !META_FIELD_NAMES.contains(col)) .sorted() .forEach(col -> { // if parent is marked as nullable, only update the parent and not all the missing children field String parent = TableChangesHelper.getParentName(col); if (!visited.contains(parent)) { typeChange.updateColumnNullability(col, true); } visited.add(col); }); } return SchemaChangeUtils.applyTableChanges2Schema(internalSchemaAfterAddColumns, typeChange); }
// Builds an old schema and an evolved variant (renumbered via refreshNewId), converts the evolved
// one to Avro, runs reconcileSchema, and checks adds/renames landed with the expected field ids.
@Test public void testEvolutionSchemaFromNewAvroSchema() { Types.RecordType oldRecord = Types.RecordType.get( Types.Field.get(0, false, "id", Types.IntType.get()), Types.Field.get(1, true, "data", Types.StringType.get()), Types.Field.get(2, true, "preferences", Types.RecordType.get( Types.Field.get(5, false, "feature1", Types.BooleanType.get()), Types.Field.get(6, true, "featurex", Types.BooleanType.get()), Types.Field.get(7, true, "feature2", Types.BooleanType.get()))), Types.Field.get(3, false,"doubles", Types.ArrayType.get(8, false, Types.DoubleType.get())), Types.Field.get(4, false, "locations", Types.MapType.get(9, 10, Types.StringType.get(), Types.RecordType.get( Types.Field.get(11, false, "laty", Types.FloatType.get()), Types.Field.get(12, false, "long", Types.FloatType.get())), false) ) ); InternalSchema oldSchema = new InternalSchema(oldRecord); Types.RecordType evolvedRecord = Types.RecordType.get( Types.Field.get(0, false, "id", Types.IntType.get()), Types.Field.get(1, true, "data", Types.StringType.get()), Types.Field.get(2, true, "preferences", Types.RecordType.get( Types.Field.get(5, false, "feature1", Types.BooleanType.get()), Types.Field.get(5, true, "featurex", Types.BooleanType.get()), Types.Field.get(6, true, "feature2", Types.BooleanType.get()), Types.Field.get(5, true, "feature3", Types.BooleanType.get()))), Types.Field.get(3, false,"doubles", Types.ArrayType.get(7, false, Types.DoubleType.get())), Types.Field.get(4, false, "locations", Types.MapType.get(8, 9, Types.StringType.get(), Types.RecordType.get( Types.Field.get(10, false, "laty", Types.FloatType.get()), Types.Field.get(11, false, "long", Types.FloatType.get())), false) ), Types.Field.get(0, false, "add1", Types.IntType.get()), Types.Field.get(2, true, "addStruct", Types.RecordType.get( Types.Field.get(5, false, "nest1", Types.BooleanType.get()), Types.Field.get(5, true, "nest2", Types.BooleanType.get()))) ); evolvedRecord = 
(Types.RecordType)InternalSchemaBuilder.getBuilder().refreshNewId(evolvedRecord, new AtomicInteger(0)); Schema evolvedAvroSchema = AvroInternalSchemaConverter.convert(evolvedRecord, "test1"); InternalSchema result = AvroSchemaEvolutionUtils.reconcileSchema(evolvedAvroSchema, oldSchema, false); Types.RecordType checkedRecord = Types.RecordType.get( Types.Field.get(0, false, "id", Types.IntType.get()), Types.Field.get(1, true, "data", Types.StringType.get()), Types.Field.get(2, true, "preferences", Types.RecordType.get( Types.Field.get(5, false, "feature1", Types.BooleanType.get()), Types.Field.get(6, true, "featurex", Types.BooleanType.get()), Types.Field.get(7, true, "feature2", Types.BooleanType.get()), Types.Field.get(17, true, "feature3", Types.BooleanType.get()))), Types.Field.get(3, false,"doubles", Types.ArrayType.get(8, false, Types.DoubleType.get())), Types.Field.get(4, false, "locations", Types.MapType.get(9, 10, Types.StringType.get(), Types.RecordType.get( Types.Field.get(11, false, "laty", Types.FloatType.get()), Types.Field.get(12, false, "long", Types.FloatType.get())), false) ), Types.Field.get(13, true, "add1", Types.IntType.get()), Types.Field.get(14, true, "addStruct", Types.RecordType.get( Types.Field.get(15, false, "nest1", Types.BooleanType.get()), Types.Field.get(16, true, "nest2", Types.BooleanType.get()))) ); Assertions.assertEquals(result.getRecord(), checkedRecord); }
// REST endpoint deleting a device profile by id (tenant admins only).
// Validates the path parameter, resolves and permission-checks the profile with
// Operation.DELETE, then delegates the actual deletion to tbDeviceProfileService.
@ApiOperation(value = "Delete device profile (deleteDeviceProfile)", notes = "Deletes the device profile. Referencing non-existing device profile Id will cause an error. " + "Can't delete the device profile if it is referenced by existing devices." + TENANT_AUTHORITY_PARAGRAPH) @PreAuthorize("hasAuthority('TENANT_ADMIN')") @RequestMapping(value = "/deviceProfile/{deviceProfileId}", method = RequestMethod.DELETE) @ResponseStatus(value = HttpStatus.OK) public void deleteDeviceProfile( @Parameter(description = DEVICE_PROFILE_ID_PARAM_DESCRIPTION) @PathVariable(DEVICE_PROFILE_ID) String strDeviceProfileId) throws ThingsboardException { checkParameter(DEVICE_PROFILE_ID, strDeviceProfileId); DeviceProfileId deviceProfileId = new DeviceProfileId(toUUID(strDeviceProfileId)); DeviceProfile deviceProfile = checkDeviceProfileId(deviceProfileId, Operation.DELETE); tbDeviceProfileService.delete(deviceProfile, getCurrentUser()); }
// Creates a profile, deletes it over REST, asserts the DELETED notification was broadcast once,
// and that a subsequent GET returns 404 with the expected "not found" reason.
@Test public void testDeleteDeviceProfile() throws Exception { DeviceProfile deviceProfile = this.createDeviceProfile("Device Profile"); DeviceProfile savedDeviceProfile = doPost("/api/deviceProfile", deviceProfile, DeviceProfile.class); Mockito.reset(tbClusterService, auditLogService); doDelete("/api/deviceProfile/" + savedDeviceProfile.getId().getId().toString()) .andExpect(status().isOk()); String savedDeviceProfileIdFtr = savedDeviceProfile.getId().getId().toString(); testNotifyEntityBroadcastEntityStateChangeEventOneTime(savedDeviceProfile, savedDeviceProfile.getId(), savedDeviceProfile.getId(), savedTenant.getId(), tenantAdmin.getCustomerId(), tenantAdmin.getId(), tenantAdmin.getEmail(), ActionType.DELETED, savedDeviceProfileIdFtr); doGet("/api/deviceProfile/" + savedDeviceProfile.getId().getId().toString()) .andExpect(status().isNotFound()) .andExpect(statusReason(containsString(msgErrorNoFound("Device profile", savedDeviceProfileIdFtr)))); }
// Parses an SC Control Point indication: [responseCode, requestCode, status, payload...].
// Packets shorter than 3 bytes or with an unexpected response op code are reported as invalid;
// non-success statuses go to onSCOperationError. For a "request supported sensor locations"
// response the remaining bytes are decoded as one UINT8 location each; any other request is
// reported as completed.
@Override public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) { super.onDataReceived(device, data); if (data.size() < 3) { onInvalidDataReceived(device, data); return; } final int responseCode = data.getIntValue(Data.FORMAT_UINT8, 0); final int requestCode = data.getIntValue(Data.FORMAT_UINT8, 1); final int status = data.getIntValue(Data.FORMAT_UINT8, 2); if (responseCode != SC_OP_CODE_RESPONSE_CODE) { onInvalidDataReceived(device, data); return; } if (status != SC_RESPONSE_SUCCESS) { onSCOperationError(device, requestCode, status); return; } if (requestCode == SC_OP_CODE_REQUEST_SUPPORTED_SENSOR_LOCATIONS) { final int size = data.size() - 3; final int[] locations = new int[size]; for (int i = 0; i < size; ++i) { locations[i] = data.getIntValue(Data.FORMAT_UINT8, 3 + i); } onSupportedSensorLocationsReceived(device, locations); } else { onSCOperationCompleted(device, requestCode); } }
// A non-success status byte (0x02) must route to the error callback with the original
// request code and error code, without reporting success or sensor locations.
@Test public void onSCOperationError() { final MutableData data = new MutableData(new byte[] { 0x10, 0x02, 0x02}); response.onDataReceived(null, data); assertFalse(success); assertEquals(2, errorCode); assertEquals(2, requestCode); assertNull(locations); }
// Sends an UnregisterBroker request to the least-loaded node and exposes the outcome as a future.
// REQUEST_TIMED_OUT is rethrown (not completed) so the admin client's retry machinery re-sends the
// call until the deadline; all other errors complete the future exceptionally after logging.
@Override public UnregisterBrokerResult unregisterBroker(int brokerId, UnregisterBrokerOptions options) { final KafkaFutureImpl<Void> future = new KafkaFutureImpl<>(); final long now = time.milliseconds(); final Call call = new Call("unregisterBroker", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) { @Override UnregisterBrokerRequest.Builder createRequest(int timeoutMs) { UnregisterBrokerRequestData data = new UnregisterBrokerRequestData().setBrokerId(brokerId); return new UnregisterBrokerRequest.Builder(data); } @Override void handleResponse(AbstractResponse abstractResponse) { final UnregisterBrokerResponse response = (UnregisterBrokerResponse) abstractResponse; Errors error = Errors.forCode(response.data().errorCode()); switch (error) { case NONE: future.complete(null); break; case REQUEST_TIMED_OUT: throw error.exception(); default: log.error("Unregister broker request for broker ID {} failed: {}", brokerId, error.message()); future.completeExceptionally(error.exception()); break; } } @Override void handleFailure(Throwable throwable) { future.completeExceptionally(throwable); } }; runnable.call(call, now); return new UnregisterBrokerResult(future); }
// With a 10ms timeout and no broker response enqueued, the unregisterBroker future must fail
// with the REQUEST_TIMED_OUT exception type once the deadline is exhausted.
@Test public void testUnregisterBrokerTimeoutMaxWait() { int nodeId = 1; try (final AdminClientUnitTestEnv env = mockClientEnv()) { env.kafkaClient().setNodeApiVersions( NodeApiVersions.create(ApiKeys.UNREGISTER_BROKER.id, (short) 0, (short) 0)); UnregisterBrokerOptions options = new UnregisterBrokerOptions(); options.timeoutMs = 10; UnregisterBrokerResult result = env.adminClient().unregisterBroker(nodeId, options); // Validate response assertNotNull(result.all()); TestUtils.assertFutureThrows(result.all(), Errors.REQUEST_TIMED_OUT.exception().getClass()); } }
/**
 * Returns the sub-list of {@code list} from {@code start} (inclusive) to {@code end} (exclusive).
 * Thin delegation to {@code ListUtil.sub}; see that method for index-normalization semantics.
 */
public static <T> List<T> sub(List<T> list, int start, int end) { return ListUtil.sub(list, start, end); }
// Extreme start (Integer.MAX_VALUE-ish) with a negative end on a 1-element list must raise
// IndexOutOfBoundsException from the stepped sub() overload rather than return silently.
@Test public void subInput1PositiveNegativePositiveOutputArrayIndexOutOfBoundsException() { assertThrows(IndexOutOfBoundsException.class, () -> { // Arrange final List<Integer> list = new ArrayList<>(); list.add(null); final int start = 2_147_483_643; final int end = -2_147_483_648; final int step = 2; // Act CollUtil.sub(list, start, end, step); // Method is not expected to return due to exception thrown }); }
/**
 * Runs every sharding auditor configured for the statement's tables against the query.
 * An auditor is skipped only when its strategy allows hint-based disabling AND the query's
 * hint lists that auditor (case-insensitively) as disabled.
 */
@Override
public void audit(final QueryContext queryContext, final RuleMetaData globalRuleMetaData, final ShardingSphereDatabase database, final ShardingRule rule) {
    Collection<ShardingAuditStrategyConfiguration> strategies = getShardingAuditStrategies(queryContext.getSqlStatementContext(), rule);
    if (strategies.isEmpty()) {
        return;
    }
    Collection<String> disabledByHint = queryContext.getHintValueContext().getDisableAuditNames();
    for (ShardingAuditStrategyConfiguration strategy : strategies) {
        boolean hintDisableAllowed = strategy.isAllowHintDisable();
        for (String auditorName : strategy.getAuditorNames()) {
            // Hints may only suppress an auditor when the strategy opted in to that.
            boolean suppressed = hintDisableAllowed && disabledByHint.contains(auditorName.toLowerCase());
            if (suppressed) {
                continue;
            }
            rule.getAuditors().get(auditorName).check(queryContext.getSqlStatementContext(), queryContext.getParameters(), globalRuleMetaData, database);
        }
    }
}
// When the strategy allows hint disabling and the hint lists the auditor, audit() must not
// invoke that auditor's check at all (times(0)).
@Test void assertCheckSuccessByDisableAuditNames() { when(auditStrategy.isAllowHintDisable()).thenReturn(true); RuleMetaData globalRuleMetaData = mock(RuleMetaData.class); new ShardingSQLAuditor().audit(new QueryContext(sqlStatementContext, "", Collections.emptyList(), hintValueContext, mockConnectionContext(), mock(ShardingSphereMetaData.class)), globalRuleMetaData, databases.get("foo_db"), rule); verify(rule.getAuditors().get("auditor_1"), times(0)).check(sqlStatementContext, Collections.emptyList(), globalRuleMetaData, databases.get("foo_db")); }
/**
 * Hex-encoded SHA-1 of the given string, delegating to the byte[] overload.
 * NOTE(review): data.getBytes() uses the platform default charset, so the digest of non-ASCII
 * input is platform-dependent — confirm whether UTF-8 should be pinned across all overloads.
 */
public static String SHA1(String data) { return SHA1(data.getBytes()); }
// Checks the String and byte[] SHA1 overloads agree on a known vector, and that the raw-byte
// variant round-trips through hex decoding.
@Test public void testSHA1() throws Exception { String biezhiSHA1 = "2aa70e156cfa0d5928574ee2d8904fb1d9c74ea0"; Assert.assertEquals( biezhiSHA1, EncryptKit.SHA1("biezhi") ); Assert.assertEquals( biezhiSHA1, EncryptKit.SHA1("biezhi".getBytes()) ); TestCase.assertTrue( Arrays.equals( ConvertKit.hexString2Bytes(biezhiSHA1), EncryptKit.SHA1ToByte("biezhi".getBytes()) ) ); }
// Test hook: evaluates the compiled term against a single row by wrapping it in a fresh
// TermEvaluationContext.
@VisibleForTesting Object evaluate(final GenericRow row) { return term.getValue(new TermEvaluationContext(row)); }
// AND truth table: (col AND true) tracks the column value; (col AND false) is always false.
@Test public void shouldEvaluateLogicalExpressions_and() { // Given: final Expression expression1 = new LogicalBinaryExpression( LogicalBinaryExpression.Type.AND, COL11, new BooleanLiteral(true) ); final Expression expression2 = new LogicalBinaryExpression( LogicalBinaryExpression.Type.AND, COL11, new BooleanLiteral(false) ); // When: InterpretedExpression interpreter1 = interpreter(expression1); InterpretedExpression interpreter2 = interpreter(expression2); // Then: assertThat(interpreter1.evaluate(make(11, true)), is(true)); assertThat(interpreter1.evaluate(make(11, false)), is(false)); assertThat(interpreter2.evaluate(make(11, true)), is(false)); assertThat(interpreter2.evaluate(make(11, false)), is(false)); }
// Exports the service, guarded by a double-checked `exported` flag around module startup.
// Outside the lock: prepare or start the module deployer depending on who manages the lifecycle.
// Inside the lock: refresh config if needed, then pick one of three paths — delayed export,
// manual-register export when delay == -1 and the manual-register property is set, or an
// immediate export with the caller-supplied register type.
@Override public void export(RegisterTypeEnum registerType) { if (this.exported) { return; } if (getScopeModel().isLifeCycleManagedExternally()) { // prepare model for reference getScopeModel().getDeployer().prepare(); } else { // ensure start module, compatible with old api usage getScopeModel().getDeployer().start(); } synchronized (this) { if (this.exported) { return; } if (!this.isRefreshed()) { this.refresh(); } if (this.shouldExport()) { this.init(); if (shouldDelay()) { // should register if delay export doDelayExport(); } else if (Integer.valueOf(-1).equals(getDelay()) && Boolean.parseBoolean(ConfigurationUtils.getProperty( getScopeModel(), CommonConstants.DUBBO_MANUAL_REGISTER_KEY, "false"))) { // should not register by default doExport(RegisterTypeEnum.MANUAL_REGISTER); } else { doExport(registerType); } } } }
// Exporting without a registry config must still produce one exported URL with the expected
// protocol/path/parameters, and invoke the protocol's export twice (MetadataService + DemoService).
@Test void testExportWithoutRegistryConfig() { serviceWithoutRegistryConfig.export(); assertThat(serviceWithoutRegistryConfig.getExportedUrls(), hasSize(1)); URL url = serviceWithoutRegistryConfig.toUrl(); assertThat(url.getProtocol(), equalTo("mockprotocol2")); assertThat(url.getPath(), equalTo(DemoService.class.getName())); assertThat(url.getParameters(), hasEntry(ANYHOST_KEY, "true")); assertThat(url.getParameters(), hasEntry(APPLICATION_KEY, "app")); assertThat(url.getParameters(), hasKey(BIND_IP_KEY)); assertThat(url.getParameters(), hasKey(BIND_PORT_KEY)); assertThat(url.getParameters(), hasEntry(EXPORT_KEY, "true")); assertThat(url.getParameters(), hasEntry("echo.0.callback", "false")); assertThat(url.getParameters(), hasEntry(GENERIC_KEY, "false")); assertThat(url.getParameters(), hasEntry(INTERFACE_KEY, DemoService.class.getName())); assertThat(url.getParameters(), hasKey(METHODS_KEY)); assertThat(url.getParameters().get(METHODS_KEY), containsString("echo")); assertThat(url.getParameters(), hasEntry(SIDE_KEY, PROVIDER)); // export MetadataService and DemoService in "mockprotocol2" protocol. Mockito.verify(protocolDelegate, times(2)).export(Mockito.any(Invoker.class)); }
// Deprecated old-PAPI overload: bridges the legacy ProcessorSupplier into the new API via
// ProcessorAdapter and a StoreBuilderWrapper, then delegates to the internal builder
// (reprocessOnRestore = true). Returns this Topology for chaining.
@Deprecated public synchronized <K, V> Topology addGlobalStore(final StoreBuilder<?> storeBuilder, final String sourceName, final Deserializer<K> keyDeserializer, final Deserializer<V> valueDeserializer, final String topic, final String processorName, final org.apache.kafka.streams.processor.ProcessorSupplier<K, V> stateUpdateSupplier) { internalTopologyBuilder.addGlobalStore( new StoreBuilderWrapper(storeBuilder), sourceName, null, keyDeserializer, valueDeserializer, topic, processorName, () -> ProcessorAdapter.adapt(stateUpdateSupplier.get()), true ); return this; }
// Old-PAPI variant: using the same name for the global store's source and processor nodes must
// be rejected with a TopologyException.
@Deprecated // testing old PAPI @Test public void shouldNotAllowToAddGlobalStoreWithSourceNameEqualsProcessorName() { when(globalStoreBuilder.name()).thenReturn("anyName"); assertThrows(TopologyException.class, () -> topology.addGlobalStore( globalStoreBuilder, "sameName", null, null, "anyTopicName", "sameName", new MockProcessorSupplier<>())); }
/**
 * Validates a project key against {@code isValidProjectKey}, throwing an exception (via
 * {@code checkArgument}) whose message interpolates the rejected key and the allowed-characters
 * description.
 */
public static void checkProjectKey(String keyCandidate) { checkArgument(isValidProjectKey(keyCandidate), MALFORMED_KEY_MESSAGE, keyCandidate, ALLOWED_CHARACTERS_MESSAGE); }
// Valid keys (plain letters; mixed '-', '_', '.', ':' and digits) must pass without throwing.
@Test public void checkProjectKey_with_correct_keys() { ComponentKeys.checkProjectKey("abc"); ComponentKeys.checkProjectKey("a-b_1.:2"); }
/**
 * Static factory: builds a {@code Filter} transform that keeps elements for which the given
 * predicate returns {@code true}.
 */
public static <T, PredicateT extends ProcessFunction<T, Boolean>> Filter<T> by( PredicateT predicate) { return new Filter<>(predicate); }
// Filter.by with a lambda predicate (even numbers) over 1..7 must keep exactly 2, 4, 6.
@Test @Category(NeedsRunner.class) public void testFilterByPredicateWithLambda() { PCollection<Integer> output = p.apply(Create.of(1, 2, 3, 4, 5, 6, 7)).apply(Filter.by(i -> i % 2 == 0)); PAssert.that(output).containsInAnyOrder(2, 4, 6); p.run(); }
/**
 * Stores a new pod after validating it.
 *
 * @param pod pod to create; must be non-null and carry a non-empty metadata UID
 * @throws NullPointerException     if {@code pod} is null
 * @throws IllegalArgumentException if the pod's UID is null or empty
 */
@Override
public void createPod(Pod pod) {
    // Fail fast on invalid input before touching the store.
    checkNotNull(pod, ERR_NULL_POD);
    checkArgument(!Strings.isNullOrEmpty(pod.getMetadata().getUid()), ERR_NULL_POD_UID);
    kubevirtPodStore.createPod(pod);
    // Guard the String.format so the message is only built when debug logging is enabled.
    if (log.isDebugEnabled()) {
        log.debug(String.format(MSG_POD, pod.getMetadata().getName(), MSG_CREATED));
    }
}
// createPod(null) must fail fast with NullPointerException (enforced by checkNotNull).
@Test(expected = NullPointerException.class) public void testCreateNullPod() { target.createPod(null); }
// Splits Config.aws_s3_path into {bucket, key-prefix}. The path is normalized as an s3 URI
// (an explicit "s3://" scheme in the config is tolerated); the URI authority becomes the bucket
// and the path (minus its leading '/', kept for backwards compatibility) becomes the prefix.
private static String[] getBucketAndPrefix() throws InvalidConfException { URI uri = normalizeConfigPath(Config.aws_s3_path, "s3", "Config.aws_s3_path", true); String path = uri.getPath(); if (path.startsWith("/")) { // remove leading '/' for backwards compatibility path = path.substring(1); } return new String[] {uri.getAuthority(), path}; }
// Exercises getBucketAndPrefix with: nested prefix, bare bucket, trailing slash, and an explicit
// s3:// scheme; restores the original Config.aws_s3_path afterwards.
@Test public void testGetBucketAndPrefix() throws Exception { String oldAwsS3Path = Config.aws_s3_path; SharedDataStorageVolumeMgr sdsvm = new SharedDataStorageVolumeMgr(); Config.aws_s3_path = "bucket/dir1/dir2"; String[] bucketAndPrefix1 = Deencapsulation.invoke(sdsvm, "getBucketAndPrefix"); Assert.assertEquals(2, bucketAndPrefix1.length); Assert.assertEquals("bucket", bucketAndPrefix1[0]); Assert.assertEquals("dir1/dir2", bucketAndPrefix1[1]); Config.aws_s3_path = "bucket"; String[] bucketAndPrefix2 = Deencapsulation.invoke(sdsvm, "getBucketAndPrefix"); Assert.assertEquals(2, bucketAndPrefix2.length); Assert.assertEquals("bucket", bucketAndPrefix2[0]); Assert.assertEquals("", bucketAndPrefix2[1]); Config.aws_s3_path = "bucket/"; String[] bucketAndPrefix3 = Deencapsulation.invoke(sdsvm, "getBucketAndPrefix"); Assert.assertEquals(2, bucketAndPrefix3.length); Assert.assertEquals("bucket", bucketAndPrefix3[0]); Assert.assertEquals("", bucketAndPrefix3[1]); // allow leading s3:// in configuration, will be just ignored. Config.aws_s3_path = "s3://a-bucket/b"; { String[] bucketAndPrefix = Deencapsulation.invoke(sdsvm, "getBucketAndPrefix"); Assert.assertEquals(2, bucketAndPrefix.length); Assert.assertEquals("a-bucket", bucketAndPrefix[0]); Assert.assertEquals("b", bucketAndPrefix[1]); } Config.aws_s3_path = oldAwsS3Path; }
/**
 * Rewrites a list-valued config option with {@code newValue}, but only when the current value
 * still references local artifacts (as determined by {@code hasLocal}); otherwise the
 * configuration is left untouched.
 *
 * @param config       configuration to update in place
 * @param configOption list-typed option to inspect and possibly overwrite
 * @param newValue     replacement list (remote artifact locations)
 */
@VisibleForTesting
void updateConfig(
        Configuration config, ConfigOption<List<String>> configOption, List<String> newValue) {
    // Current value, or an empty list when the option is unset.
    final List<String> originalValue =
            config.getOptional(configOption).orElse(Collections.emptyList());
    if (hasLocal(originalValue)) {
        // Fixed log message (was: "Updating configuration '{}' after to replace local artifact").
        LOG.info(
                "Updating configuration '{}' to replace local artifacts with: '{}'",
                configOption.key(),
                newValue);
        config.set(configOption, newValue);
    }
}
// A purely remote (s3://) artifact list contains nothing local, so updateConfig must leave the
// configured value unchanged.
@Test void testNoUpdateConfig() { List<String> artifactList = Collections.singletonList("s3://my-bucket/my-artifact.jar"); Configuration config = new Configuration(); config.set(ArtifactFetchOptions.ARTIFACT_LIST, artifactList); artifactUploader.updateConfig(config, ArtifactFetchOptions.ARTIFACT_LIST, artifactList); assertThat(config.get(ArtifactFetchOptions.ARTIFACT_LIST)).isEqualTo(artifactList); }
// Derives the BIP-39 binary seed: PBKDF2-HMAC-SHA512 over the space-joined mnemonic words
// (password) and "mnemonic" + passphrase (salt), 2048 rounds, 64-byte output. A null passphrase
// is rejected up front; use "" for no passphrase.
public static byte[] toSeed(List<String> words, String passphrase) { Objects.requireNonNull(passphrase, "A null passphrase is not allowed."); // To create binary seed from mnemonic, we use PBKDF2 function // with mnemonic sentence (in UTF-8) used as a password and // string "mnemonic" + passphrase (again in UTF-8) used as a // salt. Iteration count is set to 2048 and HMAC-SHA512 is // used as a pseudo-random function. Desired length of the // derived key is 512 bits (= 64 bytes). // String pass = InternalUtils.SPACE_JOINER.join(words); String salt = "mnemonic" + passphrase; Stopwatch watch = Stopwatch.start(); byte[] seed = PBKDF2SHA512.derive(pass, salt, PBKDF2_ROUNDS, 64); log.info("PBKDF2 took {}", watch); return seed; }
// toSeed must reject a null passphrase with NullPointerException (callers must pass "" instead).
@Test(expected = NullPointerException.class) public void testNullPassphrase() { List<String> code = WHITESPACE_SPLITTER.splitToList("legal winner thank year wave sausage worth useful legal winner thank yellow"); MnemonicCode.toSeed(code, null); }
// Central, lock-serialized history-event dispatcher. AM_STARTED lazily creates the per-job event
// writer; every event is then written (NormalizedResourceEvent excluded) and folded into the job
// summary. JOB_SUBMITTED/JOB_INITED/JOB_QUEUE_CHANGED update the job index info. Terminal events
// (JOB_FINISHED, JOB_FAILED, JOB_KILLED) record final counts, close the writer and process the
// done files; JOB_ERROR does the same but only processes done files on the last AM retry, so a
// later retry can still recover. IOExceptions are wrapped in YarnRuntimeException to abort the AM.
@Private public void handleEvent(JobHistoryEvent event) { synchronized (lock) { // If this is JobSubmitted Event, setup the writer if (event.getHistoryEvent().getEventType() == EventType.AM_STARTED) { try { AMStartedEvent amStartedEvent = (AMStartedEvent) event.getHistoryEvent(); setupEventWriter(event.getJobID(), amStartedEvent); } catch (IOException ioe) { LOG.error("Error JobHistoryEventHandler in handleEvent: " + event, ioe); throw new YarnRuntimeException(ioe); } } // For all events // (1) Write it out // (2) Process it for JobSummary // (3) Process it for ATS (if enabled) MetaInfo mi = fileMap.get(event.getJobID()); try { HistoryEvent historyEvent = event.getHistoryEvent(); if (! (historyEvent instanceof NormalizedResourceEvent)) { mi.writeEvent(historyEvent); } processEventForJobSummary(event.getHistoryEvent(), mi.getJobSummary(), event.getJobID()); if (LOG.isDebugEnabled()) { LOG.debug("In HistoryEventHandler " + event.getHistoryEvent().getEventType()); } } catch (IOException e) { LOG.error("Error writing History Event: " + event.getHistoryEvent(), e); throw new YarnRuntimeException(e); } if (event.getHistoryEvent().getEventType() == EventType.JOB_SUBMITTED) { JobSubmittedEvent jobSubmittedEvent = (JobSubmittedEvent) event.getHistoryEvent(); mi.getJobIndexInfo().setSubmitTime(jobSubmittedEvent.getSubmitTime()); mi.getJobIndexInfo().setQueueName(jobSubmittedEvent.getJobQueueName()); } //initialize the launchTime in the JobIndexInfo of MetaInfo if(event.getHistoryEvent().getEventType() == EventType.JOB_INITED ){ JobInitedEvent jie = (JobInitedEvent) event.getHistoryEvent(); mi.getJobIndexInfo().setJobStartTime(jie.getLaunchTime()); } if (event.getHistoryEvent().getEventType() == EventType.JOB_QUEUE_CHANGED) { JobQueueChangeEvent jQueueEvent = (JobQueueChangeEvent) event.getHistoryEvent(); mi.getJobIndexInfo().setQueueName(jQueueEvent.getJobQueueName()); } // If this is JobFinishedEvent, close the writer and setup the job-index if 
(event.getHistoryEvent().getEventType() == EventType.JOB_FINISHED) { try { JobFinishedEvent jFinishedEvent = (JobFinishedEvent) event.getHistoryEvent(); mi.getJobIndexInfo().setFinishTime(jFinishedEvent.getFinishTime()); mi.getJobIndexInfo().setNumMaps(jFinishedEvent.getSucceededMaps()); mi.getJobIndexInfo().setNumReduces( jFinishedEvent.getSucceededReduces()); mi.getJobIndexInfo().setJobStatus(JobState.SUCCEEDED.toString()); closeEventWriter(event.getJobID()); processDoneFiles(event.getJobID()); } catch (IOException e) { throw new YarnRuntimeException(e); } } // In case of JOB_ERROR, only process all the Done files(e.g. job // summary, job history file etc.) if it is last AM retry. if (event.getHistoryEvent().getEventType() == EventType.JOB_ERROR) { try { JobUnsuccessfulCompletionEvent jucEvent = (JobUnsuccessfulCompletionEvent) event.getHistoryEvent(); mi.getJobIndexInfo().setFinishTime(jucEvent.getFinishTime()); mi.getJobIndexInfo().setNumMaps(jucEvent.getSucceededMaps()); mi.getJobIndexInfo().setNumReduces(jucEvent.getSucceededReduces()); mi.getJobIndexInfo().setJobStatus(jucEvent.getStatus()); closeEventWriter(event.getJobID()); if(context.isLastAMRetry()) processDoneFiles(event.getJobID()); } catch (IOException e) { throw new YarnRuntimeException(e); } } if (event.getHistoryEvent().getEventType() == EventType.JOB_FAILED || event.getHistoryEvent().getEventType() == EventType.JOB_KILLED) { try { JobUnsuccessfulCompletionEvent jucEvent = (JobUnsuccessfulCompletionEvent) event .getHistoryEvent(); mi.getJobIndexInfo().setFinishTime(jucEvent.getFinishTime()); mi.getJobIndexInfo().setNumMaps(jucEvent.getSucceededMaps()); mi.getJobIndexInfo().setNumReduces(jucEvent.getSucceededReduces()); mi.getJobIndexInfo().setJobStatus(jucEvent.getStatus()); closeEventWriter(event.getJobID()); processDoneFiles(event.getJobID()); } catch (IOException e) { throw new YarnRuntimeException(e); } } } }
// With a batched-flush queue threshold of 200, 100 queued task-started events must not trigger a
// flush; the very first completion event (TaskFinished) must flush immediately.
@Test (timeout=50000) public void testFirstFlushOnCompletionEvent() throws Exception { TestParams t = new TestParams(); Configuration conf = new Configuration(); conf.set(MRJobConfig.MR_AM_STAGING_DIR, t.workDir); conf.setLong(MRJobConfig.MR_AM_HISTORY_COMPLETE_EVENT_FLUSH_TIMEOUT_MS, 60 * 1000l); conf.setInt(MRJobConfig.MR_AM_HISTORY_JOB_COMPLETE_UNFLUSHED_MULTIPLIER, 10); conf.setInt(MRJobConfig.MR_AM_HISTORY_MAX_UNFLUSHED_COMPLETE_EVENTS, 10); conf.setInt( MRJobConfig.MR_AM_HISTORY_USE_BATCHED_FLUSH_QUEUE_SIZE_THRESHOLD, 200); JHEvenHandlerForTest realJheh = new JHEvenHandlerForTest(t.mockAppContext, 0); JHEvenHandlerForTest jheh = spy(realJheh); jheh.init(conf); EventWriter mockWriter = null; try { jheh.start(); handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent( t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1))); mockWriter = jheh.getEventWriter(); verify(mockWriter).write(any(HistoryEvent.class)); for (int i = 0; i < 100; i++) { queueEvent(jheh, new JobHistoryEvent(t.jobId, new TaskStartedEvent( t.taskID, 0, TaskType.MAP, ""))); } handleNextNEvents(jheh, 100); verify(mockWriter, times(0)).flush(); // First completion event, but min-queue-size for batching flushes is 10 handleEvent(jheh, new JobHistoryEvent(t.jobId, new TaskFinishedEvent( t.taskID, t.taskAttemptID, 0, TaskType.MAP, "", null, 0))); verify(mockWriter).flush(); } finally { jheh.stop(); verify(mockWriter).close(); } }
// Factory override returning the view-aware builder variant for this catalog.
@Override public TableBuilder buildTable(TableIdentifier identifier, Schema schema) { return new ViewAwareTableBuilder(identifier, schema); }
// When the post-commit table load throws SQLException("constraint failed"), the stale commit must
// surface as AlreadyExistsException with a "Table already exists" message.
@Test public void testCommitExceptionWithMessage() { TableIdentifier tableIdent = TableIdentifier.of("db", "tbl"); BaseTable table = (BaseTable) catalog.buildTable(tableIdent, SCHEMA).create(); TableOperations ops = table.operations(); TableMetadata metadataV1 = ops.current(); table.updateSchema().addColumn("n", Types.IntegerType.get()).commit(); ops.refresh(); try (MockedStatic<JdbcUtil> mockedStatic = Mockito.mockStatic(JdbcUtil.class)) { mockedStatic .when(() -> JdbcUtil.loadTable(any(), any(), any(), any())) .thenThrow(new SQLException("constraint failed")); assertThatThrownBy(() -> ops.commit(ops.current(), metadataV1)) .isInstanceOf(AlreadyExistsException.class) .hasMessageStartingWith("Table already exists: " + tableIdent); } }
// Convenience overload: increments the counter by 1 at the default dimension position.
public void add() { add(1L, defaultPosition); }
// A counter declared at a specific point and bumped once must appear in the snapshot as exactly
// one entry keyed by that point with count 1.
@Test final void testAddPoint() { final String metricName = "unitTestCounter"; Point p = receiver.pointBuilder().set("x", 2L).set("y", 3.0d).set("z", "5").build(); Counter c = receiver.declareCounter(metricName, p); c.add(); Bucket b = receiver.getSnapshot(); final Map<String, List<Entry<Point, UntypedMetric>>> valuesByMetricName = b.getValuesByMetricName(); assertEquals(1, valuesByMetricName.size()); List<Entry<Point, UntypedMetric>> x = valuesByMetricName.get(metricName); assertEquals(1, x.size()); assertEquals(p, x.get(0).getKey()); assertEquals(1, x.get(0).getValue().getCount()); }
/**
 * Decrements the counter mapped to {@code key} and returns the updated value,
 * blocking until the underlying async operation completes.
 */
@Override
public long decrementAndGet(K key) {
  return complete(asyncCounterMap.decrementAndGet(key));
}
/** Verifies decrementAndGet returns the stored value minus one. */
@Test
public void testDecrementAndGet() {
  atomicCounterMap.put(KEY1, VALUE1);
  Long afterIncrement = atomicCounterMap.decrementAndGet(KEY1);
  assertThat(afterIncrement, is(VALUE1 - 1));
}
/**
 * Applies the worker logging options to the JUL logging system: default level,
 * per-logger overrides, and optional redirection of System.out/err through the
 * logging handler. Must be called after {@code initialize()}; deprecated
 * DataflowWorkerLoggingOptions settings take precedence over SdkHarnessOptions
 * and trigger a deprecation warning.
 */
public static synchronized void configure(DataflowWorkerLoggingOptions options) {
  if (!initialized) {
    throw new RuntimeException("configure() called before initialize()");
  }
  // For compatibility reason, we do not call SdkHarnessOptions.getConfiguredLoggerFromOptions
  // to config the logging for legacy worker, instead replicate the config steps used for
  // DataflowWorkerLoggingOptions for default log level and log level overrides.
  SdkHarnessOptions harnessOptions = options.as(SdkHarnessOptions.class);
  boolean usedDeprecated = false;
  // default value for both DefaultSdkHarnessLogLevel and DefaultWorkerLogLevel are INFO
  Level overrideLevel = getJulLevel(harnessOptions.getDefaultSdkHarnessLogLevel());
  // A non-default deprecated setting wins over the SdkHarnessOptions default.
  if (options.getDefaultWorkerLogLevel() != null && options.getDefaultWorkerLogLevel() != INFO) {
    overrideLevel = getJulLevel(options.getDefaultWorkerLogLevel());
    usedDeprecated = true;
  }
  LogManager.getLogManager().getLogger(ROOT_LOGGER_NAME).setLevel(overrideLevel);
  if (options.getWorkerLogLevelOverrides() != null) {
    for (Map.Entry<String, DataflowWorkerLoggingOptions.Level> loggerOverride :
        options.getWorkerLogLevelOverrides().entrySet()) {
      Logger logger = Logger.getLogger(loggerOverride.getKey());
      logger.setLevel(getJulLevel(loggerOverride.getValue()));
      // Keep strong references so the JUL LogManager does not GC the configured loggers.
      configuredLoggers.add(logger);
    }
    usedDeprecated = true;
  } else if (harnessOptions.getSdkHarnessLogLevelOverrides() != null) {
    for (Map.Entry<String, SdkHarnessOptions.LogLevel> loggerOverride :
        harnessOptions.getSdkHarnessLogLevelOverrides().entrySet()) {
      Logger logger = Logger.getLogger(loggerOverride.getKey());
      logger.setLevel(getJulLevel(loggerOverride.getValue()));
      configuredLoggers.add(logger);
    }
  }
  // If the options specify a level for messages logged to System.out/err, we need to reconfigure
  // the corresponding stream adapter.
  if (options.getWorkerSystemOutMessageLevel() != null) {
    System.out.close();
    System.setOut(
        JulHandlerPrintStreamAdapterFactory.create(
            loggingHandler,
            SYSTEM_OUT_LOG_NAME,
            getJulLevel(options.getWorkerSystemOutMessageLevel()),
            Charset.defaultCharset()));
  }
  if (options.getWorkerSystemErrMessageLevel() != null) {
    System.err.close();
    System.setErr(
        JulHandlerPrintStreamAdapterFactory.create(
            loggingHandler,
            SYSTEM_ERR_LOG_NAME,
            getJulLevel(options.getWorkerSystemErrMessageLevel()),
            Charset.defaultCharset()));
  }
  if (usedDeprecated) {
    LOG.warn(
        "Deprecated DataflowWorkerLoggingOptions are used for log level settings."
            + "Consider using options defined in SdkHarnessOptions for forward compatibility.");
  }
}
/** Verifies a WARN default worker log level is applied to the JUL root logger. */
@Test
public void testWithWorkerConfigurationOverride() {
  DataflowWorkerLoggingOptions options = PipelineOptionsFactory.as(DataflowWorkerLoggingOptions.class);
  options.setDefaultWorkerLogLevel(DataflowWorkerLoggingOptions.Level.WARN);
  DataflowWorkerLoggingInitializer.configure(options);
  Logger rootLogger = LogManager.getLogManager().getLogger("");
  assertEquals(1, rootLogger.getHandlers().length);
  assertEquals(Level.WARNING, rootLogger.getLevel());
  // The handler itself stays at ALL; filtering happens at the logger level.
  assertIsDataflowWorkerLoggingHandler(rootLogger.getHandlers()[0], Level.ALL);
}
/**
 * Returns a range expanded to cover both this range and {@code other}, with the
 * distinct-value counts of the two ranges summed.
 */
public StatisticRange addAndSumDistinctValues(StatisticRange other) {
  double newDistinctValues = distinctValues + other.distinctValues;
  return expandRangeWithNewDistinct(newDistinctValues, other);
}
/** Covers NaN propagation, unbounded ranges, empty ranges, and plain range merging. */
@Test
public void testAddAndSumDistinctValues() {
  // NaN distinct counts propagate through the sum.
  assertEquals(unboundedRange(NaN).addAndSumDistinctValues(unboundedRange(NaN)), unboundedRange(NaN));
  assertEquals(unboundedRange(NaN).addAndSumDistinctValues(unboundedRange(1)), unboundedRange(NaN));
  assertEquals(unboundedRange(1).addAndSumDistinctValues(unboundedRange(NaN)), unboundedRange(NaN));
  assertEquals(unboundedRange(1).addAndSumDistinctValues(unboundedRange(2)), unboundedRange(3));
  assertEquals(StatisticRange.empty().addAndSumDistinctValues(StatisticRange.empty()), StatisticRange.empty());
  assertEquals(range(0, 1, 1).addAndSumDistinctValues(StatisticRange.empty()), range(0, 1, 1));
  assertEquals(range(0, 1, 1).addAndSumDistinctValues(range(1, 2, 1)), range(0, 2, 2));
}
/**
 * Returns the m-by-m product A * A' of this matrix with its own transpose.
 * The result is marked lower-triangular (symmetric; only the lower part is referenced).
 */
public Matrix aat() {
  Matrix C = new Matrix(m, m);
  C.mm(NO_TRANSPOSE, this, TRANSPOSE, this);
  C.uplo(LOWER);
  return C;
}
/** Verifies aat() against the expected 3x3 reference values in C, element by element. */
@Test
public void testAAT() {
  System.out.println("AAT");
  Matrix c = matrix.aat();
  assertEquals(c.nrow(), 3);
  assertEquals(c.ncol(), 3);
  for (int i = 0; i < C.length; i++) {
    for (int j = 0; j < C[i].length; j++) {
      assertEquals(C[i][j], c.get(i, j), 1E-6f);
    }
  }
}
/**
 * Updates the permission checkboxes for the given permissions, without
 * manage-permissions rights (delegates with {@code false}).
 */
public void updateCheckboxes( EnumSet<RepositoryFilePermission> permissionEnumSet ) {
  updateCheckboxes( false, permissionEnumSet );
}
/**
 * With manage rights and all permissions present, every checkbox is checked and all
 * but the manage checkbox are disabled (manage stays editable).
 */
@Test
public void testUpdateCheckboxesManagePermissionsAppropriateTrue() {
  permissionsCheckboxHandler.updateCheckboxes( true, EnumSet.of( RepositoryFilePermission.ACL_MANAGEMENT,
      RepositoryFilePermission.DELETE, RepositoryFilePermission.WRITE, RepositoryFilePermission.READ ) );
  verify( readCheckbox, times( 1 ) ).setChecked( true );
  verify( writeCheckbox, times( 1 ) ).setChecked( true );
  verify( deleteCheckbox, times( 1 ) ).setChecked( true );
  verify( manageCheckbox, times( 1 ) ).setChecked( true );
  verify( readCheckbox, times( 1 ) ).setDisabled( true );
  verify( writeCheckbox, times( 1 ) ).setDisabled( true );
  verify( deleteCheckbox, times( 1 ) ).setDisabled( true );
  verify( manageCheckbox, times( 1 ) ).setDisabled( false );
}
/**
 * Asserts the map contains at least the given key/value pairs; the returned
 * {@link Ordered} allows an additional in-order check.
 */
@CanIgnoreReturnValue
public final Ordered containsAtLeast(
    @Nullable Object k0, @Nullable Object v0, @Nullable Object... rest) {
  return containsAtLeastEntriesIn(accumulateMap("containsAtLeast", k0, v0, rest));
}
/** containsAtLeast passes regardless of order, but .inOrder() fails with a clear message. */
@Test
public void containsAtLeastNotInOrder() {
  ImmutableMap<String, Integer> actual = ImmutableMap.of("jan", 1, "feb", 2, "march", 3);
  assertThat(actual).containsAtLeast("march", 3, "feb", 2);
  expectFailureWhenTestingThat(actual).containsAtLeast("march", 3, "feb", 2).inOrder();
  assertFailureKeys(
      "required entries were all found, but order was wrong",
      "expected to contain at least",
      "but was");
  assertFailureValue("expected to contain at least", "{march=3, feb=2}");
  assertFailureValue("but was", "{jan=1, feb=2, march=3}");
}
/**
 * Adjusts the level of a named logger at runtime.
 *
 * @param logName  name of the logger to update
 * @param logLevel new level to apply
 * @return the HTTP OK status code ({@code "200"}) as a string
 */
@PutMapping(value = "/log")
@Secured(action = ActionTypes.WRITE, resource = "nacos/admin", signType = SignType.CONSOLE)
public String setLogLevel(@RequestParam String logName, @RequestParam String logLevel) {
    Loggers.setLogLevel(logName, logLevel);
    // String.valueOf is the idiomatic int-to-String conversion (was `SC_OK + ""`).
    return String.valueOf(HttpServletResponse.SC_OK);
}
/** setLogLevel returns the "200" status string on success. */
@Test
void testSetLogLevel() {
  String res = coreOpsController.setLogLevel("1", "info");
  assertEquals("200", res);
}
/**
 * Finds the alias for the projection with the given name.
 * For a shorthand projection ({@code *}), a matching actual column yields its
 * expression; otherwise a projection whose expression matches yields its alias.
 * Matching is case-insensitive.
 */
public Optional<String> findAlias(final String projectionName) {
    for (Projection candidate : projections) {
        if (candidate instanceof ShorthandProjection) {
            for (Projection actualColumn : ((ShorthandProjection) candidate).getActualColumns()) {
                if (projectionName.equalsIgnoreCase(getOriginalColumnName(actualColumn))) {
                    // ofNullable mirrors Optional.map: a null expression yields empty.
                    return Optional.ofNullable(actualColumn.getExpression());
                }
            }
        }
        if (projectionName.equalsIgnoreCase(SQLUtils.getExactlyValue(candidate.getExpression()))) {
            return candidate.getAlias().map(IdentifierValue::getValue);
        }
    }
    return Optional.empty();
}
/** findAlias locates the alias of a projection looked up by its expression. */
@Test
void assertFindAlias() {
  Projection projection = getColumnProjectionWithAlias();
  ProjectionsContext projectionsContext = new ProjectionsContext(0, 0, true, Collections.singleton(projection));
  assertTrue(projectionsContext.findAlias(projection.getExpression()).isPresent());
}
/** Always rejects registration; this implementation accepts no plugins. */
@Override
public boolean tryRegister(ThreadPoolPlugin plugin) {
  return false;
}
/** tryRegister always reports failure for this manager. */
@Test
public void testTryRegister() {
  Assert.assertFalse(manager.tryRegister(new TestPlugin()));
}
/**
 * Splits a commit-log buffer into per-message slices driven by consume-queue entries.
 * Each consume-queue unit supplies (commitLogOffset, size, tagCode); the message bytes
 * are sliced out of {@code msgBuffer} at the offset relative to the first entry.
 * Entries with unknown magic codes or mismatched sizes are skipped; an out-of-range
 * offset stops processing. Both buffers are rewound before and after use.
 *
 * @param cqBuffer consume-queue buffer; length must be a multiple of CONSUME_QUEUE_UNIT_SIZE
 * @param msgBuffer commit-log buffer holding the corresponding messages
 * @return one SelectBufferResult per valid consume-queue entry (possibly empty)
 */
public static List<SelectBufferResult> splitMessageBuffer(ByteBuffer cqBuffer, ByteBuffer msgBuffer) {
  if (cqBuffer == null || msgBuffer == null) {
    log.error("MessageFormatUtil split buffer error, cq buffer or msg buffer is null");
    return new ArrayList<>();
  }
  cqBuffer.rewind();
  msgBuffer.rewind();
  List<SelectBufferResult> bufferResultList = new ArrayList<>(
      cqBuffer.remaining() / CONSUME_QUEUE_UNIT_SIZE);
  if (msgBuffer.remaining() == 0) {
    log.error("MessageFormatUtil split buffer error, msg buffer length is 0");
    return bufferResultList;
  }
  if (cqBuffer.remaining() == 0 || cqBuffer.remaining() % CONSUME_QUEUE_UNIT_SIZE != 0) {
    log.error("MessageFormatUtil split buffer error, cq buffer size is {}", cqBuffer.remaining());
    return bufferResultList;
  }
  try {
    // All message offsets are interpreted relative to the first consume-queue entry.
    long firstCommitLogOffset = MessageFormatUtil.getCommitLogOffsetFromItem(cqBuffer);
    for (int position = cqBuffer.position(); position < cqBuffer.limit();
        position += CONSUME_QUEUE_UNIT_SIZE) {
      cqBuffer.position(position);
      long logOffset = MessageFormatUtil.getCommitLogOffsetFromItem(cqBuffer);
      int bufferSize = MessageFormatUtil.getSizeFromItem(cqBuffer);
      long tagCode = MessageFormatUtil.getTagCodeFromItem(cqBuffer);
      int offset = (int) (logOffset - firstCommitLogOffset);
      if (offset + bufferSize > msgBuffer.limit()) {
        log.error("MessageFormatUtil split buffer error, message buffer offset exceeded limit. "
            + "Expect length: {}, Actual length: {}", offset + bufferSize, msgBuffer.limit());
        break;
      }
      msgBuffer.position(offset);
      int magicCode = getMagicCode(msgBuffer);
      if (magicCode == BLANK_MAGIC_CODE) {
        // A blank coda marks the end of a commit-log file; skip it and re-read.
        offset += COMMIT_LOG_CODA_SIZE;
        msgBuffer.position(offset);
        magicCode = getMagicCode(msgBuffer);
      }
      if (magicCode != MessageDecoder.MESSAGE_MAGIC_CODE
          && magicCode != MessageDecoder.MESSAGE_MAGIC_CODE_V2) {
        log.error("MessageFormatUtil split buffer error, found unknown magic code. "
            + "Message offset: {}, wrong magic code: {}", offset, magicCode);
        continue;
      }
      if (bufferSize != getTotalSize(msgBuffer)) {
        log.error("MessageFormatUtil split buffer error, message length not match. "
            + "CommitLog length: {}, buffer length: {}", getTotalSize(msgBuffer), bufferSize);
        continue;
      }
      ByteBuffer sliceBuffer = msgBuffer.slice();
      sliceBuffer.limit(bufferSize);
      bufferResultList.add(new SelectBufferResult(sliceBuffer, offset, bufferSize, tagCode));
    }
  } finally {
    cqBuffer.rewind();
    msgBuffer.rewind();
  }
  return bufferResultList;
}
/**
 * Exercises splitMessageBuffer against a commit-log buffer of two messages separated
 * by a blank file coda: null/empty/misaligned inputs, normal splits across the coda,
 * size mismatches, oversize entries, and a corrupted magic code.
 */
@Test
public void testSplitMessages() {
  // Commit-log layout: message(queueOffset=10) | blank coda | message(queueOffset=11).
  ByteBuffer msgBuffer1 = buildMockedMessageBuffer();
  msgBuffer1.putLong(MessageFormatUtil.QUEUE_OFFSET_POSITION, 10);
  ByteBuffer msgBuffer2 = ByteBuffer.allocate(COMMIT_LOG_CODA_SIZE);
  msgBuffer2.putInt(MessageFormatUtil.COMMIT_LOG_CODA_SIZE);
  msgBuffer2.putInt(MessageFormatUtil.BLANK_MAGIC_CODE);
  msgBuffer2.putLong(System.currentTimeMillis());
  msgBuffer2.flip();
  ByteBuffer msgBuffer3 = buildMockedMessageBuffer();
  msgBuffer3.putLong(MessageFormatUtil.QUEUE_OFFSET_POSITION, 11);
  ByteBuffer msgBuffer = ByteBuffer.allocate(
      msgBuffer1.remaining() + msgBuffer2.remaining() + msgBuffer3.remaining());
  msgBuffer.put(msgBuffer1);
  msgBuffer.put(msgBuffer2);
  msgBuffer.put(msgBuffer3);
  msgBuffer.flip();
  // cq entries: 1 = first message; 2 = message after coda; 3 = coda position (skipped via
  // blank magic); 4 = wrong (short) size; 5 = oversize, exceeds the buffer limit.
  ByteBuffer cqBuffer1 = ByteBuffer.allocate(MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE);
  cqBuffer1.putLong(1000);
  cqBuffer1.putInt(MSG_LEN);
  cqBuffer1.putLong(0);
  cqBuffer1.flip();
  ByteBuffer cqBuffer2 = ByteBuffer.allocate(MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE);
  cqBuffer2.putLong(1000 + MessageFormatUtil.COMMIT_LOG_CODA_SIZE + MSG_LEN);
  cqBuffer2.putInt(MSG_LEN);
  cqBuffer2.putLong(0);
  cqBuffer2.flip();
  ByteBuffer cqBuffer3 = ByteBuffer.allocate(MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE);
  cqBuffer3.putLong(1000 + MSG_LEN);
  cqBuffer3.putInt(MSG_LEN);
  cqBuffer3.putLong(0);
  cqBuffer3.flip();
  ByteBuffer cqBuffer4 = ByteBuffer.allocate(MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE);
  cqBuffer4.putLong(1000 + MessageFormatUtil.COMMIT_LOG_CODA_SIZE + MSG_LEN);
  cqBuffer4.putInt(MSG_LEN - 10);
  cqBuffer4.putLong(0);
  cqBuffer4.flip();
  ByteBuffer cqBuffer5 = ByteBuffer.allocate(MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE);
  cqBuffer5.putLong(1000 + MessageFormatUtil.COMMIT_LOG_CODA_SIZE + MSG_LEN);
  cqBuffer5.putInt(MSG_LEN * 10);
  cqBuffer5.putLong(0);
  cqBuffer5.flip();
  // Message buffer size is 0 or consume queue buffer size is 0
  Assert.assertEquals(0,
      MessageFormatUtil.splitMessageBuffer(null, ByteBuffer.allocate(0)).size());
  Assert.assertEquals(0,
      MessageFormatUtil.splitMessageBuffer(cqBuffer1, null).size());
  Assert.assertEquals(0,
      MessageFormatUtil.splitMessageBuffer(cqBuffer1, ByteBuffer.allocate(0)).size());
  Assert.assertEquals(0,
      MessageFormatUtil.splitMessageBuffer(ByteBuffer.allocate(0), msgBuffer).size());
  Assert.assertEquals(0,
      MessageFormatUtil.splitMessageBuffer(ByteBuffer.allocate(10), msgBuffer).size());
  // Two well-formed entries split into two slices.
  ByteBuffer cqBuffer = ByteBuffer.allocate(MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE * 2);
  cqBuffer.put(cqBuffer1);
  cqBuffer.put(cqBuffer2);
  cqBuffer.flip();
  cqBuffer1.rewind();
  cqBuffer2.rewind();
  List<SelectBufferResult> msgList = MessageFormatUtil.splitMessageBuffer(cqBuffer, msgBuffer);
  Assert.assertEquals(2, msgList.size());
  Assert.assertEquals(0, msgList.get(0).getStartOffset());
  Assert.assertEquals(MSG_LEN, msgList.get(0).getSize());
  Assert.assertEquals(MSG_LEN + MessageFormatUtil.COMMIT_LOG_CODA_SIZE, msgList.get(1).getStartOffset());
  Assert.assertEquals(MSG_LEN, msgList.get(1).getSize());
  // Second entry has a mismatched size and is dropped.
  cqBuffer = ByteBuffer.allocate(MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE * 2);
  cqBuffer.put(cqBuffer1);
  cqBuffer.put(cqBuffer4);
  cqBuffer.flip();
  cqBuffer1.rewind();
  cqBuffer4.rewind();
  msgList = MessageFormatUtil.splitMessageBuffer(cqBuffer, msgBuffer);
  Assert.assertEquals(1, msgList.size());
  Assert.assertEquals(0, msgList.get(0).getStartOffset());
  Assert.assertEquals(MSG_LEN, msgList.get(0).getSize());
  // Second entry points at the coda; the blank magic code is skipped transparently.
  cqBuffer = ByteBuffer.allocate(MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE * 3);
  cqBuffer.put(cqBuffer1);
  cqBuffer.put(cqBuffer3);
  cqBuffer.flip();
  cqBuffer1.rewind();
  cqBuffer3.rewind();
  msgList = MessageFormatUtil.splitMessageBuffer(cqBuffer, msgBuffer);
  Assert.assertEquals(2, msgList.size());
  Assert.assertEquals(0, msgList.get(0).getStartOffset());
  Assert.assertEquals(MSG_LEN, msgList.get(0).getSize());
  Assert.assertEquals(MSG_LEN + MessageFormatUtil.COMMIT_LOG_CODA_SIZE, msgList.get(1).getStartOffset());
  Assert.assertEquals(MSG_LEN, msgList.get(1).getSize());
  // Oversize entry exceeds the buffer limit: nothing is returned.
  cqBuffer = ByteBuffer.allocate(MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE);
  cqBuffer.put(cqBuffer5);
  cqBuffer.flip();
  msgList = MessageFormatUtil.splitMessageBuffer(cqBuffer, msgBuffer);
  Assert.assertEquals(0, msgList.size());
  // Wrong magic code, it will destroy the mocked message buffer
  msgBuffer.putInt(MessageFormatUtil.MAGIC_CODE_POSITION, -1);
  cqBuffer = ByteBuffer.allocate(MessageFormatUtil.CONSUME_QUEUE_UNIT_SIZE * 2);
  cqBuffer.put(cqBuffer1);
  cqBuffer.put(cqBuffer2);
  cqBuffer.flip();
  cqBuffer1.rewind();
  cqBuffer2.rewind();
  Assert.assertEquals(1,
      MessageFormatUtil.splitMessageBuffer(cqBuffer, msgBuffer).size());
}
/**
 * Computes the sum of each row of an integer matrix.
 *
 * @param matrix the input matrix
 * @return an array whose i-th element is the sum of row i
 */
public static long[] rowSums(int[][] matrix) {
    final int rows = matrix.length;
    long[] sums = new long[rows];
    for (int row = 0; row < rows; row++) {
        sums[row] = sum(matrix[row]);
    }
    return sums;
}
/** Verifies rowSums on a 3x3 double matrix against precomputed row totals. */
@Test
public void testRowSums() {
  System.out.println("rowSums");
  double[][] A = {
      {0.7220180, 0.07121225, 0.6881997},
      {-0.2648886, -0.89044952, 0.3700456},
      {-0.6391588, 0.44947578, 0.6240573}
  };
  double[] r = {1.4814300, -0.7852925, 0.4343743};
  double[] result = MathEx.rowSums(A);
  for (int i = 0; i < r.length; i++) {
    assertEquals(result[i], r[i], 1E-7);
  }
}
/**
 * Maps the AD session's authentication status to the next step:
 * success starts a BVD session; cancellation redirects back with a CANCELLED status;
 * anything else (including an unknown status label) fails with AdException.
 */
public String checkAuthenticationStatus(AdSession adSession, SamlSession samlSession, String artifact)
    throws BvdException, SamlSessionException, UnsupportedEncodingException, AdException {
  AdAuthenticationStatus status = AdAuthenticationStatus.valueOfLabel(adSession.getAuthenticationStatus());
  if (status == null) {
    throw new AdException("No successful authentication");
  }
  return switch (status) {
    case STATUS_SUCCESS -> bvdClient.startBvdSession(
        adSession.getBsn(),
        "BSN",
        samlSession.getServiceEntityId(),
        LevelOfAssurance.map(String.valueOf(adSession.getAuthenticationLevel())),
        samlSession.getServiceUuid(),
        samlSession.getTransactionId());
    case STATUS_CANCELED -> assertionConsumerServiceUrlService.generateRedirectUrl(
        artifact, samlSession.getTransactionId(), samlSession.getHttpSessionId(), BvdStatus.CANCELLED);
    default -> throw new AdException("No successful authentication");
  };
}
/** A cancelled AD session triggers a redirect URL generation with a BVD status. */
@Test
public void checkAuthenticationStatusCanceledTest()
    throws BvdException, AdException, SamlSessionException, UnsupportedEncodingException {
  AdSession adSession = new AdSession();
  adSession.setAuthenticationStatus(AdAuthenticationStatus.STATUS_CANCELED.label);
  SamlSession samlSession = new SamlSession(1L);
  samlSession.setTransactionId("transactionId");
  samlSession.setHttpSessionId("HttpSessionId");
  String artifact = "artifact";
  adService.checkAuthenticationStatus(adSession, samlSession, artifact);
  verify(assertionConsumerServiceUrlServiceMock, times(1))
      .generateRedirectUrl(anyString(), anyString(), anyString(), any(BvdStatus.class));
}
/**
 * Returns all sub-cluster policy configurations, keyed by queue name.
 *
 * @return map from queue name to its policy configuration
 * @throws Exception if the configurations cannot be retrieved
 */
public abstract Map<String, SubClusterPolicyConfiguration> getPoliciesConfigurations() throws Exception;
/** Every cached policy configuration matches the one stored in the state store. */
@Test
public void testGetPoliciesConfigurations() throws YarnException {
  Map<String, SubClusterPolicyConfiguration> queuePolicies = facade.getPoliciesConfigurations();
  for (String queue : queuePolicies.keySet()) {
    SubClusterPolicyConfiguration expectedPC = stateStoreTestUtil.queryPolicyConfiguration(queue);
    SubClusterPolicyConfiguration cachedPC = queuePolicies.get(queue);
    assertEquals(expectedPC, cachedPC);
  }
}
/** Returns the graph of the given topology, resolved through the default topology. */
@Override
public TopologyGraph getGraph(Topology topology) {
  return defaultTopology(topology).getGraph();
}
/** The topology graph of a freshly created virtual network is non-null. */
@Test
public void testGetGraph() {
  manager.registerTenantId(TenantId.tenantId(tenantIdValue1));
  VirtualNetwork virtualNetwork = manager.createVirtualNetwork(TenantId.tenantId(tenantIdValue1));
  TopologyService topologyService = manager.get(virtualNetwork.id(), TopologyService.class);
  Topology topology = topologyService.currentTopology();
  // test the getGraph() method.
  assertNotNull("The graph should not be null.", topologyService.getGraph(topology));
}
/** Synchronized delegate: returns the head record offset of the partition from the wrapped group. */
@Override
synchronized Long headRecordOffset(final TopicPartition partition) {
  return wrapped.headRecordOffset(partition);
}
/** headRecordOffset delegates exactly once to the wrapped partition group. */
@Test
public void testHeadRecordOffset() {
  final TopicPartition partition = new TopicPartition("topic", 0);
  final Long recordOffset = 0L;
  when(wrapped.headRecordOffset(partition)).thenReturn(recordOffset);
  final Long result = synchronizedPartitionGroup.headRecordOffset(partition);
  assertEquals(recordOffset, result);
  verify(wrapped, times(1)).headRecordOffset(partition);
}
/**
 * Converts a Kafka record into a Row matching this table's schema. Only the fields
 * present in the schema are populated: message key, event timestamp, headers
 * (grouped by key into list-of-values rows), and the payload — either raw bytes or
 * deserialized via the configured payload serializer.
 */
@VisibleForTesting
Row transformInput(KafkaRecord<byte[], byte[]> record) {
  Row.FieldValueBuilder builder = Row.withSchema(getSchema()).withFieldValues(ImmutableMap.of());
  if (schema.hasField(Schemas.MESSAGE_KEY_FIELD)) {
    builder.withFieldValue(Schemas.MESSAGE_KEY_FIELD, record.getKV().getKey());
  }
  if (schema.hasField(Schemas.EVENT_TIMESTAMP_FIELD)) {
    builder.withFieldValue(
        Schemas.EVENT_TIMESTAMP_FIELD, Instant.ofEpochMilli(record.getTimestamp()));
  }
  if (schema.hasField(Schemas.HEADERS_FIELD)) {
    @Nullable Headers recordHeaders = record.getHeaders();
    if (recordHeaders != null) {
      // Group repeated header keys so each key maps to the list of its values.
      ImmutableListMultimap.Builder<String, byte[]> headersBuilder = ImmutableListMultimap.builder();
      recordHeaders.forEach(header -> headersBuilder.put(header.key(), header.value()));
      ImmutableList.Builder<Row> listBuilder = ImmutableList.builder();
      headersBuilder
          .build()
          .asMap()
          .forEach(
              (key, values) -> {
                Row entry =
                    Row.withSchema(Schemas.HEADERS_ENTRY_SCHEMA)
                        .withFieldValue(Schemas.HEADERS_KEY_FIELD, key)
                        .withFieldValue(Schemas.HEADERS_VALUES_FIELD, values)
                        .build();
                listBuilder.add(entry);
              });
      builder.withFieldValue(Schemas.HEADERS_FIELD, listBuilder.build());
    }
  }
  if (payloadSerializer == null) {
    builder.withFieldValue(Schemas.PAYLOAD_FIELD, record.getKV().getValue());
  } else {
    byte[] payload = record.getKV().getValue();
    if (payload != null) {
      builder.withFieldValue(
          Schemas.PAYLOAD_FIELD, payloadSerializer.deserialize(record.getKV().getValue()));
    }
  }
  return builder.build();
}
/**
 * Failure paths of transformInput: a serializer that throws propagates its exception,
 * and a schema that requires headers rejects a record without any.
 */
@Test
public void recordToRowFailures() {
  // Serializer failure propagates out of transformInput.
  {
    Schema payloadSchema = Schema.builder().addStringField("def").build();
    NestedPayloadKafkaTable table =
        newTable(
            Schema.builder()
                .addRowField(Schemas.PAYLOAD_FIELD, payloadSchema)
                .addField(Schemas.HEADERS_FIELD, Schemas.HEADERS_FIELD_TYPE)
                .build(),
            Optional.of(serializer));
    doThrow(new IllegalArgumentException("")).when(serializer).deserialize(any());
    assertThrows(
        IllegalArgumentException.class,
        () ->
            table.transformInput(
                readRecord(
                    new byte[] {}, "abc".getBytes(UTF_8), 123, ImmutableListMultimap.of())));
  }
  // Schema requires headers, missing in message
  {
    NestedPayloadKafkaTable table =
        newTable(
            Schema.builder()
                .addByteArrayField(Schemas.PAYLOAD_FIELD)
                .addField(Schemas.HEADERS_FIELD, Schemas.HEADERS_FIELD_TYPE)
                .build(),
            Optional.empty());
    assertThrows(
        IllegalArgumentException.class,
        () ->
            table.transformInput(
                new KafkaRecord<>(
                    TOPIC,
                    0,
                    0,
                    0,
                    KafkaTimestampType.LOG_APPEND_TIME,
                    null,
                    new byte[] {},
                    new byte[] {})));
  }
}
/**
 * Encrypts an uploaded segment when the table config demands it.
 * Returns (crypterClassName, finalSegmentFile): the decrypted file when no encryption
 * is involved, otherwise the encrypted file. Fails if the segment was uploaded with a
 * different crypter than the table config requires.
 */
@VisibleForTesting
Pair<String, File> encryptSegmentIfNeeded(File tempDecryptedFile, File tempEncryptedFile,
    boolean isUploadedSegmentEncrypted, String crypterUsedInUploadedSegment,
    String crypterClassNameInTableConfig, String segmentName, String tableNameWithType) {
  boolean segmentNeedsEncryption = StringUtils.isNotEmpty(crypterClassNameInTableConfig);
  // form the output
  File finalSegmentFile =
      (isUploadedSegmentEncrypted || segmentNeedsEncryption) ? tempEncryptedFile : tempDecryptedFile;
  String crypterClassName = StringUtils.isEmpty(crypterClassNameInTableConfig)
      ? crypterUsedInUploadedSegment : crypterClassNameInTableConfig;
  ImmutablePair<String, File> out = ImmutablePair.of(crypterClassName, finalSegmentFile);
  if (!segmentNeedsEncryption) {
    return out;
  }
  // A segment already encrypted with a different crypter cannot be re-encrypted here.
  if (isUploadedSegmentEncrypted && !crypterClassNameInTableConfig.equals(crypterUsedInUploadedSegment)) {
    throw new ControllerApplicationException(LOGGER, String.format(
        "Uploaded segment is encrypted with '%s' while table config requires '%s' as crypter "
            + "(segment name = '%s', table name = '%s').",
        crypterUsedInUploadedSegment, crypterClassNameInTableConfig, segmentName, tableNameWithType),
        Response.Status.INTERNAL_SERVER_ERROR);
  }
  // encrypt segment
  PinotCrypter pinotCrypter = PinotCrypterFactory.create(crypterClassNameInTableConfig);
  LOGGER.info("Using crypter class '{}' for encrypting '{}' to '{}' (segment name = '{}', table name = '{}').",
      crypterClassNameInTableConfig, tempDecryptedFile, tempEncryptedFile, segmentName, tableNameWithType);
  pinotCrypter.encrypt(tempDecryptedFile, tempEncryptedFile);
  return out;
}
/** With no crypter anywhere, the decrypted file and a null crypter name are returned. */
@Test
public void testEncryptSegmentIfNeededNoEncryption() {
  // arrange
  boolean uploadedSegmentIsEncrypted = false;
  String crypterClassNameInTableConfig = null;
  String crypterClassNameUsedInUploadedSegment = null;
  // act
  Pair<String, File> encryptionInfo = _resource
      .encryptSegmentIfNeeded(_decryptedFile, _encryptedFile, uploadedSegmentIsEncrypted,
          crypterClassNameUsedInUploadedSegment, crypterClassNameInTableConfig, SEGMENT_NAME, TABLE_NAME);
  // assert
  assertNull(encryptionInfo.getLeft());
  assertEquals(_decryptedFile, encryptionInfo.getRight());
}
/**
 * Commits table metadata: stamps bookkeeping properties, reconciles snapshots to
 * append/delete, writes the new metadata file, and persists the HouseTable record
 * (or, for staged creates, only refreshes from the new metadata location).
 * On an unexpected failure the commit outcome is re-checked and surfaced as
 * CommitFailedException or CommitStateUnknownException; failure counters are
 * emitted in the finally block.
 */
@SuppressWarnings("checkstyle:MissingSwitchDefault")
@Override
protected void doCommit(TableMetadata base, TableMetadata metadata) {
  int version = currentVersion() + 1;
  CommitStatus commitStatus = CommitStatus.FAILURE;
  /* This method adds no fs scheme, and it persists in HTS that way. */
  final String newMetadataLocation = rootMetadataFileLocation(metadata, version);
  HouseTable houseTable = HouseTable.builder().build();
  try {
    // Now that we have metadataLocation we stamp it in metadata property.
    Map<String, String> properties = new HashMap<>(metadata.properties());
    failIfRetryUpdate(properties);
    String currentTsString = String.valueOf(Instant.now(Clock.systemUTC()).toEpochMilli());
    properties.put(getCanonicalFieldName("lastModifiedTime"), currentTsString);
    if (base == null) {
      // First commit for this table: record creation time as well.
      properties.put(getCanonicalFieldName("creationTime"), currentTsString);
    }
    properties.put(
        getCanonicalFieldName("tableVersion"),
        properties.getOrDefault(
            getCanonicalFieldName("tableLocation"), CatalogConstants.INITIAL_VERSION));
    properties.put(getCanonicalFieldName("tableLocation"), newMetadataLocation);
    String serializedSnapshotsToPut = properties.remove(CatalogConstants.SNAPSHOTS_JSON_KEY);
    String serializedSnapshotRefs = properties.remove(CatalogConstants.SNAPSHOTS_REFS_KEY);
    boolean isStageCreate =
        Boolean.parseBoolean(properties.remove(CatalogConstants.IS_STAGE_CREATE_KEY));
    logPropertiesMap(properties);
    TableMetadata updatedMetadata = metadata.replaceProperties(properties);
    if (serializedSnapshotsToPut != null) {
      // Diff incoming snapshots against current ones to find appends and deletes.
      List<Snapshot> snapshotsToPut = SnapshotsUtil.parseSnapshots(fileIO, serializedSnapshotsToPut);
      Pair<List<Snapshot>, List<Snapshot>> snapshotsDiff =
          SnapshotsUtil.symmetricDifferenceSplit(snapshotsToPut, updatedMetadata.snapshots());
      List<Snapshot> appendedSnapshots = snapshotsDiff.getFirst();
      List<Snapshot> deletedSnapshots = snapshotsDiff.getSecond();
      snapshotInspector.validateSnapshotsUpdate(updatedMetadata, appendedSnapshots, deletedSnapshots);
      Map<String, SnapshotRef> snapshotRefs =
          serializedSnapshotRefs == null
              ? new HashMap<>()
              : SnapshotsUtil.parseSnapshotRefs(serializedSnapshotRefs);
      updatedMetadata = maybeAppendSnapshots(updatedMetadata, appendedSnapshots, snapshotRefs, true);
      updatedMetadata = maybeDeleteSnapshots(updatedMetadata, deletedSnapshots);
    }
    final TableMetadata updatedMtDataRef = updatedMetadata;
    metricsReporter.executeWithStats(
        () -> TableMetadataParser.write(updatedMtDataRef, io().newOutputFile(newMetadataLocation)),
        InternalCatalogMetricsConstant.METADATA_UPDATE_LATENCY);
    houseTable = houseTableMapper.toHouseTable(updatedMetadata);
    if (!isStageCreate) {
      houseTableRepository.save(houseTable);
    } else {
      /**
       * Refresh current metadata for staged tables from newly created metadata file and disable
       * "forced refresh" in {@link OpenHouseInternalTableOperations#commit(TableMetadata,
       * TableMetadata)}
       */
      refreshFromMetadataLocation(newMetadataLocation);
    }
    commitStatus = CommitStatus.SUCCESS;
  } catch (InvalidIcebergSnapshotException e) {
    throw new BadRequestException(e, e.getMessage());
  } catch (CommitFailedException e) {
    throw e;
  } catch (HouseTableCallerException
      | HouseTableNotFoundException
      | HouseTableConcurrentUpdateException e) {
    throw new CommitFailedException(e);
  } catch (Throwable persistFailure) {
    // Try to reconnect and determine the commit status for unknown exception
    log.error(
        "Encounter unexpected error while updating metadata.json for table:" + tableIdentifier,
        persistFailure);
    commitStatus = checkCommitStatus(newMetadataLocation, metadata);
    switch (commitStatus) {
      case SUCCESS:
        log.debug("Calling doCommit succeeded");
        break;
      case FAILURE:
        // logging error and exception-throwing co-existence is needed, given the exception
        // handler in
        // org.apache.iceberg.BaseMetastoreCatalog.BaseMetastoreCatalogTableBuilder.create swallow
        // the
        // nested exception information.
        log.error("Exception details:", persistFailure);
        throw new CommitFailedException(
            persistFailure,
            String.format(
                "Persisting metadata file %s at version %s for table %s failed while persisting to house table",
                newMetadataLocation, version, GSON.toJson(houseTable)));
      case UNKNOWN:
        throw new CommitStateUnknownException(persistFailure);
    }
  } finally {
    switch (commitStatus) {
      case FAILURE:
        metricsReporter.count(InternalCatalogMetricsConstant.COMMIT_FAILED_CTR);
        break;
      case UNKNOWN:
        metricsReporter.count(InternalCatalogMetricsConstant.COMMIT_STATE_UNKNOWN);
        break;
      default:
        break; /*should never happen, kept to silence SpotBugs*/
    }
  }
}
/**
 * Cherry-picking an already-staged (WAP) snapshot onto an unchanged base should record
 * it only as cherry-picked — not staged, appended, or deleted — and save the table once.
 */
@Test
void testDoCommitCherryPickSnapshotBaseUnchanged() throws IOException {
  List<Snapshot> testSnapshots = IcebergTestUtil.getSnapshots();
  List<Snapshot> testWapSnapshots = IcebergTestUtil.getWapSnapshots();
  // add 1 snapshot and 1 staged snapshot to the base metadata
  TableMetadata base =
      TableMetadata.buildFrom(BASE_TABLE_METADATA)
          .setBranchSnapshot(testSnapshots.get(0), SnapshotRef.MAIN_BRANCH)
          .addSnapshot(testWapSnapshots.get(0))
          .build();
  List<Snapshot> newSnapshots = new ArrayList<>();
  newSnapshots.add(testSnapshots.get(0));
  newSnapshots.add(testWapSnapshots.get(0));
  Map<String, String> properties = new HashMap<>(base.properties());
  try (MockedStatic<TableMetadataParser> ignoreWriteMock = Mockito.mockStatic(TableMetadataParser.class)) {
    // cherry pick the staged snapshot
    properties.put(
        CatalogConstants.SNAPSHOTS_JSON_KEY, SnapshotsUtil.serializedSnapshots(newSnapshots));
    properties.put(
        CatalogConstants.SNAPSHOTS_REFS_KEY,
        SnapshotsUtil.serializeMap(
            IcebergTestUtil.obtainSnapshotRefsFromSnapshot(testWapSnapshots.get(0))));
    properties.put(getCanonicalFieldName("tableLocation"), TEST_LOCATION);
    TableMetadata metadata = base.replaceProperties(properties);
    openHouseInternalTableOperations.doCommit(base, metadata);
    Mockito.verify(mockHouseTableMapper).toHouseTable(tblMetadataCaptor.capture());
    Map<String, String> updatedProperties = tblMetadataCaptor.getValue().properties();
    // verify the staged snapshot is cherry picked by use the existing one
    Assertions.assertEquals(
        null, updatedProperties.get(getCanonicalFieldName("staged_snapshots")));
    Assertions.assertEquals(
        null, updatedProperties.get(getCanonicalFieldName("appended_snapshots")));
    Assertions.assertEquals(
        Long.toString(testWapSnapshots.get(0).snapshotId()),
        updatedProperties.get(getCanonicalFieldName("cherry_picked_snapshots")));
    Assertions.assertEquals(
        null, updatedProperties.get(getCanonicalFieldName("deleted_snapshots")));
    Mockito.verify(mockHouseTableRepository, Mockito.times(1)).save(Mockito.eq(mockHouseTable));
  }
}
/** Blocks until an element is available at the head of the deque, then removes and returns it. */
@Override
public V takeFirst() throws InterruptedException {
  return commandExecutor.getInterrupted(takeFirstAsync());
}
/**
 * takeFirst blocks until another client adds elements (~10s later) and then drains
 * the remaining elements in order.
 */
@Test
public void testTakeFirstAwait() throws InterruptedException {
    RBlockingDeque<Integer> deque = redisson.getPriorityBlockingDeque("queue:take");
    // Fix: the original leaked this single-thread scheduler (never shut down),
    // leaving a non-daemon worker thread alive after the test.
    java.util.concurrent.ScheduledExecutorService scheduler =
        Executors.newSingleThreadScheduledExecutor();
    try {
        scheduler.schedule(() -> {
            RBlockingDeque<Integer> deque1 = redisson.getBlockingDeque("queue:take");
            deque1.add(1);
            deque1.add(2);
            deque1.add(3);
            deque1.add(4);
        }, 10, TimeUnit.SECONDS);
        long s = System.currentTimeMillis();
        assertThat(deque.takeFirst()).isEqualTo(1);
        assertThat(System.currentTimeMillis() - s).isGreaterThan(9000);
        Thread.sleep(50);
        assertThat(deque.takeFirst()).isEqualTo(2);
        assertThat(deque.takeFirst()).isEqualTo(3);
        assertThat(deque.takeFirst()).isEqualTo(4);
    } finally {
        scheduler.shutdown();
    }
}
/**
 * Deprecated value-only flat-transform overload; adapts the supplier to the
 * key-aware variant and delegates with no explicit name.
 */
@Override
@Deprecated
public <VR> KStream<K, VR> flatTransformValues(
    final org.apache.kafka.streams.kstream.ValueTransformerSupplier<? super V, Iterable<VR>> valueTransformerSupplier,
    final String... stateStoreNames) {
  Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null");
  return doFlatTransformValues(
      toValueTransformerWithKeySupplier(valueTransformerSupplier), NamedInternal.empty(), stateStoreNames);
}
/** A null state-store-name array is rejected with a descriptive NullPointerException. */
@Test
@SuppressWarnings("deprecation")
public void shouldNotAllowNullStoreNamesOnFlatTransformValuesWithFlatValueSupplierAndNamed() {
  final NullPointerException exception = assertThrows(
      NullPointerException.class,
      () -> testStream.flatTransformValues(
          flatValueTransformerSupplier,
          Named.as("flatValueTransformer"),
          (String[]) null));
  assertThat(exception.getMessage(), equalTo("stateStoreNames can't be a null array"));
}
/**
 * Returns the next transfer tag: a pooled tag if one is available, otherwise a
 * freshly generated tag encoding the next sequential id in big-endian order using
 * the minimal number of bytes.
 */
public byte[] getNextTag() {
    byte[] pooled = (tagPool == null) ? null : tagPool.pollFirst();
    if (pooled != null) {
        return pooled;
    }
    final long id = nextTagId++;
    final int width = encodingSize(id);
    byte[] encoded = new byte[width];
    // Write the id big-endian: least significant byte goes into the last slot.
    for (int shift = 0; shift < width; ++shift) {
        encoded[width - 1 - shift] = (byte) (id >>> (shift * 8));
    }
    return encoded;
}
/** Successive checkouts yield distinct tag instances with distinct contents. */
@Test
public void testNewTagsOnSuccessiveCheckouts() {
  AmqpTransferTagGenerator tagGen = new AmqpTransferTagGenerator(true);
  byte[] tag1 = tagGen.getNextTag();
  byte[] tag2 = tagGen.getNextTag();
  byte[] tag3 = tagGen.getNextTag();
  assertNotSame(tag1, tag2);
  assertNotSame(tag1, tag3);
  assertNotSame(tag3, tag2);
  assertFalse(Arrays.equals(tag1, tag2));
  assertFalse(Arrays.equals(tag1, tag3));
  assertFalse(Arrays.equals(tag3, tag2));
}
/**
 * Asserts the subject contains exactly the given elements. A null varargs array is
 * treated as a single null element; a single Iterable argument is flagged so the
 * failure message can warn about the likely misuse.
 */
@CanIgnoreReturnValue
public final Ordered containsExactly(@Nullable Object @Nullable ... varargs) {
  List<@Nullable Object> expected =
      (varargs == null) ? newArrayList((@Nullable Object) null) : asList(varargs);
  return containsExactlyElementsIn(
      expected, varargs != null && varargs.length == 1 && varargs[0] instanceof Iterable);
}
// Happy path: a list with several elements matches containsExactly with the same elements.
@Test public void iterableContainsExactlyWithMany() { assertThat(asList(1, 2, 3)).containsExactly(1, 2, 3); }
/**
 * A plugin is edition-bundled when it is published by the SonarSource organization
 * and carries one of the known commercial licenses; both checks are case-insensitive.
 */
public static boolean isEditionBundled(Plugin plugin) {
    if (!SONARSOURCE_ORGANIZATION.equalsIgnoreCase(plugin.getOrganization())) {
        return false;
    }
    for (String commercialLicense : SONARSOURCE_COMMERCIAL_LICENSES) {
        if (commercialLicense.equalsIgnoreCase(plugin.getLicense())) {
            return true;
        }
    }
    return false;
}
@Test
public void isEditionBundled_on_PluginINfo_returns_true_for_organization_SonarSource_and_license_Commercial_case_insensitive() {
    // Casing of both the organization and the license must not matter.
    PluginInfo commercialSonarSourcePlugin = newPluginInfo(randomizeCase("SonarSource"), randomizeCase("Commercial"));

    assertThat(EditionBundledPlugins.isEditionBundled(commercialSonarSourcePlugin)).isTrue();
}
@Override
@Nullable
public Object convert(String value) {
    // Nothing to parse.
    if (value == null || value.isEmpty()) {
        return null;
    }
    // Natty natural-language date parser, anchored to the configured time zone.
    final List<DateGroup> groups = new Parser(timeZone.toTimeZone()).parse(value);
    if (groups.isEmpty() || groups.get(0).getDates().isEmpty()) {
        return null;
    }
    // Use the first date of the first parsed group.
    return new DateTime(groups.get(0).getDates().get(0), timeZone);
}
@Test
public void convertUsesEtcUTCIfTimeZoneSettingIsNotAString() throws Exception {
    // A non-string "time_zone" setting (here an Integer) must fall back to Etc/UTC.
    Converter converter = new FlexibleDateConverter(ImmutableMap.<String, Object>of("time_zone", 42));

    final DateTime converted = (DateTime) converter.convert("2014-3-12");

    assertThat(converted.getZone()).isEqualTo(DateTimeZone.forID("Etc/UTC"));
}
/**
 * Converts a raw result-set value to the requested target type.
 *
 * NOTE(review): the instanceof chain below is ORDER-SENSITIVE — e.g. the Timestamp
 * branch must run before the Date branch (Timestamp is presumably a Date subtype
 * here — confirm which Date is imported), and Number before Date. Do not reorder.
 *
 * @param value value to convert; null maps to the type-appropriate null representation
 * @param convertType target class; must not be null
 * @return the converted value
 * @throws SQLFeatureNotSupportedException if convertType is null or the final cast fails
 */
public static Object convertValue(final Object value, final Class<?> convertType) throws SQLFeatureNotSupportedException { ShardingSpherePreconditions.checkNotNull(convertType, () -> new SQLFeatureNotSupportedException("Type can not be null")); if (null == value) { return convertNullValue(convertType); } if (value.getClass() == convertType) { return value; } if (value instanceof LocalDateTime) { return convertLocalDateTimeValue((LocalDateTime) value, convertType); } if (value instanceof Timestamp) { return convertTimestampValue((Timestamp) value, convertType); } if (URL.class.equals(convertType)) { return convertURL(value); } if (value instanceof Number) { return convertNumberValue(value, convertType); } if (value instanceof Date) { return convertDateValue((Date) value, convertType); } if (value instanceof byte[]) { return convertByteArrayValue((byte[]) value, convertType); } if (boolean.class.equals(convertType)) { return convertBooleanValue(value); } if (String.class.equals(convertType)) { return value.toString(); } try { return convertType.cast(value); } catch (final ClassCastException ignored) { throw new SQLFeatureNotSupportedException("getObject with type"); } }
@Test
void assertConvertURLValueError() {
    // A malformed URL string cannot be converted to java.net.URL.
    String malformedUrl = "no-exist:shardingsphere.apache.org/";

    assertThrows(UnsupportedDataTypeConversionException.class, () -> ResultSetUtils.convertValue(malformedUrl, URL.class));
}
@Override
public String toString(final RouteUnit routeUnit) {
    // Resolve the routed constraint name, then re-apply the identifier's quoting.
    final String actualConstraint = getConstraintValue(routeUnit);
    return identifier.getQuoteCharacter().wrap(actualConstraint);
}
@Test
void assertUpperCaseToString() {
    // A constraint declared as "uc" must be rewritten with the routed table suffix.
    SQLStatementContext sqlStatementContext = mockSQLStatementContext();
    ConstraintToken token = new ConstraintToken(0, 1, new IdentifierValue("uc"), sqlStatementContext, mock(ShardingRule.class));

    assertThat(token.toString(getRouteUnit()), is("uc_t_order_0"));
}
/**
 * Returns the column name the way legacy (v3 and older) MySQL drivers reported it.
 *
 * @param dbMetaData database metadata used to detect the driver version; must not be null
 * @param rsMetaData result set metadata to read the column name from; must not be null
 * @param index 1-based column index
 * @return the column label (modern drivers) or column name (legacy drivers)
 * @throws KettleDatabaseException if either metadata argument is null or the lookup fails
 */
public String getLegacyColumnName( DatabaseMetaData dbMetaData, ResultSetMetaData rsMetaData, int index ) throws KettleDatabaseException {
  if ( dbMetaData == null ) {
    throw new KettleDatabaseException( BaseMessages.getString( PKG, "MySQLDatabaseMeta.Exception.LegacyColumnNameNoDBMetaDataException" ) );
  }
  if ( rsMetaData == null ) {
    throw new KettleDatabaseException( BaseMessages.getString( PKG, "MySQLDatabaseMeta.Exception.LegacyColumnNameNoRSMetaDataException" ) );
  }
  try {
    // Drivers newer than major version 3 expose the alias via getColumnLabel;
    // older drivers only provide getColumnName.
    if ( dbMetaData.getDriverMajorVersion() > 3 ) {
      return rsMetaData.getColumnLabel( index );
    }
    return rsMetaData.getColumnName( index );
  } catch ( Exception e ) {
    throw new KettleDatabaseException( String.format( "%s: %s", BaseMessages.getString( PKG, "MySQLDatabaseMeta.Exception.LegacyColumnNameException" ), e.getMessage() ), e );
  }
}
@Test
public void testGetLegacyColumnNameDriverLessOrEqualToThreeFieldMySQL() throws Exception {
    // With driver major version <= 3 the legacy name must come from getColumnName.
    DatabaseMetaData databaseMetaData = mock( DatabaseMetaData.class );
    doReturn( 3 ).when( databaseMetaData ).getDriverMajorVersion();

    String legacyName = new MySQLDatabaseMeta().getLegacyColumnName( databaseMetaData, getResultSetMetaData(), 5 );

    assertEquals( "MySQL", legacyName );
}
/**
 * LESS_THAN_OR_EQUAL operator for the "unknown" type. Every value of the unknown
 * type is SQL NULL, so the engine never calls this with actual values; the method
 * exists only so the operator resolves, and fails loudly if ever reached.
 */
@ScalarOperator(LESS_THAN_OR_EQUAL) @SqlType(StandardTypes.BOOLEAN) public static boolean lessThanOrEqual(@SqlType("unknown") boolean left, @SqlType("unknown") boolean right) { throw new AssertionError("value of unknown type should all be NULL"); }
// NULL <= NULL must evaluate to NULL (SQL three-valued logic), not false.
@Test public void testLessThanOrEqual() { assertFunction("NULL <= NULL", BOOLEAN, null); }