repo_name stringlengths 6 101 | path stringlengths 4 300 | text stringlengths 7 1.31M |
|---|---|---|
xxm1995/bootx-cloud | bootx-commons/common-seata/src/main/java/cn/bootx/common/seata/redis/proxy/TccRedisClientProxyImpl.java | <reponame>xxm1995/bootx-cloud<gh_stars>1-10
package cn.bootx.common.seata.redis.proxy;
import cn.bootx.common.redis.RedisClient;
import io.seata.core.context.RootContext;
import io.seata.rm.tcc.api.BusinessActionContext;
import io.seata.rm.tcc.api.BusinessActionContextParameter;
import lombok.RequiredArgsConstructor;
import org.springframework.stereotype.Component;
import java.util.*;
import java.util.concurrent.TimeUnit;
/**
 * TCC (Try-Confirm-Cancel) proxy implementation around {@link RedisClient}.
 *
 * <p>Each write operation comes in two parts: the "try" method (e.g. {@link #deleteKey})
 * records its arguments via {@link BusinessActionContextParameter} when running inside a
 * Seata global transaction (and executes directly when it is not), while the matching
 * {@code ...Tcc} method re-reads the recorded arguments from the
 * {@link BusinessActionContext} and performs the actual Redis write in the confirm phase.
 * Read operations are plain pass-throughs.
 *
 * @author xxm
 * @date 2021/4/28
 */
@RequiredArgsConstructor
@Component
public class TccRedisClientProxyImpl implements TccRedisClientProxy {

    private final RedisClient redisTemplate;

    /** @return {@code true} when the current thread is NOT inside a Seata branch transaction. */
    private boolean notTcc() {
        return Objects.isNull(RootContext.getBranchType());
    }

    /**
     * Seata serializes the action context (e.g. as JSON), so a recorded {@code long} may be
     * deserialized back as an {@link Integer}; a direct {@code (long)} cast on the boxed
     * object would then throw {@link ClassCastException}. Go through {@link Number} instead.
     */
    private long toLong(Object value) {
        return ((Number) value).longValue();
    }

    @Override
    public void deleteKey(@BusinessActionContextParameter(paramName = "key") String key) {
        if (notTcc()) {
            redisTemplate.deleteKey(key);
        }
    }

    /** Confirm phase of {@link #deleteKey(String)}. */
    @Override
    public void deleteKeyTcc(BusinessActionContext context) {
        String key = (String) context.getActionContext("key");
        redisTemplate.deleteKey(key);
    }

    @Override
    public void deleteKeys(@BusinessActionContextParameter(paramName = "keys") Collection<String> keys) {
        if (notTcc()) {
            redisTemplate.deleteKeys(keys);
        }
    }

    /** Confirm phase of {@link #deleteKeys(Collection)}. */
    @Override
    public void deleteKeysTcc(BusinessActionContext context) {
        // FIX: the try-phase records this parameter under "keys" (see deleteKeys);
        // reading "key" always returned null, so nothing was ever deleted on confirm.
        //noinspection unchecked
        Collection<String> keys = (Collection<String>) context.getActionContext("keys");
        redisTemplate.deleteKeys(keys);
    }

    @Override
    public void set(
            @BusinessActionContextParameter(paramName = "key") String key,
            @BusinessActionContextParameter(paramName = "value") String value) {
        if (notTcc()) {
            redisTemplate.set(key, value);
        }
    }

    /** Confirm phase of {@link #set(String, String)}. */
    @Override
    public void setTcc(BusinessActionContext context) {
        String key = (String) context.getActionContext("key");
        String value = (String) context.getActionContext("value");
        redisTemplate.set(key, value);
    }

    @Override
    public void setWithTimeout(
            @BusinessActionContextParameter(paramName = "key") String key,
            @BusinessActionContextParameter(paramName = "value") String value,
            @BusinessActionContextParameter(paramName = "timeoutMs") long timeoutMs) {
        if (notTcc()) {
            redisTemplate.setWithTimeout(key, value, timeoutMs);
        }
    }

    /** Confirm phase of {@link #setWithTimeout(String, String, long)}. */
    @Override
    public void setWithTimeoutTcc(BusinessActionContext context) {
        String key = (String) context.getActionContext("key");
        String value = (String) context.getActionContext("value");
        // FIX: plain (long) cast fails when the context value deserializes as Integer.
        long timeoutMs = toLong(context.getActionContext("timeoutMs"));
        redisTemplate.setWithTimeout(key, value, timeoutMs);
    }

    @Override
    public void hset(
            @BusinessActionContextParameter(paramName = "key") String key,
            @BusinessActionContextParameter(paramName = "column") String column,
            @BusinessActionContextParameter(paramName = "value") Object value) {
        if (notTcc()) {
            redisTemplate.hset(key, column, value);
        }
    }

    /** Confirm phase of {@link #hset(String, String, Object)}. */
    @Override
    public void hsetTcc(BusinessActionContext context) {
        String key = (String) context.getActionContext("key");
        String column = (String) context.getActionContext("column");
        Object value = context.getActionContext("value");
        redisTemplate.hset(key, column, value);
    }

    @Override
    public void hmSet(@BusinessActionContextParameter(paramName = "key") String key,
                      @BusinessActionContextParameter(paramName = "map") Map<String, String> map) {
        if (notTcc()) {
            redisTemplate.hmSet(key, map);
        }
    }

    /** Confirm phase of {@link #hmSet(String, Map)}. */
    @Override
    public void hmSetTcc(BusinessActionContext context) {
        String key = (String) context.getActionContext("key");
        //noinspection unchecked
        Map<String, String> map = (Map<String, String>) context.getActionContext("map");
        redisTemplate.hmSet(key, map);
    }

    @Override
    public void expire(@BusinessActionContextParameter(paramName = "key") String key,
                       @BusinessActionContextParameter(paramName = "expire") long timeoutMs) {
        if (notTcc()) {
            redisTemplate.expire(key, timeoutMs);
        }
    }

    /** Confirm phase of {@link #expire(String, long)}. */
    @Override
    public void expireTcc(BusinessActionContext context) {
        String key = (String) context.getActionContext("key");
        long timeoutMs = toLong(context.getActionContext("expire"));
        redisTemplate.expire(key, timeoutMs);
    }

    @Override
    public void expireUnit(@BusinessActionContextParameter(paramName = "key") String key,
                           @BusinessActionContextParameter(paramName = "expire") long expire,
                           @BusinessActionContextParameter(paramName = "timeUnit") TimeUnit timeUnit) {
        if (notTcc()) {
            redisTemplate.expireUnit(key, expire, timeUnit);
        }
    }

    /** Confirm phase of {@link #expireUnit(String, long, TimeUnit)}. */
    @Override
    public void expireUnitTcc(BusinessActionContext context) {
        String key = (String) context.getActionContext("key");
        long expire = toLong(context.getActionContext("expire"));
        // NOTE(review): depending on the context serializer, the enum may come back as a
        // String name rather than a TimeUnit instance — verify against the Seata config.
        TimeUnit timeUnit = (TimeUnit) context.getActionContext("timeUnit");
        redisTemplate.expireUnit(key, expire, timeUnit);
    }

    @Override
    public void zadd(@BusinessActionContextParameter(paramName = "key") String key,
                     @BusinessActionContextParameter(paramName = "value") String value,
                     @BusinessActionContextParameter(paramName = "score") long score) {
        // FIX: annotations were missing here (unlike the other try-methods), so zaddTcc
        // found no recorded parameters in the action context.
        if (notTcc()) {
            redisTemplate.zadd(key, value, score);
        }
    }

    /** Confirm phase of {@link #zadd(String, String, long)}. */
    @Override
    public void zaddTcc(BusinessActionContext context) {
        String key = (String) context.getActionContext("key");
        String value = (String) context.getActionContext("value");
        long score = toLong(context.getActionContext("score"));
        redisTemplate.zadd(key, value, score);
    }

    @Override
    public void zremRangeByScore(@BusinessActionContextParameter(paramName = "key") String key,
                                 @BusinessActionContextParameter(paramName = "start") long start,
                                 @BusinessActionContextParameter(paramName = "end") long end) {
        // FIX: annotations were missing here, leaving zremRangeByScoreTcc without parameters.
        if (notTcc()) {
            redisTemplate.zremRangeByScore(key, start, end);
        }
    }

    /** Confirm phase of {@link #zremRangeByScore(String, long, long)}. */
    @Override
    public void zremRangeByScoreTcc(BusinessActionContext context) {
        String key = (String) context.getActionContext("key");
        long start = toLong(context.getActionContext("start"));
        long end = toLong(context.getActionContext("end"));
        redisTemplate.zremRangeByScore(key, start, end);
    }

    @Override
    public void zremByMembers(@BusinessActionContextParameter(paramName = "key") String key,
                              @BusinessActionContextParameter(paramName = "members") String... members) {
        // FIX: annotations were missing here, leaving zremByMembersTcc without parameters.
        if (notTcc()) {
            redisTemplate.zremByMembers(key, members);
        }
    }

    /** Confirm phase of {@link #zremByMembers(String, String...)}. */
    @Override
    public void zremByMembersTcc(BusinessActionContext context) {
        String key = (String) context.getActionContext("key");
        // NOTE(review): depending on the context serializer, the varargs array may
        // deserialize as a List rather than String[] — verify against the Seata config.
        String[] members = (String[]) context.getActionContext("members");
        redisTemplate.zremByMembers(key, members);
    }

    @Override
    public void renameTcc(
            @BusinessActionContextParameter(paramName = "oldKey") String oldKey,
            @BusinessActionContextParameter(paramName = "newKey") String newKey) {
        // NOTE(review): this try-method carries the "Tcc" suffix, unlike its siblings;
        // the name is fixed by the TccRedisClientProxy interface.
        if (notTcc()) {
            redisTemplate.rename(oldKey, newKey);
        }
    }

    /** Confirm phase of {@link #renameTcc(String, String)}. */
    @Override
    public void renameTcc(BusinessActionContext context) {
        String oldKey = (String) context.getActionContext("oldKey");
        String newKey = (String) context.getActionContext("newKey");
        redisTemplate.rename(oldKey, newKey);
    }

    /**
     * 默认空回滚 — default empty rollback: the cancel phase intentionally does nothing.
     */
    @Override
    public void cancel(BusinessActionContext context) {
    }

    @Override
    public String get(String key) {
        return redisTemplate.get(key);
    }

    @Override
    public List<String> get(Collection<String> keys) {
        return redisTemplate.get(keys);
    }

    @Override
    public Object hget(String key, String column) {
        return redisTemplate.hget(key, column);
    }

    @Override
    public Map<String, String> hmGet(String key) {
        return redisTemplate.hmGet(key);
    }

    @Override
    public Set<String> zrangeByScore(String key, long start, long end) {
        return redisTemplate.zrangeByScore(key, start, end);
    }
}
|
warehouse-picking-automation-challenges/nimbro_picking | nimbro_actuators/drc_interface/src/controller_tool/controller_tool_hand.cpp | <gh_stars>10-100
// Commands for communicating with the hand controller
// Author: <NAME> <<EMAIL>>
#include "controller_tool_hand.h"
#include "controller_tool_dxl.h"
#include "../../firmware/hand_bootloader/interface.h"
#include <stdio.h>
#include <unistd.h>
#include <string.h>
namespace hand
{
// Stream the firmware image from f to the hand controller's bootloader, one
// 256-byte flash page at a time, verifying every page by reading it back.
// Returns 0 on success, 1 on any unrecoverable error.
static int doFlash(FILE* f)
{
    uint8_t pagebuf[256];
    uint32_t pageIdx = 0;

    while(!feof(f))
    {
        // Retry budget shared by all command/poll loops of this page iteration.
        unsigned int retryCount = 0;

        // Pad a short final page with 0xFF (the erased-flash value).
        memset(pagebuf, 0xFF, sizeof(pagebuf));
        int bytes = fread(pagebuf, 1, sizeof(pagebuf), f);
        if(bytes == 0)
            continue;
        if(bytes < 0)
        {
            perror("Could not read from input file");
            return 1;
        }

        // Upload the page into the bootloader's page buffer in 8 chunks of 32
        // bytes each (the packet write retries forever on failure).
        for(int i = 0; i < 8; ++i)
        {
            const unsigned int packetSize = 256/8;
            uint16_t off = i * packetSize;
            const uint8_t* packetData = pagebuf + off;

            while(!dxl::writeRegister(BOOTLOADER_DXL_ID, BOOTLOADER_REG_PAGEBUFFER + off, packetData, packetSize))
            {
                fprintf(stderr, "Retrying to write packet %d of page %d\n", i, pageIdx);
                usleep(100 * 1000);
            }
        }

        uint32_t pageAddr = pageIdx * 256;

// Disabled debug path: read the page buffer back BEFORE issuing the write
// command, to distinguish transfer corruption from flash-write corruption.
#if 0
        printf("Read back before write\n");
        for(int i = 0; i < 8; ++i)
        {
            const unsigned int packetSize = 256/8;
            uint16_t off = i * packetSize;

            dxl::DXLAnswer answer;
            while(!dxl::readRegister(BOOTLOADER_DXL_ID, BOOTLOADER_REG_PAGEBUFFER + off, packetSize, &answer))
            {
                fprintf(stderr, "Retrying to write packet %d of page %d\n", i, pageIdx);
                usleep(100 * 1000);
            }

            for(unsigned int j = 0; j < packetSize; ++j)
            {
                if(pagebuf[off + j] != answer.data[j])
                {
                    fprintf(stderr, "Before-Write mismatch at addr 0x%X: write 0x%02X, got 0x%02X\n", pageAddr + j, pagebuf[off + j], answer.data[j]);
                    return 1;
                }
            }
        }
#endif

        // Command: write the buffered page to flash at pageAddr (little-endian
        // address in the 4 bytes after the command code).
        uint8_t cmd[] = {
            BOOTLOADER_COMMAND_WRITE_PAGE,
            (uint8_t)(pageAddr),
            (uint8_t)(pageAddr >> 8),
            (uint8_t)(pageAddr >> 16),
            (uint8_t)(pageAddr >> 24)
        };
        while(!dxl::writeRegister(BOOTLOADER_DXL_ID, BOOTLOADER_REG_COMMAND, cmd, sizeof(cmd)))
        {
            if(++retryCount > 100)
                return 1;

            fprintf(stderr, "Retrying to issue write command...\n");
            usleep(100 * 1000);
        }

        usleep(10 * 1000);

        // Poll the command register until the bootloader reports idle again.
        printf("Waiting for write complete...\n");
        dxl::DXLAnswer answer;
        while(!dxl::readRegister(BOOTLOADER_DXL_ID, BOOTLOADER_REG_COMMAND, 1, &answer, 10))
        {
            if(++retryCount > 100)
                return 1;
            usleep(100 * 1000);
        }

        if(answer.data[0] != BOOTLOADER_COMMAND_IDLE)
        {
            fprintf(stderr, "Invalid bootloader return code while writing: %u\n", answer.data[0]);
            return 1;
        }

        // Command: read the page just written back into the page buffer
        // (reuses cmd[], whose address bytes still hold pageAddr).
        cmd[0] = BOOTLOADER_COMMAND_READ_PAGE;
        while(!dxl::writeRegister(BOOTLOADER_DXL_ID, BOOTLOADER_REG_COMMAND, cmd, sizeof(cmd)))
        {
            if(++retryCount > 100)
                return 1;

            fprintf(stderr, "Retrying to issue read command...\n");
            usleep(100 * 1000);
        }

        printf("Waiting for read complete\n");
        while(!dxl::readRegister(BOOTLOADER_DXL_ID, BOOTLOADER_REG_COMMAND, 1, &answer))
        {
            if(++retryCount > 100)
                return 1;
            usleep(100 * 1000);
        }

        if(answer.data[0] != BOOTLOADER_COMMAND_IDLE)
        {
            fprintf(stderr, "Invalid bootloader return code while writing: %u\n", answer.data[0]);
            return 1;
        }

        // Verify: compare the read-back page buffer against what we sent.
        printf("Read back\n");
        for(int i = 0; i < 8; ++i)
        {
            const unsigned int packetSize = 256/8;
            uint16_t off = i * packetSize;

            dxl::DXLAnswer answer;  // shadows the outer 'answer' intentionally scoped to this chunk
            while(!dxl::readRegister(BOOTLOADER_DXL_ID, BOOTLOADER_REG_PAGEBUFFER + off, packetSize, &answer))
            {
                fprintf(stderr, "Retrying to write packet %d of page %d\n", i, pageIdx);
                usleep(100 * 1000);
            }

            for(unsigned int j = 0; j < packetSize; ++j)
            {
                if(pagebuf[off + j] != answer.data[j])
                {
                    fprintf(stderr, "After-Write mismatch at addr 0x%X: write 0x%02X, got 0x%02X\n", pageAddr + j, pagebuf[off + j], answer.data[j]);
                    return 1;
                }
            }
        }

        printf("Page verified.\n");
        pageIdx++;
    }

    return 0;
}
// Flash the firmware file onto the hand controller: force the controller into
// its bootloader, stream the image via doFlash(), then command the bootloader
// to exit. Returns 0 on success, non-zero on failure.
int flash(const std::string& file)
{
    FILE* f = fopen(file.c_str(), "r");
    if(!f)
    {
        perror("Could not open input file");
        return 1;
    }

    printf("Trying to enter bootloader...\n");
    // NOTE(review): this loop has no retry limit — it spins forever if the
    // controller never answers. Confirm that is acceptable for this tool.
    while(1)
    {
        // Place hand controller in bootloader mode
        // (0xAB into register 0x100 of servo ID 29 — magic values, presumably
        // defined by the hand firmware's register map; verify there).
        uint8_t code = 0xAB;
        dxl::writeRegister(29, 0x100, &code, 1);

        // Check if successful: the bootloader reports BOOTLOADER_MODEL in
        // registers 0/1 (little-endian).
        dxl::DXLAnswer answer;
        if(dxl::readRegister(BOOTLOADER_DXL_ID, 0, 2, &answer))
        {
            printf("Got model: 0x%02X%02X\n", answer.data[1], answer.data[0]);
            if(answer.data[0] == (BOOTLOADER_MODEL & 0xFF)
                && answer.data[1] == (BOOTLOADER_MODEL >> 8))
            {
                break;
            }
        }
    }
    printf("Entered.\n");

    int ret = doFlash(f);

    // FIX: the input file was never closed (resource leak on every call).
    fclose(f);

    if(ret == 0)
    {
        printf("Exiting bootloader...\n");
        // Best-effort: try up to 10 times to leave the bootloader; failure to
        // exit does not invalidate the (already verified) flash contents.
        for(int i = 0; i < 10; ++i)
        {
            uint8_t command = BOOTLOADER_COMMAND_EXIT;
            if(dxl::writeRegister(BOOTLOADER_DXL_ID, BOOTLOADER_REG_COMMAND, &command, 1))
                break;
        }
    }

    return ret;
}
}
|
mauryabip/DealGali | DealGali/ThirdParty/HeaderAnimation/UINavigationBar+PS.h | //
// UINavigationBar+PS.h
// PSGenericClass
//
// Created by Ryan_Man on 16/6/14.
// Copyright © 2016年 Ryan_Man. All rights reserved.
//
#import <UIKit/UIKit.h>
// Background color used by the category helpers (opaque black).
#define NavigationBarBGColor [UIColor colorWithRed:0/255.0f green:0/255.0f blue:0/255.0f alpha:1]

/**
 * Category used by the header-animation code to recolor, fade and shift the
 * navigation bar (e.g. while scrolling).
 */
@interface UINavigationBar (PS)

/// Replaces the bar's background with the given plain color.
- (void)ps_setBackgroundColor:(UIColor *)backgroundColor;

/// Sets the alpha of the bar's elements — presumably titles/buttons, excluding the background; verify in the implementation.
- (void)ps_setElementsAlpha:(CGFloat)alpha;

/// Translates the bar vertically by translationY points.
- (void)ps_setTranslationY:(CGFloat)translationY;

/// Resets the bar's transform back to CGAffineTransformIdentity.
- (void)ps_setTransformIdentity;

/// Presumably restores the bar to its default appearance, undoing the ps_ changes — confirm in the implementation.
- (void)ps_reset;

@end
|
chenth0517/JatWeb | src/test/SendMailTest.java | <gh_stars>0
package test;
import static org.junit.Assert.*;
import org.junit.Test;
import java.security.GeneralSecurityException;
import java.security.Security;
import java.util.Date;
import java.util.Properties;
import javax.mail.Authenticator;
import javax.mail.Message;
import javax.mail.MessagingException;
import javax.mail.PasswordAuthentication;
import javax.mail.Session;
import javax.mail.Transport;
import javax.mail.internet.InternetAddress;
import javax.mail.internet.MimeMessage;
import com.sun.mail.util.MailSSLSocketFactory;
public class SendMailTest {
@Test
public void test() {
{
try {
Security.addProvider(new com.sun.net.ssl.internal.ssl.Provider());
final String SSL_FACTORY = "javax.net.ssl.SSLSocketFactory";
// Get a Properties object
Properties props = System.getProperties();
MailSSLSocketFactory sf = new MailSSLSocketFactory();
sf.setTrustAllHosts(true);
// or
// sf.setTrustedHosts(new String[] { "my-server" });
props.put("mail.smtp.ssl.enable", "true");
// also use following for additional safety
//props.put("mail.smtp.ssl.checkserveridentity", "true");
props.put("mail.smtp.ssl.socketFactory", sf);
props.setProperty("mail.smtp.host", "smtp.126.com");
props.setProperty("mail.smtp.socketFactory.class", SSL_FACTORY);
props.setProperty("mail.smtp.socketFactory.fallback", "false");
props.put("mail.transport.protocol", "smtp");
props.setProperty("mail.smtp.port", "465");
props.setProperty("mail.smtp.socketFactory.port", "465");
props.put("mail.smtp.auth", "true");
final String username = "<EMAIL>";
final String password = "<PASSWORD>";
Session session = Session.getDefaultInstance(props,
new Authenticator() {
protected PasswordAuthentication getPasswordAuthentication() {
return new PasswordAuthentication(username, password);
}
});
Message msg = new MimeMessage(session);
msg.setFrom(new InternetAddress(username));
msg.setRecipients(Message.RecipientType.TO,
InternetAddress.parse("<EMAIL>", false));
msg.setSubject("Hello");
msg.setText("How are you");
msg.setSentDate(new Date());
Transport.send(msg);
System.out.println("Message sent.");
} catch (Exception e) {
e.printStackTrace();
}
}
}
}
|
szotaa/spotify-fetcher | src/main/java/pl/szotaa/spotifyfetcher/dto/SpotifyRecentlyPlayedResponse.java | <reponame>szotaa/spotify-fetcher
package pl.szotaa.spotifyfetcher.dto;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.time.LocalDateTime;
/**
 * Jackson binding for Spotify's "recently played tracks" response
 * (presumably GET /v1/me/player/recently-played — verify against the caller).
 */
@Data
@NoArgsConstructor
@AllArgsConstructor
public class SpotifyRecentlyPlayedResponse {

    // One entry per playback event.
    private Item[] items;

    /** A single playback event: the track plus the timestamp it was played at. */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    public static class Item {
        private Track track;
        // NOTE(review): Spotify's played_at is an ISO-8601 UTC instant ("...Z");
        // binding it to LocalDateTime drops the offset and requires Jackson's
        // JavaTimeModule to be registered — confirm the ObjectMapper setup.
        @JsonProperty("played_at")
        private LocalDateTime playedAt;
    }

    /** Track payload: Spotify id, album, contributing artists, name, popularity. */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    public static class Track {
        @JsonProperty("id")
        private String spotifyId;
        private Album album;
        private Artist[] artists;
        private String name;
        // Spotify popularity score (an integer; presumably 0–100 per the API docs — verify).
        private Integer popularity;
    }

    /** Album payload: Spotify id, name and release date (kept as the raw string). */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    public static class Album {
        @JsonProperty("id")
        private String spotifyId;
        private String name;
        // Raw release date string; precision varies by album, hence no date type.
        @JsonProperty("release_date")
        private String releaseDate;
    }

    /** Artist payload: Spotify id and display name. */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    public static class Artist {
        @JsonProperty("id")
        private String spotifyId;
        private String name;
    }
}
|
flux-capacitor-io/flux-capacitor-client | java-client/src/test/java/io/fluxcapacitor/javaclient/givenwhenthen/GivenWhenThenMultiHandlerTest.java | <reponame>flux-capacitor-io/flux-capacitor-client<filename>java-client/src/test/java/io/fluxcapacitor/javaclient/givenwhenthen/GivenWhenThenMultiHandlerTest.java<gh_stars>1-10
/*
* Copyright (c) 2016-2021 Flux Capacitor.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.fluxcapacitor.javaclient.givenwhenthen;
import io.fluxcapacitor.javaclient.FluxCapacitor;
import io.fluxcapacitor.javaclient.MockException;
import io.fluxcapacitor.javaclient.common.exception.FunctionalException;
import io.fluxcapacitor.javaclient.test.TestFixture;
import io.fluxcapacitor.javaclient.tracking.handling.HandleCommand;
import io.fluxcapacitor.javaclient.tracking.handling.HandleEvent;
import io.fluxcapacitor.javaclient.tracking.handling.HandleQuery;
import lombok.Value;
import org.junit.jupiter.api.Test;
import org.mockito.InOrder;
import static org.hamcrest.CoreMatchers.isA;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.spy;
/**
 * Behavior tests for {@link TestFixture} when several handler types (command,
 * event, query) are registered at once: verifies result/event/exception
 * expectations and the relative order in which handlers are invoked.
 */
class GivenWhenThenMultiHandlerTest {

    // Handlers are Mockito spies so invocation order can be verified with inOrder().
    private final CommandHandler commandHandler = spy(new CommandHandler());
    private final EventHandler eventHandler = spy(new EventHandler());
    private final QueryHandler queryHandler = spy(new QueryHandler());
    private final TestFixture
            subject = TestFixture.create(commandHandler, eventHandler, queryHandler);

    @Test
    void testExpectNoEventsAndNoResult() {
        subject.givenNoPriorActivity().whenCommand(new YieldsNoResult()).expectNoEvents().expectNoResult();
    }

    @Test
    void testExpectResultButNoEvents() {
        subject.givenNoPriorActivity().whenCommand(new YieldsResult()).expectNoEvents().expectResult(isA(String.class));
    }

    @Test
    void testExpectExceptionButNoEvents() {
        subject.givenNoPriorActivity().whenCommand(new YieldsFunctionalException()).expectNoEvents().expectException(FunctionalMockException.class);
    }

    @Test
    void testExpectEventButNoResult() {
        YieldsEventAndNoResult command = new YieldsEventAndNoResult();
        subject.givenNoPriorActivity().whenCommand(command).expectOnlyEvents(command).expectNoResult();
    }

    @Test
    void testExpectResultAndEvent() {
        YieldsEventAndResult command = new YieldsEventAndResult();
        subject.givenNoPriorActivity().whenCommand(command).expectOnlyEvents(command).expectResult(isA(String.class));
    }

    @Test
    void testExpectExceptionAndEvent() {
        // The event published before the exception must still be observable.
        YieldsEventAndException command = new YieldsEventAndException();
        subject.givenNoPriorActivity().whenCommand(command).expectOnlyEvents(command).expectException(Exception.class);
    }

    @Test
    void testWithGivenCommandsAndResult() {
        subject.givenCommands(new YieldsNoResult()).whenCommand(new YieldsResult()).expectResult(isA(String.class)).expectNoEvents();
    }

    @Test
    void testWithGivenCommandsAndNoResult() {
        subject.givenCommands(new YieldsResult()).whenCommand(new YieldsNoResult()).expectNoResult().expectNoEvents();
    }

    @Test
    void testWithGivenCommandsAndEventsFromGiven() {
        // Events published during the "given" phase must not leak into expectations.
        subject.givenCommands(new YieldsEventAndResult()).whenCommand(new YieldsNoResult()).expectNoResult().expectNoEvents();
    }

    @Test
    void testWithGivenCommandsAndEventsFromCommand() {
        YieldsEventAndNoResult command = new YieldsEventAndNoResult();
        subject.givenCommands(new YieldsNoResult()).whenCommand(command).expectNoResult().expectEvents(command);
    }

    @Test
    void testWithMultipleGivenCommands() {
        YieldsEventAndNoResult command = new YieldsEventAndNoResult();
        subject.givenCommands(new YieldsNoResult(), new YieldsResult(), command, command).whenCommand(command).expectNoResult().expectOnlyEvents(command);
    }

    @Test
    void testAndGivenCommands() {
        // Chained givenCommands() calls must replay commands in declaration order.
        subject.givenCommands(new YieldsResult()).givenCommands(new YieldsEventAndNoResult()).whenCommand(new YieldsNoResult()).expectNoResult().expectNoEvents();
        InOrder inOrder = inOrder(commandHandler);
        inOrder.verify(commandHandler).handle(new YieldsResult());
        inOrder.verify(commandHandler).handle(new YieldsEventAndNoResult());
        inOrder.verify(commandHandler).handle(new YieldsNoResult());
    }

    @Test
    void testExpectCommands() {
        // A String event makes EventHandler dispatch a YieldsNoResult command.
        subject.whenEvent("some event").expectCommands(new YieldsNoResult()).expectNoEvents().expectNoResult();
    }

    @Test
    void testExpectCommandsAndIndirectEvents() {
        // An Integer event triggers a command that in turn publishes an event.
        subject.whenEvent(123).expectNoResult().expectCommands(new YieldsEventAndResult()).expectEvents(new YieldsEventAndResult());
    }

    @Test
    void testQuery() {
        subject.whenQuery("bla").expectResult("bla");
    }

    @Test
    void testFailingQuery() {
        subject.whenQuery(1L).expectException(Exception.class);
    }

    /** Command handlers covering every result/event/exception combination. */
    private static class CommandHandler {
        @HandleCommand
        public void handle(YieldsNoResult command) {
            //no op
        }

        @HandleCommand
        public String handle(YieldsResult command) {
            return "result";
        }

        @HandleCommand
        public void handle(YieldsTechnicalException command) {
            throw new MockException();
        }

        @HandleCommand
        public void handle(YieldsFunctionalException command) {
            throw new FunctionalMockException();
        }

        @HandleCommand
        public void handle(YieldsEventAndNoResult command) {
            FluxCapacitor.publishEvent(command);
        }

        @HandleCommand
        public String handle(YieldsEventAndResult command) {
            FluxCapacitor.publishEvent(command);
            return "result";
        }

        @HandleCommand
        public void handle(YieldsEventAndException command) {
            // Publish first, then fail — lets tests assert events survive exceptions.
            FluxCapacitor.publishEvent(command);
            throw new MockException();
        }
    }

    /** Event handlers that fan out into commands (String → command, Integer → command+event). */
    private static class EventHandler {
        @HandleEvent
        public void handle(String event) {
            FluxCapacitor.sendCommand(new YieldsNoResult());
        }

        @HandleEvent
        public void handle(Integer event) throws Exception {
            FluxCapacitor.sendCommand(new YieldsEventAndResult()).get();
        }
    }

    /** Query handlers: String echoes back, Long always fails. */
    private static class QueryHandler {
        @HandleQuery
        public String handle(String query) {
            return query;
        }

        @HandleQuery
        public String handleButFail(Long query) {
            throw new MockException();
        }
    }

    // Marker payloads; @Value supplies equals/hashCode so Mockito argument
    // matching and expectEvents comparisons work by value.
    @Value
    private static class YieldsNoResult {
    }

    @Value
    private static class YieldsResult {
    }

    @Value
    private static class YieldsTechnicalException {
    }

    @Value
    private static class YieldsFunctionalException {
    }

    @Value
    private static class YieldsEventAndNoResult {
    }

    @Value
    private static class YieldsEventAndResult {
    }

    @Value
    private static class YieldsEventAndException {
    }

    /** Functional (business) exception, as opposed to the technical MockException. */
    private static class FunctionalMockException extends FunctionalException {
    }
}
|
manovotn/core | tests-arquillian/src/test/java/org/jboss/weld/tests/alternatives/weld930/AlternativeProducer.java | package org.jboss.weld.tests.alternatives.weld930;
import javax.enterprise.inject.Alternative;
import javax.enterprise.inject.Produces;
import javax.inject.Named;
/**
 * CDI {@code @Alternative} producer of the named {@code "product"} bean,
 * used to override the default producer in the WELD-930 test.
 *
 * @author <NAME>
 */
@Alternative
public class AlternativeProducer {

    /** Produces the alternative {@link Product} instance. */
    @Produces
    @Named("product")
    public Product produce() {
        // FIX: dropped the raw "throws Exception" — the body throws nothing checked,
        // and declaring raw Exception forces needless handling on the container/callers.
        return new Product("Alternative");
    }
}
|
iliedorobat/LIDO-Parser | src/ro/webdata/parser/xml/lido/core/complex/rightsComplexType/RightsComplexTypeDAO.java | <gh_stars>0
package ro.webdata.parser.xml.lido.core.complex.rightsComplexType;
import org.w3c.dom.Node;
/** DAO that maps a parsed {@code lido:rightsComplexType} DOM node onto its model object. */
public interface RightsComplexTypeDAO {
    /**
     * Builds a {@link RightsComplexType} from the given DOM node.
     *
     * @param node <b>lido:rightsComplexType</b> node type.
     * @return <b>RightsComplexType</b>
     */
    RightsComplexType getRightsComplexType(Node node);
}
|
TonyTangAndroid/strictmode-notifier | library-common/src/main/java/com/nshmura/strictmodenotifier/CustomAction.java | <reponame>TonyTangAndroid/strictmode-notifier<gh_stars>100-1000
package com.nshmura.strictmodenotifier;
/** Hook invoked by the notifier when a StrictMode violation is detected. */
public interface CustomAction {
    /** Called once per detected violation with its details. */
    void onViolation(StrictModeViolation violation);
}
rubis-lab/popt-dev | samples/omp/include/sched_core.hpp | <filename>samples/omp/include/sched_core.hpp
#ifndef __SCHED_CORE_H__
#define __SCHED_CORE_H__

#include <linux/kernel.h>
#include <linux/types.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <vector>

// glibc exposes no gettid() wrapper on older systems; call the raw syscall.
#define gettid() syscall(__NR_gettid)

// contains thread-specific arguments
struct thr_arg {
    int thr_id;     // thread index within its task
    int exec_time;  // per-thread execution time budget (units not defined here — TODO confirm)
};

// contains shared arguments among the threads and the vector of threads
struct task_arg {
    int option;     // NOTE(review): meaning defined by the consumer of this struct — verify
    int task_id;
    int parent;
    int deadline;   // task deadline (presumably same units as period — TODO confirm)
    int period;
    std::vector<thr_arg> thr_set;  // per-thread arguments belonging to this task
};

// Userspace mirror of the kernel's struct sched_attr, as consumed by the
// sched_setattr(2)/sched_getattr(2) syscalls (e.g. for SCHED_DEADLINE).
struct sched_attr {
    __u32 size;          // sizeof(sched_attr); lets the kernel version the ABI
    __u32 sched_policy;  // scheduling policy (SCHED_NORMAL/FIFO/RR/DEADLINE, ...)
    __u64 sched_flags;

    /* SCHED_NORMAL, SCHED_BATCH */
    __s32 sched_nice;

    /* SCHED_FIFO, SCHED_RR */
    __u32 sched_priority;

    /* SCHED_DEADLINE (nsec) */
    __u64 sched_runtime;
    __u64 sched_deadline;
    __u64 sched_period;
};

// Thin wrappers around the raw syscalls (they have no glibc wrapper);
// implemented elsewhere in the project.
int sched_setattr(pid_t pid,
                  const struct sched_attr *attr,
                  unsigned int flags);

int sched_getattr(pid_t pid,
                  struct sched_attr *attr,
                  unsigned int size,
                  unsigned int flags);

#endif
|
heyImDrew/edupro | frontend/src/actions/cards_add.js | import axios from "axios";
import {authConstants} from "./types";
import {toast} from "react-toastify";
const HOST = "http://localhost:9000/api/"

// Redux thunk: create a new card on the backend and report the outcome.
// Dispatches CARDS_ADD_SUCCESS with the new card's id, or CARDS_ADD_FAIL.
export const cards_add = (data) => async dispatch => {
    // Attach the stored JWT so the API accepts the request.
    const headers = {
        'Content-Type': "application/json",
        'Authorization': 'Bearer ' + localStorage.getItem('token')
    };

    // Shape the payload the cards endpoint expects.
    const payload = {
        "desk_id": data.desk_id,
        "question": data.card_q,
        "answer": data.card_a,
    };

    try {
        const res = await axios.post(HOST + "cards/add/", payload, { headers });
        toast.success("Card successfully added!")
        dispatch({
            type: authConstants.CARDS_ADD_SUCCESS,
            payload: res.data.card_id
        })
    } catch (err) {
        dispatch({
            type: authConstants.CARDS_ADD_FAIL
        })
    }
}
rusucosmin/Cplusplus | Lumanari/main.cpp | #include <fstream>
using namespace std;
// File-based I/O: the local cin/cout intentionally shadow the std streams.
ifstream cin("lumanari.in");
ofstream cout("lumanari.out");

int a[200005];   // a[j] = remaining units of candle j (1-based)
int n = 0;       // number of fully served days
int m;           // number of candles
int ok = 0;      // cumulative count of candle units burned so far

int main()
{
    cin >> m;
    for(int i = 1; i <= m; ++i)
        cin >> a[i];

    // Day i: scan candles left to right, burning one unit from each lit candle,
    // until the cumulative burn count reaches i.
    for(int i = 1; i <= m; ++i)
    {
        // FIX: the original condition was "j<=m, ok<i" — the comma operator
        // discards "j<=m", so when too few candles remained, j ran past the end
        // of a[] (out-of-bounds reads / effectively an infinite loop). With
        // "&&" the scan stays inside the array and the "else break" below
        // terminates the search; successful days behave exactly as before.
        for(int j = 1; j <= m && ok < i; ++j)
            if(a[j])
            {
                --a[j];
                ++ok;
            }
        if(ok == i)
            ++n;
        else break;
    }

    cout << n - 1 << "\n";
    cin.close();
    cout.close();
    return 0;
}
|
tadejsv/catalyst | catalyst/core/callback.py | <reponame>tadejsv/catalyst
from typing import TYPE_CHECKING
from enum import IntFlag
if TYPE_CHECKING:
from catalyst.core.runner import IRunner
class ICallback:
    """A callable abstraction for deep learning runs."""

    def on_experiment_start(self, runner: "IRunner") -> None:
        """Called once, right before the experiment begins."""

    def on_epoch_start(self, runner: "IRunner") -> None:
        """Called at the beginning of every epoch."""

    def on_loader_start(self, runner: "IRunner") -> None:
        """Called before iterating over a loader."""

    def on_batch_start(self, runner: "IRunner") -> None:
        """Called before every batch is handled."""

    def on_batch_end(self, runner: "IRunner") -> None:
        """Called after every batch has been handled."""

    def on_loader_end(self, runner: "IRunner") -> None:
        """Called after a loader has been fully consumed."""

    def on_epoch_end(self, runner: "IRunner") -> None:
        """Called at the end of every epoch."""

    def on_experiment_end(self, runner: "IRunner") -> None:
        """Called once, after the experiment has finished."""

    def on_exception(self, runner: "IRunner") -> None:
        """Called when an exception interrupts the run."""
class CallbackOrder(IntFlag):
    """Callback usage order during training.

    Catalyst executes Callbacks with low `CallbackOrder`
    **before** Callbacks with high `CallbackOrder`.

    Predefined orders:

    - **Internal** (0) - some Catalyst Extras,
      like PhaseCallbacks (used in GANs).
    - **Metric** (10) - Callbacks with metrics and losses computation.
    - **MetricAggregation** (20) - metrics aggregation callbacks,
      like sum different losses into one.
    - **Backward** (30) - backward step.
    - **Optimizer** (40) - optimizer step,
      requires computed metrics for optimization.
    - **Scheduler** (50) - scheduler step,
      in `ReduceLROnPlateau` case
      requires computed validation metrics for optimizer schedule.
    - **Checkpoint** (60) - checkpoint step.
    - **External** (100) - additional callbacks with custom logic.

    Nevertheless, you always can create CustomCallback with any order,
    for example::

        >>> class MyCustomCallback(Callback):
        >>>     def __init__(self):
        >>>         super().__init__(order=13)
        >>>     ...
        # MyCustomCallback will be executed after all `Metric`-Callbacks
        # but before all `MetricAggregation`-Callbacks.
    """

    Internal = internal = 0
    Metric = metric = 10
    MetricAggregation = metric_aggregation = 20
    Backward = backward = 30
    Optimizer = optimizer = 40
    Scheduler = scheduler = 50
    # FIX: was 50, which contradicted the docstring ("Checkpoint (60)") and made
    # checkpoint callbacks tie with scheduler callbacks instead of running after them.
    Checkpoint = checkpoint = 60
    External = external = 100
class Callback(ICallback):
    """
    Base class for customizing the run logic of an experiment.

    Callbacks hook into every stage of the training loop — experiment,
    epoch, loader and batch start/end, plus exception handling; see
    :py:mod:`catalyst.core.callback.ICallback` for the full event set.

    Args:
        order: execution priority, a flag from ``CallbackOrder``;
            callbacks with a lower order run first.

    Implementations to check out for more details:

    - :py:mod:`catalyst.callbacks.criterion.CriterionCallback`
    - :py:mod:`catalyst.callbacks.optimizer.OptimizerCallback`
    - :py:mod:`catalyst.callbacks.scheduler.SchedulerCallback`
    - :py:mod:`catalyst.callbacks.checkpoint.CheckpointCallback`

    .. note::
        To learn more about Catalyst Core concepts, please check out

        - :py:mod:`catalyst.core.runner.IRunner`
        - :py:mod:`catalyst.core.engine.Engine`
        - :py:mod:`catalyst.core.callback.Callback`
    """

    def __init__(self, order: int):
        """Store the execution order."""
        self.order = order
class IMetricCallback(Callback):
    """Interface for metric callbacks — abstraction over the metric step."""

    def __init__(self):
        """Register at ``CallbackOrder.Metric``."""
        super().__init__(CallbackOrder.Metric)
class ICriterionCallback(IMetricCallback):
    """Interface for criterion callbacks — abstraction over the criterion step."""
class IBackwardCallback(Callback):
    """Interface for backward callbacks — abstraction over the backward step."""

    def __init__(self):
        """Register at ``CallbackOrder.Backward``."""
        super().__init__(CallbackOrder.Backward)
class IOptimizerCallback(Callback):
    """Interface for optimizer callbacks — abstraction over the optimizer step."""

    def __init__(self):
        """Register at ``CallbackOrder.Optimizer``."""
        super().__init__(CallbackOrder.Optimizer)
class ISchedulerCallback(Callback):
    """Interface for scheduler callbacks — abstraction over the scheduler step."""

    def __init__(self):
        """Register at ``CallbackOrder.Scheduler``."""
        super().__init__(CallbackOrder.Scheduler)
class ICheckpointCallback(Callback):
    """Interface for checkpoint callbacks — abstraction over the checkpoint step."""

    def __init__(self):
        """Register at ``CallbackOrder.Checkpoint``."""
        super().__init__(CallbackOrder.Checkpoint)
class CallbackWrapper(Callback):
    """Conditionally enable or disable a wrapped callback's execution.

    Args:
        base_callback: the callback to delegate to
        enable_callback: when ``True`` (the default) every event is
            forwarded to the wrapped callback; when ``False`` events
            are silently skipped
    """

    def __init__(self, base_callback: Callback, enable_callback: bool = True):
        """Validate the wrapped callback and inherit its order."""
        if base_callback is None or not isinstance(base_callback, Callback):
            raise ValueError(f"Expected callback but got - {type(base_callback)}!")
        super().__init__(order=base_callback.order)
        self.callback = base_callback
        self._is_enabled = enable_callback

    def on_experiment_start(self, runner: "IRunner") -> None:
        """Forward the experiment-start event when enabled."""
        if not self._is_enabled:
            return
        self.callback.on_experiment_start(runner)

    def on_epoch_start(self, runner: "IRunner") -> None:
        """Forward the epoch-start event when enabled."""
        if not self._is_enabled:
            return
        self.callback.on_epoch_start(runner)

    def on_loader_start(self, runner: "IRunner") -> None:
        """Forward the loader-start event when enabled."""
        if not self._is_enabled:
            return
        self.callback.on_loader_start(runner)

    def on_batch_start(self, runner: "IRunner") -> None:
        """Forward the batch-start event when enabled."""
        if not self._is_enabled:
            return
        self.callback.on_batch_start(runner)

    def on_batch_end(self, runner: "IRunner") -> None:
        """Forward the batch-end event when enabled."""
        if not self._is_enabled:
            return
        self.callback.on_batch_end(runner)

    def on_loader_end(self, runner: "IRunner") -> None:
        """Forward the loader-end event when enabled."""
        if not self._is_enabled:
            return
        self.callback.on_loader_end(runner)

    def on_epoch_end(self, runner: "IRunner") -> None:
        """Forward the epoch-end event when enabled."""
        if not self._is_enabled:
            return
        self.callback.on_epoch_end(runner)

    def on_experiment_end(self, runner: "IRunner") -> None:
        """Forward the experiment-end event when enabled."""
        if not self._is_enabled:
            return
        self.callback.on_experiment_end(runner)

    def on_exception(self, runner: "IRunner") -> None:
        """Forward the exception event when enabled."""
        if not self._is_enabled:
            return
        self.callback.on_exception(runner)
# Public surface of this module (consumed by `from ... import *` and the docs).
__all__ = [
    "ICallback",
    "Callback",
    "CallbackOrder",
    "IMetricCallback",
    "ICriterionCallback",
    "IBackwardCallback",
    "IOptimizerCallback",
    "ISchedulerCallback",
    "ICheckpointCallback",
    "CallbackWrapper",
]
|
maiduoduo/EJoy | app/src/main/java/com/ejoy/tool/ui/douyin/data/adapter/DouyinVideoAdapter.java | package com.ejoy.tool.ui.douyin.data.adapter;
import android.content.Context;
import android.view.View;
import android.view.animation.Animation;
import android.view.animation.LinearInterpolator;
import android.view.animation.RotateAnimation;
import android.widget.ImageView;
import android.widget.RelativeLayout;
import android.widget.TextView;
import com.airbnb.lottie.LottieAnimationView;
import com.chad.library.adapter.base.BaseQuickAdapter;
import com.chad.library.adapter.base.BaseViewHolder;
import com.ejoy.tool.R;
import com.ejoy.tool.scaffold.utils.GlideUtils;
import com.ejoy.tool.ui.douyin.data.constant.AutoLinkHerfManager;
import com.ejoy.tool.ui.douyin.data.constant.OnVideoControllerListener;
import com.module.ires.bean.bean.DouyinVideoBean;
import com.module.ires.bean.utils.EDensityUtils;
import com.module.ires.bean.utils.EResUtils;
import com.module.iviews.image.DouyinCircleImageView;
import com.module.iviews.textview.AutoLinkTextView;
import com.module.iviews.textview.IconFontTextView;
import com.module.iviews.view.DouyinLikeView;
import java.util.List;
import static android.view.View.GONE;
import static android.view.View.INVISIBLE;
import static android.view.View.VISIBLE;
import static android.view.animation.Animation.INFINITE;
/**
 * Feed adapter for the Douyin-style short-video list.
 * Binds one {@link DouyinVideoBean} per page: cover image, author info,
 * like/comment/share counters, rotating record animation and the
 * double-tap / button like interactions.
 */
public class DouyinVideoAdapter extends BaseQuickAdapter<DouyinVideoBean, BaseViewHolder> {
    private static final String TAG = "adapter";
    private Context mContext;
    private OnVideoControllerListener listener;

    public DouyinVideoAdapter(int layoutResId, List<DouyinVideoBean> data, Context context) {
        super(layoutResId, data);
        mContext = context;
    }

    /** Registers the controller callback for head/comment/share/like clicks. */
    public void setListener(OnVideoControllerListener listener) {
        this.listener = listener;
    }

    @Override
    protected void convert(BaseViewHolder helper, DouyinVideoBean item) {
        DouyinLikeView likeView = helper.getView(R.id.likeview);
        ImageView ivCover = helper.getView(R.id.iv_cover);
        ivCover.setImageResource(item.getCoverRes());
        AutoLinkTextView autoLinkTextView = helper.getView(R.id.tv_content);
        DouyinCircleImageView ivHead = helper.getView(R.id.iv_head);
        LottieAnimationView animationView = helper.getView(R.id.lottie_anim);
        RelativeLayout rlLike = helper.getView(R.id.rl_like);
        IconFontTextView ivComment = helper.getView(R.id.iv_comment);
        IconFontTextView ivShare = helper.getView(R.id.iv_share);
        RelativeLayout rlRecord = helper.getView(R.id.rl_record);
        TextView tvNickname = helper.getView(R.id.tv_nickname);
        DouyinCircleImageView ivHeadAnim = helper.getView(R.id.iv_head_anim);
        IconFontTextView ivLike = helper.getView(R.id.iv_like);
        TextView tvLikecount = helper.getView(R.id.tv_likecount);
        TextView tvCommentcount = helper.getView(R.id.tv_commentcount);
        TextView tvSharecount = helper.getView(R.id.tv_sharecount);
        ImageView ivFocus = helper.getView(R.id.iv_focus);

        // Endless rotation for the vinyl-record button.
        setRotateAnim(rlRecord);

        // Bind user / video data.
        GlideUtils.showImage(mContext, item.getUserBean().getHeadUrl(), ivHead);
        tvNickname.setText("@" + item.getUserBean().getNickName());
        AutoLinkHerfManager.setContent(item.getContent(), autoLinkTextView);
        GlideUtils.showImage(mContext, item.getUserBean().getHeadUrl(), ivHeadAnim);
        tvLikecount.setText(EDensityUtils.numberFilter(item.getLikeCount()));
        tvCommentcount.setText(EDensityUtils.numberFilter(item.getCommentCount()));
        tvSharecount.setText(EDensityUtils.numberFilter(item.getShareCount()));
        animationView.setAnimation("like.json");

        // Like state tints the heart icon.
        if (item.isLiked()) {
            ivLike.setTextColor(EResUtils.getColor(mContext, R.color.color_douyin_FF0041));
        } else {
            ivLike.setTextColor(EResUtils.getColor(mContext, R.color.white));
        }
        // Focus (follow) state hides the "+" badge once followed.
        if (item.isFocused()) {
            ivFocus.setVisibility(GONE);
        } else {
            ivFocus.setVisibility(VISIBLE);
        }

        // Double-tap like: toggles state without notifying the controller.
        likeView.setOnLikeListener(() -> toggleLike(item, animationView, ivLike));

        // Controller callbacks; guarded so an unset listener cannot NPE.
        ivHead.setOnClickListener(v -> {
            if (listener != null) {
                listener.onHeadClick();
            }
        });
        ivComment.setOnClickListener(v -> {
            if (listener != null) {
                listener.onCommentClick();
            }
        });
        ivShare.setOnClickListener(v -> {
            if (listener != null) {
                listener.onShareClick();
            }
        });
        rlLike.setOnClickListener(v -> {
            if (listener != null) {
                listener.onLikeClick();
            }
            toggleLike(item, animationView, ivLike);
        });
        ivFocus.setOnClickListener(v -> {
            if (!item.isFocused()) {
                // BUG FIX: the original called item.setLiked(true) here, which
                // flipped the like state instead of the follow state.
                item.setFocused(true);
                ivFocus.setVisibility(GONE);
            }
        });
    }

    /**
     * Flips the like state of {@code item} and updates the heart color and
     * Lottie burst accordingly (shared by double-tap and the like button).
     */
    private void toggleLike(DouyinVideoBean item, LottieAnimationView animationView, IconFontTextView ivLike) {
        if (!item.isLiked()) {
            animationView.setVisibility(VISIBLE);
            animationView.playAnimation();
            ivLike.setTextColor(EResUtils.getColor(mContext, R.color.color_douyin_FF0041));
        } else {
            animationView.setVisibility(INVISIBLE);
            ivLike.setTextColor(EResUtils.getColor(mContext, R.color.white));
        }
        item.setLiked(!item.isLiked());
    }

    /**
     * Starts an infinite 0-359 degree rotation (8 s per turn, linear) on the
     * record button container.
     *
     * @param rlRecord container view to animate
     */
    private void setRotateAnim(RelativeLayout rlRecord) {
        RotateAnimation rotateAnimation = new RotateAnimation(0, 359,
                Animation.RELATIVE_TO_SELF, 0.5f, Animation.RELATIVE_TO_SELF, 0.5f);
        rotateAnimation.setRepeatCount(INFINITE);
        rotateAnimation.setDuration(8000);
        rotateAnimation.setInterpolator(new LinearInterpolator());
        rlRecord.startAnimation(rotateAnimation);
    }
}
|
octoblu/bluprinter | src/components/RunPageHeader/index.spec.js | <filename>src/components/RunPageHeader/index.spec.js
import chai, { expect } from 'chai'
import chaiEnzyme from 'chai-enzyme'
import React from 'react'
import sinon from 'sinon'
import sinonChai from 'sinon-chai'
import { mount, shallow } from 'enzyme'
import RunPageHeader from './'
// Register the enzyme and sinon assertion plugins with chai.
chai.use(chaiEnzyme())
chai.use(sinonChai)
describe('<RunPageHeader />', () => {
describe('when given a device with name and logo', () => {
it('should render the logo', () => {
const device = { logo: 'icons.octoblu.com/cats', name: 'cats' }
const sut = shallow(<RunPageHeader device={device} />)
expect(sut.containsMatchingElement(
<img src="icons.octoblu.com/cats" alt="cats" />
)).to.equal(true)
})
})
describe('when the device is online', () => {
it('should render the online tag', () => {
const device = { logo: 'icons.octoblu.com/cats', name: 'cats', online: true }
const sut = shallow(<RunPageHeader device={device} />)
expect(sut.containsMatchingElement(
<span>Online</span>
)).to.equal(true)
})
})
describe('when the device is offline', () => {
it('should render the offline tag', () => {
const device = { logo: 'icons.octoblu.com/cats', name: 'cats', online: false }
const sut = shallow(<RunPageHeader device={device} />)
expect(sut.containsMatchingElement(
<span>Offline</span>
)).to.equal(true)
})
})
})
|
denkasyanov/education-backend | src/tinkoff/tests/credit_client/tests_tinkoff_credit_creation.py | <reponame>denkasyanov/education-backend<filename>src/tinkoff/tests/credit_client/tests_tinkoff_credit_creation.py
import pytest
# Every test in this module needs database access.
pytestmark = [pytest.mark.django_db]
@pytest.fixture(autouse=True)
def api_call(mocker):
    # Stub the outgoing Tinkoff HTTP call; the mock records the payload and
    # returns a canned response so no network traffic happens in tests.
    return mocker.patch('tinkoff.credit.TinkoffCredit.call', return_value={'link': '__mocked'})
def test_items(tinkoff):
    """A single credit item is built from the order."""
    items = tinkoff.get_items()
    assert len(items) == 1
    first = items[0]
    assert first['name'] == 'Предоставление доступа к записи курса «Пентакли и Тентакли»'
    assert first['quantity'] == 1
    assert first['price'] == 100500
def test_user(tinkoff):
    """The user payload carries the customer's name."""
    fio = tinkoff.get_user()['contact']['fio']
    assert fio['firstName'] == '<NAME>'
    assert fio['lastName'] == 'Пейзенгольц'
def test_order_data(tinkoff, order, api_call):
    """The payload sent to Tinkoff mirrors the order contents."""
    tinkoff.get_initial_payment_url()
    payload = api_call.call_args[1]['payload']
    assert payload['shopId'] == '1234'
    assert payload['showcaseId'] == '123-45'
    assert payload['sum'] == 100500
    assert payload['orderNumber'] == order.id
    assert payload['items'][0]['name'] == 'Предоставление доступа к записи курса «Пентакли и Тентакли»'
    assert payload['values']['contact']['fio']['firstName'] == '<NAME>'
def test_return_value(tinkoff):
    """The link from the (mocked) API response is returned unchanged."""
    assert tinkoff.get_initial_payment_url() == '__mocked'
|
MinsxCloud/minsx-authorization-server | minsx-authorization-entity/src/main/java/com/minsx/authorization/entity/base/type/CustomSettingState.java | <gh_stars>1-10
package com.minsx.authorization.entity.base.type;
/**
 * Tri-state flag for a custom setting: enabled (1), disabled (-1) or unknown (0).
 */
public enum CustomSettingState {
    ENABLE(1), DISABLE(-1), UNKNOWN(0);

    Integer value;

    CustomSettingState(Integer value) {
        this.value = value;
    }

    public Integer getValue() {
        return value;
    }

    public void setValue(Integer value) {
        this.value = value;
    }

    /**
     * Maps a stored integer back to its state.
     * FIX: a {@code null} value used to throw a NullPointerException on
     * auto-unboxing inside the switch; it now maps to {@link #UNKNOWN}.
     *
     * @param value stored value, may be {@code null}
     * @return matching state, {@link #UNKNOWN} for null/unrecognized values
     */
    public static CustomSettingState getCustomSettingState(Integer value) {
        if (value == null) {
            return UNKNOWN;
        }
        switch (value) {
            case 1:
                return ENABLE;
            case -1:
                return DISABLE;
            default:
                return UNKNOWN;
        }
    }
}
|
longzuo/SimpleKeyValueDb | net/Epoll.hpp | <filename>net/Epoll.hpp
#ifndef SDB_EPOLL_HPP
#define SDB_EPOLL_HPP
#include <errno.h>
#include <vector>
#include "../core/Exception.hpp"
#include "IOEvent.hpp"
#include "NetLib.hpp"
#include "unix_header.h"
namespace Net {
/**
 * Growable buffer of epoll_event structures handed to epoll_wait.
 * Invariant: length <= capcity; maxEvent counts registered events.
 */
class EpollEvents {
   public:
    struct epoll_event* events;  // heap array owned by this object
    int length;                  // number of fired events currently stored
    int capcity;                 // allocated size of `events` (historical spelling kept)
    int maxEvent;                // number of registered events (size hint for epoll_wait)
    int size() { return length; }
    int maxEvents() { return maxEvent; }
    void reserve(int);
    void resize(int);
    void addEvent();
    void clear() { length = 0; }
    // Initializer order now matches declaration order (avoids -Wreorder).
    EpollEvents() : events(nullptr), length(0), capcity(0), maxEvent(0) {}
    // FIX: the class had no destructor, so the events array was leaked.
    ~EpollEvents() { delete[] events; }
    EpollEvents(const EpollEvents&);
    EpollEvents(EpollEvents&&);
    EpollEvents& operator=(const EpollEvents&);
    EpollEvents& operator=(EpollEvents&&);
    epoll_event& operator[](int);
};
// Grow the backing array so it can hold at least `nsize` events.
// FIX: the early return used to compare against `length`, so a reserve()
// with length < nsize <= capcity reallocated needlessly and could shrink
// the buffer; standard reserve semantics never shrink.
void EpollEvents::reserve(int nsize) {
    if (nsize <= capcity) {
        return;
    }
    epoll_event* nptr = new epoll_event[nsize];
    for (int i = 0; i < length; i++) {
        nptr[i] = events[i];
    }
    delete[] events;  // delete[] on nullptr is a no-op
    events = nptr;
    capcity = nsize;
}
// Set the logical number of stored events, growing the allocation on demand.
void EpollEvents::resize(int nsize) {
    if (nsize > capcity) {
        reserve(nsize);
    }
    length = nsize;
}
// Account for one more registered event, doubling the allocation when full.
void EpollEvents::addEvent() {
    int needed = maxEvent + 1;
    if (needed > capcity) {
        reserve(needed * 2);
    }
    maxEvent = needed;
}
// Copy constructor.
// FIX: the original read the *uninitialized* members maxEvent/length/events
// inside reserve() and the copy loop (undefined behavior). Members are now
// value-initialized first, and we reserve room for both the registered and
// the stored event counts.
EpollEvents::EpollEvents(const EpollEvents& oth)
    : events(nullptr), length(0), capcity(0), maxEvent(0) {
    int need = oth.maxEvent > oth.length ? oth.maxEvent : oth.length;
    reserve(need);
    for (int i = 0; i < oth.length; i++) {
        events[i] = oth.events[i];
    }
    length = oth.length;
    maxEvent = oth.maxEvent;
}
// Move constructor: steal the buffer from `oth`, leaving it empty but valid.
EpollEvents::EpollEvents(EpollEvents&& oth)
    : events(oth.events),
      length(oth.length),
      capcity(oth.capcity),
      maxEvent(oth.maxEvent) {
    oth.events = nullptr;
    oth.length = 0;
    oth.capcity = 0;
    oth.maxEvent = 0;
}
// Copy assignment.
// FIX: added a self-assignment guard, and reserve now covers the larger of
// oth's registered/stored counts (the original skipped reserve when
// this->maxEvent was already >= oth.maxEvent, which could under-allocate).
EpollEvents& EpollEvents::operator=(const EpollEvents& oth) {
    if (this == &oth) {
        return *this;
    }
    int need = oth.maxEvent > oth.length ? oth.maxEvent : oth.length;
    reserve(need);
    for (int i = 0; i < oth.length; i++) {
        events[i] = oth.events[i];
    }
    length = oth.length;
    maxEvent = oth.maxEvent;
    return *this;
}
// Move assignment.
// FIX: the original leaked the currently owned buffer and did not guard
// against self-move; both are handled now.
EpollEvents& EpollEvents::operator=(EpollEvents&& oth) {
    if (this == &oth) {
        return *this;
    }
    delete[] events;
    events = oth.events;
    length = oth.length;
    capcity = oth.capcity;
    maxEvent = oth.maxEvent;
    oth.events = nullptr;
    oth.length = 0;
    oth.capcity = 0;
    oth.maxEvent = 0;
    return *this;
}
// Bounds-checked access to a fired event.
// FIX: negative indices were previously accepted and read out of bounds.
epoll_event& EpollEvents::operator[](int pos) {
    if (pos < 0 || pos >= length) {
        throw NetException("out of index of epoll events");
    }
    return events[pos];
}
// Thin RAII wrapper around a Linux epoll instance. Owns the epoll file
// descriptor and closes it on destruction.
// NOTE(review): the class is copyable by default; copying would double-close
// epfd — presumably instances are never copied. Confirm with callers.
class Epoll final {
   private:
    int epfd;  // epoll file descriptor, -1 when creation failed

   public:
    Epoll();
    ~Epoll();
    bool addEvent(IOEvent&);
    bool delEvent(IOEvent&);
    bool modifyEvent(IOEvent&);
    int waitEvents(EpollEvents&, int);
};
// Create the epoll instance; the size hint (1024) is ignored by modern kernels.
Epoll::Epoll() : epfd(epoll_create(1024)) {
    if (epfd == -1) {
        throw NetException("can not create epoll!");
    }
}
// Close the epoll descriptor if it was successfully created.
Epoll::~Epoll() {
    if (epfd >= 0) {
        ::close(epfd);
    }
}
// Register `e` with the epoll instance; returns false when epoll_ctl fails.
bool Epoll::addEvent(IOEvent& e) {
    epoll_event ev = {0, 0};
    int mask = e.getmask();
    if (mask & EventStatus::EVENT_READABLE) {
        ev.events |= EPOLLIN;
    }
    if (mask & EventStatus::EVENT_WRITEABLE) {
        ev.events |= EPOLLOUT;
    }
    ev.data.fd = e.getfd();
    return epoll_ctl(this->epfd, EPOLL_CTL_ADD, e.getfd(), &ev) != -1;
}
// Unregister `e` from the epoll instance; returns false when epoll_ctl fails.
bool Epoll::delEvent(IOEvent& e) {
    int rc = epoll_ctl(this->epfd, EPOLL_CTL_DEL, e.getfd(), nullptr);
    return rc != -1;
}
// Update the interest mask for `e`; an empty mask removes the registration.
bool Epoll::modifyEvent(IOEvent& e) {
    epoll_event ev = {0, 0};
    int mask = e.getmask();
    if (mask & EventStatus::EVENT_READABLE) {
        ev.events |= EPOLLIN;
    }
    if (mask & EventStatus::EVENT_WRITEABLE) {
        ev.events |= EPOLLOUT;
    }
    ev.data.fd = e.getfd();
    int op = (mask != EventStatus::EVENT_NONE) ? EPOLL_CTL_MOD : EPOLL_CTL_DEL;
    return epoll_ctl(this->epfd, op, e.getfd(), &ev) != -1;
}
// Block for up to `timeout` ms; fills `fired` and returns the event count.
int Epoll::waitEvents(EpollEvents& fired, int timeout) {
    int count = epoll_wait(this->epfd, fired.events, fired.maxEvents(), timeout);
    if (count < 0) {
        throw NetException("epoll error,errno is:" + std::to_string(errno));
    }
    fired.resize(count);
    return count;
}
} // namespace Net
#endif |
ramkrishnakc/smproject | client/components/FullScreenLoader.js | <filename>client/components/FullScreenLoader.js
import React from 'react';
import PropType from 'prop-types';
const Loader = (props = {}) => (
<div>
<div id="loader-wrapper">
<div className="loader-container">
<div className="loader-text">
<h3 style={{color: props.color}}> {props.title || ''} </h3>
</div>
<div
className="loader-spinner"
style={{borderTopColor: props.loaderSpinnerColor || props.color}}
/>
<div
className="loader-logo-container"
style={{background: props.loaderContainerColor || props.color}}
/>
</div>
</div>
</div>
);
// Runtime prop validation — every prop is an optional display hint.
Loader.propTypes = {
  title: PropType.string,
  color: PropType.string,
  loaderSpinnerColor: PropType.string,
  loaderContainerColor: PropType.string,
};

// Defaults keep the component render-safe when no props are supplied.
Loader.defaultProps = {
  title: '',
  color: '',
  loaderSpinnerColor: '',
  loaderContainerColor: '',
};

export default Loader;
|
fillingthemoon/trbc_01 | client/src/reducers/eventsReducer.js | import eventsService from '../services/eventsService'
import { setNotification } from '../reducers/notificationReducer'
// Reducer holding the list of events; state is an array of event objects.
const eventsReducer = (state = [], action) => {
  switch (action.type) {
    case 'GET_EVENTS':
      return action.data.events
    case 'CREATE_EVENT':
      return [...state, action.data.newItemResponse]
    case 'UPDATE_EVENT':
      return state.map(event => (
        event.id === action.data.id ? action.data.updatedItemResponse : event
      ))
    case 'DELETE_EVENT':
      return state.filter(event => event.id !== action.data.id)
    default:
      return state
  }
}
// Thunk: fetch every event from the API and load it into the store.
export const getEvents = () => async dispatch => {
  try {
    const events = await eventsService.getEvents()
    dispatch({ type: 'GET_EVENTS', data: { events } })
  } catch (error) {
    console.log(error)
  }
}
// Thunk: create an event, then show a success or error notification.
export const createEvent = (newItem) => async dispatch => {
  try {
    const newItemResponse = await eventsService.createEvent(newItem)
    dispatch({ type: 'CREATE_EVENT', data: { newItemResponse } })
    dispatch(setNotification('success', 'Successfully added! Please refresh to view.', 4))
  } catch (error) {
    dispatch(setNotification('error', error.response.data.error, 4))
  }
}
// Thunk: update an existing event, then show a success or error notification.
export const updateEvent = (id, updatedItem) => async dispatch => {
  try {
    const updatedItemResponse = await eventsService.updateEvent(id, updatedItem)
    dispatch({ type: 'UPDATE_EVENT', data: { id, updatedItemResponse } })
    dispatch(setNotification('success', 'Successfully updated! Please refresh to view.', 4))
  } catch (error) {
    dispatch(setNotification('error', error.response.data.error, 4))
  }
}
// Thunk: delete an event by id, then show a success or error notification.
export const deleteEvent = (id) => async dispatch => {
  try {
    await eventsService.deleteEvent(id)
    dispatch({ type: 'DELETE_EVENT', data: { id } })
    dispatch(setNotification('success', 'Successfully deleted! Please refresh to view.', 4))
  } catch (error) {
    dispatch(setNotification('error', error.response.data.error, 4))
  }
}
export default eventsReducer |
axyjs/axy-define-asm | lib/context/finder.js | /**
* Finds source files, transmits them to the wrapper and to the writer
*
* Options that affect search:
* - addExt
* - all
* - ignoreDir
* - ignoreExt
* - ignoreFile
*
* Additionally
* - verbose - for log
*/
/// <reference path="../../typing/node.d.ts" />
"use strict";
var fs = require("fs");
var path = require("path");
// Compiled (TypeScript-generated) module — left byte-identical, comments only.
// Finder walks the configured source (file or directory tree), filters
// entries, and feeds each accepted file through the wrapper and writer.
// The async traversal is balanced by context.counter.begin/done pairs, which
// presumably signal overall completion — do not reorder these calls.
var Finder = (function () {
    function Finder(context) {
        this.context = context;
        this.source = this.context.options.source;
        this.counter = this.context.counter;
        this.root = this.context.options.root;
    }
    // Entry point: dispatch on whether `source` is a file or a directory.
    Finder.prototype.run = function () {
        var stat;
        if (!fs.existsSync(this.source)) {
            return this.context.error("Not found file or directory " + this.source);
        }
        stat = fs.statSync(this.source);
        if (stat.isFile()) {
            this.runFile();
        }
        else if (stat.isDirectory()) {
            this.runDirectory();
        }
        else {
            return this.context.error("Source " + this.source + " is not file or directory");
        }
    };
    // A single source file is exposed under the virtual name "/index<ext>".
    Finder.prototype.runFile = function () {
        this.processFile("/index" + path.extname(this.source), this.source);
    };
    // Recursively walk the source directory (async readdir/stat).
    Finder.prototype.runDirectory = function () {
        var context = this.context, self = this;
        function dir(full, short) {
            context.counter.begin("Dir " + short);
            fs.readdir(full, function (err, files) {
                if (err) {
                    context.counter.done();
                    context.error(err.message);
                }
                else {
                    files.forEach(function (file) {
                        var fullname = full + "/" + file, shortname = short + "/" + file;
                        context.counter.begin("File " + shortname);
                        fs.stat(fullname, function (err, stat) {
                            if (err) {
                                context.counter.done("File " + shortname);
                                context.error(err.message);
                            }
                            else if (stat.isFile()) {
                                // Regular file: apply the filter, then process.
                                if (self.filter(shortname, fullname, false)) {
                                    context.out.log(shortname, 1);
                                    self.processFile(shortname, fullname);
                                }
                                else {
                                    context.out.log("Ignore file " + shortname, 2);
                                }
                                context.counter.done("File " + shortname);
                            }
                            else {
                                // Anything else is treated as a directory to recurse into.
                                if (self.filter(shortname, fullname, true)) {
                                    dir(fullname, shortname);
                                }
                                else {
                                    context.out.log("Ignore directory " + shortname, 2);
                                }
                                context.counter.done("File " + shortname);
                            }
                        });
                    });
                    context.counter.done("Dir " + short);
                }
            });
        }
        dir(this.source, "");
    };
    // Read a file, wrap its contents (by extension) and hand it to the writer.
    Finder.prototype.processFile = function (shortname, fullname) {
        if (fullname === void 0) { fullname = null; }
        var context = this.context, ext = path.extname(shortname).slice(1);
        if (!fullname) {
            fullname = this.source + "/" + shortname;
        }
        context.counter.begin("Process " + shortname);
        fs.readFile(fullname, "utf-8", function (err, content) {
            var wrapped;
            if (err) {
                context.counter.done("Process " + shortname);
                context.error(err.message);
            }
            else {
                wrapped = context.wrapper.wrap(content, shortname, ext);
                context.writer.write(shortname, wrapped);
                context.counter.done("Process " + shortname);
            }
        });
    };
    // Delegates the accept/ignore decision to the context's filter.
    Finder.prototype.filter = function (shortname, fullname, isDir) {
        return this.context.filter.filter(shortname, isDir);
    };
    return Finder;
})();
exports.Finder = Finder;
|
DDemmer1/auto-post | src/main/java/de/demmer/dennis/autopost/services/userhandling/SessionService.java | <filename>src/main/java/de/demmer/dennis/autopost/services/userhandling/SessionService.java<gh_stars>1-10
package de.demmer.dennis.autopost.services.userhandling;
import de.demmer.dennis.autopost.entities.user.Facebookuser;
import de.demmer.dennis.autopost.repositories.FacebookuserRepository;
import de.demmer.dennis.autopost.services.facebook.FacebookService;
import lombok.extern.log4j.Log4j2;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import javax.servlet.http.HttpSession;
import java.util.HashMap;
import java.util.Map;
/**
 * Manages the login session: keeps a lightweight snapshot (id, name, admin
 * flag, profile picture) of the authenticated Facebook user in the HTTP
 * session and resolves it back to a {@link Facebookuser} entity on demand.
 */
@Transactional
@Log4j2
@Service
public class SessionService{

    @Autowired
    HttpSession session;

    @Autowired
    FacebookService facebookService;

    @Autowired
    FacebookuserRepository userRepository;

    /**
     * Stores a snapshot of {@code user} in the HTTP session. Called at login.
     *
     * @param user the freshly authenticated user
     */
    public void addActiveUser(Facebookuser user){
        Map<String, String> snapshot = new HashMap<>();
        snapshot.put("id", String.valueOf(user.getId()));
        snapshot.put("name", user.getName());
        snapshot.put("admin", user.getAdmin());
        snapshot.put("profilePic", facebookService.getProfilePicture(user.getOauthToken()));
        session.setAttribute("activeuser", snapshot);
    }

    /** Drops the active user from the session (logout or account deletion). */
    public void removeActiveUser(){
        session.removeAttribute("activeuser");
    }

    /**
     * Resolves the currently logged-in user from the session snapshot.
     *
     * @return the entity for the active user, or {@code null} when nobody is logged in
     */
    public Facebookuser getActiveUser(){
        @SuppressWarnings("unchecked")
        Map<String, String> snapshot = (Map<String, String>) session.getAttribute("activeuser");
        if (snapshot == null) {
            return null;
        }
        return userRepository.findFacebookuserById(Integer.valueOf(snapshot.get("id")));
    }
}
|
maxwillianzhu/jsite | jsite-flowable/src/main/java/com/jsite/modules/business/service/listener/LeaveModifyProcessor.java | <reponame>maxwillianzhu/jsite<filename>jsite-flowable/src/main/java/com/jsite/modules/business/service/listener/LeaveModifyProcessor.java
package com.jsite.modules.business.service.listener;
import org.flowable.engine.delegate.TaskListener;
import org.flowable.task.service.delegate.DelegateTask;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
// Flowable task listener meant to copy the "modify leave" form variables back
// onto the Leave entity when the listened task event fires. The persistence
// logic is currently commented out, so the listener is a no-op placeholder —
// the commented code below is kept as reference for re-enabling it.
@Service
@Transactional
public class LeaveModifyProcessor implements TaskListener {

    private static final long serialVersionUID = 1L;

    // @Autowired
    // private LeaveDao leaveDao;
    // @Autowired
    // private RuntimeService runtimeService;

    /**
     * Invoked by Flowable when the configured task event occurs.
     * NOTE(review): intentionally empty at the moment — confirm whether the
     * commented-out update logic should be restored or the listener removed.
     */
    @Override
    public void notify(DelegateTask delegateTask) {
        // String processInstanceId = delegateTask.getProcessInstanceId();
        // ProcessInstance processInstance = runtimeService.createProcessInstanceQuery().processInstanceId(processInstanceId).singleResult();
        // Leave leave = new Leave(processInstance.getBusinessKey());
        // leave.setLeaveType((String) delegateTask.getVariable("leaveType"));
        // leave.setStartTime((Date) delegateTask.getVariable("startTime"));
        // leave.setEndTime((Date) delegateTask.getVariable("endTime"));
        // leave.setReason((String) delegateTask.getVariable("reason"));
        // leave.preUpdate();
        // leaveDao.update(leave);
    }
}
|
wzhao18/LightSaber | test/benchmarks/microbenchmarks/TestProjection.cpp | <gh_stars>10-100
#include <iostream>
#include "microbenchmarks/RandomDataGenerator.h"
#include "cql/expressions/ColumnReference.h"
#include "utils/WindowDefinition.h"
#include "cql/operators/codeGeneration/OperatorKernel.h"
#include "utils/QueryOperator.h"
#include "utils/Query.h"
// Microbenchmark: a simple two-column projection over a 60-row tumbling window.
class TestProjection : public RandomDataGenerator {
 private:
  // Builds the query application: projection expressions, window definition,
  // code-generated operator kernel, and the containing Query/QueryApplication.
  // NOTE(review): the raw `new` allocations below are never deleted here;
  // ownership presumably transfers to OperatorKernel/Query/QueryApplication —
  // confirm, otherwise they leak for the lifetime of the benchmark.
  void createApplication() override {
    // Configure projection
    std::vector<Expression *> expressions(2);
    // Always project the timestamp
    expressions[0] = new ColumnReference(0);
    expressions[1] = new ColumnReference(1);
    Projection *projection = new Projection(expressions);
    auto window = new WindowDefinition(ROW_BASED, 60, 60);
    bool replayTimestamps = window->isRangeBased();
    // Set up code-generated operator
    OperatorKernel *genCode = new OperatorKernel(true);
    genCode->setInputSchema(getSchema());
    genCode->setProjection(projection);
    genCode->setQueryId(0);
    genCode->setup();
    OperatorCode *cpuCode = genCode;
    // Print operator
    std::cout << cpuCode->toSExpr() << std::endl;
    auto queryOperator = new QueryOperator(*cpuCode);
    std::vector<QueryOperator *> operators;
    operators.push_back(queryOperator);
    // Timestamp reference anchors replayed timestamps to "now".
    long timestampReference = std::chrono::system_clock::now().time_since_epoch().count();
    std::vector<std::shared_ptr<Query>> queries(1);
    queries[0] = std::make_shared<Query>(0, operators, *window, m_schema, timestampReference, false, false, true);
    m_application = new QueryApplication(queries);
    m_application->setup();
  }

 public:
  // When `inMemory` is true (default) the generated input data is preloaded.
  TestProjection(bool inMemory = true) {
    m_name = "TestProjection";
    createSchema();
    createApplication();
    if (inMemory)
      loadInMemoryData();
  }
};
int main(int argc, const char **argv) {
std::unique_ptr<BenchmarkQuery> benchmarkQuery {};
BenchmarkQuery::parseCommandLineArguments(argc, argv);
benchmarkQuery = std::make_unique<TestProjection>();
return benchmarkQuery->runBenchmark();
} |
xswz8015/infra | go/src/infra/cmd/gaedeploy/cache/cache.go | <reponame>xswz8015/infra
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package cache
import (
"context"
"crypto/rand"
"encoding/hex"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"time"
"github.com/dustin/go-humanize"
"go.chromium.org/luci/common/clock"
"go.chromium.org/luci/common/errors"
"go.chromium.org/luci/common/logging"
"infra/cmd/gaedeploy/source"
)
// Cache represents an on-disk cache of unpacked tarballs.
//
// It also knows how to populate and trim it. Cross-process safety is
// provided by a per-entry `lock` file (see WithTarball).
//
// Directory layout:
//   <Root>/
//     <artifact's sha256 hex digest>/
//       lock           # lock file to manage concurrent access
//       cache.json     # bookkeeping info about this cache entry
//       tmp_*.tar.gz   # exists temporarily when fetching the tarball
//       tmp_data_*/    # exists temporarily when unpacking the tarball
//       data/          # the unpacked tarball goes here
type Cache struct {
	Root string // the root cache directory
}
// WithTarball calls `cb` with a path to the unpacked tarball.
//
// If the cache has such tarball already (as identified by its SHA256 digest),
// calls `cb` right away. Otherwise fetches and unpacks the tarball first.
//
// `cb` may modify files in the directory if necessary. Modifications will be
// preserved in the cache, and multiple concurrent WithTarballs calls (perhaps
// made from different processes) will see each other's modifications, so `cb`
// should be careful doing them. This is required to allow gaedeploy to generate
// temporary app-specific GAE YAMLs, which are required by gcloud to be
// side-by-side with the code being deployed (and thus must reside inside the
// unpacked tarball directory).
func (c *Cache) WithTarball(ctx context.Context, src source.Source, cb func(path string) error) error {
	// One cache entry per artifact digest.
	entryDir := filepath.Join(c.Root, hex.EncodeToString(src.SHA256()))
	if err := os.MkdirAll(entryDir, 0700); err != nil {
		return errors.Annotate(err, "failed to create a directory for the tarball").Err()
	}
	// Enter the global critical section to avoid weird cache states when
	// unpacking the tarball due to concurrent execution of multiple processes.
	unlockFS, err := lockFS(ctx, filepath.Join(entryDir, "lock"), 5*time.Minute)
	if err != nil {
		return errors.Annotate(err, "failed to grab the FS lock").Err()
	}
	// `unlock` is idempotent: it is called explicitly before `cb` (to allow
	// concurrent readers) and again via defer on early error returns.
	unlocked := false
	unlock := func() {
		if !unlocked {
			unlocked = true
			if err := unlockFS(); err != nil {
				logging.Errorf(ctx, "Failed to remove the FS lock: %s", err)
			}
		}
	}
	defer unlock()
	// Drop a JSON file with info about the cache entry. Used by the GC.
	err = modifyMetadata(ctx, entryDir, func(m *cacheMetadata) {
		now := clock.Now(ctx)
		if m.Created.IsZero() {
			m.Created = now
		}
		m.Touched = now
	})
	if err != nil {
		return errors.Annotate(err, "failed to update the cache metadata file").Err()
	}
	// Fetch and unpack the tarball if haven't done it yet.
	tarballDir := filepath.Join(entryDir, "data")
	if _, err := os.Stat(tarballDir); err != nil {
		if !os.IsNotExist(err) {
			return errors.Annotate(err, "failed to check presence of the unpacked tarball").Err()
		}
		// Prepare a temp file to download the tarball into.
		tmp, err := ioutil.TempFile(entryDir, "tmp_*.tar.gz")
		if err != nil {
			return errors.Annotate(err, "failed to create a temp file to fetch the tarball into").Err()
		}
		tmpName := tmp.Name()
		tmp.Close() // we are only after the file name
		nukeTmpFile := func() {
			if err := os.Remove(tmpName); err != nil && os.IsNotExist(err) {
				logging.Warningf(ctx, "Failed to delete the temp file: %s", err)
			}
		}
		// Note: note using defer for nukeTmpFile and (later) nukeStagingDir because
		// we want them called before cb(...). Defers will be called after.
		// Prepare a staging directory to unzip the tarball into. We'll rename it
		// into `tarballDir` on success.
		stagingDir, err := ioutil.TempDir(entryDir, "tmp_data_*")
		if err != nil {
			return errors.Annotate(err, "failed to create a temp directory to unpack the tarball into").Err()
		}
		nukeStagingDir := func() {
			if err := os.RemoveAll(stagingDir); err != nil {
				logging.Warningf(ctx, "Failed to delete the staging directory: %s", err)
			}
		}
		// Download and untar the file into the staging directory.
		err = fetchAndUntar(ctx, src, tmpName, stagingDir)
		nukeTmpFile() // served its purpose
		if err != nil {
			nukeStagingDir() // contains incomplete garbage, kill it
			return err       // annotated already
		}
		// Atomically publish the fully unpacked directory.
		if err := os.Rename(stagingDir, tarballDir); err != nil {
			nukeStagingDir()
			return errors.Annotate(err, "failed to move the staging directory into its final place").Err()
		}
	} else {
		logging.Infof(ctx, "Found the unpackaged tarball in the cache.")
	}
	// Now that we have unpacked everything, release the lock to allow multiple
	// concurrent callbacks to read from the cache directory at the same time.
	unlock()
	// Let the callback do the rest.
	return cb(tarballDir)
}
// Trim removes old cache entries, keeping only most recently touched ones.
//
// Entries without readable metadata are skipped (never deleted). Deletion
// failures are logged and reported as a single aggregate error.
func (c *Cache) Trim(ctx context.Context, keep int) error {
	logging.Infof(ctx, "Trimming the cache to keep only %d most recently touched entries...", keep)
	files, err := ioutil.ReadDir(c.Root)
	if err != nil && !os.IsNotExist(err) {
		return errors.Annotate(err, "failed to scan the cache directory").Err()
	}
	type entry struct {
		name string
		meta cacheMetadata
	}
	var entries []entry
	for _, file := range files {
		if !file.IsDir() {
			continue
		}
		switch meta, err := readMetadata(ctx, filepath.Join(c.Root, file.Name())); {
		case err != nil:
			logging.Warningf(ctx, "Skipping %q - %s", file.Name(), err)
		case meta.Touched.IsZero():
			logging.Warningf(ctx, "Skipping %q - empty or unrecognized", file.Name())
		default:
			entries = append(entries, entry{
				name: file.Name(),
				meta: meta,
			})
		}
	}
	if len(entries) <= keep {
		logging.Infof(ctx, "Nothing to trim.")
		return nil
	}
	// Oldest first.
	sort.Slice(entries, func(i, j int) bool {
		return entries[i].meta.Touched.Before(entries[j].meta.Touched)
	})
	done := 0
	for i := 0; i < len(entries)-keep; i++ {
		e := entries[i]
		logging.Infof(ctx, "Trimming entry %q (created %s, last touched %s)...",
			e.name, humanize.Time(e.meta.Created), humanize.Time(e.meta.Touched))
		// Steamroll over file system locks. There's a chance of a race condition,
		// but it is very improbable, since its unlikely anyone uses old entries.
		if err := removeDir(filepath.Join(c.Root, e.name)); err != nil {
			logging.Errorf(ctx, "Failed to trim %q - %s", e.name, err)
		} else {
			done++
		}
	}
	logging.Infof(ctx, "Trimmed %d entries.", done)
	if done != len(entries)-keep {
		return errors.Reason("failed to delete some cache entries, see logs").Err()
	}
	return nil
}
// removeDir renames `path` into "del_*" first (to make it "disappear"
// atomically), and then does os.RemoveAll on the renamed directory.
//
// Skips the rename if `path` is already named `del_*`.
func removeDir(path string) error {
	target := path
	dir := filepath.Dir(path)
	base := filepath.Base(path)
	if !strings.HasPrefix(base, "del_") {
		// Random suffix makes concurrent removals of same-named dirs safe.
		var rnd [8]byte
		if _, err := rand.Read(rnd[:]); err != nil {
			return errors.Annotate(err, "failed to generate random suffix").Err()
		}
		target = filepath.Join(dir,
			fmt.Sprintf("del_%d_%s_%s", os.Getpid(), hex.EncodeToString(rnd[:]), base))
		if err := os.Rename(path, target); err != nil {
			return errors.Annotate(err, "failed to rename the directory before deleting it").Err()
		}
	}
	return os.RemoveAll(target)
}
|
cybernetics/jOOQ | jOOQ-test/src/org/jooq/test/mariadb/generatedclasses/tables/pojos/TIdentityPk.java | /**
* This class is generated by jOOQ
*/
package org.jooq.test.mariadb.generatedclasses.tables.pojos;
/**
* This class is generated by jOOQ.
*/
/**
 * Immutable POJO for the T_IDENTITY_PK table (jOOQ-generated style):
 * an identity primary key plus a single integer value column.
 */
@java.lang.SuppressWarnings({ "all", "unchecked", "rawtypes" })
public class TIdentityPk implements java.io.Serializable {

	// Kept identical to the generated value for serialization compatibility.
	private static final long serialVersionUID = 1524374851;

	private final java.lang.Integer id;
	private final java.lang.Integer val;

	public TIdentityPk(java.lang.Integer id, java.lang.Integer val) {
		this.id = id;
		this.val = val;
	}

	/** @return the identity primary key */
	public java.lang.Integer getId() {
		return this.id;
	}

	/** @return the value column */
	public java.lang.Integer getVal() {
		return this.val;
	}
}
|
nirvanarsc/CompetitiveProgramming | Java/leetcode/weekly_contests/weekly_265/P_4.java | <filename>Java/leetcode/weekly_contests/weekly_265/P_4.java<gh_stars>1-10
package leetcode.weekly_contests.weekly_265;
public class P_4 {

    // Memoization state for a single possiblyEquals() call. These were
    // previously static, so concurrent callers (or two instances used in
    // parallel) would clobber each other's arrays and the large arrays were
    // retained for the lifetime of the class. Instance fields remove both
    // hazards; the public API is unchanged.
    private boolean[][][] seen;
    private int[][][] dp;
    private char[] l;
    private char[] r;
    private int n;
    private int m;

    /**
     * Returns true if the two encoded strings can decode to the same string.
     * A digit run in either input stands for "that many arbitrary characters"
     * (per LeetCode 2060, runs are at most 3 digits, so values are <= 999).
     */
    public boolean possiblyEquals(String s1, String s2) {
        n = s1.length();
        m = s2.length();
        // diff stays within [-999, 999]; the +1000 offset in dfs() shifts it
        // into the 0..1999 index range of the third dimension.
        seen = new boolean[n + 1][m + 1][2000];
        dp = new int[n + 1][m + 1][2000];
        l = s1.toCharArray();
        r = s2.toCharArray();
        return dfs(0, 0, 0) == 1;
    }

    /**
     * Memoized search over (position in s1, position in s2, balance).
     * diff > 0 means s1 still owes diff literal characters; diff < 0 means
     * the same for s2. Returns 1 when some common decoding exists, else 0.
     */
    private int dfs(int i, int j, int diff) {
        if (i == n && j == m) {
            return diff == 0 ? 1 : 0;
        }
        if (seen[i][j][diff + 1000]) {
            return dp[i][j][diff + 1000];
        }
        int res = 0;
        // Balanced and both sides at a literal: the characters must match.
        if (i < n && j < m && diff == 0 && l[i] == r[j]) {
            res = Math.max(res, dfs(i + 1, j + 1, 0));
        }
        // s1 owes characters: a literal from s1 pays one off.
        if (i < n && !isDigit(l[i]) && diff > 0) {
            res = Math.max(res, dfs(i + 1, j, diff - 1));
        }
        // s2 owes characters: a literal from s2 pays one off.
        if (j < m && !isDigit(r[j]) && diff < 0) {
            res = Math.max(res, dfs(i, j + 1, diff + 1));
        }
        // Try every prefix of a digit run in s1 as a wildcard length.
        for (int k = i, val = 0; k < n && isDigit(l[k]); k++) {
            val = val * 10 + (l[k] - '0');
            res = Math.max(res, dfs(k + 1, j, diff - val));
        }
        // Likewise for a digit run in s2.
        for (int k = j, val = 0; k < m && isDigit(r[k]); k++) {
            val = val * 10 + (r[k] - '0');
            res = Math.max(res, dfs(i, k + 1, diff + val));
        }
        seen[i][j][diff + 1000] = true;
        return dp[i][j][diff + 1000] = res;
    }

    private static boolean isDigit(char c) {
        return '0' <= c && c <= '9';
    }
}
|
mkleczek/river | qa/src/org/apache/river/test/impl/start/ServiceDescriptorUtil.java | <gh_stars>0
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.river.test.impl.start;
import java.io.File;
import java.io.IOException;
/**
 * Shared constants and helpers for building Jini service descriptors in tests.
 */
public class ServiceDescriptorUtil {

    public static final String jini_root_dir = "/view/resendes/vob/jive";
    public static final String jini_lib_dir = jini_root_dir + "/lib";
    public static final String jini_policy_dir = jini_root_dir + "/policy";
    public static final String jini_host = "resendes";
    public static final String jini_port = "8080";
    public static final String jini_url =
        "http://" + jini_host + ":" + jini_port + "/";

    /**
     * Returns a unique, currently unused filesystem path suitable for use as a
     * log directory, or null if one could not be created.
     *
     * A temp file is created only to reserve a unique name; it is deleted in
     * the finally block so the caller receives a path that does not yet exist.
     */
    public static String getLogDir() {
        File placeholder = null;
        try {
            placeholder = File.createTempFile("SDU", "tmp");
            return placeholder.getCanonicalPath();
        } catch (IOException ioe) {
            System.out.println("Exception creating log dir: " + ioe);
            return null;
        } finally {
            if (placeholder != null) {
                placeholder.delete();
            }
        }
    }
}
|
LegendKe/dujiaoxian | Pin/src/main/java/com/djx/pin/widget/MyRecyclerView.java | package com.djx.pin.widget;
import android.content.Context;
import android.support.annotation.Nullable;
import android.support.v7.widget.RecyclerView;
import android.util.AttributeSet;
/**
* Created by Administrator on 2016/9/21 0021.
*/
/**
 * RecyclerView that reports a (nearly) unbounded desired height, so it can be
 * embedded in a scrolling parent and lay out all of its content.
 */
public class MyRecyclerView extends RecyclerView {

    public MyRecyclerView(Context context) {
        super(context);
    }

    public MyRecyclerView(Context context, @Nullable AttributeSet attrs) {
        super(context, attrs);
    }

    public MyRecyclerView(Context context, @Nullable AttributeSet attrs, int defStyle) {
        super(context, attrs, defStyle);
    }

    /**
     * Ignores the parent-imposed height spec and measures with a very large
     * AT_MOST bound. Integer.MAX_VALUE >> 2 keeps the size within the bits a
     * MeasureSpec reserves for the size component (the top bits carry the
     * mode), so the view grows to wrap all of its rows.
     *
     * NOTE(review): presumably used inside a ScrollView; this defeats row
     * recycling for long lists — confirm against the layouts that use it.
     */
    @Override
    protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) {
        int heightSpec = MeasureSpec.makeMeasureSpec(
                Integer.MAX_VALUE >> 2, MeasureSpec.AT_MOST);
        super.onMeasure(widthMeasureSpec, heightSpec);
    }

    // The previous overScrollBy override only delegated to super with
    // identical arguments; it was removed as redundant (behavior unchanged).
}
|
GGGGITFKBJG/wethands | src/thread/CountDownLatch.h | <reponame>GGGGITFKBJG/wethands
// Copyright (c) 2020 GGGGITFKBJG
//
// Date: 2020-05-12 22:25:59
// Description:
#ifndef SRC_THREAD_COUNTDOWNLATCH_H_
#define SRC_THREAD_COUNTDOWNLATCH_H_
#include "src/utils/Uncopyable.h"
#include "src/thread/Mutex.h"
#include "src/thread/Condition.h"
namespace wethands {
// A countdown latch: blocks waiting threads until an internal counter,
// initialized to `count`, has been decremented to zero.
class CountDownLatch : public Uncopyable {
 public:
  explicit CountDownLatch(int count);
  ~CountDownLatch() = default;
  // Decrements the count; when it reaches 0, wakes up all waiting threads.
  void CountDown();
  // Blocks until the count has been decremented to 0.
  void Wait();
  // Returns the current internal count.
  int GetCount() const;

 private:
  mutable MutexLock lock_;  // guards count_; mutable so GetCount() can stay const
  Condition cond_;          // signaled when count_ drops to 0
  int count_;               // remaining number of CountDown() calls awaited
};
} // namespace wethands
#endif // SRC_THREAD_COUNTDOWNLATCH_H_
|
ubuntunux/PyEngine3D | PyEngine3D/UI/TKInter/MainWindow.py | <filename>PyEngine3D/UI/TKInter/MainWindow.py
import sys
import traceback
import os
import time
from threading import Thread
from collections import OrderedDict
from enum import Enum
import tkinter as tk
import tkinter.ttk as ttk
from tkinter import filedialog
from tkinter import messagebox
import numpy
from PyEngine3D.UI import logger
from PyEngine3D.Common.Command import *
from PyEngine3D.Utilities import Attributes, Attribute
from .EditableTreeview import SimpleEditableTreeview
TAG_NORMAL = 'normal'
TAG_LOADED = 'loaded'
def addDirtyMark(text):
    """Return *text* prefixed with the '*' dirty marker, adding it only once."""
    return text if text.startswith('*') else '*' + text
def removeDirtyMark(text):
    """Return *text* with a leading '*' dirty marker stripped, if present."""
    return text[1:] if text.startswith('*') else text
def get_name(item):
    """Return the display text of a treeview item dict (its 'text' entry)."""
    return item['text']
def get_value(item, index=0):
    """Return the column value at *index* from a treeview item's 'values'."""
    return item['values'][index]
def get_tag(item):
    """Return the first tag of a treeview item dict, or '' when it has none."""
    tags = item.get('tags') or ()
    return tags[0] if len(tags) > 0 else ''
def combobox_add_item(combobox, value):
    """Append *value* to a ttk.Combobox's value tuple.

    The very first item added is auto-selected so the widget never shows an
    empty selection.
    """
    existing = combobox['values']
    values = existing + (value,) if existing else (value,)
    combobox.configure(values=values)
    if len(values) == 1:
        combobox.current(0)
def combobox_clear(combobox):
    """Remove every entry from a ttk.Combobox's value list."""
    combobox.configure(values=())
class ItemInfo:
    """Per-row metadata for the attribute treeview.

    Holds the attribute's name, its Python type, the metadata of its parent
    row (for nested containers), its index/key within that parent, and the
    last accepted value so a failed edit can be rolled back.
    """

    def __init__(self, attribute_name, dataType, parent_info, index):
        # Assignment order is deliberately preserved: __repr__ dumps
        # __dict__, whose ordering follows insertion order.
        self.dataType = dataType
        self.attribute_name = attribute_name
        self.parent_info = parent_info
        self.index = index
        self.oldValue = None

    def set_old_value(self, value):
        """Record *value* as the last known good value."""
        self.oldValue = value

    def __repr__(self):
        return f"ItemInfo :{self.__dict__!r}"
class MessageThread(Thread):
    """Background thread that polls *cmdQueue* at roughly 60 Hz and dispatches
    each received (command, value) tuple to the callback registered for that
    command via connect().
    """

    def __init__(self, cmdQueue):
        Thread.__init__(self)
        self.running = True           # cleared when COMMAND.CLOSE_UI arrives
        self.cmdQueue = cmdQueue      # queue of (command, value) tuples
        self.commands = {}            # command name -> bound callback
        self.limitDelta = 1.0 / 60.0  # 60fps frame budget for the poll loop
        self.delta = 0.0
        self.lastTime = 0.0

    def connect(self, command_name, command):
        """Register *command* to be invoked when *command_name* is received."""
        self.commands[command_name] = command

    def run(self):
        """Poll loop: sleep off the remainder of each ~1/60 s frame, then
        process at most one queued message per iteration.
        """
        self.lastTime = time.time()
        while self.running:
            # Throttle to the frame budget.
            self.delta = time.time() - self.lastTime
            if self.delta < self.limitDelta:
                time.sleep(self.limitDelta - self.delta)
            # print(1.0/(time.time() - self.lastTime))
            self.lastTime = time.time()
            # Process received queue entries; each entry must be a tuple.
            if not self.cmdQueue.empty():
                cmd, value = self.cmdQueue.get()
                cmdName = get_command_name(cmd)
                # CLOSE_UI ends the loop after this final dispatch.
                if cmd == COMMAND.CLOSE_UI:
                    self.running = False
                # Invoke the bound callback, passing value only when present.
                if cmdName in self.commands:
                    command = self.commands[cmdName]
                    if value is not None:
                        command(value)
                    else:
                        command()
class MainWindow:
def __init__(self, root, project_filename, cmdQueue, appCmdQueue, cmdPipe):
logger.info("Create MainWindow.")
self.root = root
self.project_filename = project_filename
self.cmdQueue = cmdQueue
self.appCmdQueue = appCmdQueue
self.cmdPipe = cmdPipe
self.selected_item = None
self.selected_item_categoty = ''
self.isFillAttributeTree = False
# MessageThread
self.message_thread = MessageThread(self.cmdQueue)
self.message_thread.start()
self.message_thread.connect(get_command_name(COMMAND.SHOW_UI), self.show)
self.message_thread.connect(get_command_name(COMMAND.HIDE_UI), self.hide)
self.message_thread.connect(get_command_name(COMMAND.TRANS_SCREEN_INFO), self.set_screen_info)
self.message_thread.connect(get_command_name(COMMAND.CLEAR_RENDERTARGET_LIST), self.clear_render_target_list)
self.message_thread.connect(get_command_name(COMMAND.TRANS_RENDERTARGET_INFO), self.add_render_target)
self.message_thread.connect(get_command_name(COMMAND.TRANS_RENDERING_TYPE_LIST), self.add_rendering_type)
self.message_thread.connect(get_command_name(COMMAND.TRANS_ANTIALIASING_LIST), self.add_anti_aliasing)
self.message_thread.connect(get_command_name(COMMAND.TRANS_GAME_BACKEND_LIST), self.add_game_backend)
self.message_thread.connect(get_command_name(COMMAND.TRANS_GAME_BACKEND_INDEX), self.set_game_backend_index)
self.message_thread.connect(get_command_name(COMMAND.CLOSE_UI), self.exit)
self.message_thread.connect(get_command_name(COMMAND.SORT_UI_ITEMS), self.sort_items)
self.message_thread.connect(get_command_name(COMMAND.TRANS_RESOURCE_LIST), self.add_resource_list)
self.message_thread.connect(get_command_name(COMMAND.TRANS_RESOURCE_INFO), self.set_resource_info)
self.message_thread.connect(get_command_name(COMMAND.TRANS_RESOURCE_ATTRIBUTE), self.fill_resource_attribute)
self.message_thread.connect(get_command_name(COMMAND.DELETE_RESOURCE_INFO), self.delete_resource_info)
self.message_thread.connect(get_command_name(COMMAND.DELETE_OBJECT_INFO), self.delete_object_info)
self.message_thread.connect(get_command_name(COMMAND.TRANS_OBJECT_INFO), self.add_object_info)
self.message_thread.connect(get_command_name(COMMAND.TRANS_OBJECT_ATTRIBUTE), self.fill_object_attribute)
self.message_thread.connect(get_command_name(COMMAND.CLEAR_OBJECT_LIST), self.clear_object_list)
width = 600
height = 800
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
x = int(screen_width - width)
y = int((screen_height / 2) - (height / 2))
frame_width = int(width / 2)
property_width = int(frame_width / 2)
root.resizable(width=True, height=True)
root.bind('<Escape>', self.exit)
root.geometry('%dx%d+%d+%d' % (width, height, x, y))
main_frame = tk.PanedWindow(root, orient=tk.HORIZONTAL, sashrelief=tk.RAISED)
main_frame.pack(fill="both", expand=True)
main_tab = ttk.Notebook(main_frame)
# set windows title
self.set_window_title(project_filename if project_filename else "Default Project")
def donothing(*args):
pass
# Menu
menubar = tk.Menu(root)
root.config(menu=menubar)
menu = tk.Menu(menubar, tearoff=0)
menu.add_command(label="New Project", command=self.new_project)
menu.add_command(label="Open Project", command=self.open_project)
menu.add_command(label="Save Project", command=self.save_project)
menu.add_separator()
menu.add_command(label="New Scene", command=self.new_scene)
menu.add_command(label="Save Scene", command=self.save_scene)
menu.add_separator()
menu.add_command(label="Exit", command=self.exit)
menubar.add_cascade(label="Menu", menu=menu)
view_mode_menu = tk.Menu(menubar, tearoff=0)
view_mode_menu.add_command(label="Wireframe", command=lambda: self.set_view_mode(COMMAND.VIEWMODE_WIREFRAME))
view_mode_menu.add_command(label="Shading", command=lambda: self.set_view_mode(COMMAND.VIEWMODE_SHADING))
view_mode_menu.add_separator()
menubar.add_cascade(label="View Mode", menu=view_mode_menu)
helpmenu = tk.Menu(menubar, tearoff=0)
helpmenu.add_command(label="Help", command=donothing)
helpmenu.add_command(label="About...", command=donothing)
menubar.add_cascade(label="Help", menu=helpmenu)
# command layout
command_frame = tk.Frame(main_tab, relief="sunken", padx=10, pady=10)
variable = tk.StringVar()
self.comboGameBackend = ttk.Combobox(command_frame, textvariable=variable)
self.comboGameBackend.bind("<<ComboboxSelected>>", self.change_game_backend, "+")
self.comboGameBackend.pack(fill="x", side="top")
separator = ttk.Separator(command_frame, orient='horizontal')
separator.pack(fill="x", side="top", pady=10)
self.play_button = tk.Button(command_frame, text="Play")
self.play_button.pack(fill="x", side="top")
self.play_button.bind("<Button-1>", self.toggle_play)
label_frame = ttk.LabelFrame(command_frame, text='Resolution')
label_frame.pack(fill="x", side="top", pady=10)
frame = tk.Frame(label_frame, relief="sunken", padx=5)
frame.pack(fill="x", side="top")
label = tk.Label(frame, text="Width", width=1)
label.pack(fill="x", side="left", expand=True)
self.spinWidth = tk.IntVar()
self.spinWidth.set(10)
spinbox = tk.Spinbox(frame, from_=0, to=9999, textvariable=self.spinWidth, width=1)
spinbox.pack(fill="x", side="left", expand=True)
frame = tk.Frame(label_frame, relief="sunken", padx=5)
frame.pack(fill="x", side="top")
label = tk.Label(frame, text="Height", width=1)
label.pack(fill="x", side="left", expand=True)
self.spinHeight = tk.IntVar()
self.spinHeight.set(0)
spinbox = tk.Spinbox(frame, from_=0, to=9999, textvariable=self.spinHeight, width=1)
spinbox.pack(fill="x", side="left", expand=True)
self.checkFullScreen = tk.IntVar()
self.checkFullScreen.set(0)
checkbutton = tk.Checkbutton(label_frame, text="Full Screen", variable=self.checkFullScreen)
checkbutton.pack(fill="x", side="top")
button = tk.Button(label_frame, text="Change Resolution")
button.pack(fill="x", side="top")
button.bind("<Button-1>", self.change_resolution)
label_frame = ttk.LabelFrame(command_frame, text='Rendering Type')
label_frame.pack(fill="x", side="top", pady=10)
self.comboRenderingType = ttk.Combobox(label_frame)
self.comboRenderingType.pack(fill="x", side="top")
self.comboRenderingType.bind("<<ComboboxSelected>>", self.set_rendering_type, "+")
label_frame = ttk.LabelFrame(command_frame, text='Anti Aliasing')
label_frame.pack(fill="x", side="top", pady=10)
self.comboAntiAliasing = ttk.Combobox(label_frame)
self.comboAntiAliasing.pack(fill="x", side="top")
self.comboAntiAliasing.bind("<<ComboboxSelected>>", self.set_anti_aliasing, "+")
label_frame = ttk.LabelFrame(command_frame, text='Render Target')
label_frame.pack(fill="x", side="top", pady=10)
self.comboRenderTargets = ttk.Combobox(label_frame)
self.comboRenderTargets.pack(fill="x", side="top")
self.comboRenderTargets.bind("<<ComboboxSelected>>", self.view_rendertarget, "+")
# resource layout
resource_frame = tk.Frame(main_tab, relief="sunken", padx=10, pady=10)
label_frame = ttk.LabelFrame(resource_frame, text='Create Resource')
label_frame.pack(fill="x", side="top", pady=10)
button = tk.Button(label_frame, text="Create Particle")
button.pack(fill="x", side="top")
button.bind("<Button-1>", self.create_particle)
button = tk.Button(label_frame, text="Create Spline")
button.pack(fill="x", side="top")
button.bind("<Button-1>", self.create_spline)
button_collision = tk.Button(label_frame, text="Create Collision")
button_collision.pack(fill="x", side="top")
button_collision.bind("<Button-1>", self.create_collision)
self.resource_menu = tk.Menu(root, tearoff=0)
self.resource_menu.add_command(label="Load", command=self.load_resource)
self.resource_menu.add_command(label="Action", command=self.action_resource)
self.resource_menu.add_command(label="Duplicate", command=self.duplicate_resource)
self.resource_menu.add_command(label="Save", command=self.save_resource)
self.resource_menu.add_command(label="Delete", command=self.delete_resource)
# self.resource_menu.bind("<FocusOut>", self.resource_menu.unpost)
self.resource_treeview = ttk.Treeview(resource_frame)
self.resource_treeview["columns"] = ("#1", )
self.resource_treeview.column("#0", width=property_width)
self.resource_treeview.column("#1", width=property_width)
self.resource_treeview.heading("#0", text="Resource Name",
command=lambda: self.sort_treeview(self.resource_treeview, 0))
self.resource_treeview.heading("#1", text="Resource Type",
command=lambda: self.sort_treeview(self.resource_treeview, 1))
self.resource_treeview.bind("<<TreeviewSelect>>", self.select_resource)
self.resource_treeview.bind("<Button-1>", lambda event: self.resource_menu.unpost())
self.resource_treeview.bind("<Double-1>", lambda event: self.load_resource())
self.resource_treeview.bind("<Button-3>", self.open_resource_menu)
self.resource_treeview.bind("<FocusOut>", lambda event: self.resource_menu.unpost())
self.resource_treeview.bind("<4>", lambda event: self.resource_menu.unpost())
self.resource_treeview.bind("<5>", lambda event: self.resource_menu.unpost())
self.resource_treeview.bind("<MouseWheel>", lambda event: self.resource_menu.unpost())
vsb = ttk.Scrollbar(self.resource_treeview, orient="vertical", command=self.resource_treeview.yview)
vsb.pack(side='right', fill='y')
self.resource_treeview.configure(yscrollcommand=vsb.set)
self.resource_treeview.pack(fill='both', expand=True)
# object layout
object_frame = tk.Frame(main_tab, relief="sunken", padx=10, pady=10)
label_frame = ttk.LabelFrame(object_frame, text='Add Object')
label_frame.pack(fill="x", side="top", pady=10)
button = tk.Button(label_frame, text="Add Camera")
button.pack(fill="x", side="top")
button.bind("<Button-1>", self.add_camera)
button = tk.Button(label_frame, text="Add Light")
button.pack(fill="x", side="top")
button.bind("<Button-1>", self.add_light)
self.object_menu = tk.Menu(root, tearoff=0)
self.object_menu.add_command(label="Action", command=self.action_object)
self.object_menu.add_command(label="Focus", command=self.focus_object)
self.object_menu.add_command(label="Delete", command=self.delete_object)
self.object_menu.bind("<FocusOut>", self.object_menu.unpost)
self.object_treeview = ttk.Treeview(object_frame)
self.object_treeview["columns"] = ("#1",)
self.object_treeview.column("#0", width=property_width)
self.object_treeview.column("#1", width=property_width)
self.object_treeview.heading("#0", text="Object Name",
command=lambda: self.sort_treeview(self.object_treeview, 0))
self.object_treeview.heading("#1", text="Object Type",
command=lambda: self.sort_treeview(self.object_treeview, 1))
self.object_treeview.bind("<<TreeviewSelect>>", lambda event: self.select_object())
self.object_treeview.bind("<Button-1>", lambda event: self.object_menu.unpost())
self.object_treeview.bind("<Double-1>", lambda event: self.focus_object())
self.object_treeview.bind("<Button-3>", self.open_object_menu)
self.object_treeview.bind("<FocusOut>", lambda event: self.object_menu.unpost())
self.object_treeview.bind("<4>", lambda event: self.object_menu.unpost())
self.object_treeview.bind("<5>", lambda event: self.object_menu.unpost())
self.object_treeview.bind("<MouseWheel>", lambda event: self.object_menu.unpost())
vsb = ttk.Scrollbar(self.object_treeview, orient="vertical", command=self.object_treeview.yview)
vsb.pack(side='right', fill='y')
self.object_treeview.configure(yscrollcommand=vsb.set)
self.object_treeview.pack(fill='both', expand=True)
# attribute layout
self.attribute_menu = tk.Menu(root, tearoff=0)
self.attribute_menu.add_command(label="Add", command=self.add_attribute_component)
self.attribute_menu.add_command(label="Delete", command=self.delete_attribute_component)
self.object_menu.bind("<FocusOut>", self.attribute_menu.unpost)
attribute_frame = tk.Frame(main_frame, relief="sunken", padx=10, pady=10)
self.attribute_treeview = SimpleEditableTreeview(attribute_frame)
self.attribute_treeview.item_infos = dict()
self.attribute_treeview["columns"] = ("#1",)
self.attribute_treeview.column("#0", width=property_width)
self.attribute_treeview.column("#1", width=property_width)
self.attribute_treeview.heading("#0", text="Attribute", command=lambda: self.sort_treeview(self.attribute_treeview, 0))
self.attribute_treeview.heading("#1", text="Value", command=lambda: self.sort_treeview(self.attribute_treeview, 1))
self.attribute_treeview.bind("<<TreeviewSelect>>", self.select_attribute)
self.attribute_treeview.bind("<<TreeviewCellEdited>>", self.attribute_changed)
self.attribute_treeview.bind("<Button-1>", lambda event: self.attribute_menu.unpost())
self.attribute_treeview.bind("<Button-3>", self.open_attribute_menu)
def attribute_treeview_on_mouse_wheel(event):
self.attribute_menu.unpost()
self.attribute_treeview.clear_inplace_widgets()
# mouse wheel up, down, click
self.attribute_treeview.bind("<4>", attribute_treeview_on_mouse_wheel)
self.attribute_treeview.bind("<5>", attribute_treeview_on_mouse_wheel)
self.attribute_treeview.bind("<MouseWheel>", attribute_treeview_on_mouse_wheel)
self.attribute_treeview.pack(fill='both', side='left', expand=True)
vsb = ttk.Scrollbar(self.attribute_treeview, orient="vertical", command=self.attribute_treeview.yview)
vsb.pack(side='right', fill='y')
self.attribute_treeview.configure(yscrollcommand=vsb.set)
# tabs
main_tab.add(command_frame, text="Application")
main_tab.add(resource_frame, text="Resource List")
main_tab.add(object_frame, text="Object List")
main_frame.add(main_tab, width=frame_width)
main_frame.add(attribute_frame, width=frame_width)
# wait a UI_RUN message, and send success message
if self.cmdPipe:
self.cmdPipe.RecvAndSend(COMMAND.UI_RUN, None, COMMAND.UI_RUN_OK, None)
    def exit(self, *args):
        """Shut down the editor UI: persist config, stop Tk, tell the engine
        to close, then terminate this process.

        *args absorbs the Tk event object when invoked from the <Escape> bind.
        """
        logger.info("Bye")
        self.save_config()
        self.root.quit()
        self.appCmdQueue.put(COMMAND.CLOSE_APP)
        sys.exit()
    def load_config(self):
        """Restore persisted UI settings. Placeholder — not implemented yet."""
        pass
    def save_config(self):
        """Persist UI settings. Placeholder — the commented geometry queries
        below sketch the intended implementation (save window position/size).
        """
        pass
        # x = self.root.winfo_rootx()
        # y = self.root.winfo_rooty()
        # width = self.root.winfo_width()
        # height = self.root.winfo_height()
    def show(self):
        """Process pending geometry updates and bring the window on screen."""
        self.root.update()
        self.root.deiconify()
    def hide(self):
        """Hide the window without destroying it (counterpart of show())."""
        self.root.withdraw()
    def set_window_title(self, title):
        """Set the top-level window's title bar text."""
        self.root.title(title)
# ------------------------- #
# Menu
# ------------------------- #
def sort_treeview(self, treeview, column_index):
if not hasattr(treeview, 'orders'):
treeview.orders = {}
if column_index not in treeview.orders:
treeview.orders[column_index] = False
else:
treeview.orders[column_index] = not treeview.orders[column_index]
def sort_func(item_id):
item = treeview.item(item_id)
if 0 == column_index:
return get_name(item)
else:
return get_value(item, column_index - 1)
items = list(treeview.get_children(''))
items.sort(key=sort_func, reverse=treeview.orders[column_index])
for i, item in enumerate(items):
treeview.move(item, '', i)
def sort_items(self):
self.sort_treeview(self.resource_treeview, 0)
self.sort_treeview(self.resource_treeview, 1)
self.sort_treeview(self.object_treeview, 0)
self.sort_treeview(self.object_treeview, 1)
    def new_project(self):
        """Ask the user for a new project path and forward it to the engine.

        NOTE(review): a cancelled dialog yields an empty result which is still
        sent to the engine — presumably ignored there; confirm.
        """
        filename = filedialog.asksaveasfilename(initialdir=".",
                                                title="New Project",
                                                filetypes=(("project name", "*.*"), ))
        self.appCmdQueue.put(COMMAND.NEW_PROJECT, filename)
    def open_project(self):
        """Ask the user for an existing .project file and forward it to the
        engine for loading.
        """
        filename = filedialog.askopenfilename(initialdir=".",
                                              title="Open Project",
                                              filetypes=(("project file", "*.project"), ("all files", "*.*")))
        self.appCmdQueue.put(COMMAND.OPEN_PROJECT, filename)
    def save_project(self):
        """Ask the engine to save the current project."""
        self.appCmdQueue.put(COMMAND.SAVE_PROJECT)
    def new_scene(self):
        """Ask the engine to create a fresh scene."""
        self.appCmdQueue.put(COMMAND.NEW_SCENE)
    def save_scene(self):
        """Ask the engine to save the current scene."""
        self.appCmdQueue.put(COMMAND.SAVE_SCENE)
def toggle_play(self, event):
if "Play" == self.play_button['text']:
self.play_button['text'] = "Stop"
self.appCmdQueue.put(COMMAND.PLAY)
else:
self.play_button['text'] = "Play"
self.appCmdQueue.put(COMMAND.STOP)
    def set_view_mode(self, mode):
        """Forward a VIEWMODE_* command (wireframe/shading) to the engine."""
        self.appCmdQueue.put(mode)
    def set_screen_info(self, screen_info):
        """Populate the resolution widgets from a (width, height, full_screen)
        tuple reported by the engine.
        """
        width, height, full_screen = screen_info
        self.spinWidth.set(width)
        self.spinHeight.set(height)
        self.checkFullScreen.set(1 if full_screen else 0)
    def clear_render_target_list(self):
        """Empty the render-target combobox (the engine repopulates it)."""
        combobox_clear(self.comboRenderTargets)
    # Game Backend
    def add_game_backend(self, game_backend_list):
        """Fill the backend combobox with the names the engine supports."""
        for game_backend_name in game_backend_list:
            combobox_add_item(self.comboGameBackend, game_backend_name)
    def change_game_backend(self, event):
        """User picked a backend: ask the engine to switch to it by index."""
        game_backend_index = self.comboGameBackend.current()
        self.appCmdQueue.put(COMMAND.CHANGE_GAME_BACKEND, game_backend_index)
    def set_game_backend_index(self, game_backend_index):
        """Engine reported the active backend: reflect it in the combobox."""
        self.comboGameBackend.current(game_backend_index)
    # Rendering Type
    def add_rendering_type(self, rendering_type_list):
        """Fill the rendering-type combobox with engine-provided names."""
        for rendering_type_name in rendering_type_list:
            combobox_add_item(self.comboRenderingType, rendering_type_name)
    def set_rendering_type(self, event):
        """User picked a rendering type: send its index to the engine."""
        rendering_type_index = self.comboRenderingType.current()
        self.appCmdQueue.put(COMMAND.SET_RENDERING_TYPE, rendering_type_index)
    # Anti Aliasing
    def add_anti_aliasing(self, anti_aliasing_list):
        """Fill the anti-aliasing combobox with engine-provided names."""
        for anti_aliasing_name in anti_aliasing_list:
            combobox_add_item(self.comboAntiAliasing, anti_aliasing_name)
    def set_anti_aliasing(self, event):
        """User picked an AA mode: send its index to the engine."""
        anti_aliasing_index = self.comboAntiAliasing.current()
        self.appCmdQueue.put(COMMAND.SET_ANTIALIASING, anti_aliasing_index)
    # Render Target
    def add_render_target(self, rendertarget_name):
        """Append one debug render-target name to its combobox."""
        combobox_add_item(self.comboRenderTargets, rendertarget_name)
    def view_rendertarget(self, event):
        """Ask the engine to display the selected debug render target; both
        index and name are sent so the engine can resolve it either way.
        """
        rendertarget_index = self.comboRenderTargets.current()
        rendertarget_name = self.comboRenderTargets.get()
        self.appCmdQueue.put(COMMAND.VIEW_RENDERTARGET, (rendertarget_index, rendertarget_name))
def change_resolution(self, event):
width = self.spinWidth.get()
height = self.spinHeight.get()
full_screen = False if 0 == self.checkFullScreen.get() else True
screen_info = (width, height, full_screen)
self.appCmdQueue.put(COMMAND.CHANGE_RESOLUTION, screen_info)
# ------------------------- #
# Widget - Propery Tree
# ------------------------- #
def attribute_changed(self, event):
if not self.isFillAttributeTree and self.selected_item is not None:
# item_id = self.attribute_treeview.identify('item', event.x, event.y)
column, item_id = self.attribute_treeview.get_event_info()
if item_id == '':
return
item = self.attribute_treeview.item(item_id)
item_info = self.attribute_treeview.item_infos[item_id]
if item_info.dataType in (tuple, list, numpy.ndarray, Attributes):
self.attribute_treeview.set(item_id, '#1', '')
return
try:
new_value = get_value(item)
# check value chaned
# if item_info.oldValue == new_value:
# return
item_info.oldValue = new_value
# check array type, then combine components
parent_id = self.attribute_treeview.parent(item_id)
parent = self.attribute_treeview.item(parent_id)
parent_info = self.attribute_treeview.item_infos.get(parent_id)
value = None
attribute_name = ''
if parent_info is not None and parent_info.dataType in (tuple, list, numpy.ndarray):
attribute_name = get_name(parent)
values = []
for child_id in self.attribute_treeview.get_children(parent_id):
child = self.attribute_treeview.item(child_id)
child_info = self.attribute_treeview.item_infos.get(child_id)
# evaluate value
value = get_value(child)
if 'True' == value:
value = True
elif 'False' == value:
value = False
else:
value = child_info.dataType(value)
values.append(value)
if parent_info.dataType == numpy.ndarray:
# numpy array
value = numpy.array(values)
else:
# list or tuple
value = parent_info.dataType(values)
else:
attribute_name = get_name(item)
dataType = item_info.dataType
if bool == dataType or numpy.bool == dataType:
# evaluate boolean
value = dataType(get_value(item) == "True")
elif type(dataType) == type(Enum):
index = [dataType(i).name for i in range(len(dataType))].index(get_value(item))
value = dataType(index)
else:
value = get_value(item)
try:
# try to evaluate int, float, string
value = dataType(value)
except:
pass
selectedItems = []
command = None
if self.selected_item_categoty == 'Object':
command = COMMAND.SET_OBJECT_ATTRIBUTE
selectedItems = self.get_selected_object()
elif self.selected_item_categoty == 'Resource':
command = COMMAND.SET_RESOURCE_ATTRIBUTE
selectedItems = self.get_selected_resource()
for selected_item in selectedItems:
selected_item_name = get_name(selected_item)
selected_item_type = get_value(selected_item)
item_info_history = [item_info]
parent_info = item_info.parent_info
while parent_info is not None:
item_info_history.insert(0, parent_info)
parent_info = parent_info.parent_info
# send changed data
self.appCmdQueue.put(command,
(selected_item_name,
selected_item_type,
attribute_name,
value,
item_info_history,
item_info.index))
except BaseException:
logger.error(traceback.format_exc())
# failed to convert string to dataType, so restore to old value
self.attribute_treeview.set(item_id, '#1', item_info.oldValue)
def select_attribute(self, event):
for item_id in self.attribute_treeview.selection():
item_info = self.attribute_treeview.item_infos[item_id]
if bool == item_info.dataType or numpy.bool == item_info.dataType:
self.attribute_treeview.inplace_checkbutton('#1', item_id)
elif type(item_info.dataType) == type(Enum):
dataType = item_info.dataType
values = [dataType(i).name for i in range(len(dataType))]
self.attribute_treeview.inplace_combobox('#1', item_id, values, readonly=False)
else:
self.attribute_treeview.inplace_entry('#1', item_id)
    def get_selected_attribute(self):
        """Return the item dicts of the currently selected attribute rows."""
        return [self.attribute_treeview.item(item_id) for item_id in self.attribute_treeview.selection()]
def add_attribute(self, parent, attribute_name, value, dataType, parent_info=None, index=0):
item_id = self.attribute_treeview.insert(parent, 'end', text=attribute_name, open=True)
item_info = ItemInfo(attribute_name=attribute_name,
dataType=dataType,
parent_info=parent_info,
index=index)
self.attribute_treeview.item_infos[item_id] = item_info
if dataType in (tuple, list, numpy.ndarray):
for i, item_value in enumerate(value):
self.add_attribute(item_id, "[%d]" % i, item_value, type(item_value), item_info, i)
elif dataType in (dict, OrderedDict):
for key in value:
self.add_attribute(item_id, key, value[key], type(value[key]), item_info, key)
elif dataType is Attributes:
for attribute in value.get_attributes():
self.add_attribute(item_id, attribute.name, attribute.value, attribute.type, item_info, attribute.name)
else:
# set value - int, float, string
self.attribute_treeview.item(item_id, text=attribute_name, values=(value,))
item_info.set_old_value(value)
def open_attribute_menu(self, event):
item_id = self.attribute_treeview.identify('item', event.x, event.y)
item = self.attribute_treeview.item(item_id)
if item not in self.get_selected_attribute():
self.attribute_treeview.selection_set((item_id, ))
self.attribute_menu.post(event.x_root, event.y_root)
def add_attribute_component(self, *args):
if self.selected_item is None or '' == self.selected_item_categoty:
return
if 'Resource' == self.selected_item_categoty:
self.attribute_component_menu(COMMAND.ADD_RESOURCE_COMPONENT)
elif 'Object' == self.selected_item_categoty:
self.attribute_component_menu(COMMAND.ADD_OBJECT_COMPONENT)
def delete_attribute_component(self, *args):
if self.selected_item is None or '' == self.selected_item_categoty:
return
if 'Resource' == self.selected_item_categoty:
self.attribute_component_menu(COMMAND.DELETE_RESOURCE_COMPONENT)
elif 'Object' == self.selected_item_categoty:
self.attribute_component_menu(COMMAND.DELETE_OBJECT_COMPONENT)
def attribute_component_menu(self, command):
selected_item_name = get_name(self.selected_item)
selected_item_type = get_value(self.selected_item)
for item_id in self.attribute_treeview.selection():
item = self.attribute_treeview.item(item_id)
item_info = self.attribute_treeview.item_infos[item_id]
self.appCmdQueue.put(command,
(selected_item_name,
selected_item_type,
item_info.attribute_name,
item_info.parent_info,
item_info.index))
return
    def fill_resource_attribute(self, attributes):
        """Show *attributes* in the attribute panel for the first selected
        resource row, and remember it as the edit target.
        (sic: 'categoty' is a pervasive typo for 'category' in this class.)
        """
        selected_items = self.get_selected_resource()
        if 0 < len(selected_items):
            self.selected_item = selected_items[0]
            self.selected_item_categoty = 'Resource'
            self.fill_attribute(attributes)
    def fill_object_attribute(self, attributes):
        """Show *attributes* in the attribute panel for the first selected
        object row, and remember it as the edit target.
        """
        selected_items = self.get_selected_object()
        if 0 < len(selected_items):
            self.selected_item = selected_items[0]
            self.selected_item_categoty = 'Object'
            self.fill_attribute(attributes)
def clear_attribute(self):
for item in self.attribute_treeview.get_children():
self.attribute_treeview.delete(item)
    def fill_attribute(self, attributes):
        """Rebuild the attribute tree from `attributes`.

        `isFillAttributeTree` is raised while rows are inserted so that
        edit callbacks fired by the UI changes are ignored.
        """
        # lock edit attribute ui
        self.isFillAttributeTree = True
        self.clear_attribute()
        # fill properties of selected object
        for attribute in attributes.get_attributes():
            self.add_attribute("", attribute.name, attribute.value, attribute.type)
        # unlock edit attribute ui
        self.isFillAttributeTree = False
# ------------------------- #
# Widget - Resource List
# ------------------------- #
    def get_selected_resource(self):
        # Item dicts for every selected row in the resource tree.
        return [self.resource_treeview.item(item_id) for item_id in self.resource_treeview.selection()]
def add_resource_list(self, resourceList):
for resName, resType in resourceList:
self.resource_treeview.insert("", 'end', text=resName, values=(resType,))
    def set_resource_info(self, resource_info):
        """Insert or update a resource row, coloring it by load state."""
        resource_name, resource_type, is_loaded = resource_info
        # Loaded resources render black, unloaded ones gray.
        self.resource_treeview.tag_configure(TAG_NORMAL, foreground="gray")
        self.resource_treeview.tag_configure(TAG_LOADED, foreground="black")
        tag = TAG_LOADED if is_loaded else TAG_NORMAL
        for item_id in self.resource_treeview.get_children(''):
            item = self.resource_treeview.item(item_id)
            # edit item
            if get_name(item) == resource_name and get_value(item) == resource_type:
                self.resource_treeview.item(item_id, text=resource_name, values=(resource_type,), tags=(tag, ))
                break
        else:
            # for/else: no matching row found above, so insert a new one
            self.resource_treeview.insert("", 'end', text=resource_name, values=(resource_type,), tags=(tag, ))
def select_resource(self, event):
items = self.get_selected_resource()
if items and len(items) > 0:
item = items[0]
if TAG_LOADED == get_tag(item):
self.appCmdQueue.put(COMMAND.REQUEST_RESOURCE_ATTRIBUTE, (get_name(item), get_value(item)))
else:
self.clear_attribute()
def open_resource_menu(self, event):
item_id = self.resource_treeview.identify('item', event.x, event.y)
item = self.resource_treeview.item(item_id)
if item not in self.get_selected_resource():
self.resource_treeview.selection_set((item_id, ))
self.resource_menu.post(event.x_root, event.y_root)
def load_resource(self, item=None):
items = self.get_selected_resource()
for item in items:
self.appCmdQueue.put(COMMAND.LOAD_RESOURCE, (get_name(item), get_value(item)))
def action_resource(self, item=None):
items = self.get_selected_resource()
for item in items:
self.appCmdQueue.put(COMMAND.ACTION_RESOURCE, (get_name(item), get_value(item)))
def duplicate_resource(self, item=None):
items = self.get_selected_resource()
for item in items:
self.appCmdQueue.put(COMMAND.DUPLICATE_RESOURCE, (get_name(item), get_value(item)))
def save_resource(self, item=None):
items = self.get_selected_resource()
for item in items:
self.appCmdQueue.put(COMMAND.SAVE_RESOURCE, (get_name(item), get_value(item)))
def delete_resource(self, item=None):
items = self.get_selected_resource()
if 0 < len(items):
contents = "\n".join(["%s : %s" % (get_value(item), get_name(item)) for item in items])
choice = messagebox.askyesno('Delete resource', 'Are you sure you want to delete the\n%s?' % contents)
if choice:
for item in items:
self.appCmdQueue.put(COMMAND.DELETE_RESOURCE, (get_name(item), get_value(item)))
def delete_resource_info(self, resource_info):
resource_name, resource_type_name, is_loaded = resource_info
for item_id in self.resource_treeview.get_children():
item = self.resource_treeview.item(item_id)
if get_name(item) == resource_name and get_value(item) == resource_type_name:
self.resource_treeview.delete(item_id)
# ------------------------- #
# Widget - Object List
# ------------------------- #
    def add_camera(self, event):
        # Ask the app process to create a new camera object.
        self.appCmdQueue.put(COMMAND.ADD_CAMERA)
    def add_light(self, event):
        # Ask the app process to create a new light object.
        self.appCmdQueue.put(COMMAND.ADD_LIGHT)
    def create_particle(self, event):
        # Ask the app process to create a new particle system.
        self.appCmdQueue.put(COMMAND.CREATE_PARTICLE)
    def create_spline(self, event):
        # Ask the app process to create a new spline object.
        self.appCmdQueue.put(COMMAND.CREATE_SPLINE)
def create_collision(self, event):
selectedItems = self.get_selected_resource()
items = self.get_selected_resource()
for item in items:
self.appCmdQueue.put(COMMAND.CREATE_COLLISION, (get_name(item), get_value(item)))
def open_object_menu(self, event):
item_id = self.object_treeview.identify('item', event.x, event.y)
item = self.object_treeview.item(item_id)
if item not in self.get_selected_object():
self.object_treeview.selection_set((item_id, ))
self.object_menu.post(event.x_root, event.y_root)
    def get_selected_object(self):
        # Item dicts for every selected row in the object tree.
        return [self.object_treeview.item(item_id) for item_id in self.object_treeview.selection()]
    def add_object_info(self, object_info):
        """Insert or update an object-tree row for (name, type)."""
        object_name, object_type = object_info
        for item_id in self.object_treeview.get_children():
            item = self.object_treeview.item(item_id)
            if get_name(item) == object_name and get_value(item) == object_type:
                self.object_treeview.item(item_id, text=object_name, values=(object_type, ))
                break
        else:
            # for/else: no matching row found above, so insert a new one
            self.object_treeview.insert("", 'end', text=object_name, values=(object_type,))
def action_object(self, *args):
selectedItems = self.get_selected_object()
for selectedItem in selectedItems:
self.appCmdQueue.put(COMMAND.ACTION_OBJECT, get_name(selectedItem))
def delete_object(self, *args):
selectedItems = self.get_selected_object()
for selectedItem in selectedItems:
self.appCmdQueue.put(COMMAND.DELETE_OBJECT, get_name(selectedItem))
def delete_object_info(self, objName):
for item_id in self.object_treeview.get_children():
item = self.object_treeview.item(item_id)
if objName == get_name(item):
self.object_treeview.delete(item_id)
def clear_object_list(self, *args):
for item in self.object_treeview.get_children():
self.object_treeview.delete(item)
    def select_object(self):
        """Mark the first selected object and request its attributes."""
        selectedItems = self.get_selected_object()
        if selectedItems:
            item = selectedItems[0]
            selected_objectName = get_name(item)
            selected_objectTypeName = get_value(item)
            # request selected object information to fill attribute widget
            self.appCmdQueue.put(COMMAND.SET_OBJECT_SELECT, selected_objectName)
            self.appCmdQueue.put(COMMAND.REQUEST_OBJECT_ATTRIBUTE, (selected_objectName, selected_objectTypeName))
def focus_object(self, *args):
selectedItems = self.get_selected_object()
for selectedItem in selectedItems:
self.appCmdQueue.put(COMMAND.SET_OBJECT_FOCUS, get_name(selectedItem))
break
def run_editor(project_filename, cmdQueue, appCmdQueue, cmdPipe):
    # Entry point for the editor process: build the Tk main window, run the
    # event loop until the window closes, then terminate this process.
    root = tk.Tk()
    MainWindow(root, project_filename, cmdQueue, appCmdQueue, cmdPipe)
    root.mainloop()
    sys.exit()
|
jeschkies/nyan | shared_modules/unit_tests/cosine_esamodel_test.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
The MIT License (MIT)
Copyright (c) 2012-2013 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
'''
Created on 23.11.2012
@author: <NAME> <<EMAIL>>
'''
from cosine_esamodel import CosineEsaModel, DocumentTitles
from feature_extractor.extractors import (EsaFeatureExtractor,
TfidfFeatureExtractor,
LdaFeatureExtractor)
from gensim import utils, matutils
from gensim.corpora import Dictionary, MmCorpus
from gensim.models import tfidfmodel
import itertools
import logging
from models.mongodb_models import (Article, Features, User, UserModel,
RankedArticle, ReadArticleFeedback)
from mongoengine import *
import numpy as np
import unittest
from utils.helper import load_config
from random import sample
from sets import Set
from smote import SMOTE, borderlineSMOTE
import sys
logger = logging.getLogger("unittesting")

# set up vars used in testing ("Deerwester" from the web tutorial)
texts = [['human', 'interface', 'computer'], #human interface
         ['survey', 'user', 'computer', 'system', 'response', 'time'], #computer systems
         ['eps', 'user', 'interface', 'system'], #eps
         ['system', 'human', 'system', 'eps'], #human systems
         ['user', 'response', 'time'], #response time
         ['trees'], #trees
         ['graph', 'trees'], #graph
         ['graph', 'minors', 'trees'], #minor tress
         ['graph', 'minors', 'survey']] #minors survey
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
# One human-readable concept title per training document above.
concepts = ['human interface', 'computer systems', 'eps', 'human systems',
            'response time', 'tress', 'graph', 'minors tress', 'minors survey']
test_corpus_texts = [['graph', 'minors', 'eps'],
                     ['human', 'system', 'computer'],
                     ['user', 'system', 'human']
                     ]
test_corpus = [dictionary.doc2bow(text) for text in test_corpus_texts]

# Class labels used when building training samples below.
UNREAD = 0
READ = 1
def get_features(article, extractor):
    '''
    Returns the full (dense) feature vector of an article.

    `article` should be a mongodb model; `extractor` supplies the feature
    version, (re-)extraction and the feature-space dimensionality.
    Raises AttributeError if the article has no features anymore.
    '''
    #check if features of article are current version
    try:
        feature_version = article.features.version
    except AttributeError as e:
        if str(e) == 'features':
            logger.error("Article %s does not have any features." %
                         article.id)
        #article seems not to exist anymore go on
        raise

    if feature_version != extractor.get_version():
        clean_content = article.clean_content
        #get new features (stored ones are from an older extractor version)
        features = extractor.get_features(clean_content)
    else:
        features = article.features.data

    #sparse2full converts list of 2-tuples to numpy array
    article_features_as_full_vec = matutils.sparse2full(features,
                                                        extractor.get_feature_number())
    return article_features_as_full_vec
def get_samples(extractor,
                read_article_ids,
                unread_article_ids,
                p_synthetic_samples = 300,
                p_majority_samples = 500,
                k = 5):
    '''
    Builds a (re-balanced) training set of article feature vectors.

    read_article_ids : Set
    unread_article_ids : Set
    p_synthetic_samples : Percentage of synthetic samples, 300 for 300%
    p_majority_samples : Cap on unread (majority) samples, as a percentage
                         of the number of read articles.
    k : neighbourhood for k nearest neighbour, standard 5

    Returns
    -------
    array-like full vector samples, shape = [n_features, n_samples]
    array-like marks, shape = [n_samples]
    '''

    #Under-sample unread ids: the majority class is capped relative to reads
    unread_article_ids = Set(sample(unread_article_ids,
                                    min(p_majority_samples/100 * len(read_article_ids),
                                        len(unread_article_ids))
                                    )
                             )

    #Create unread article vectors
    unread_marks = np.empty(len(unread_article_ids))
    unread_marks.fill(UNREAD)
    unread_articles = np.empty(shape=(len(unread_article_ids),
                                      extractor.get_feature_number()))
    for i, article in enumerate(Article.objects(id__in = unread_article_ids)):
        try:
            article_features_as_full_vec = get_features(article, extractor)
            unread_articles[i,:] = article_features_as_full_vec[:]
        except AttributeError as e:
            # Skip articles without features; their row stays uninitialized.
            logger.error("Article %s does not have attribute: %s."
                         % (article.id, e))

    #Create read article vectors
    read_marks = np.empty(len(read_article_ids))
    read_marks.fill(READ)
    read_articles = np.empty(shape=(len(read_article_ids),
                                    extractor.get_feature_number()))
    for i, article in enumerate(Article.objects(id__in = read_article_ids)):
        try:
            article_features_as_full_vec = get_features(article, extractor)
            read_articles[i,:] = article_features_as_full_vec[:]
        except AttributeError as e:
            logger.error("Article %s does not have attribute: %s."
                         % (article.id, e))

    #SMOTE sample minorities
    #synthetic_read_articles = SMOTE(read_articles, p_synthetic_samples, k)

    #borderlineSMOTE sample minorites: oversample reads near the class border
    X = np.concatenate((read_articles, unread_articles))
    y = np.concatenate((read_marks, unread_marks))
    new_read_articles, synthetic_read_articles, danger_read_articles = borderlineSMOTE(X = X,
                                                                                      y = y,
                                                                                      minority_target = READ,
                                                                                      N = p_synthetic_samples, k = k)

    #Create synthetic read samples (all carry the READ label)
    synthetic_marks = np.zeros(len(synthetic_read_articles))
    synthetic_marks.fill(READ)

    read_marks = np.empty(len(new_read_articles))
    read_marks.fill(READ)

    danger_read_marks = np.empty(len(danger_read_articles))
    danger_read_marks.fill(READ)

    logger.info("Use %d read, %d unread, %d danger reads and %d synthetic samples." %
                (len(read_marks), len(unread_marks),
                 len(danger_read_marks), len(synthetic_marks)))

    return (np.concatenate((new_read_articles,
                            synthetic_read_articles,
                            danger_read_articles,
                            unread_articles)),
            np.concatenate((read_marks,
                            synthetic_marks,
                            danger_read_marks,
                            unread_marks))
            )
class TestCosineESAModel(unittest.TestCase):
def setUp(self):
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
level=logging.DEBUG)
self.config_ = load_config(("/media/sdc1/Aptana Studio 3 Workspace/"
"configs/config.yaml"),
logger,
exit_with_error = True)
if self.config_ == None:
logger.error("No config. Exit.")
sys.exit(1)
def tearDown(self):
pass
@unittest.skip("Skip small test")
def test_constructor(self):
#create tf-idf model
tfidf_model = tfidfmodel.TfidfModel(corpus, normalize=True)
#transform corpus
tfidf_corpus = tfidf_model[corpus]
#train esa model
esa_model = CosineEsaModel(tfidf_corpus,
document_titles = concepts,
test_corpus = test_corpus,
test_corpus_targets = [1,2,2],
num_test_corpus = 3,
num_best_features = 2,
num_features = len(dictionary))
test_doc = ['graph', 'minors', 'trees']#['user', 'computer', 'time']#
tfidf_test_doc = tfidf_model[dictionary.doc2bow(test_doc)]
#transform test doc to esa
esa_test_doc = esa_model[tfidf_test_doc]
print esa_test_doc
#for concept_id, weight in sorted(esa_test_doc, key=lambda item: -item[1]):
# print "%s %.3f" % (esa_model.document_titles[concept_id], weight)
<EMAIL>("Skip bigger test")
def test_constructor_with_file_wikicorpus(self):
#load tf-idf model
tfidf_model = tfidfmodel.TfidfModel.load("/media/sdc1/test_dump/result/test_tfidf.model")
extractor = TfidfFeatureExtractor("/media/sdc1/test_dump/result/test")
#load tf-idf corpus
tfidf_corpus = MmCorpus('/media/sdc1/test_dump/result/test_tfidf_corpus.mm')
#load lda corpus
#lda_corpus = MmCorpus('/media/sdc1/test_dump/result/test_lda_corpus.mm')
#load dictionary
id2token = Dictionary.load("/media/sdc1/test_dump/result/test_wordids.dict")
#load article titles
document_titles = DocumentTitles.load("/media/sdc1/test_dump/result/test_articles.txt")
#Connect to mongo database
connect(self.config_['database']['db-name'],
username= self.config_['database']['user'],
password= self.config_['database']['passwd'],
port = self.config_['database']['port'])
#Load articles as test corpus
user = User.objects(email=u"<EMAIL>").first()
ranked_article_ids = (a.article.id
for a
in RankedArticle.objects(user_id = user.id).only("article"))
all_article_ids = Set(a.id
for a
in Article.objects(id__in = ranked_article_ids).only("id"))
read_article_ids = Set(a.article.id
for a
in ReadArticleFeedback.objects(user_id = user.id).only("article"))
unread_article_ids = all_article_ids - read_article_ids
#sample test articles
X, y = get_samples(extractor, read_article_ids, unread_article_ids)
s,f = X.shape
logger.debug("Traning with %d samples, %d features, %d marks" %
(s,f, len(y)))
#train esa model
esa_model = CosineEsaModel(tfidf_corpus,
document_titles = document_titles,
test_corpus = X,
test_corpus_targets = y,
num_test_corpus = len(y),
num_best_features = 15,
num_features = len(id2token))
print esa_model
esa_model.save('/media/sdc1/test_dump/result/test_cesa.model')
tmp_esa = CosineEsaModel.load('/media/sdc1/test_dump/result/test_cesa.model')
print tmp_esa
@unittest.skip("too big")
def test_constructor_with_big_file_wikicorpus(self):
#load tf-idf corpus
tfidf_corpus = MmCorpus('/media/sdc1/test_dump/result/wiki_tfidf_corpus.mm')
#load lda corpus
#lda_corpus = MmCorpus('/media/sdc1/test_dump/result/test_lda_corpus.mm')
#load dictionary
id2token = Dictionary.load("/media/sdc1/test_dump/result/wiki_wordids.dict")
#load article titles
document_titles = DocumentTitles.load("/media/sdc1/test_dump/result/wiki_articles.txt")
#train esa model
esa_model = EsaModel(tfidf_corpus, num_clusters = 15,
document_titles = document_titles,
num_features = len(id2token))
print esa_model
esa_model.save('/media/sdc1/test_dump/result/wiki_cesa.model')
tmp_esa = EsaModel.load('/media/sdc1/test_dump/result/wiki_cesa.model')
print tmp_esa
if __name__ == "__main__":
    # Run every test in this module.
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
vorburger/flowsim | flowsim-ui/app/scripts/services/nd.js | <filename>flowsim-ui/app/scripts/services/nd.js
'use strict';
/**
* @ngdoc service
* @name flowsimUiApp.nd
* @description
* # nd
* Service in the flowsimUiApp.
*/
angular.module('flowsimUiApp')
  .factory('ND', function(UInt, ETHERNET, IPV6) {

// Protocol name and fixed header size (bytes) for IPv6 Neighbor Discovery.
var NAME = 'ND';
var BYTES = 8;

// ND carries no nested protocol; its single payload kind maps to 0.
var Payloads = {
  'Payload' : 0
};

// Build an ND header: either clone from an existing ND-shaped object `nd`,
// or construct from a target IPv6 address and a link-layer (MAC) address.
function ND(nd, target, hw) {
  if(_.isObject(nd)) {
    this._target = IPV6.mkAddress(nd._target);
    this._hw = ETHERNET.mkMAC(nd._hw);
  } else {
    this._target = IPV6.mkAddress(target);
    this._hw = ETHERNET.mkMAC(hw);
  }
  this.name = NAME;
  this.bytes = BYTES;
}

function mkND(target, hw) {
  return new ND(null, target, hw);
}

// Combined getter/setter for the target address.
ND.prototype.target = function(target){
  if(target){
    this._target = IPV6.mkAddress(target);
  } else {
    return this._target;
  }
};

function mkTarget(tar){
  // NOTE(review): `new` applied to a factory function; harmless if
  // IPV6.mkAddress returns an object (the returned object wins) — confirm.
  return new IPV6.mkAddress(tar);
}

// Match object for the target field; summarizes as 'nd' in the UI.
function mkTargetMatch(value, mask){
  var tmp = new IPV6.Address.Match(null,
      IPV6.mkAddress(value), IPV6.mkAddress(mask));
  tmp.summarize = function() {
    return 'nd';
  };
  return tmp;
}

// Combined getter/setter for the link-layer (hardware) address.
ND.prototype.hw = function(hw){
  if(hw){
    this._hw = ETHERNET.mkMAC(hw);
  } else {
    return this._hw;
  }
};

ND.prototype.clone = function(){
  return new ND(this);
};

ND.prototype.toString = function(){
  return 'target: '+this.target().toString()+'\n'+
         'hw: '+this.hw().toString();
};

// Match object for the hardware-address field; summarizes as 'nd'.
function mkHWMatch(value, mask){
  var tmp = new ETHERNET.MAC.Match(null, ETHERNET.mkMAC(value),
      ETHERNET.mkMAC(mask));
  tmp.summarize = function() {
    return 'nd';
  };
  return tmp;
}

// Tooltips and validators used by the attribute editor UI.
var TIPS = {
  target: 'Target Address',
  hw: 'Link Layer Address'
};

var TESTS = {
  target: IPV6.Address.is,
  hw: ETHERNET.MAC.is
};

// UI-facing wrapper exposing the ND fields as editable attributes.
function ND_UI(nd){
  nd = nd ? new ND(nd) : new ND();
  this.name = NAME;
  this.bytes = nd.bytes;
  this.attrs = [{
    name: 'Target',
    value: nd.target().toString(),
    test: TESTS.target,
    tip: TIPS.target
  },{
    name: 'HW Address',
    value: nd.hw().toString(),
    test: TESTS.hw,
    tip: TIPS.hw
  }];
}

// Rebuild a plain ND from the edited attribute values.
ND_UI.prototype.toBase = function() {
  return new ND(null, this.attrs[0].value, this.attrs[1].value);
};

// ND has no payload to manage; these are intentional no-ops.
ND_UI.prototype.setPayload = function() {
};

ND_UI.prototype.clearPayload = function() {
};

// Public service surface.
return {
  name: NAME,
  ND: ND,
  mkND: mkND,
  mkTarget: mkTarget,
  mkTargetMatch: mkTargetMatch,
  mkHWMatch: mkHWMatch,
  target: '_target',
  hw: '_hw',
  create: function(nd){ return new ND(nd); },
  createUI: function(nd){ return new ND_UI(nd); },
  Payloads: _(Payloads).keys(),
  TIPS: TIPS,
  TESTS: TESTS
};
});
|
mamez/recicla | cbd-app/src/main/java/edu/recicla/app/entity/SolicitudRecoleccion.java | package edu.recicla.app.entity;
import java.io.Serializable;
import javax.persistence.*;
import java.util.Date;
import java.util.List;
/**
* The persistent class for the solicitud_recoleccion database table.
*
*/
@Entity
@Table(name="solicitud_recoleccion")
public class SolicitudRecoleccion implements Serializable {
private static final Long serialVersionUID = 1L;
@Id
@GeneratedValue(strategy=GenerationType.AUTO)
private Long id;
@Column(name="detalle_user")
private String detalleUser;
@Column(name="estado_recoleccion")
private String estadoRecoleccion;
@Temporal(TemporalType.DATE)
@Column(name="fecha_recoleccion")
private Date fechaRecoleccion;
@Temporal(TemporalType.DATE)
@Column(name="fecha_solicitud")
private Date fechaSolicitud;
@Column(name="observacion_admin")
private String observacionAdmin;
@Column(name="peso_aprox")
private String pesoAprox;
//bi-directional many-to-one association to DetalleSolicitud
@OneToMany(mappedBy="solicitudRecoleccion")
private List<DetalleSolicitud> detalleSolicituds;
//bi-directional many-to-one association to PrUsuario
@ManyToOne(fetch=FetchType.LAZY)
@JoinColumn(name="punto_recoleccion")
private PrUsuario prUsuario;
public SolicitudRecoleccion() {
}
public Long getId() {
return this.id;
}
public void setId(Long id) {
this.id = id;
}
public String getDetalleUser() {
return this.detalleUser;
}
public void setDetalleUser(String detalleUser) {
this.detalleUser = detalleUser;
}
public String getEstadoRecoleccion() {
return this.estadoRecoleccion;
}
public void setEstadoRecoleccion(String estadoRecoleccion) {
this.estadoRecoleccion = estadoRecoleccion;
}
public Date getFechaRecoleccion() {
return this.fechaRecoleccion;
}
public void setFechaRecoleccion(Date fechaRecoleccion) {
this.fechaRecoleccion = fechaRecoleccion;
}
public Date getFechaSolicitud() {
return this.fechaSolicitud;
}
public void setFechaSolicitud(Date fechaSolicitud) {
this.fechaSolicitud = fechaSolicitud;
}
public String getObservacionAdmin() {
return this.observacionAdmin;
}
public void setObservacionAdmin(String observacionAdmin) {
this.observacionAdmin = observacionAdmin;
}
public String getPesoAprox() {
return this.pesoAprox;
}
public void setPesoAprox(String pesoAprox) {
this.pesoAprox = pesoAprox;
}
public List<DetalleSolicitud> getDetalleSolicituds() {
return this.detalleSolicituds;
}
public void setDetalleSolicituds(List<DetalleSolicitud> detalleSolicituds) {
this.detalleSolicituds = detalleSolicituds;
}
public DetalleSolicitud addDetalleSolicitud(DetalleSolicitud detalleSolicitud) {
getDetalleSolicituds().add(detalleSolicitud);
detalleSolicitud.setSolicitudRecoleccion(this);
return detalleSolicitud;
}
public DetalleSolicitud removeDetalleSolicitud(DetalleSolicitud detalleSolicitud) {
getDetalleSolicituds().remove(detalleSolicitud);
detalleSolicitud.setSolicitudRecoleccion(null);
return detalleSolicitud;
}
public PrUsuario getPrUsuario() {
return this.prUsuario;
}
public void setPrUsuario(PrUsuario prUsuario) {
this.prUsuario = prUsuario;
}
} |
zhulianhai/apollo | modules/planning/common/obstacle_blocking_analyzer.h | <gh_stars>1-10
/******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
// BUG FIX: this header had no include guard; add #pragma once so multiple
// inclusion does not produce redefinition errors.
#pragma once

#include "modules/common/configs/vehicle_config_helper.h"
#include "modules/planning/common/frame.h"

namespace apollo {
namespace planning {

/**
 * @brief Decide whether an obstacle is a blocking one that needs to be
 *        side-passed.
 * @param The frame that contains reference_line and other info.
 * @param The obstacle of interest.
 * @param The speed threshold to tell whether an obstacle is stopped.
 * @param The minimum distance to front blocking obstacle for side-pass.
 *        (if too close, don't side-pass for safety consideration)
 * @param Whether to take into consideration that the blocking obstacle
 *        itself is blocked by others as well. In other words, if the
 *        front blocking obstacle is blocked by others, then don't try
 *        to side-pass it. (Parked obstacles are never blocked by others)
 */
bool IsBlockingObstacleToSidePass(
    const Frame& frame, const Obstacle* obstacle,
    double block_obstacle_min_speed, double min_front_sidepass_distance,
    bool enable_obstacle_blocked_check);

// Distance between the ADC and the obstacle (exact metric defined in the
// corresponding .cc file).
double GetDistanceBetweenADCAndObstacle(
    const Frame& frame, const Obstacle* obstacle);

// Check if the obstacle is blocking ADC's driving path (reference_line).
bool IsBlockingDrivingPathObstacle(
    const ReferenceLine& reference_line, const Obstacle* obstacle);

// Whether the obstacle is considered a parked vehicle relative to the
// reference line.
bool IsParkedVehicle(const ReferenceLine& reference_line,
                     const Obstacle* obstacle);

}  // namespace planning
}  // namespace apollo
|
ArcBees/Jukito | jukito/src/test/java/org/jukito/InnerClassTest.java | /**
* Copyright 2014 ArcBees Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.jukito;
import org.junit.Test;
import org.junit.runner.RunWith;
import com.google.inject.ConfigurationException;
import com.google.inject.Inject;
import static org.junit.Assert.assertEquals;
/**
* Test to ensure that injecting inner classes throw a ConfigurationException, instead
* of simply injecting a mock. Additionally, test that injecting static inner classes
* still work properly.
*/
@RunWith(JukitoRunner.class)
public class InnerClassTest {

    /**
     * Test module, just bind anything to make sure regular injections still work properly.
     */
    public static class Module extends JukitoModule {
        @Override
        protected void configureTest() {
            bind(String.class).toInstance("hello world!");
        }
    }

    /**
     * Dummy inner class with a single inject.
     */
    class InnerClass {
        @Inject
        String test;

        // Fix: annotate the override explicitly so the compiler verifies it.
        @Override
        public String toString() {
            return test;
        }
    }

    /**
     * Dummy static inner class with a single inject.
     */
    static class StaticInnerClass {
        @Inject
        String test;

        @Override
        public String toString() {
            return test;
        }
    }

    /**
     * Verify that when you try to inject an inner class, a ConfigurationException is thrown.
     *
     * @param klass the injected instance (never materialized; injection must fail)
     */
    @Test(expected = ConfigurationException.class)
    public void testInnerClass(InnerClass klass) {
        assertEquals("hello world!", klass.toString());
    }

    /**
     * Verify that when you try to inject a static inner class, everything works properly.
     *
     * @param klass the injected static-nested instance
     */
    @Test
    public void testStaticInnerClass(StaticInnerClass klass) {
        assertEquals("hello world!", klass.toString());
    }
}
|
zero88/qwe-iot-gateway | connector/bacnet/src/main/java/io/github/zero88/qwe/iot/connector/bacnet/internal/listener/ReceiveIAmListener.java | package io.github.zero88.qwe.iot.connector.bacnet.internal.listener;
import java.util.function.Consumer;
import com.serotonin.bacnet4j.RemoteDevice;
import com.serotonin.bacnet4j.event.DeviceEventAdapter;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
@Slf4j
@RequiredArgsConstructor
public final class ReceiveIAmListener extends DeviceEventAdapter {

    /** Callback invoked for every received IAm; logging runs after it. */
    @NonNull
    private final Consumer<RemoteDevice> handler;

    @Override
    public void iAmReceived(RemoteDevice d) {
        // Compose handler + logging so the handler always runs first.
        handler.andThen(this::log).accept(d);
    }

    private void log(RemoteDevice d) {
        log.info("Receive IAm from Instance: {} - Address: {}", d.getInstanceNumber(), d.getAddress().getDescription());
    }

}
|
Capstone-Team-34/Mandoline | FlowDroid/soot-infoflow/src/soot/jimple/infoflow/rifl/RIFLConstants.java | <reponame>Capstone-Team-34/Mandoline
package soot.jimple.infoflow.rifl;
/**
* Constants for tag and attribute names in RIFL documents
*
* @author <NAME>
*
*/
class RIFLConstants {

    // Utility/constants holder: prevent instantiation.
    private RIFLConstants() {
    }

    // Tag names used in RIFL documents.
    public static final String RIFL_SPEC_TAG = "riflspec";
    public static final String INTERFACE_SPEC_TAG = "interfacespec";
    public static final String SOURCE_TAG = "source";
    public static final String SINK_TAG = "sink";
    public static final String CATEGORY_TAG = "category";
    public static final String RETURN_VALUE_TAG = "returnvalue";
    public static final String FIELD_TAG = "field";
    public static final String PARAMETER_TAG = "parameter";
    public static final String DOMAINS_TAG = "domains";
    public static final String DOMAIN_TAG = "domain";
    public static final String DOMAIN_ASSIGNMENT_TAG = "domainassignment";
    public static final String ASSIGNABLE_TAG = "assignable";
    public static final String ASSIGN_TAG = "assign";
    public static final String FLOW_RELATION_TAG = "flowrelation";
    public static final String FLOW_TAG = "flow";

    // Attribute names used in RIFL documents.
    public static final String CLASS_ATTRIBUTE = "class";
    public static final String METHOD_ATTRIBUTE = "method";
    public static final String PARAMETER_ATTRIBUTE = "parameter";
    public static final String FIELD_ATTRIBUTE = "field";
    public static final String NAME_ATTRIBUTE = "name";
    public static final String HANDLE_ATTRIBUTE = "handle";
    public static final String DOMAIN_ATTRIBUTE = "domain";
    public static final String FROM_ATTRIBUTE = "from";
    public static final String TO_ATTRIBUTE = "to";
}
|
eschnett/kotekan | lib/utils/datasetState.hpp | <reponame>eschnett/kotekan
#ifndef DATASETSTATE_HPP
#define DATASETSTATE_HPP
#include "Hash.hpp" // for Hash
#include "factory.hpp" // for REGISTER_NAMED_TYPE_WITH_FACTORY, CREATE_FACTORY, FACTORY, Factory
#include "gateSpec.hpp" // for gateSpec, _factory_aliasgateSpec
#include "visUtil.hpp" // for prod_ctype, rstack_ctype, time_ctype, input_ctype, freq_ctype
#include "fmt.hpp" // for format, fmt
#include "json.hpp" // for json, basic_json<>::object_t, json_ref, basic_json, basic_json<>...
#include <algorithm> // for copy
#include <cstdint> // for uint32_t
#include <exception> // for exception
#include <iosfwd> // for ostream
#include <memory> // for allocator, unique_ptr
#include <numeric> // for iota
#include <stddef.h> // for size_t
#include <stdexcept> // for runtime_error, out_of_range
#include <string> // for string
#include <utility> // for pair
#include <vector> // for vector, vector<>::iterator
class datasetManager;
class datasetState; // IWYU pragma: keep
/// Unique pointer to a datasetState
using state_uptr = std::unique_ptr<datasetState>;
/// DatasetState ID
using state_id_t = Hash;
/// DatasetID
using dset_id_t = Hash;
/**
* @brief A base class for representing state changes done to datasets.
*
* This is meant to be subclassed. All subclasses must implement a constructor
* that can build the type from a `json` argument, and a `data_to_json` method
* that can serialise the type into a `json` object.
*
* @author <NAME>, <NAME>
**/
class datasetState {
public:
    // Virtual destructor: instances are deleted through datasetState*.
    virtual ~datasetState(){};

    /**
     * @brief Create a dataset state from a full json serialisation.
     *
     * This will correctly instantiate the correct type from the json.
     *
     * @param j Full JSON serialisation.
     * @returns The created datasetState or a nullptr in a failure case.
     **/
    static state_uptr from_json(const nlohmann::json& j);

    /**
     * @brief Full serialisation of state into JSON.
     *
     * @returns JSON serialisation of state.
     **/
    nlohmann::json to_json() const;

    /**
     * @brief Save the internal data of this instance into JSON.
     *
     * This must be implement by any derived classes and should save the
     * information needed to reconstruct any subclass specific internals.
     *
     * @returns JSON representing the internal state.
     **/
    virtual nlohmann::json data_to_json() const = 0;

    /**
     * @brief Compare to another dataset state.
     * @param s State to compare with.
     * @return True if states identical, False otherwise.
     */
    bool equals(datasetState& s) const;

    /**
     * @brief Get the name of this state.
     * @return The state name.
     */
    std::string type() const;

private:
    // Add as friend so it can walk the inner state
    friend datasetManager;
};
CREATE_FACTORY(datasetState, const nlohmann::json&);
#define REGISTER_DATASET_STATE(T, s) REGISTER_NAMED_TYPE_WITH_FACTORY(datasetState, T, s);
// Printing for datasetState
std::ostream& operator<<(std::ostream&, const datasetState&);
/**
* @brief A dataset state that describes the frequencies in a datatset.
*
* @author <NAME>, <NAME>
*/
class freqState : public datasetState {
public:
    /**
     * @brief Constructor
     * @param data The frequency information as serialized by
     *             freqState::to_json().
     * @throws std::runtime_error If the JSON does not deserialise into a
     *                            vector of {frequency ID, frequency index map}.
     */
    freqState(const nlohmann::json& data) {
        try {
            _freqs = data.get<std::vector<std::pair<uint32_t, freq_ctype>>>();
        } catch (std::exception& e) {
            throw std::runtime_error(fmt::format(
                fmt("freqState: Failure parsing json data ({:s}): {:s}"), data.dump(4), e.what()));
        }
    }

    /**
     * @brief Constructor
     * @param freqs The frequency information as a vector of
     *              {frequency ID, frequency index map}.
     */
    // Pass by value and move into the member: one copy for lvalue callers,
    // none for rvalues (the original copied a second time).
    freqState(std::vector<std::pair<uint32_t, freq_ctype>> freqs) : _freqs(std::move(freqs)) {}

    /**
     * @brief Get frequency information (read only).
     *
     * @return The frequency information as a vector of
     *         {frequency ID, frequency index map}
     */
    const std::vector<std::pair<uint32_t, freq_ctype>>& get_freqs() const {
        return _freqs;
    }

private:
    /// Serialize the data of this state in a json object
    nlohmann::json data_to_json() const override {
        return nlohmann::json(_freqs);
    }

    /// IDs that describe the subset that this dataset state defines
    std::vector<std::pair<uint32_t, freq_ctype>> _freqs;
};
/**
* @brief A dataset state that describes the inputs in a datatset.
*
* @author <NAME>, <NAME>
*/
class inputState : public datasetState {
public:
    /**
     * @brief Constructor
     * @param data The input information as serialized by
     *             inputState::to_json().
     * @throws std::runtime_error If the JSON does not deserialise into a
     *                            vector of input index maps.
     */
    inputState(const nlohmann::json& data) {
        try {
            _inputs = data.get<std::vector<input_ctype>>();
        } catch (std::exception& e) {
            throw std::runtime_error(fmt::format(
                fmt("inputState: Failure parsing json data ({:s}): {:s}"), data.dump(4), e.what()));
        }
    };

    /**
     * @brief Constructor
     * @param inputs The input information as a vector of
     *               input index maps.
     */
    // NOTE(review): `inputs` is taken by value and then copied again into the
    // member; a std::move would save one copy — confirm and align with the
    // other states.
    inputState(std::vector<input_ctype> inputs) : _inputs(inputs){};

    /**
     * @brief Get input information (read only).
     *
     * @return The input information as a vector of input index maps.
     */
    const std::vector<input_ctype>& get_inputs() const {
        return _inputs;
    }

private:
    /// Serialize the data of this state in a json object
    nlohmann::json data_to_json() const override {
        nlohmann::json j(_inputs);
        return j;
    }

    /// The subset that this dataset state defines
    std::vector<input_ctype> _inputs;
};
/**
* @brief A dataset state that describes the products in a datatset.
*
* @author <NAME>, <NAME>
*/
class prodState : public datasetState {
public:
    /**
     * @brief Constructor
     * @param data The product information as serialized by
     *             prodState::to_json().
     * @throws std::runtime_error If the JSON does not deserialise into a
     *                            vector of product index maps.
     */
    prodState(const nlohmann::json& data) {
        try {
            _prods = data.get<std::vector<prod_ctype>>();
        } catch (std::exception& e) {
            throw std::runtime_error(fmt::format(
                fmt("prodState: Failure parsing json data ({:s}): {:s}"), data.dump(4), e.what()));
        }
    };

    /**
     * @brief Constructor
     * @param prods The product information as a vector of
     *              product index maps.
     */
    prodState(std::vector<prod_ctype> prods) : _prods(prods){};

    /**
     * @brief Get product information (read only).
     *
     * @return The prod information as a vector of product index maps.
     */
    const std::vector<prod_ctype>& get_prods() const {
        return _prods;
    }

private:
    /// Serialize the data of this state in a json object
    nlohmann::json data_to_json() const override {
        nlohmann::json j(_prods);
        return j;
    }

    /// IDs that describe the subset that this dataset state defines
    std::vector<prod_ctype> _prods;
};
/**
* @brief A dataset state that keeps the time information of a datatset.
*
* @author <NAME>
*/
class timeState : public datasetState {
public:
    /**
     * @brief Constructor
     * @param data The time information as serialized by
     *             timeState::to_json().
     * @throws std::runtime_error If the JSON does not deserialise into a
     *                            vector of time index maps.
     */
    timeState(const nlohmann::json& data) {
        try {
            _times = data.get<std::vector<time_ctype>>();
        } catch (std::exception& e) {
            throw std::runtime_error(fmt::format(
                fmt("timeState: Failure parsing json data ({:s}): {:s}"), data.dump(4), e.what()));
        }
    };

    /**
     * @brief Constructor
     * @param times The time information as a vector of
     *              time index maps.
     */
    timeState(std::vector<time_ctype> times) : _times(times){};

    /**
     * @brief Get time information (read only).
     *
     * @return The time information as a vector of time index maps.
     */
    const std::vector<time_ctype>& get_times() const {
        return _times;
    }

private:
    /// Serialize the data of this state in a json object
    nlohmann::json data_to_json() const override {
        nlohmann::json j(_times);
        return j;
    }

    /// Time index map of the dataset state.
    std::vector<time_ctype> _times;
};
/**
* @brief A dataset state that keeps the eigenvalues of a datatset.
*
* @author <NAME>
*/
class eigenvalueState : public datasetState {
public:
    /**
     * @brief Constructor
     * @param data The eigenvalues as serialized by
     *             eigenvalueState::to_json().
     * @throws std::runtime_error If the JSON does not deserialise into a
     *                            vector of eigenvalue indices.
     */
    eigenvalueState(const nlohmann::json& data) {
        try {
            _ev = data.get<std::vector<uint32_t>>();
        } catch (std::exception& e) {
            // Fixed: the message previously reported "eigenvectorState",
            // which is not this class's name.
            throw std::runtime_error(fmt::format(fmt("eigenvalueState: Failure parsing json "
                                                     "data ({:s}): {:s}"),
                                                 data.dump(4), e.what()));
        }
    }

    /**
     * @brief Constructor
     * @param ev The eigenvalues.
     */
    // Pass by value and move: avoids the extra copy the original made.
    eigenvalueState(std::vector<uint32_t> ev) : _ev(std::move(ev)) {}

    /**
     * @brief Constructor
     * @param num_ev The number of eigenvalues. The indices will end up
     *               running from 0 to num_ev - 1
     */
    eigenvalueState(size_t num_ev) : _ev(num_ev) {
        std::iota(_ev.begin(), _ev.end(), 0);
    }

    /**
     * @brief Get eigenvalues (read only).
     *
     * @return The eigenvalues.
     */
    const std::vector<uint32_t>& get_ev() const {
        return _ev;
    }

    /**
     * @brief Get the number of eigenvalues
     *
     * @return The number of eigenvalues.
     */
    size_t get_num_ev() const {
        return _ev.size();
    }

private:
    /// Serialize the data of this state in a json object
    nlohmann::json data_to_json() const override {
        return nlohmann::json(_ev);
    }

    /// Eigenvalues of the dataset state.
    std::vector<uint32_t> _ev;
};
/**
* @brief Take an rstack map and generate a stack->prod mapping.
*
* @param num_stack Total number of stacks.
* @param stack_map The prod->stack mapping.
*
* @returns The stack->prod mapping.
**/
// Declaration only; the implementation lives in the corresponding .cpp file.
std::vector<stack_ctype> invert_stack(uint32_t num_stack,
                                      const std::vector<rstack_ctype>& stack_map);
/**
* @brief A dataset state that describes a redundant baseline stacking.
*
* @author <NAME>
*/
class stackState : public datasetState {

public:
    /**
     * @brief Constructor
     * @param data The stack information as serialized by
     *             stackState::to_json().
     * @throws std::runtime_error If "rstack" or "num_stack" is missing or of
     *                            the wrong type.
     */
    stackState(const nlohmann::json& data) {
        try {
            _rstack_map = data["rstack"].get<std::vector<rstack_ctype>>();
            _num_stack = data["num_stack"].get<uint32_t>();
        } catch (std::exception& e) {
            throw std::runtime_error(
                fmt::format(fmt("stackState: Failure parsing json data: {:s}"), e.what()));
        }
    }

    /**
     * @brief Constructor
     * @param rstack_map Definition of how the products were stacked. Moved
     *                   from; the caller's vector is left empty.
     * @param num_stack Number of stacked visibilities.
     */
    stackState(uint32_t num_stack, std::vector<rstack_ctype>&& rstack_map) :
        _num_stack(num_stack),
        // A named rvalue reference is itself an lvalue, so without std::move
        // the member was copy-constructed, defeating the && parameter.
        _rstack_map(std::move(rstack_map)) {}

    /**
     * @brief Get stack map information (read only).
     *
     * For every product this says which stack to add the product into and
     * whether it needs conjugating before doing so.
     *
     * @return The stack map.
     */
    const std::vector<rstack_ctype>& get_rstack_map() const {
        return _rstack_map;
    }

    /**
     * @brief Get the number of stacks.
     *
     * @return The number of stacks.
     */
    uint32_t get_num_stack() const {
        return _num_stack;
    }

    /**
     * @brief Calculate and return the stack->prod mapping.
     *
     * This is calculated on demand and so a full fledged vector is returned.
     *
     * @returns The stack map.
     **/
    std::vector<stack_ctype> get_stack_map() const {
        return invert_stack(_num_stack, _rstack_map);
    }

    /// Serialize the data of this state in a json object
    // Public here (unlike the other states) — kept to preserve the interface.
    nlohmann::json data_to_json() const override {
        return {{"rstack", _rstack_map}, {"num_stack", _num_stack}};
    }

private:
    /// Total number of stacks
    uint32_t _num_stack;

    /// The stack definition
    std::vector<rstack_ctype> _rstack_map;
};
/**
* @brief A dataset state that describes all the metadata that is written to
* file as "attributes", but not defined by other states yet.
*
* @author <NAME>
*/
class metadataState : public datasetState {
public:
    /**
     * @brief Constructor
     * @param data The metadata as serialized by
     *             metadataState::to_json():
     *             weight_type: string
     *             instrument_name: string
     *             git_version_tag: string
     *
     * @throws std::runtime_error If any of the three keys is missing or is
     *                            not a string (`at()` throws on missing keys).
     */
    metadataState(const nlohmann::json& data) {
        try {
            _weight_type = data.at("weight_type").get<std::string>();
            _instrument_name = data.at("instrument_name").get<std::string>();
            _git_version_tag = data.at("git_version_tag").get<std::string>();
        } catch (std::exception& e) {
            throw std::runtime_error(fmt::format(fmt("metadataState: Failure parsing json "
                                                     "data ({:s}): {:s}"),
                                                 data.dump(4), e.what()));
        }
    }

    /**
     * @brief Constructor
     * @param weight_type The weight type attribute.
     * @param instrument_name The instrument name attribute.
     * @param git_version_tag The git version tag attribute.
     */
    metadataState(std::string weight_type, std::string instrument_name,
                  std::string git_version_tag) :
        _weight_type(weight_type),
        _instrument_name(instrument_name),
        _git_version_tag(git_version_tag) {}

    /**
     * @brief Get the weight type (read only).
     *
     * @return The weigh type.
     */
    const std::string& get_weight_type() const {
        return _weight_type;
    }

    /**
     * @brief Get the instrument name (read only).
     *
     * @return The instrument name.
     */
    const std::string& get_instrument_name() const {
        return _instrument_name;
    }

    /**
     * @brief Get the git version tag (read only).
     *
     * @return The git version tag.
     */
    const std::string& get_git_version_tag() const {
        return _git_version_tag;
    }

private:
    /// Serialize the data of this state in a json object
    nlohmann::json data_to_json() const override {
        nlohmann::json j;
        j["weight_type"] = _weight_type;
        j["instrument_name"] = _instrument_name;
        j["git_version_tag"] = _git_version_tag;
        return j;
    }

    // The actual metadata: attributes written to file that are not covered
    // by any other state.
    std::string _weight_type, _instrument_name, _git_version_tag;
};
/**
* @brief A state to describe any applied gating.
*
* @author <NAME>
**/
class gatingState : public datasetState {
public:
    /**
     * @brief Construct a gating state
     *
     * @param spec gateSpec to describe what's happening.
     **/
    gatingState(const gateSpec& spec) :
        gating_type(FACTORY(gateSpec)::label(spec)),
        gating_data(spec.to_dm_json()) {}

    /**
     * @brief Construct a gating state
     *
     * @param data Full serialised data; expected keys are "type" (string)
     *             and "data" (arbitrary JSON), matching data_to_json().
     **/
    gatingState(const nlohmann::json& data) :
        gating_type(data["type"].get<std::string>()),
        gating_data(data["data"]) {}

    /**
     * @brief Serialise the gatingState data.
     *
     * @return JSON serialisation.
     **/
    nlohmann::json data_to_json() const override {
        return {{"type", gating_type}, {"data", gating_data}};
    }

    /// Type of gating (the factory label of the gateSpec subclass)
    const std::string gating_type;

    /// Type specific data
    const nlohmann::json gating_data;
};
/**
* @brief A dataset state that describes the gains applied to the data.
*
* @author <NAME>
*/
class gainState : public datasetState {
public:
    /**
     * @brief Constructor
     * @param data The product information as serialized by
     *             gainState::to_json().
     * @throws std::runtime_error If "update_id" or "transition_interval" is
     *                            missing or of the wrong type.
     */
    gainState(const nlohmann::json& data) {
        try {
            _update_id = data["update_id"].get<std::string>();
            _transition_interval = data["transition_interval"].get<double>();
        } catch (std::exception& e) {
            throw std::runtime_error(fmt::format(
                fmt("gainState: Failure parsing json data ({:s}): {:s}"), data.dump(4), e.what()));
        }
    };

    /**
     * @brief Constructor
     * @param update_id The string update_id labelling the applied gains.
     * @param transition_interval The length of time (in seconds) to blend
     *                            updates over.
     */
    gainState(std::string update_id, double transition_interval) :
        _update_id(update_id),
        _transition_interval(transition_interval){};

    /**
     * @brief Get the update_id labelling the applied gains.
     **/
    const std::string& get_update_id() const {
        return _update_id;
    }

    /**
     * @brief Get the length of time to blend this new update with the previous one.
     **/
    double get_transition_interval() const {
        return _transition_interval;
    }

private:
    /// Serialize the data of this state in a json object
    nlohmann::json data_to_json() const override {
        nlohmann::json j;
        j["update_id"] = _update_id;
        j["transition_interval"] = _transition_interval;
        return j;
    }

    // The label for the gains
    std::string _update_id;

    // The length of time (in seconds) the previous gain update is blended with this one.
    double _transition_interval;
};
/**
* @brief A dataset state that describes the input flags being applied.
*
* @author <NAME>
*/
class flagState : public datasetState {
public:
    /**
     * @brief Constructor
     *
     * @param data The product information as serialized by
     *             flagState::to_json().
     * @throws std::runtime_error If the JSON is not a plain string.
     */
    flagState(const nlohmann::json& data) {
        try {
            _update_id = data.get<std::string>();
        } catch (std::exception& e) {
            throw std::runtime_error(fmt::format(
                fmt("flagState: Failure parsing json data ({:s}): {:s}"), data.dump(4), e.what()));
        }
    };

    /**
     * @brief Constructor
     *
     * @param update_id The string update_id labelling the applied flags.
     */
    flagState(std::string update_id) : _update_id(update_id){};

    /**
     * @brief Get the update_id labelling the applied flags (read only).
     *
     * @return The update_id.
     */
    const std::string& get_update_id() const {
        return _update_id;
    }

private:
    /// Serialize the data of this state in a json object
    nlohmann::json data_to_json() const override {
        nlohmann::json j(_update_id);
        return j;
    }

    // The label for the flags
    std::string _update_id;
};
/**
* @brief A dataset state that keeps the beam information of a datatset.
*
* @author <NAME>
*/
class beamState : public datasetState {
public:
    /**
     * @brief Constructor
     * @param data The beam information as serialized by
     *             beamState::to_json().
     * @throws std::runtime_error If the JSON does not deserialise into a
     *                            vector of beam IDs.
     */
    beamState(const nlohmann::json& data) {
        try {
            _beams = data.get<std::vector<uint32_t>>();
        } catch (std::exception& e) {
            throw std::runtime_error(fmt::format(
                fmt("beamState: Failure parsing json data ({:s}): {:s}"), data.dump(4), e.what()));
        }
    };

    /**
     * @brief Constructor
     * @param beams The beam information as a vector of
     *              beam index maps.
     */
    beamState(std::vector<uint32_t> beams) : _beams(beams){};

    /**
     * @brief Constructor
     * @param num_beams The number of beams. The indices will end up
     *                  running from 0 to num_beams - 1
     */
    beamState(size_t num_beams) : _beams(num_beams) {
        std::iota(_beams.begin(), _beams.end(), 0);
    }

    /**
     * @brief Get beam information (read only).
     *
     * @return The beam information as a vector of beam index maps.
     */
    const std::vector<uint32_t>& get_beams() const {
        return _beams;
    }

private:
    /// Serialize the data of this state in a json object
    nlohmann::json data_to_json() const override {
        nlohmann::json j(_beams);
        return j;
    }

    /// Beam IDs of the dataset state (the original comment here said
    /// "Time index map", a copy-paste leftover).
    std::vector<uint32_t> _beams;
};
/**
* @brief A dataset state that keeps the sub-frequency information of a datatset.
*
* @author <NAME>
*/
class subfreqState : public datasetState {
public:
    /**
     * @brief Constructor
     * @param data The sub-frequency information as serialized by
     *             subfreqState::to_json().
     * @throws std::runtime_error If the JSON does not deserialise into a
     *                            vector of sub-frequency indices.
     */
    subfreqState(const nlohmann::json& data) {
        try {
            _subfreqs = data.get<std::vector<uint32_t>>();
        } catch (std::exception& e) {
            throw std::runtime_error(
                fmt::format(fmt("subfreqState: Failure parsing json data ({:s}): {:s}"),
                            data.dump(4), e.what()));
        }
    };

    /**
     * @brief Constructor
     * @param subfreqs The sub-frequency information as a vector of
     *                 subfreq index maps.
     */
    subfreqState(std::vector<uint32_t> subfreqs) : _subfreqs(subfreqs){};

    /**
     * @brief Constructor
     * @param num_subfreqs The number of sub-frequencies. The indices will end up
     *                     running from 0 to num_subfreqs - 1
     */
    subfreqState(size_t num_subfreqs) : _subfreqs(num_subfreqs) {
        std::iota(_subfreqs.begin(), _subfreqs.end(), 0);
    }

    /**
     * @brief Get sub-frequency information (read only).
     *
     * @return The sub-frequency information as a vector of subfreq index maps.
     */
    const std::vector<uint32_t>& get_subfreqs() const {
        return _subfreqs;
    }

private:
    /// Serialize the data of this state in a json object
    nlohmann::json data_to_json() const override {
        nlohmann::json j(_subfreqs);
        return j;
    }

    /// Sub-frequency indices of the dataset state (the original comment said
    /// "Time index map", a copy-paste leftover).
    std::vector<uint32_t> _subfreqs;
};
/**
* @brief A dataset state that keeps the RFI frame-dropping information of a datatset.
*
* @author <NAME>
*/
class RFIFrameDropState : public datasetState {
public:
    /**
     * @brief Constructor
     * @param data The RFI frame-dropping information as serialized by
     *             RFIFrameDropState::to_json().
     * @throws std::runtime_error If "enabled" or "thresholds" is missing or
     *                            of the wrong type.
     */
    RFIFrameDropState(const nlohmann::json& data) {
        try {
            enabled = data["enabled"].get<bool>();
            thresholds = data["thresholds"].get<std::vector<std::pair<float, float>>>();
        } catch (std::exception& e) {
            throw std::runtime_error(
                fmt::format(fmt("RFIFrameDropState: Failure parsing json data ({:s}): {:s}"),
                            data.dump(4), e.what()));
        }
    }

    /**
     * @brief Constructor
     * @param enabled True, if RFI frame-dropping enabled
     * @param thresholds Vector of pairs: thresholds and fractions
     */
    RFIFrameDropState(bool enabled, std::vector<std::pair<float, float>> thresholds) :
        enabled(enabled),
        thresholds(thresholds) {}

    /**
     * @brief Get RFI frame-dropping enabled information.
     *
     * @return True if RFI frame-dropping enabled.
     */
    bool get_enabled() const {
        return enabled;
    }

    /**
     * @brief Get RFI frame-dropping thresholds (read only).
     *
     * @return Vector of pairs containing <threshold, fraction>, each, in this order.
     */
    const std::vector<std::pair<float, float>>& get_thresholds() const {
        return thresholds;
    }

private:
    /// Serialize the data of this state in a json object
    nlohmann::json data_to_json() const override {
        nlohmann::json j;
        j["enabled"] = enabled;
        j["thresholds"] = thresholds;
        return j;
    }

    /// Tells if frame dropping is enabled in the RFIFrameDrop stage.
    bool enabled;

    /// <threshold, fraction> pairs used by the RFIFrameDrop stage.
    std::vector<std::pair<float, float>> thresholds;
};
#endif // DATASETSTATE_HPP
|
Gesserok/Market | nik.dany/src/main/java/market/BillTime.java | package market;
/**
 * Simple mutable value holder for the date and time stamped on a bill.
 *
 * <p>All fields are public by design; callers read and write them directly.
 */
public class BillTime {
    public int year;
    public int month;
    public int day;
    public int hour;
    public int minute;

    /**
     * Builds a bill timestamp from its individual calendar fields.
     *
     * @param year   calendar year
     * @param month  calendar month
     * @param day    day of month
     * @param hour   hour of day
     * @param minute minute of hour
     */
    public BillTime(int year, int month, int day, int hour, int minute) {
        this.year = year;
        this.month = month;
        this.day = day;
        this.hour = hour;
        this.minute = minute;
    }

    /**
     * Prints the given timestamp as {@code " y.m.d \t h:m. "} on standard out.
     *
     * @param time the timestamp to print
     */
    public static void showTime(BillTime time) {
        // Build the line first, then print it; String.format uses the same
        // format-string semantics as printf, so the output is byte-identical.
        String line = String.format(" %d.%d.%d \t %d:%d. \n",
                time.year, time.month, time.day, time.hour, time.minute);
        System.out.print(line);
    }
}
|
ijhajj/JavaRepository | HelloWorld/src/strategyPattern/Add.java | package strategyPattern;
/**
 * Addition strategy for the strategy-pattern math expression demo.
 */
public class Add implements MathExp{
    /**
     * Adds the two operands, logging the operation to standard out.
     *
     * @param num1 first operand
     * @param num2 second operand
     * @return the sum {@code num1 + num2}
     */
    public double calculate(double num1,double num2) {
        // Compute the sum once so the logged value always matches the
        // returned value (the original evaluated num1+num2 twice).
        double sum = num1 + num2;
        System.out.println("Addition of " + num1 + " + " + num2 + " = " + sum);
        return sum;
    }
}
|
Grosskopf/openoffice | main/sc/source/ui/vba/vbaworksheets.hxx | /**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#ifndef SC_VBA_WORKSHEETS_HXX
#define SC_VBA_WORKSHEETS_HXX
#include <cppuhelper/implbase1.hxx>
#include <ooo/vba/excel/XWorksheets.hpp>
#include <com/sun/star/sheet/XSpreadsheets.hpp>
#include <com/sun/star/sheet/XSpreadsheetDocument.hpp>
#include <com/sun/star/container/XEnumerationAccess.hpp>
#include <com/sun/star/uno/XComponentContext.hpp>
#include <vbahelper/vbacollectionimpl.hxx>
#include "address.hxx"
class ScModelObj;
typedef CollTestImplHelper< ov::excel::XWorksheets > ScVbaWorksheets_BASE;
class ScVbaWorksheets : public ScVbaWorksheets_BASE
{
    /// Model of the document the worksheets belong to.
    css::uno::Reference< css::frame::XModel > mxModel;
    /// Underlying UNO spreadsheet collection.
    css::uno::Reference< css::sheet::XSpreadsheets > m_xSheets;
protected:
    // ScVbaWorksheets_BASE
    /// Look up a worksheet by its string index (the sheet name).
    virtual css::uno::Any getItemByStringIndex( const rtl::OUString& sIndex ) throw (css::uno::RuntimeException);
public:
    /// Construct from an index-access sheet collection.
    ScVbaWorksheets( const css::uno::Reference< ov::XHelperInterface >& xParent, const css::uno::Reference< css::uno::XComponentContext > & xContext, const css::uno::Reference< css::container::XIndexAccess >& xSheets, const css::uno::Reference< css::frame::XModel >& xModel );
    /// Construct from an enumeration-access sheet collection.
    ScVbaWorksheets( const css::uno::Reference< ov::XHelperInterface >& xParent, const css::uno::Reference< css::uno::XComponentContext > & xContext, const css::uno::Reference< css::container::XEnumerationAccess >& xEnum, const css::uno::Reference< css::frame::XModel >& xModel );
    virtual ~ScVbaWorksheets() {}

    // NOTE(review): semantics inferred from the name — presumably whether the
    // current view has multiple sheets selected; confirm in the .cxx.
    bool isSelectedSheets();

    // XEnumerationAccess
    virtual css::uno::Type SAL_CALL getElementType() throw (css::uno::RuntimeException);
    virtual css::uno::Reference< css::container::XEnumeration > SAL_CALL createEnumeration() throw (css::uno::RuntimeException);

    // XWorksheets
    virtual css::uno::Any SAL_CALL getVisible() throw (css::uno::RuntimeException);
    virtual void SAL_CALL setVisible( const css::uno::Any& _visible ) throw (css::uno::RuntimeException);
    virtual css::uno::Any SAL_CALL Add( const css::uno::Any& Before, const css::uno::Any& After, const css::uno::Any& Count, const css::uno::Any& Type ) throw (css::uno::RuntimeException);
    virtual void SAL_CALL Delete( ) throw (css::uno::RuntimeException);
    virtual void SAL_CALL PrintOut( const css::uno::Any& From, const css::uno::Any& To, const css::uno::Any& Copies, const css::uno::Any& Preview, const css::uno::Any& ActivePrinter, const css::uno::Any& PrintToFile, const css::uno::Any& Collate, const css::uno::Any& PrToFileName ) throw (css::uno::RuntimeException);
    virtual css::uno::Any createCollectionObject( const css::uno::Any& aSource );
    virtual void SAL_CALL Select( const css::uno::Any& Replace ) throw (css::uno::RuntimeException);

    // ScVbaWorksheets_BASE
    virtual css::uno::Any SAL_CALL Item( const css::uno::Any& Index1, const css::uno::Any& Index2 ) throw
    (css::uno::RuntimeException);
    virtual rtl::OUString& getServiceImplName();
    virtual css::uno::Sequence<rtl::OUString> getServiceNames();

    /// Check whether a sheet of the given name exists; on success nTab is the
    /// sheet's (0-based) table index.
    static bool nameExists( css::uno::Reference <css::sheet::XSpreadsheetDocument>& xSpreadDoc, const ::rtl::OUString & name, SCTAB& nTab ) throw ( css::lang::IllegalArgumentException );
};
#endif /* SC_VBA_WORKSHEETS_HXX */
|
JoanAzpeitia/lp_sg | install/app_store/tk-framework-widget/v0.2.6/python/thumbnail_widget/thumbnail_widget.py | # Copyright (c) 2013 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import os
import sys
import tempfile
import subprocess
import tank
from tank.platform.qt import QtCore, QtGui
from .ui.thumbnail_widget import Ui_ThumbnailWidget
screen_grab = tank.platform.import_framework("tk-framework-qtwidgets", "screen_grab")
class ThumbnailWidget(QtGui.QWidget):
    """
    Thumbnail widget that provides screen capture functionality.

    Displays the current thumbnail pixmap (scaled down, never up, to fit
    while preserving aspect ratio) and fades in a button bar with a
    screen-capture button when the mouse hovers over the widget.
    """

    # Emitted whenever the thumbnail pixmap is replaced or cleared.
    thumbnail_changed = QtCore.Signal()

    def __init__(self, parent=None):
        """
        Construction

        :param parent: Optional parent QWidget.
        """
        QtGui.QWidget.__init__(self, parent)

        self._ui = Ui_ThumbnailWidget()
        self._ui.setupUi(self)

        # create layout to control buttons frame
        layout = QtGui.QHBoxLayout()
        layout.addWidget(self._ui.buttons_frame)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setSpacing(0)
        self.setLayout(layout)

        # connect to buttons:
        self._ui.camera_btn.clicked.connect(self._on_camera_clicked)

        # Fade animation for the button bar; created lazily on first hover.
        self._btns_transition_anim = None

        self._update_ui()

    # @property
    def _get_thumbnail(self):
        """Return the current thumbnail pixmap, or None if unset/null."""
        pm = self._ui.thumbnail.pixmap()
        return pm if pm and not pm.isNull() else None

    # @thumbnail.setter
    def _set_thumbnail(self, value):
        """Set (or clear, when value is falsey) the thumbnail pixmap."""
        self._ui.thumbnail.setPixmap(value if value else QtGui.QPixmap())
        self._update_ui()
        self.thumbnail_changed.emit()

    thumbnail = property(_get_thumbnail, _set_thumbnail)

    def enable_screen_capture(self, enable):
        """Show or hide the screen-capture (camera) button."""
        self._ui.camera_btn.setVisible(enable)

    def resizeEvent(self, event):
        # Re-fit the thumbnail whenever the widget is resized.
        self._update_ui()

    def enterEvent(self, event):
        """
        when the cursor enters the control, show the buttons
        """
        if self.thumbnail and self._are_any_btns_enabled():
            self._ui.buttons_frame.show()
            if hasattr(QtCore, "QAbstractAnimation"):
                self._run_btns_transition_anim(QtCore.QAbstractAnimation.Forward)
            else:
                # Q*Animation classes aren't available so just
                # make sure the button is visible:
                self.btn_visibility = 1.0

    def leaveEvent(self, event):
        """
        when the cursor leaves the control, hide the buttons
        """
        if self.thumbnail and self._are_any_btns_enabled():
            if hasattr(QtCore, "QAbstractAnimation"):
                self._run_btns_transition_anim(QtCore.QAbstractAnimation.Backward)
            else:
                # Q*Animation classes aren't available so just
                # make sure the button is hidden:
                self._ui.buttons_frame.hide()
                self.btn_visibility = 0.0

    def _are_any_btns_enabled(self):
        """
        Return if any of the buttons are enabled
        """
        return not (self._ui.camera_btn.isHidden())

    """
    button visibility property used by QPropertyAnimation
    """

    def get_btn_visibility(self):
        return self._btns_visibility

    def set_btn_visibility(self, value):
        # Drive the button-bar background alpha from the animated value.
        self._btns_visibility = value
        self._ui.buttons_frame.setStyleSheet("#buttons_frame {border-radius: 2px; background-color: rgba(32, 32, 32, %d);}" % (64 * value))

    btn_visibility = QtCore.Property(float, get_btn_visibility, set_btn_visibility)

    def _run_btns_transition_anim(self, direction):
        """
        Run the transition animation for the buttons

        :param direction: QAbstractAnimation.Forward to fade in,
                          QAbstractAnimation.Backward to fade out.
        """
        if not self._btns_transition_anim:
            # set up anim:
            self._btns_transition_anim = QtCore.QPropertyAnimation(self, "btn_visibility")
            self._btns_transition_anim.setDuration(150)
            self._btns_transition_anim.setStartValue(0.0)
            self._btns_transition_anim.setEndValue(1.0)
            self._btns_transition_anim.finished.connect(self._on_btns_transition_anim_finished)

        if self._btns_transition_anim.state() == QtCore.QAbstractAnimation.Running:
            if self._btns_transition_anim.direction() != direction:
                # Reverse mid-flight: pause, flip direction, resume so the
                # fade continues smoothly from its current value.
                self._btns_transition_anim.pause()
                self._btns_transition_anim.setDirection(direction)
                self._btns_transition_anim.resume()
            else:
                pass  # just let animation continue!
        else:
            self._btns_transition_anim.setDirection(direction)
            self._btns_transition_anim.start()

    def _on_btns_transition_anim_finished(self):
        # Only hide the frame once a fade-out has fully completed.
        if self._btns_transition_anim.direction() == QtCore.QAbstractAnimation.Backward:
            self._ui.buttons_frame.hide()

    def _on_camera_clicked(self):
        # Capture the screen and, if the user grabbed something, adopt it.
        pm = self._on_screenshot()
        if pm:
            self.thumbnail = pm

    def _update_ui(self):
        """
        Update the thumbnail geometry and the button-bar visibility.

        The thumbnail is scaled down (never up) to fit the widget while
        preserving the pixmap's aspect ratio, centred with a 2px margin.
        """
        # maximum size of thumbnail is widget geom:
        thumbnail_geom = self.geometry()
        thumbnail_geom.moveTo(0, 0)

        scale_contents = False
        pm = self.thumbnail
        if pm:
            # work out size thumbnail should be to maximize size
            # whilst retaining aspect ratio
            pm_sz = pm.size()
            h_scale = float(thumbnail_geom.height() - 4) / float(pm_sz.height())
            w_scale = float(thumbnail_geom.width() - 4) / float(pm_sz.width())
            scale = min(1.0, h_scale, w_scale)
            scale_contents = (scale < 1.0)

            new_height = min(int(pm_sz.height() * scale), thumbnail_geom.height())
            new_width = min(int(pm_sz.width() * scale), thumbnail_geom.width())

            new_geom = QtCore.QRect(thumbnail_geom)
            new_geom.moveLeft(((thumbnail_geom.width() - 4) / 2 - new_width / 2) + 2)
            new_geom.moveTop(((thumbnail_geom.height() - 4) / 2 - new_height / 2) + 2)
            new_geom.setWidth(new_width)
            new_geom.setHeight(new_height)
            thumbnail_geom = new_geom

        self._ui.thumbnail.setScaledContents(scale_contents)
        self._ui.thumbnail.setGeometry(thumbnail_geom)

        # now update buttons based on current thumbnail:
        if not self._btns_transition_anim or self._btns_transition_anim.state() == QtCore.QAbstractAnimation.Stopped:
            if self.thumbnail or not self._are_any_btns_enabled():
                self._ui.buttons_frame.hide()
                self._btns_visibility = 0.0
            else:
                self._ui.buttons_frame.show()
                self._btns_visibility = 1.0

    def _safe_get_dialog(self):
        """
        Get the widgets dialog parent.

        Previously this would just call self.window() but this is unstable
        in Nuke causing a crash on exit - suspect that it's caching
        something internally which then doesn't get cleaned up properly...
        """
        current_widget = self
        while current_widget:
            if isinstance(current_widget, QtGui.QDialog):
                return current_widget
            current_widget = current_widget.parentWidget()
        return None

    def _on_screenshot(self):
        """
        Perform the actual screenshot

        :returns: The captured QPixmap.
        """
        # hide the containing window - we can't actually hide
        # the window as this will break modality! Instead
        # we have to move the window off the screen:
        win = self._safe_get_dialog()
        win_geom = None
        if win:
            win_geom = win.geometry()
            win.setGeometry(1000000, 1000000, win_geom.width(), win_geom.height())

        # make sure this event is processed:
        QtCore.QCoreApplication.processEvents()
        QtCore.QCoreApplication.sendPostedEvents(None, 0)
        QtCore.QCoreApplication.flush()

        try:
            # (fix) The previous implementation created a temporary file via
            # tempfile.mkstemp() that was never passed to the capture API and
            # never deleted, leaking one temp file per screenshot. The capture
            # returns a pixmap directly, so no file is needed.
            pm = screen_grab.screen_capture()
        finally:
            # restore the window:
            if win:
                win.setGeometry(win_geom)
                QtCore.QCoreApplication.processEvents()

        return pm
|
mir597/ml_safe | tests/typing_tests/TAJS_micro/test45.js | <reponame>mir597/ml_safe
// SAFE analyzer test: objects created with `new Object` and with an object
// literal must both be instances of Object. The __resultN/__expectN variable
// pairs are the analyzer's expected-value convention — do not rename them.
var x2 = new Object;
//dumpObject(x2);
var __result1 = x2 instanceof Object; // for SAFE
var __expect1 = true; // for SAFE

var x3 = {};
//dumpObject(x3);
var __result2 = x3 instanceof Object; // for SAFE
var __expect2 = true; // for SAFE
|
yeshm/mini-ofbiz | hot-deploy/extcommon/src/org/miniofbiz/ext/jms/GenericJmsServiceJob.java | package org.miniofbiz.ext.jms;
import org.apache.activemq.command.ActiveMQDestination;
import org.miniofbiz.ext.redis.RedisLockWorker;
import org.miniofbiz.ext.redis.RedisWorker;
import org.ofbiz.base.util.Debug;
import org.ofbiz.base.util.ObjectType;
import org.ofbiz.base.util.UtilGenerics;
import org.ofbiz.base.util.UtilValidate;
import org.ofbiz.entity.GenericEntityException;
import org.ofbiz.entity.serialize.XmlSerializer;
import org.ofbiz.entity.transaction.GenericTransactionException;
import org.ofbiz.entity.transaction.TransactionUtil;
import org.ofbiz.service.GenericServiceException;
import org.ofbiz.service.LocalDispatcher;
import org.ofbiz.service.ModelService;
import org.ofbiz.service.ServiceUtil;
import org.ofbiz.service.job.InvalidJobException;
import org.ofbiz.service.job.Job;
import javax.jms.JMSException;
import javax.jms.MapMessage;
import javax.jms.MessageConsumer;
import javax.jms.Session;
import java.io.Serializable;
import java.util.Date;
import java.util.Map;
import java.util.concurrent.TimeUnit;
/**
* A generic Jms async-service job.
*/
@SuppressWarnings("serial")
public class GenericJmsServiceJob implements Job, Serializable {
    public static final String module = GenericJmsServiceJob.class.getName();

    /** Dispatcher used to invoke the requested OFBiz service (sync or async). */
    protected final LocalDispatcher dispatcher;
    /** JMS session the message was received on. */
    protected final Session session;
    /** Consumer that delivered the message. */
    protected final MessageConsumer consumer;
    /** The map message describing the service call to perform. */
    private final MapMessage message;

    /** Generated job identifier: jobName + "." + creation millis. */
    private final String jobId;
    private final String jobName;
    /** Lifecycle state; exec() requires QUEUED and moves to RUNNING. */
    protected State currentState = State.CREATED;
    private long elapsedTime = 0;
    private final Date startTime = new Date();

    // Service name and context are presumably extracted from the message
    // during init() — confirm; neither is assigned in the visible code.
    private String serviceName;
    private Map<String, ? extends Object> context;
public GenericJmsServiceJob(LocalDispatcher dispatcher, Session session, MessageConsumer consumer, MapMessage message) {
this.jobName = "JmsService";
this.jobId = this.jobName + "." + Long.toString(System.currentTimeMillis());
this.dispatcher = dispatcher;
this.session = session;
this.consumer = consumer;
this.message = message;
}
    /**
     * Invokes the service.
     *
     * Runs the service described by the JMS message, guarding against
     * duplicate consumption of redelivered messages with a Redis marker and
     * a Redis lock. On failure the work is re-queued via runServiceAsync().
     */
    public void exec() throws InvalidJobException {
        if (currentState != State.QUEUED) {
            throw new InvalidJobException("Illegal state change");
        }
        currentState = State.RUNNING;
        init();
        Throwable thrown = null;
        // NOTE(review): `result` is never assigned below — the service outcome
        // is held in the local `results` — so finish(result) always receives
        // null. Confirm whether finish() needs the actual service result.
        Map<String, Object> result = null;
        // no transaction is necessary since runSync handles this
        try {
            boolean redelivered = message.getJMSRedelivered();
            String messageId = message.getJMSMessageID();
            String key = "jms:queue:" + messageId;
            String lockKey = key + RedisWorker.LOCK_KEY_SUFFIX;
            // If the message was redelivered, check the business-level marker
            // to avoid consuming the same message twice.
            if (redelivered && RedisWorker.exists(key)) {
                RedisWorker.incr("jms:queue:duplicateFoundCount");
                Debug.logError("message is duplicate comsume, messageId:" + messageId, module);
                message.acknowledge();
            } else {
                // NOTE(review): the lock is acquired but never explicitly
                // released here — presumably it expires in Redis; confirm.
                if(RedisLockWorker.tryLock(lockKey, 0, TimeUnit.SECONDS)) {
                    Map results = runService(message);
                    if (UtilValidate.isNotEmpty(results) && ServiceUtil.isSuccess(results)) {
                        ActiveMQDestination destination = (ActiveMQDestination) message.getJMSDestination();
                        if (destination.isQueue()) {
                            RedisWorker.incr("jms:queue:acknowledgeCount");
                            // The message was consumed successfully; record a
                            // marker so redeliveries can be detected above.
                            String re = RedisWorker.set(key, "1", "nx", "ex", 3600);
                            if (!"OK".equals(re)) {
                                RedisWorker.incr("jms:queue:duplicateErrorCount");
                            }
                        }
                    }else{
                        // Service did not succeed synchronously: persist it
                        // for asynchronous retry.
                        runServiceAsync();
                    }
                }
            }
        } catch (Throwable t) {
            thrown = t;
        }
        if (thrown == null) {
            finish(result);
        } else {
            // Record the failure, then fall back to async execution.
            failed(thrown);
            runServiceAsync();
        }
    }
/**
* persist service defined in the MapMessage to db
*/
protected void runServiceAsync() {
boolean beganTransaction = false;
try {
beganTransaction = TransactionUtil.begin();
dispatcher.runAsync(serviceName, context, true);
} catch (Throwable e) {
Debug.logError(e, module);
String key = RedisWorker.getRedisKey("Jms", "ErrorList2");
RedisWorker.sadd(key, message.toString());
try {
// only rollback the transaction if we started one...
TransactionUtil.rollback(beganTransaction, "Problems when put jms to async", e);
} catch (GenericEntityException e2) {
Debug.logError(e2, "Could not rollback transaction: " + e2.toString(), module);
}
} finally {
try {
TransactionUtil.commit(beganTransaction);
} catch (GenericEntityException e2) {
Debug.logError(e2, "Could not commit transaction: " + e2.toString(), module);
}
}
}
/**
* Runs the service defined in the MapMessage
*
* @param message
* @return Map
*/
protected Map<String, Object> runService(MapMessage message) {
context = null;
serviceName = null;
String xmlContext = null;
try {
serviceName = message.getString("serviceName");
xmlContext = message.getString("serviceContext");
if (serviceName == null || xmlContext == null) {
Debug.logError("Message received is not an OFB service message. Ignored!", module);
return null;
}
Object o = XmlSerializer.deserialize(xmlContext, dispatcher.getDelegator());
if (Debug.verboseOn()) Debug.logVerbose("De-Serialized Context --> " + o, module);
if (ObjectType.instanceOf(o, "java.util.Map"))
context = UtilGenerics.checkMap(o);
} catch (JMSException je) {
Debug.logError(je, "Problems reading message.", module);
} catch (Exception e) {
Debug.logError(e, "Problems deserializing the service context.", module);
}
try {
ModelService model = dispatcher.getDispatchContext().getModelService(serviceName);
if (!model.export) {
Debug.logWarning("Attempt to invoke a non-exported service: " + serviceName, module);
return null;
}
} catch (GenericServiceException e) {
Debug.logError(e, "Unable to get ModelService for service : " + serviceName, module);
}
if (Debug.verboseOn()) Debug.logVerbose("Running service: " + serviceName, module);
Map<String, Object> result = null;
if (context != null) {
try {
result = dispatcher.runSync(serviceName, context);
} catch (GenericServiceException gse) {
Debug.logError(gse, "Problems with service invocation.", module);
}
}
return result;
}
@Override
public void run() {
long startMillis = System.currentTimeMillis();
try {
exec();
} catch (InvalidJobException e) {
Debug.logWarning(e.getMessage(), module);
}
// sanity check; make sure we don't have any transactions in place
try {
// roll back current TX first
if (TransactionUtil.isTransactionInPlace()) {
Debug.logWarning("*** NOTICE: JobInvoker finished w/ a transaction in place! Rolling back.", module);
TransactionUtil.rollback();
}
// now resume/rollback any suspended txs
if (TransactionUtil.suspendedTransactionsHeld()) {
int suspended = TransactionUtil.cleanSuspendedTransactions();
Debug.logWarning("Resumed/Rolled Back [" + suspended + "] transactions.", module);
}
} catch (GenericTransactionException e) {
Debug.logWarning(e, module);
}
elapsedTime = System.currentTimeMillis() - startMillis;
}
/**
* Method is called prior to running the service.
*/
protected void init() throws InvalidJobException {
if (Debug.verboseOn()) Debug.logVerbose("Async-Service initializing.", module);
}
/**
* Method is called after the service has finished successfully.
*/
protected void finish(Map<String, Object> result) throws InvalidJobException {
if (currentState != State.RUNNING) {
throw new InvalidJobException("Illegal state change");
}
currentState = State.FINISHED;
if (Debug.verboseOn()) Debug.logVerbose("Async-Service finished.", module);
}
/**
* Method is called when the service fails.
*
* @param t Throwable
*/
protected void failed(Throwable t) throws InvalidJobException {
if (currentState != State.RUNNING) {
throw new InvalidJobException("Illegal state change");
}
currentState = State.FAILED;
Debug.logError(t, "Async-Service failed.", module);
}
@Override
public boolean isValid() {
return currentState == State.CREATED;
}
@Override
public void deQueue() throws InvalidJobException {
throw new InvalidJobException("Unable to queue job [" + getJobId() + "]");
}
@Override
public State currentState() {
return currentState;
}
@Override
public String getJobId() {
return this.jobId;
}
@Override
public String getJobName() {
return this.jobName;
}
@Override
public void queue() throws InvalidJobException {
if (currentState != State.CREATED) {
throw new InvalidJobException("Illegal state change");
}
this.currentState = State.QUEUED;
}
@Override
public long getRuntime() {
return elapsedTime;
}
@Override
public Date getStartTime() {
return startTime;
}
}
|
weinzierl-engineering/baos | kdrive/src/access/ldm/LocalDeviceManagement.cpp |
#include "pch/kdrive_pch.h"
#include "kdrive/access/core/KnxPort.h"
#include "kdrive/access/core/Exception.h"
#include "kdrive/access/ldm/LocalDeviceManagement.h"
#include "kdrive/access/core/KnxPacket.h"
#include "kdrive/connector/Wait.h"
#include <algorithm>
// NOTE(review): this translation unit is machine-obfuscated — identifiers are
// hashed "z..." names and statements are packed to the column limit, so comment
// placement below sometimes falls mid-declaration (legal C++, whitespace only).
// The comments describe only what the visible code does.
//
// zec7fc1647d wraps a port object (member zcdc6474106) and provides
// "write-then-verify" helpers for local device management properties.
using namespace kdrive::access;using kdrive::connector::waitPacket;zec7fc1647d::
zec7fc1647d(zea2d083c85&port):zcdc6474106(port){}zec7fc1647d::~zec7fc1647d(){}
// restart() is a no-op; z2afe3965f8() returns a class constant; getPort()
// (const and non-const) exposes the wrapped port reference.
void zec7fc1647d::restart(){}unsigned short zec7fc1647d::z2afe3965f8(){return
zea2d083c85::z81eda64581;}zea2d083c85&zec7fc1647d::getPort(){return zcdc6474106;
}const zea2d083c85&zec7fc1647d::getPort()const{return zcdc6474106;}void
// Each verifier method below re-reads a value via the matching getter and
// throws WriteLocalDeviceMgmtException(msg) when it differs from the expected
// argument. Variants cover unsigned int/short scalars, std::vector<unsigned
// char>, std::vector<unsigned short> (addresses) and std::string payloads.
zec7fc1647d::za4528c2f93(unsigned int z1166f2fdb4,const std::string&msg){if(
z6f5601a8cf()!=z1166f2fdb4){throw WriteLocalDeviceMgmtException(msg);}}void
zec7fc1647d::z75eed006b0(unsigned int z83744e1e92,const std::string&msg){if(
zf670e5347d()!=z83744e1e92){throw WriteLocalDeviceMgmtException(msg);}}void
zec7fc1647d::z85480faf6f(unsigned int zf92894300e,const std::string&msg){if(
zfa42f5b322()!=zf92894300e){throw WriteLocalDeviceMgmtException(msg);}}void
zec7fc1647d::zd02eaa9824(const std::vector<unsigned char>&zf92894300e,const std
::string&msg){std::vector<unsigned char>da;z875dcf85de(da);if(da!=zf92894300e){
throw WriteLocalDeviceMgmtException(msg);}}void zec7fc1647d::z501eae5473(const
std::vector<unsigned char>&zd311e7ca26,const std::string&msg){std::vector<
unsigned char>z97d3478633;z8039b7bd80(z97d3478633);if(z97d3478633!=zd311e7ca26){
throw WriteLocalDeviceMgmtException(msg);}}void zec7fc1647d::z99dab55337(
unsigned int zfd606f58be,const std::string&msg){if(za4544ba4dc()!=zfd606f58be){
throw WriteLocalDeviceMgmtException(msg);}}void zec7fc1647d::za6acd56ff2(
unsigned int z83744e1e92,const std::string&msg){if(z587dd78f85()!=z83744e1e92){
throw WriteLocalDeviceMgmtException(msg);}}void zec7fc1647d::z0269f1e89b(const
std::vector<unsigned short>&addresses,const std::string&msg){std::vector<
unsigned short>z98b719801c;za8754b21ac(z98b719801c);if(z98b719801c!=addresses){
throw WriteLocalDeviceMgmtException(msg);}}void zec7fc1647d::z11e15a5722(
unsigned int ze2fcb9ca3a,const std::string&msg){if(zbab06955e5()!=ze2fcb9ca3a){
throw WriteLocalDeviceMgmtException(msg);}}void zec7fc1647d::z7475b3f872(const
std::string&zf15e490884,const std::string&msg){std::string z07f4508ba9;
zd0d1398630(z07f4508ba9);if(z07f4508ba9!=zf15e490884){throw
WriteLocalDeviceMgmtException(msg);}}void zec7fc1647d::z0f3b34e3a9(unsigned
short zb9dc9070a1,const std::string&msg){if(z7e6620eb94()!=zb9dc9070a1){throw
// The first zbe3acf8982 overload (starting on the next line) waits up to
// 'timeout' for the next KnxPacket on the port; all exceptions are swallowed
// and a null Ptr is returned on timeout.
WriteLocalDeviceMgmtException(msg);}}KnxPacket::Ptr zec7fc1647d::zbe3acf8982(
unsigned int timeout){KnxPacket::Ptr packet;try{packet=waitPacket<KnxPacket>(
zcdc6474106,timeout);}catch(...){}return packet;}KnxPacket::Ptr zec7fc1647d::
// Second overload: keep receiving packets until one contains 'buffer'
// (z3ba388a942 bytes at 'offset') or the overall timeout elapses, then throw
// TelegramTimeoutException. The obfuscated hex expressions both evaluate to
// 1000: (0x12d4+5536-0x248c) converts the ms timeout to Poco's microsecond
// TimeDiff, and (0xe68+5277-0x1f1d) converts elapsed microseconds back to ms
// when decrementing the remaining timeout.
zbe3acf8982(const unsigned char*buffer,unsigned int z3ba388a942,unsigned int
offset,unsigned int timeout){Poco::Timestamp timestamp;Poco::Timestamp::TimeDiff
z6485c7d3af=timeout*(0x12d4+5536-0x248c);bool elapsed=false;KnxPacket::Ptr
packet;while(!elapsed){packet=zbe3acf8982(timeout);if(packet){const KnxPacket::
Buffer&z852de6af8a=packet->getBuffer();if(offset+z3ba388a942<=z852de6af8a.size()
){KnxPacket::Buffer::const_iterator begin=z852de6af8a.begin()+offset;KnxPacket::
Buffer::const_iterator end=begin+z3ba388a942;if(std::equal(begin,end,buffer)){
return packet;}}}elapsed=timestamp.isElapsed(z6485c7d3af);if(!elapsed){const
unsigned long z662146e9f6=static_cast<unsigned long>(timestamp.elapsed())/
(0xe68+5277-0x1f1d);if(timeout<z662146e9f6){elapsed=true;}else{timeout-=
z662146e9f6;}}}throw TelegramTimeoutException();}
|
neuroradiology/Proton | lsteamclient/cppISteamUser_SteamUser010.h | #ifdef __cplusplus
extern "C" {
#endif
/* Flat, C-callable wrappers around the C++ ISteamUser interface, version
 * "SteamUser010". The first void* argument is the C++ interface object the
 * call is forwarded to. Signatures mirror the underlying interface methods. */
extern HSteamUser cppISteamUser_SteamUser010_GetHSteamUser(void *);
extern bool cppISteamUser_SteamUser010_BLoggedOn(void *);
extern CSteamID cppISteamUser_SteamUser010_GetSteamID(void *);
extern int cppISteamUser_SteamUser010_InitiateGameConnection(void *, void *, int, CSteamID, uint32, uint16, bool);
extern void cppISteamUser_SteamUser010_TerminateGameConnection(void *, uint32, uint16);
extern void cppISteamUser_SteamUser010_TrackAppUsageEvent(void *, CGameID, int, const char *);
#ifdef __cplusplus
}
#endif
|
lforite/dynamite | src/main/scala/org/dynamite/action/get/GetItemResult.scala | package org.dynamite.action.get
/** Value-class wrapper for the outcome of a get-item action: `item` is
  * `Some(value)` when an item was found and deserialized to `A`, `None` otherwise.
  */
case class GetItemResult[A](item: Option[A]) extends AnyVal
|
chmp/misc-exp | chmp/src/chmp/ds/test_background.py | <reponame>chmp/misc-exp
import itertools as it
from chmp.ds import bgloop, wait, fast_product
def test_bgloop():
    """The bgloop-decorated body must run once per element of the product of its ranges."""
    seen = []

    @bgloop("test", range(4), range(2))
    def _(_, a, b):
        seen.append((a, b))

    wait("test")
    assert seen == list(it.product(range(4), range(2)))
def test_fast_product():
    """fast_product must agree with itertools.product for zero to three iterables."""
    cases = [
        (),
        (range(5),),
        (range(5), range(10)),
        (range(5), range(10), range(3)),
    ]
    for args in cases:
        assert list(fast_product(*args)) == list(it.product(*args))
|
aaron-michaux/giraffe | src/parser/parse-rules/accept-define.cpp | #include "stdinc.hpp"
#include "parser/parser-internal.hpp"
namespace giraffe
{
// '(' IDENTIFIER (',' IDENTIFIER)* ')'
//
// Parses a macro parameter list. The scanner must be positioned on '('.
// On a malformed list, pushes "unexpected token" onto the context and returns
// whatever arguments were parsed so far.
vector<sso23::string> accept_arglist(Context& context) noexcept
{
   Scanner& scanner = context.scanner();

   vector<sso23::string> args;
   auto push_arg = [&args](const auto text) { args.emplace_back(text.begin(), text.end()); };

   assert(expect(scanner, TLPAREN));
   scanner.consume(); // '('

   // FIX: removed the unused local `bool is_first` — first-argument handling is
   // already encoded in the args.empty() checks below.
   while(true) {
      const auto id = scanner.current().id();
      if(id == TRPAREN) {
         scanner.consume(); // ')'
         break;             // all is good (also accepts an empty list "()")
      } else if(id == TIDENTIFIER && args.empty()) {
         push_arg(scanner.consume().text()); // the first identifier
      } else if(id == TCOMMA && !args.empty()) {
         scanner.consume(); // the comma
         if(scanner.current().id() == TIDENTIFIER) {
            push_arg(scanner.consume().text());
         } else {
            context.push_error("unexpected token");
            return args;
         }
      } else {
         context.push_error("unexpected token");
         return args;
      }
   }

   return args;
}
// #define IDENTIFIER
// #define IDENTIFIER <text>...
// #define IDENTIFIER(arglist) <text>...
//
// Parses one #define directive and returns a DefineNode. The scanner must be
// positioned on the TDEFINE token.
AstNode* accept_define(Context& context) noexcept
{
   Scanner& scanner = context.scanner();

   // On error: record the message, resynchronize at the next newline, and
   // return an empty node so parsing can continue.
   auto on_error = [&](std::string&& message) {
      context.push_error(std::move(message));
      skip_to_sequence(scanner, TNEWLINE); // Skip to newline
      return make_empty_node();
   };

   assert(expect(scanner, TDEFINE));
   const auto loc0 = scanner.consume().location();

   const auto& ident_token = scanner.current();
   if(ident_token.id() != TIDENTIFIER) { return on_error("expected identifier after #define"); }
   const auto loc1 = scanner.consume().end_location(); // identifier

   vector<sso23::string> arglist = {};
   if(scanner.current().id() == TLPAREN) {
      // Check to see if there's white-space between the current token and the previous identifier:
      // a '(' that starts exactly where the identifier ended makes this a
      // function-like macro; otherwise the '(' belongs to the replacement text.
      const auto paren_loc = scanner.current().location();
      if(paren_loc == loc1) arglist = accept_arglist(context);
   }

   // Grab the token sequence to "new line".
   // NOTE(review): `text` itself is unused; the accept_to_newline() call is kept
   // for its side effect of advancing the scanner — confirm before removing.
   const auto pos1 = scanner.position();
   sso23::string text = accept_to_newline(context);
   const auto pos2 = scanner.position();
   vector<Token> token_sequence;
   token_sequence.reserve(pos2 - pos1);
   for(auto i = pos1; i < pos2; ++i) token_sequence.push_back(scanner.at(i));

   return new DefineNode{
       {loc0, loc1}, ident_token.text(), std::move(arglist), std::move(token_sequence)};
}
} // namespace giraffe
|
hanswenzel/opticks | bin/git.py | <reponame>hanswenzel/opticks
#!/usr/bin/env python
"""
svn.py / git.py
=================
git.py is a symbolic link to svn.py that detects its name
to pick the flavor of version control
This script enables two svn working copies "local" and "remote"
to be kept in sync with each other without requiring the changes
to be committed to svn or git. It provides a workaround for operating with
a remote svn working copy over a slow connection when you do not yet
have permission to commit many of the changes.
This slim script avoids the need to wield the "git svn" sledgehammer
to crack a nut.
Essentially want to be able to locally "svn up" and make edits
then selectively scp over changed files (with different digests)
into the remote working copy for compilation and testing.
Generally it is best to avoid editing on the remote end, but it is sometimes
unavoidable. This script eases the pain of bringing both working copies
back in sync without having to commit the changes.
NB all the below commands do no harm, they only suggest the scp commands
that need to be manually run in the shell or piped there.
::
svn st | perl -ne 'm,\S\s*(\S*), && print "$1\n"' - | xargs md5 %
svn st | perl -ne 'm,\S\s*(\S*), && print "$1\n"' - | xargs md5sum %
TODO:
* record digests of put files into some cache, so a repeating put after edits
can put just the updated ?
* (hmm does cfu do that already ? perhaps do cfu for every put)
* basically assuming that edits are only done locally, the normal situation, can
minimize puts
Workflow::
loc> export PATH=$HOME/opticks/bin:$PATH
loc> svn up
rem> svn up
loc> vi ...
## local editing, adding files
loc> scp ~/opticks/bin/svn.py P:opticks/bin/svn.py
## update this script at remote
loc> ssh P opticks/bin/svn.py > ~/rstat.txt
## take snapshot of remote working copy digests
## OR instead do this with : svn.py rup
## that can be combined, eg svn.py rup cf
loc> svn.py loc
## list status of local working copy with file digests
loc> svn.py rem
## ditto for remote, using the ~/rstat.txt snapshot from above
loc> svn.py cf
## compare status showing which files are only loc/rem and which have different digests
loc> svn.py put
## emit to stdout scp commands to local to remote copy wc files of M/A/? status
loc> svn.py get
## emit to stdout scp commands to remote to local copy wc files of M/A/? status
loc> svn.py sync
## show the cf output interleaved with put/get commands to bring the two wc together
##
## NB where there are digest changes, no command is suggested as it is necessary to
## manually examine the differences to see which is ahead OR to merge changes
## from both ends if there has been a mixup and changes were made in the wrong file
loc> svn.py sync -p rem | grep scp
## with remote priority, show the sync scp commands
loc> svn.py sync -p rem | grep scp | sh
## pipe those commands to shell
"""
import os, sys, re, argparse, logging, platform
try:
from commands import getstatusoutput
except ImportError:
from subprocess import getstatusoutput
pass
from collections import OrderedDict as odict
try:
from hashlib import md5
except ImportError:
from md5 import md5
pass
def md5sum_py3(path):
    """Return the MD5 hex digest of the file at *path*, reading in 4096-byte chunks.

    FIX: the previous version called ``partial(f.read, 4096)`` but ``functools.partial``
    is never imported in this module, so every call raised NameError. A lambda
    gives the same zero-argument reader without the missing import.
    """
    with open(path, mode='rb') as f:
        d = md5()
        for buf in iter(lambda: f.read(4096), b''):
            d.update(buf)
        return d.hexdigest()
def md5sum_py2(path):
    """Return the MD5 hex digest of the file at *path*, reading in 8192-byte chunks.

    FIX: the ``iter`` sentinel was the str ``''``. Under Python 3 a binary read
    returns ``bytes``, so ``b''`` never compares equal to ``''`` and the loop never
    terminated. Using ``b''`` works on both Python 2 (where it equals ``''``) and 3.
    """
    dig = md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(8192), b''):
            dig.update(chunk)
    return dig.hexdigest()
def md5sum(path, block_size=8192):
    """Return the MD5 hex digest of the content of *path*.

    :param path: file to hash; ``open`` raises for a non-existing path
    :param block_size: chunk size used while reading
    """
    digest = md5()
    # walrus-operator only available in py38+, and it errors in py27, so an
    # explicit sentinel-iter read loop is used instead.
    with open(path, "rb") as handle:
        for chunk in iter(lambda: handle.read(block_size), b""):
            digest.update(chunk)
    return digest.hexdigest()
def md5sum_alt(path):
    """Return the MD5 hex digest of *path* by shelling out to the OS tool
    (``md5 -q`` on macOS, ``md5sum`` on Linux); used to cross-check md5sum().

    Asserts (fails hard) on any other platform or a non-zero command exit.
    """
    system = platform.system()
    if system == "Darwin":
        cmd = "md5 -q %s" ## just outputs the digest
        rc,out = getstatusoutput(cmd % path)
        assert rc == 0
        dig = out
    elif system == "Linux":
        cmd = "md5sum %s" ## outputs the digest and the path
        rc,out = getstatusoutput(cmd % path)
        assert rc == 0
        dig = out.split(" ")[0]
    else:
        # Unsupported platform: the assert below always fires, reporting the
        # platform name (the preceding dig = None is never observable).
        dig = None
        assert 0, system
    pass
    return dig
# Module-level logger; configured later by WC.parse_args via logging.basicConfig.
log = logging.getLogger(__name__)

# Expand both ~user and $ENV_VARS in a path in a single step.
expand_ = lambda p:os.path.expandvars(os.path.expanduser(p))
class Path(dict):
    """One working-copy file entry, stored as a dict.

    Keys: ``st`` (vcs status letter), ``path``, ``dig`` (md5 hexdigest, computed
    on construction when absent), ``dig5`` (5-char digest prefix), and optionally
    ``ldig`` (display truncation length) and ``check`` (cross-check flag).
    """
    def __init__(self, *args, **kwa):
        dict.__init__(self, *args, **kwa)
        # Compute the digest lazily, only when the caller did not supply one
        # (e.g. entries parsed from a remote status snapshot already carry it).
        if not 'dig' in self:
            self["dig"] = md5sum(self["path"])
        # Optional sanity check of the Python digest against the OS md5/md5sum tool.
        if self.get('check', False) == True:
            self["dig_alt"] = md5sum_alt(self["path"])
            match = self["dig_alt"] == self["dig"]
            fmt = "%(path)s %(dig)s %(dig_alt)s"
            if not match:
                log.fatal(" check FAIL : " + fmt % self )
            else:
                log.debug(" check OK : " + fmt % self )
            pass
            assert match, (self["dig_alt"], self["dig"])
            pass
        pass
        if not 'dig5' in self:
            self["dig5"] = self["dig"][:5]

    def __str__(self):
        # Render as "<st> <digest-prefix> <path>"; ldig < 0 means full digest.
        dig = self["dig"]
        ldig = self.get("ldig", -1)
        if ldig < 0: ldig = len(dig)
        return "%1s %s %s " % (self["st"], dig[:ldig], self["path"])
class WC(object):
    """A version-control working copy: a named, ordered collection of Path entries.

    Instances are built from a local ``svn/git status`` run (FromStatus), from a
    saved status snapshot file (FromStatusFile), or by comparing two of those
    (FromComparison).
    """

    # "<st> <digest> <path>" lines as produced by this script (snapshot format).
    fstpat = re.compile("^(?P<st>\S)\s*(?P<dig>\S*)\s*(?P<path>\S*)$")
    # "<st> <path>" lines as produced by "svn status" / "git status --porcelain".
    lstpat = re.compile("^\s*(?P<st>\S*)\s*(?P<path>\S*)$")

    @classmethod
    def detect_vc_from_dir(cls):
        # NOTE(review): when neither .svn nor .git exists, ``vc`` is never
        # assigned and the return raises UnboundLocalError after the FATAL
        # print — confirm whether a sys.exit was intended instead.
        if os.path.isdir(".svn"):
            vc = "svn"
        elif os.path.isdir(".git"):
            vc = "git"
        else:
            print("FATAL must invoke from svn or git top level working copy directory")
        pass
        #print("detected vc %s " % vc)
        return vc

    @classmethod
    def detect_vc_from_scriptname(cls):
        # The same file is installed as both svn.py and git.py (symlink); the
        # invoked name selects the flavor.
        scriptname = os.path.basename(sys.argv[0])
        if scriptname == "svn.py":
            vc = "svn"
        elif scriptname == "git.py":
            vc = "git"
        else:
            assert 0
        pass
        #print("detected vc %s " % vc)
        return vc

    @classmethod
    def parse_args(cls, doc):
        """Parse command-line arguments, with per-flavor (svn/git) defaults, and
        configure logging. Returns the argparse namespace, augmented with
        ``vc`` and ``statcmd``; path arguments are ~/$VAR expanded."""
        vc = cls.detect_vc_from_scriptname()
        defaults = {}
        if vc == "svn":
            defaults["chdir"] = "~/junotop/offline"
            defaults["rbase"] = "P:junotop/offline"
            defaults["rstatpath"] = "~/rstat.txt"
            defaults["lstatpath"] = "~/lstat.txt"
            defaults["rstatcmd"] = "ssh P opticks/bin/svn.py"
            defaults["lstatcmd"] = "svn.py"
            defaults["statcmd"] = "svn status"
        elif vc == "git":
            defaults["chdir"] = "~/opticks"
            defaults["rbase"] = "P:opticks"
            defaults["rstatpath"] = "~/rstat_opticks.txt"
            defaults["lstatpath"] = "~/lstat_opticks.txt"
            defaults["rstatcmd"] = "ssh P opticks/bin/git.py"
            defaults["lstatcmd"] = "git.py"
            defaults["statcmd"] = "git status --porcelain"
        else:
            pass
        pass
        parser = argparse.ArgumentParser(doc)
        # The trailing ["st"] entry in choices allows the list default to validate.
        parser.add_argument( "cmd", default=["st"], nargs="*", choices=["rup","lup","loc","rem","st","get","put","cf","cfu","sync","scp", ["st"]],
            help="command specifying what to do with the working copy" )
        parser.add_argument( "--chdir", default=defaults["chdir"], help="chdir here" )
        parser.add_argument( "--rstatpath", default=defaults["rstatpath"], help="local path to remote status file" )
        parser.add_argument( "--lstatpath", default=defaults["lstatpath"], help="local path to local status file" )
        parser.add_argument( "--rstatcmd", default=defaults["rstatcmd"], help="command to invoke the remote version of this script" )
        parser.add_argument( "--lstatcmd", default=defaults["lstatcmd"], help="command to invoke the local version of this script" )
        parser.add_argument( "--rbase", default=defaults["rbase"], help="remote svn working copy" )
        parser.add_argument( "--check", default=False, action="store_true", help="check digest with os alternative md5 or md5sum" )
        parser.add_argument( "--ldig", type=int, default=-1, help="length of digest" )
        parser.add_argument( "-p", "--priority", choices=["loc","rem"], default="loc", help="Which version wins when a file exists at both ends" )
        parser.add_argument( "--level", default="info", help="logging level" )
        args = parser.parse_args()
        fmt = '[%(asctime)s] p%(process)s {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s'
        logging.basicConfig(level=getattr(logging,args.level.upper()), format=fmt)
        args.chdir = expand_(args.chdir)
        args.rstatpath = expand_(args.rstatpath)
        args.lstatpath = expand_(args.lstatpath)
        args.vc = vc
        args.statcmd = defaults["statcmd"]
        return args

    @classmethod
    def FromStatusFile(cls, statpath, ldig):
        """
        :param statpath: file holding the output of a prior run of this script
        :param ldig: int length of digest used for display
        :return rem: WC instance named "rem"

        Parse the status output of a remote instance of this script. ssh noise
        ("Warning: Permanently added...") and the 3-char title line are skipped.
        """
        log.debug("reading %s " % statpath)
        lines = map(str.rstrip, open(statpath, "r").readlines())
        paths = []
        for line in lines:
            if line.startswith("Warning: Permanently added"): continue
            if len(line) == 3: continue # skip the rem/loc title
            m = cls.fstpat.match(line)
            assert m, line
            d = m.groupdict()
            d["ldig"] = ldig
            paths.append(Path(d))
        pass
        return cls(paths, "rem")

    @classmethod
    def FromStatus(cls, args):
        """
        :param args: parsed argparse namespace
        :return loc: WC instance named "loc"

        Parse the output of "svn status" (or "git status --porcelain"),
        collecting status strings and paths; directories are skipped and git's
        "??" untracked marker is normalized to svn's "?".
        """
        log.debug("ldig %s check %s statcmd %s " % (args.ldig,args.check, args.statcmd))
        rc, out = getstatusoutput(args.statcmd)
        assert rc == 0
        log.debug(out)
        paths = []
        for line in filter(None,out.split("\n")):
            log.debug("[%s]"%line)
            m = cls.lstpat.match(line)
            assert m, line
            d = m.groupdict()
            d["ldig"] = args.ldig
            d["check"] = args.check
            # Only modified/added/untracked states are handled; anything else
            # (e.g. D) deliberately fails fast here.
            assert d["st"] in ["M","MM","A", "?", "??"], d["st"]
            if d["st"] == "??": d["st"] = "?" # bring git into line
            if os.path.isdir(d["path"]):
                log.debug("skip dir %s " % d["path"] )
            else:
                paths.append(Path(d))
            pass
        pass
        return cls(paths, "loc")

    @classmethod
    def FromComparison(cls, loc, rem, ldig):
        """
        :param loc: WC instance for the local working copy
        :param rem: WC instance for the remote working copy
        :param ldig: length of digest for display
        :return cf: WC instance named "cf"

        Build a comparison WC over the union of both path sets. Each entry
        records ``stlr`` ("lr"/"l "/" r": which sides hold the file) and
        ``stdig`` ("="/"*": digests equal or differing when present on both).
        """
        l = loc.d
        r = rem.d
        u = set(l).union(set(r))
        paths = []
        stfmt = "%2s %1s%1s %1s"
        dgfmt = "%5s|%5s"
        index_ = lambda ls,val:ls.index(val) if val in ls else -1
        # Sort by the later of the two insertion positions so output roughly
        # follows the original status ordering.
        for k in sorted(list(u), key=lambda k:max(index_(list(l.keys()),k),index_(list(r.keys()),k))):
            st = "".join(["l" if k in l else " ","r" if k in r else " "])
            rk = r.get(k, None)
            lk = l.get(k, None)
            d = dict(path=k)
            d["ldig"] = ldig
            stdig = " "
            if st == "lr":
                stdig = "=" if lk["dig"] == rk["dig"] else "*"
                stdat = (st, lk["st"], rk["st"], stdig )
                d["dig"] = "%s|%s" % (lk["dig"],rk["dig"])
                d["dig5"] = dgfmt % (lk["dig5"],rk["dig5"] )
            elif st == "l ":
                stdat = (st, lk["st"], "", " " )
                d["dig"] = "%s|%s" % (lk["dig"],"-" * 32 )
                d["dig5"] = dgfmt % (lk["dig5"], "-" * 5 )
            elif st == " r":
                stdat = (st, "", rk["st"], " " )
                d["dig"] = "%s|%s" % ("-" * 32, rk["dig"])
                d["dig5"] = dgfmt % ("-" * 5, rk["dig5"] )
            pass
            d["st"] = stfmt % stdat
            d["stlr"] = st
            d["stdig"] = stdig
            paths.append(Path(d))
        pass
        return cls(paths, "cf")

    def __init__(self, paths, name):
        # Keep both the ordered list and a path-keyed lookup dict.
        self.paths = paths
        self.name = name
        d = odict()
        for p in paths:
            d[p["path"]] = p
        pass
        self.d = d

    @classmethod
    def PutCmd(cls, path, rbase, chdir):
        # scp command copying a file from the local wc (chdir) to the remote wc (rbase).
        return "scp %s/%s %s/%s" % (chdir,path,rbase,path)

    @classmethod
    def GetCmd(cls, path, rbase, chdir):
        # scp command copying a file from the remote wc (rbase) to the local wc (chdir).
        return "scp %s/%s %s/%s" % (rbase,path,chdir,path)

    def scp_put_cmds(self, rbase, chdir):
        """put from local to remote"""
        return "\n".join(map(lambda d:self.PutCmd(d["path"],rbase,chdir), self.paths))

    def scp_get_cmds(self, rbase, chdir):
        """get from remote to local"""
        return "\n".join(map(lambda d:self.GetCmd(d["path"],rbase,chdir), self.paths))

    def _get_hdr(self):
        name = getattr(self, 'name', "noname")
        return "%s" % name
    hdr = property(_get_hdr)

    def __str__(self):
        # Name header followed by one Path line per entry.
        return "\n".join([self.hdr]+list(map(str,self.paths)))
if __name__ == '__main__':
    args = WC.parse_args(__doc__)

    # "rup"/"cfu": run this script at the remote end over ssh and snapshot its
    # status output (with digests) into args.rstatpath.
    if "rup" in args.cmd or "cfu" in args.cmd:
        log.info("running args.rstatcmd : %s " % args.rstatcmd )
        rc,rup_out = getstatusoutput(args.rstatcmd)
        assert rc == 0, (rc, "maybe the ssh tunnel is not running")
        #print(rup_out)
        log.info("writing rup_out to args.rstatpath : %s " % args.rstatpath)
        open(args.rstatpath,"w").write(rup_out)
    pass
    # "lup"/"cfu": same snapshot for the local working copy into args.lstatpath.
    if "lup" in args.cmd or "cfu" in args.cmd:
        log.info("running args.lstatcmd : %s " % args.lstatcmd )
        rc,lup_out = getstatusoutput(args.lstatcmd)
        assert rc == 0, (rc, "lstatcmd failed")
        #print(lup_out)
        log.info("writing lup_out to args.lstatpath : %s " % args.lstatpath)
        open(args.lstatpath,"w").write(lup_out)
    pass
    # Load the remote snapshot (if one exists) ...
    if os.path.exists(args.rstatpath):
        rem = WC.FromStatusFile(args.rstatpath, args.ldig)
    else:
        rem = None
    pass
    # ... and the local snapshot. NOTE(review): ``lup`` is loaded but never
    # used below — presumably intended for a future comparison; confirm.
    if os.path.exists(args.lstatpath):
        lup = WC.FromStatusFile(args.lstatpath, args.ldig)
    else:
        lup = None
    pass
    # Live local status, taken from inside the working copy.
    os.chdir(args.chdir)
    loc = WC.FromStatus(args)
    if loc and rem:
        cf = WC.FromComparison(loc, rem, args.ldig)
    else:
        cf = None
    pass
    # Dispatch each requested subcommand in order.
    for cmd in args.cmd:
        log.debug(cmd)
        if cmd == "loc" or cmd == "st":
            print(loc)
        elif cmd == "rem":
            print(rem)
        elif cmd == "put": # scp local to remote
            print(loc.scp_put_cmds(args.rbase, args.chdir))
        elif cmd == "get": # scp remote to local
            print(rem.scp_get_cmds(args.rbase, args.chdir))
        elif cmd == "cf" or cmd == "cfu":
            assert cf
            print(cf)
        elif cmd == "sync" or cmd == "scp":
            # "sync" interleaves the comparison lines with scp suggestions;
            # "scp" emits only the commands. Files present on both sides with
            # differing digests follow args.priority; equal digests emit nothing.
            assert cf
            for p in cf.paths:
                if cmd == "sync":
                    print(str(p))
                pass
                stlr = p["stlr"]
                stdig = p["stdig"]
                if stlr == "l ":
                    print(WC.PutCmd(p["path"], args.rbase, args.chdir))
                elif stlr == " r":
                    print(WC.GetCmd(p["path"], args.rbase, args.chdir))
                elif stlr == "lr":
                    if stdig == "*":
                        if args.priority == "rem":
                            print(WC.GetCmd(p["path"], args.rbase, args.chdir))
                        elif args.priority == "loc":
                            print(WC.PutCmd(p["path"], args.rbase, args.chdir))
                        else:
                            assert 0, args.priority
                        pass
                    pass
                else:
                    assert 0, stlr
                pass
            pass
        elif cmd == "rup" or cmd == "lup":
            pass
        else:
            assert 0, cmd
        pass
    pass
|
kitinfo/garfield-shell | shell/sources/shell_find.c | <filename>shell/sources/shell_find.c
#include "../headers/shell_strings.h"
#include "../headers/shell_debug.h"
#include "../headers/shell_sec.h"
#include "../headers/shell_io.h"
#include "../headers/shell_exec.h"
#include "../headers/shell_cmd.h"
#include "../headers/shell_user.h"
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
/* Search for a snack: build the configured find command for the given term,
 * log it, run it via popenAction, and return the resulting status. */
int findSnack(char* snack) {
	int status;
	char* command;

	command = getCmd(snack, SNACKCMD, FINDCMD);
	debug(command);
	status = popenAction(command);
	free(command);

	return status;
}
/* Dispatch a "find" request: "user <term>" searches users, "snack <term>"
 * searches snacks, and a bare term falls through to a snack search.
 * (The two prefixes are mutually exclusive, so the check order is free.) */
void find(char* input) {
	if (begins(input, "user ")) {
		debug("exec find user cmd");
		findUser(input + 5);
		return;
	}
	if (begins(input, "snack ")) {
		debug("exec find snack cmd");
		findSnack(input + 6);
		return;
	}
	debug("no match found in find");
	debug("try snack");
	findSnack(input);
}
|
kevinpeterson/drools-chance | drools-informer/drools-informer-core/src/test/java/org/drools/informer/rules/ItemRulesTest.java | /*
* Copyright 2011 JBoss Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.drools.informer.rules;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;
import java.util.Arrays;
import org.drools.KnowledgeBase;
import org.drools.KnowledgeBaseFactory;
import org.drools.builder.KnowledgeBuilder;
import org.drools.builder.KnowledgeBuilderFactory;
import org.drools.builder.ResourceType;
import org.drools.informer.Note;
import org.drools.io.ResourceFactory;
import org.drools.runtime.StatefulKnowledgeSession;
import org.drools.runtime.rule.ConsequenceException;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Tests the Informer Item rules (org/drools/informer/Item.drl).
 */
public class ItemRulesTest {

    private static final Logger logger = LoggerFactory.getLogger(ItemRulesTest.class);

    private KnowledgeBase knowledgeBase;

    /**
     * Builds the knowledge base from Item.drl before each test; fails the
     * test early if the DRL does not compile.
     *
     * @throws Exception
     */
    @Before
    public void setUp() throws Exception {
        KnowledgeBuilder knowledgeBuilder = KnowledgeBuilderFactory.newKnowledgeBuilder();
        knowledgeBuilder.add(ResourceFactory.newClassPathResource("org/drools/informer/Item.drl"), ResourceType.DRL);
        if (knowledgeBuilder.hasErrors()) {
            logger.debug(Arrays.toString(knowledgeBuilder.getErrors().toArray()));
        }
        assertFalse(knowledgeBuilder.hasErrors());
        knowledgeBase = KnowledgeBaseFactory.newKnowledgeBase();
        knowledgeBase.addKnowledgePackages(knowledgeBuilder.getKnowledgePackages());
    }

    /**
     * Inserting two items that share a forced id must abort rule firing with a
     * ConsequenceException wrapping IllegalStateException("Duplicate item id: id");
     * any other outcome (including no exception) fails the test.
     */
    @Test
    public void testUniqueItemId() {
        StatefulKnowledgeSession knowledgeSession = knowledgeBase.newStatefulKnowledgeSession();
        try {
            Note note1 = new Note("note","label1");
            note1.forceId("id");
            Note note2 = new Note("note","label2");
            note2.forceId("id");
            knowledgeSession.insert(note1);
            knowledgeSession.insert(note2);
            knowledgeSession.fireAllRules();
            fail();
        } catch (ConsequenceException e) {
            if (e.getCause() instanceof IllegalStateException) {
                if (((IllegalStateException)e.getCause()).getMessage().equals("Duplicate item id: id")) {
                    return;
                }
            }
            fail();
        } finally {
            knowledgeSession.dispose();
        }
    }
}
|
nickbrandt/gitlab-ui | src/components/base/form/form_textarea/form_textarea.stories.js | <reponame>nickbrandt/gitlab-ui
import { withKnobs, boolean } from '@storybook/addon-knobs';
import { documentedStoriesOf } from '../../../../../documentation/documented_stories';
import { GlFormTextarea } from '../../../../../index';
import readme from './form_textarea.md';
// Components made available to the story template below.
const components = {
  GlFormTextarea,
};

// Vue template rendered by the story. This is a runtime template string —
// its content (including whitespace) is kept exactly as-is.
const template = `
<gl-form-textarea
v-model="model"
:placeholder="placeholder"
:rows="5"
:no-resize="noResize"
/>
`;

// Build the story's prop definitions; `noResize` is wired to a storybook
// boolean knob, defaulting to the component's own default.
function generateProps({ noResize = GlFormTextarea.props.noResize.default } = {}) {
  return {
    model: {
      type: String,
      default:
        'We take inspiration from other companies, and we always go for the boring solutions. Just like the rest of our work, we continually adjust our values and strive always to make them better. We used to have more values, but it was difficult to remember them all, so we condensed them and gave sub-values and created an acronym. Everyone is welcome to suggest improvements.',
    },
    placeholder: {
      type: String,
      default: 'hello',
    },
    noResize: {
      type: Boolean,
      default: boolean('no-resize', noResize),
    },
  };
}

// Register the default story under base/form/form-textarea with knob support.
documentedStoriesOf('base/form/form-textarea', readme)
  .addDecorator(withKnobs)
  .add('default', () => ({
    components,
    props: generateProps(),
    template,
  }));
|
sebhoof/gambit_1.5 | Models/src/models/MSSM25atX.cpp | <filename>Models/src/models/MSSM25atX.cpp<gh_stars>1-10
// GAMBIT: Global and Modular BSM Inference Tool
// *********************************************
//
// MSSM25 translation function definitions
//
// Contains translation functions for
// MSSM25atQ --> MSSM30atQ
// MSSM25atQ_mA --> MSSM30atQ_mA
// MSSM25atMGUT --> MSSM30atMGUT
// MSSM25atMGUT_mA --> MSSM30atMGUT_mA
// MSSM25atMSUSY --> MSSM30atMSUSY
// MSSM25atMSUSY_mA --> MSSM30atMSUSY_mA
//
// *********************************************
//
// Authors
// =======
//
// (add name and date if you modify)
//
// <NAME>
// 2017 Oct
//
// *********************************************
#include <string>
#include <vector>
#include "gambit/Models/model_macros.hpp"
#include "gambit/Models/model_helpers.hpp"
#include "gambit/Logs/logger.hpp"
#include "gambit/Utils/util_functions.hpp"
#include "gambit/Models/models/MSSM25atQ.hpp"
#include "gambit/Models/models/MSSM25atQ_mA.hpp"
#include "gambit/Models/models/MSSM25atMGUT.hpp"
#include "gambit/Models/models/MSSM25atMGUT_mA.hpp"
#include "gambit/Models/models/MSSM25atMSUSY.hpp"
#include "gambit/Models/models/MSSM25atMSUSY_mA.hpp"
using namespace Gambit::Utils;
// General helper translation function definition
namespace Gambit {

  /// Shared helper: translate an MSSM25atX parameter point into the
  /// corresponding MSSM30atX parameter set.  Every interpret-as-parent
  /// function defined in this file forwards to this routine.
  ///
  /// @param myP     source MSSM25atX parameter point (read only)
  /// @param targetP destination MSSM30atX parameter point (filled in place)
  void MSSM25atX_to_MSSM30atX(const ModelParameters &myP, ModelParameters &targetP)
  {
    // Copy all the common parameters of MSSM25atQ into MSSM30atQ
    targetP.setValues(myP,false);

    // Manually set the parameters which differ between the two models.

    // slepton trilinear couplings
    // Off-diagonal elements set to zero by parent model
    // First and second generation elements set equal
    targetP.setValue("Ae_1", myP["Ae_12"] ); // Ae2_11 in MSSM63
    targetP.setValue("Ae_2", myP["Ae_12"] ); // Ae2_22 " "
    //targetP.setValue("Ae_3", myP["Ae_3"] ); // Ae2_33 // Taken care of by common parameter copy

    // down-type trilinear couplings
    // Off-diagonal elements set to zero by parent model
    // First and second generation to zero
    targetP.setValue("Ad_1", 0. ); // Ad2_11 in MSSM63
    targetP.setValue("Ad_2", 0. ); // Ad2_22 " "
    //targetP.setValue("Ad_3", myP["Ad_3"] ); // Ad2_33 // Taken care of by common parameter copy

    // up-type trilinear couplings
    // Off-diagonal elements set to zero by parent model
    // First and second generation set to zero
    targetP.setValue("Au_1", 0. ); // Au2_11 in MSSM63
    targetP.setValue("Au_2", 0. ); // Au2_22 " "
    // targetP.setValue("Au_3", myP["Au_3"] ); // Au2_33 // Taken care of by common parameter copy

    // Done
  }
}
/// @{ Interpret-as-parent function definitions
/// These are particularly repetitive so let's define them with the help of a macro
/// Defines the interpret-as-parent translation function MODEL_to_PARENT,
/// which logs the translation and forwards to the shared
/// MSSM25atX_to_MSSM30atX helper.  The MODEL macro must be defined before
/// each use.  Note: the last line of the macro must NOT end with a
/// backslash continuation, otherwise the following source line would be
/// spliced into the macro body.
#define DEFINE_IAPFUNC(PARENT) \
void MODEL_NAMESPACE::CAT_3(MODEL,_to_,PARENT) (const ModelParameters &myP, ModelParameters &targetP) \
{ \
  logger()<<"Running interpret_as_parent calculations for " STRINGIFY(MODEL) " --> " STRINGIFY(PARENT) "..."<<LogTags::info<<EOM; \
  MSSM25atX_to_MSSM30atX(myP, targetP); \
}
// Instantiate the interpret-as-parent function for each MSSM25 variant.
// MODEL selects the namespace used inside DEFINE_IAPFUNC, so it is defined
// immediately before, and undefined immediately after, each instantiation.
#define MODEL MSSM25atQ
DEFINE_IAPFUNC(MSSM30atQ)
#undef MODEL
#define MODEL MSSM25atQ_mA
DEFINE_IAPFUNC(MSSM30atQ_mA)
#undef MODEL
#define MODEL MSSM25atMGUT
DEFINE_IAPFUNC(MSSM30atMGUT)
#undef MODEL
#define MODEL MSSM25atMGUT_mA
DEFINE_IAPFUNC(MSSM30atMGUT_mA)
#undef MODEL
#define MODEL MSSM25atMSUSY
DEFINE_IAPFUNC(MSSM30atMSUSY)
#undef MODEL
#define MODEL MSSM25atMSUSY_mA
DEFINE_IAPFUNC(MSSM30atMSUSY_mA)
#undef MODEL
/// @}
|
dtransporte/dtransporte | js/FancyGrid/src/js/umd/end.js | <gh_stars>0
return Fancy;
})); |
jeromatron/virtdata-java | virtdata-lib-basics/src/main/java/io/virtdata/basicsmappers/from_long/to_int/HashRange.java | <reponame>jeromatron/virtdata-java
package io.virtdata.basicsmappers.from_long.to_int;
import io.virtdata.annotations.ThreadSafeMapper;
import io.virtdata.basicsmappers.from_long.to_long.Hash;
import java.util.function.LongToIntFunction;
@ThreadSafeMapper
public class HashRange implements LongToIntFunction {

    private final long minValue;
    private final long width;
    private final Hash hash = new Hash();

    /**
     * Maps a long input to a hashed int in the range {@code [0, width)}.
     *
     * @param width size of the result range; must be positive
     * @throws RuntimeException if width is not positive (previously this
     *         surfaced later as a divide-by-zero in {@link #applyAsInt(long)})
     */
    public HashRange(long width) {
        if (width <= 0) {
            throw new RuntimeException("HashRange width must be positive, but was " + width);
        }
        this.width = width;
        this.minValue = 0L;
    }

    /**
     * Maps a long input to a hashed int in the range
     * {@code [minValue, maxValue)}.
     *
     * @param minValue inclusive lower bound of the result range
     * @param maxValue exclusive upper bound; must be strictly greater than minValue
     * @throws RuntimeException if maxValue is not strictly greater than minValue
     */
    public HashRange(long minValue, long maxValue) {
        this.minValue = minValue;
        if (maxValue <= minValue) {
            throw new RuntimeException("HashRange must have min and max value in that order.");
        }
        this.width = maxValue - minValue;
    }

    /**
     * Hashes the operand and folds it into the configured range.
     *
     * NOTE(review): this assumes {@code Hash#applyAsLong} yields a
     * non-negative value; a negative hash would make {@code % width}
     * negative and shift results below minValue before the sign-bit mask
     * is applied — TODO confirm against the Hash implementation.
     */
    @Override
    public int applyAsInt(long operand) {
        return (int) ((minValue + (hash.applyAsLong(operand) % width)) & Integer.MAX_VALUE);
    }
}
|
joaowillamy/react-storybook-jest-lerna | packages/theme/src/index.js | <gh_stars>10-100
// Public entry point of the theme package: re-exports the provider
// component, the default theme object, and the global style component.
export { default as ThemeProvider } from "./ThemeProvider";
export { default as DEFAULT_THEME } from "./theme";
export { GlobalStyles } from "./GlobalStyles";
|
GrapixLeGrand/AppArt | app/src/main/java/ch/epfl/sdp/appart/database/FirestoreDatabaseService.java | <filename>app/src/main/java/ch/epfl/sdp/appart/database/FirestoreDatabaseService.java
package ch.epfl.sdp.appart.database;
import android.net.Uri;
import android.util.Log;
import androidx.annotation.NonNull;
import com.google.firebase.firestore.DocumentReference;
import com.google.firebase.firestore.DocumentSnapshot;
import com.google.firebase.firestore.FirebaseFirestore;
import com.google.firebase.firestore.FirebaseFirestoreSettings;
import com.google.firebase.firestore.QuerySnapshot;
import com.google.firebase.storage.FirebaseStorage;
import com.google.firebase.storage.StorageReference;
import org.jetbrains.annotations.NotNull;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.inject.Singleton;
import ch.epfl.sdp.appart.ad.Ad;
import ch.epfl.sdp.appart.ad.ContactInfo;
import ch.epfl.sdp.appart.database.exceptions.DatabaseServiceException;
import ch.epfl.sdp.appart.database.firebaselayout.AdLayout;
import ch.epfl.sdp.appart.database.firebaselayout.CardLayout;
import ch.epfl.sdp.appart.database.firebaselayout.FirebaseLayout;
import ch.epfl.sdp.appart.database.firestoreservicehelpers.FirestoreAdHelper;
import ch.epfl.sdp.appart.database.firestoreservicehelpers.FirestoreCardHelper;
import ch.epfl.sdp.appart.database.firestoreservicehelpers.FirestoreImageHelper;
import ch.epfl.sdp.appart.database.firestoreservicehelpers.FirestoreUserHelper;
import ch.epfl.sdp.appart.glide.visitor.GlideBitmapLoaderVisitor;
import ch.epfl.sdp.appart.glide.visitor.GlideLoaderListenerVisitor;
import ch.epfl.sdp.appart.glide.visitor.GlideLoaderVisitor;
import ch.epfl.sdp.appart.ad.PricePeriod;
import ch.epfl.sdp.appart.scrolling.card.Card;
import ch.epfl.sdp.appart.user.User;
import ch.epfl.sdp.appart.utils.serializers.AdSerializer;
import ch.epfl.sdp.appart.utils.serializers.CardSerializer;
import ch.epfl.sdp.appart.utils.serializers.UserSerializer;
/**
* Implementation of the DatabaseService with Firestore from Firebase.
*/
@Singleton
public class FirestoreDatabaseService implements DatabaseService {
private final static String STORAGE_URL = "gs://appart-ec344.appspot.com/";
private final FirebaseFirestore db;
private final FirebaseStorage storage;
private final FirestoreAdHelper adHelper;
private final FirestoreImageHelper imageHelper;
private final FirestoreUserHelper userHelper;
private final FirestoreCardHelper cardHelper;
@Inject
public FirestoreDatabaseService() {
db = FirebaseFirestore.getInstance();
/*
Do we use this or not ?
If we do, this "overrides" or local DB system. However, we know that the data will
have the same version that our local db. So maybe, we can think of the local db
as a backup for data that can't be cached by firestore.
db.clearPersistence();
FirebaseFirestoreSettings settings = new FirebaseFirestoreSettings.Builder()
.setPersistenceEnabled(false).build();
db.setFirestoreSettings(settings);
*/
storage = FirebaseStorage.getInstance();
adHelper = new FirestoreAdHelper();
imageHelper = new FirestoreImageHelper();
userHelper = new FirestoreUserHelper();
cardHelper = new FirestoreCardHelper();
}
@NotNull
@Override
@NonNull
public CompletableFuture<List<Card>> getCards() {
return cardHelper.getCards();
}
@NotNull
@Override
@NonNull
public CompletableFuture<List<Card>> getCardsFilter(String location) {
return cardHelper.getCardsFilter(location);
}
@NotNull
@Override
@NonNull
public CompletableFuture<List<Card>> getCardsFilterPrice(int min, int max) {
return cardHelper.getCardsFilterPrice(min, max);
}
@NotNull
@Override
@NonNull
public CompletableFuture<List<Card>> getCardsById(List<String> ids) {
return cardHelper.getCardsById(ids);
}
@NotNull
@Override
@NonNull
public CompletableFuture<Boolean> updateCard(@NotNull @NonNull Card card) {
return cardHelper.updateCard(card);
}
@NotNull
@Override
@NonNull
public CompletableFuture<User> getUser(@NonNull String userId) {
return userHelper.getUser(userId);
}
@NotNull
@Override
@NonNull
public CompletableFuture<Boolean> putUser(@NonNull User user) {
return userHelper.putUser(user);
}
@NotNull
@Override
@NonNull
public CompletableFuture<Boolean> updateUser(@NonNull User user) {
return userHelper.updateUser(user);
}
@NotNull
@Override
@NonNull
public CompletableFuture<Ad> getAd(String adId) {
return adHelper.getAd(adId);
}
@NotNull
@Override
@NonNull
public CompletableFuture<String> putAd(Ad ad, List<Uri> picturesUris, List<Uri> panoramasUris) {
return adHelper.putAd(ad, picturesUris, panoramasUris);
}
@NotNull
@NonNull
@Override
public CompletableFuture<Boolean> deleteAd(String adId, String cardId) {
return adHelper.deleteAd(adId, cardId);
}
@NotNull
@Override
@NonNull
public CompletableFuture<Boolean> putImage(Uri uri, String imagePathAndName) {
return imageHelper.putImage(uri, imagePathAndName);
}
@NonNull
@Override
@NotNull
public CompletableFuture<Boolean> deleteImage(String imagePathAndName) {
return imageHelper.deleteImage(imagePathAndName);
}
@Override
public CompletableFuture<Void> clearCache() {
CompletableFuture<Void> futureClear = new CompletableFuture<>();
db.clearPersistence().addOnCompleteListener(task -> {
if (task.isSuccessful()) {
futureClear.complete(null);
} else {
futureClear.completeExceptionally(new DatabaseServiceException(task.getException().getMessage()));
}
});
return futureClear;
}
@Override
public void accept(GlideLoaderVisitor visitor) {
visitor.visit(this);
}
@Override
public void accept(GlideBitmapLoaderVisitor visitor) {
visitor.visit(this);
}
@Override
public void accept(GlideLoaderListenerVisitor visitor) {
visitor.visit(this);
}
/**
* Returns the storage reference of a stored firebase object
*
* @param storageUrl the url in the storage like Cards/img.jpeg would return an image from
* the the
* Cards folder named img.jpeg
* @return the StorageReference of the object.
*/
public StorageReference getStorageReference(String storageUrl) {
return storage.getReferenceFromUrl(STORAGE_URL + storageUrl);
}
/**
* Utility function to clean up storage database
*
* @param ref reference to the folder/file to delete
*/
public void removeFromStorage(StorageReference ref) {
ref.delete();
}
/**
* Sets up the use of an emulator for the Firebase authentication service.
*
* @param ip the ip of the emulator
* @param port the port that corresponds to the authentication service emulation
*/
public void useEmulator(String ip, int port) {
if (ip == null) throw new IllegalArgumentException();
db.useEmulator(ip, port);
}
} |
IU-Libraries-Joint-Development/pumpkin | spec/presenters/collection_show_presenter_spec.rb | <gh_stars>0
require 'rails_helper'
RSpec.describe CollectionShowPresenter do
  let(:cs_presenter) { described_class.new(solr_doc, nil) }
  let(:solr_doc) { SolrDocument.new(doc) }
  # Solr document for a collection that contains one scanned resource.
  let(:doc) do
    c = FactoryGirl.build(:collection, id: "collection")
    c.members << scanned_resource
    allow(c).to receive(:new_record?).and_return(false)
    c.to_solr
  end
  # The member resource is indexed into Solr so presenter lookups resolve.
  let(:scanned_resource) do
    s = FactoryGirl.build(:scanned_resource)
    allow(s).to receive(:id).and_return("resource")
    solr.add(s.to_solr)
    solr.commit
    s
  end
  let(:solr) { ActiveFedora.solr.conn }
  describe "#member_presenters" do
    it "returns presenters for each Scanned Resource" do
      expect(cs_presenter.member_presenters.map(&:id)).to eq [scanned_resource.id]
    end
  end
  describe "#logical_order" do
    it "returns an empty hash" do
      expect(cs_presenter.logical_order).to eq({})
    end
  end
  describe "#viewing_hint" do
    it "is always nil" do
      expect(cs_presenter.viewing_hint).to eq nil
    end
  end
  # Smoke test: a IIIF manifest can be built from the presenter, and the
  # collection-specific fields are absent or populated as expected.
  it "can be used to create a manifest" do
    manifest = nil
    expect { manifest = ManifestBuilder.new(cs_presenter).to_json } \
      .not_to raise_error
    json_manifest = JSON.parse(manifest)
    expect(json_manifest['viewingHint']).not_to eq "multi-part"
    expect(json_manifest['metadata'][0]['value'].first) \
      .to eq cs_presenter.exhibit_id.first
    expect(json_manifest['structures']).to eq nil
    expect(json_manifest['viewingDirection']).to eq nil
  end
  describe "#label" do
    it "is an empty array" do
      expect(cs_presenter.label).to eq []
    end
  end
end
|
mfere/mfere_repository | src/main/java/com/analyzer/enricher/TimeSeriesLoader.java | package com.analyzer.enricher;
import com.analyzer.model.RawCandlestick;
import org.ta4j.core.BaseTick;
import org.ta4j.core.BaseTimeSeries;
import org.ta4j.core.Tick;
import org.ta4j.core.TimeSeries;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.util.ArrayList;
import java.util.List;
/**
 * Builds Ta4j time series from different data sources.
 */
public class TimeSeriesLoader {

    /**
     * Builds a Ta4j {@link TimeSeries} from a list of raw candlesticks.
     *
     * @param rawCandlesticks candlesticks to convert; must contain at least one element
     * @return a time series named after the instrument of the first candlestick
     * @throws IllegalArgumentException if the list is null or empty (previously
     *         this surfaced as an obscure IndexOutOfBoundsException)
     */
    public static TimeSeries loadTimeSeries(List<RawCandlestick> rawCandlesticks) {
        if (rawCandlesticks == null || rawCandlesticks.isEmpty()) {
            throw new IllegalArgumentException("rawCandlesticks must contain at least one element");
        }
        List<Tick> ticks = new ArrayList<>(rawCandlesticks.size());
        for (RawCandlestick rawCandlestick : rawCandlesticks) {
            // Stored timestamps carry no zone; attach UTC explicitly.
            ZonedDateTime dateTime = rawCandlestick.getRawCandlestickKey().getDateTime().atZone(ZoneId.of("UTC"));
            ticks.add(new BaseTick(
                    dateTime,
                    rawCandlestick.getMidRawCandlestickData().getOpen(),
                    rawCandlestick.getMidRawCandlestickData().getHigh(),
                    rawCandlestick.getMidRawCandlestickData().getLow(),
                    rawCandlestick.getMidRawCandlestickData().getClose(),
                    rawCandlestick.getVolume()));
        }
        return new BaseTimeSeries(rawCandlesticks.get(0).getRawCandlestickKey().getInstrument(), ticks);
    }
}
|
1847123212/YoC-open | components/chip_TG7121B/ls_ble_sdk/dev/soc/arm_cm/le501x/integration/rtc_param.h | #ifndef _RTC_PARAM_H_
#define _RTC_PARAM_H_
#include <stddef.h>
#include "rtc.h"
#include "reg_rtc.h"
#include "field_manipulate.h"
void rtc_sw_reset(RTC_HandleTypeDef *inst);
void rtc_clock_enable(RTC_HandleTypeDef *inst,uint8_t status);
void rtc_int_op(void (*isr)(RTC_HandleTypeDef *),RTC_HandleTypeDef *inst,uint8_t states);
void RTC_status_set(RTC_HandleTypeDef *inst,uint8_t status);
#endif
|
weixiang0815/javapractice | self practice/Pt20210116/src/Random_Class.java | <gh_stars>0
import java.util.Random;
/**
 * Small demo of the basic {@link java.util.Random} generators: prints a die
 * roll, a uniform double and a coin flip.
 */
public class Random_Class {
    public static void main(String[] arg) {
        Random generator = new Random();

        // Die roll: uniform integer in [1, 6].
        int roll = generator.nextInt(6) + 1;
        System.out.println(roll);

        // Uniform double in [0.0, 1.0).
        double fraction = generator.nextDouble();
        System.out.println(fraction);

        // Fair coin flip.
        boolean flip = generator.nextBoolean();
        System.out.println(flip);
    }
}
|
5cript/nana-source-view | tests/data_store_tests.hpp | <reponame>5cript/nana-source-view
#pragma once
#include "test_base.hpp"
// Test fixture: seeds a data_store with the bundled sample file before each test.
class DataStoreTests
    : public TestBase
    , public ::testing::Test
{
protected:
    // Raw file contents, spliced in at compile time by the preprocessor.
    std::string testData =
# include "test_data/data1.txt"
    ;
    // Store under test, initialized with the sample text.
    nana_source_view::data_store store{testData};
};
// A freshly constructed store has exactly one caret, positioned at the end
// of the data with no selection, and round-trips the original text.
TEST_F(DataStoreTests, DefaultConstruction)
{
    EXPECT_EQ(store.caret_count(), 1);
    EXPECT_EQ(store.caret_begin()->offset, store.size());
    EXPECT_EQ(store.caret_begin()->range, 0);
    EXPECT_EQ(store.utf8_string(), testData);
}
// Adding a caret at the position of an existing caret must not duplicate it.
TEST_F(DataStoreTests, OverlayCaret)
{
    store.add_caret(store.size());
    EXPECT_EQ(store.caret_count(), 1);
    EXPECT_EQ(store.caret_begin()->offset, store.size());
    EXPECT_EQ(store.caret_begin()->range, 0);
}
// Adding a caret at a new position yields two carets at the expected offsets.
TEST_F(DataStoreTests, AddCaret)
{
    store.add_caret(0);
    EXPECT_EQ(store.caret_count(), 2);
    std::set <caret_type> expectedCarets = {
        {0, 0},
        {static_cast <index_type> (store.size()), 0}
    };
    EXPECT_EQ(store.retrieve_carets(), expectedCarets);
}
// Clearing the store empties the text but keeps a single caret at offset 0.
TEST_F(DataStoreTests, Clear)
{
    store.clear();
    EXPECT_EQ(store.size(), 0);
    EXPECT_EQ(store.caret_count(), 1);
    EXPECT_EQ(store.caret_begin()->offset, store.size());
    EXPECT_EQ(store.caret_begin()->range, 0);
}
|
xuwaters/cp-algorithms | 09-graphs/02-components/02-bridge-offline/bridge_test.go | <filename>09-graphs/02-components/02-bridge-offline/bridge_test.go
package bridge
import (
"testing"
)
// TestBridges checks Graph.FindBridges against hand-verified graphs.
// NOTE(review): the comparison is order-sensitive, so it assumes
// FindBridges returns bridges in a deterministic order — confirm against
// the implementation before adding cases.
func TestBridges(t *testing.T) {
	dataList := []struct {
		n       int      // number of vertices
		bridges [][2]int // expected bridges, in the order FindBridges emits them
		edges   [][2]int // undirected edges of the input graph
	}{
		{
			n:       5,
			bridges: [][2]int{{0, 1}, {0, 2}},
			edges:   [][2]int{{0, 1}, {0, 2}, {2, 3}, {3, 4}, {2, 4}},
		},
	}
	for _, data := range dataList {
		g := NewGraph(data.n)
		g.AddEdges(data.edges)
		bridges := g.FindBridges()
		if equals(bridges, data.bridges) {
			t.Logf(" OK: bridges = %+v, data = %+v", bridges, data)
		} else {
			t.Fatalf("ERR: bridges = %+v, data = %+v", bridges, data)
		}
	}
}
// equals reports whether a and b contain the same bridge pairs in the same
// order.
func equals(a, b [][2]int) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		// [2]int is a comparable array type, so each pair can be compared
		// directly instead of element by element.
		if a[i] != b[i] {
			return false
		}
	}
	return true
}
|
michael-hampton/tamtamcrm | resources/assets/js/components/cases/view/Case.js | import React, { Component } from 'react'
import { Alert, Card, CardBody, CardHeader, Col, Nav, NavItem, NavLink, Row, TabContent, TabPane } from 'reactstrap'
import { translations } from '../../utils/_translations'
import FileUploads from '../../documents/FileUploads'
import CaseModel from '../../models/CaseModel'
import BottomNavigationButtons from '../../common/BottomNavigationButtons'
import Overview from './Overview'
// Detail view for a single "case" entity: a tabbed panel with an overview,
// an attached-documents tab, and an (off-nav) PDF preview tab reachable via
// the bottom navigation buttons.
export default class Case extends Component {
    constructor (props) {
        super(props)
        this.state = {
            entity: this.props.entity,
            activeTab: '1',
            // Number of attached files, kept in sync by the FileUploads tab.
            file_count: this.props.entity.files.length || 0,
            // Object URL of the generated PDF, shown in the iframe of tab 3.
            obj_url: null,
            // Controls the transient "action completed" banner.
            show_success: false
        }

        this.caseModel = new CaseModel(this.state.entity)
        this.toggleTab = this.toggleTab.bind(this)
        this.triggerAction = this.triggerAction.bind(this)
        this.loadPdf = this.loadPdf.bind(this)
        this.refresh = this.refresh.bind(this)
    }

    // Rebuilds the model wrapper and local state from a freshly fetched entity.
    refresh (entity) {
        this.caseModel = new CaseModel(entity)

        this.setState({ entity: entity })
    }

    // Runs a named action on the model (e.g. "clone_to_invoice"), propagates
    // the response to the parent, and flashes the success banner for 2s.
    triggerAction (action) {
        this.caseModel.completeAction(this.state.entity, action).then(response => {
            this.setState({ show_success: true }, () => {
                this.props.updateState(response, this.refresh)
            })

            setTimeout(
                function () {
                    this.setState({ show_success: false })
                }
                    .bind(this),
                2000
            )
        })
    }

    // Fetches the PDF as an object URL for the preview iframe.
    // NOTE(review): the URL is revoked in the setState callback, i.e. right
    // after it is handed to the iframe — if the iframe has not finished
    // loading the blob by then, the preview may break. Confirm intended.
    loadPdf () {
        this.caseModel.loadPdf().then(url => {
            console.log('url', url)
            this.setState({ obj_url: url }, () => URL.revokeObjectURL(url))
        })
    }

    // Switches the active tab; lazily loads the PDF when tab 3 is selected.
    toggleTab (tab) {
        if (this.state.activeTab !== tab) {
            this.setState({ activeTab: tab }, () => {
                if (this.state.activeTab === '3') {
                    this.loadPdf()
                }
            })
        }
    }

    render () {
        // Theme-dependent CSS classes (dark theme toggled via localStorage).
        const listClass = !Object.prototype.hasOwnProperty.call(localStorage, 'dark_theme') || (localStorage.getItem('dark_theme') && localStorage.getItem('dark_theme') === 'true') ? 'list-group-item-dark' : ''
        const buttonClass = localStorage.getItem('dark_theme') && localStorage.getItem('dark_theme') === 'true' ? 'btn-dark' : ''

        return (
            <React.Fragment>
                <Nav tabs className="nav-justified disable-scrollbars">
                    <NavItem>
                        <NavLink
                            className={this.state.activeTab === '1' ? 'active' : ''}
                            onClick={() => {
                                this.toggleTab('1')
                            }}
                        >
                            {translations.details}
                        </NavLink>
                    </NavItem>

                    <NavItem>
                        <NavLink
                            className={this.state.activeTab === '2' ? 'active' : ''}
                            onClick={() => {
                                this.toggleTab('2')
                            }}
                        >
                            {translations.documents} ({this.state.file_count})
                        </NavLink>
                    </NavItem>
                </Nav>

                <TabContent activeTab={this.state.activeTab}>
                    <TabPane tabId="1">
                        <Overview model={this.caseModel} entity={this.state.entity}
                            customers={this.props.customers}/>
                    </TabPane>

                    <TabPane tabId="2">
                        <Row>
                            <Col>
                                <Card>
                                    <CardHeader>{translations.documents}</CardHeader>
                                    <CardBody>
                                        <FileUploads updateCount={(count) => {
                                            this.setState({ file_count: count })
                                        }} entity_type="Cases" entity={this.state.entity}
                                        user_id={this.state.entity.user_id}/>
                                    </CardBody>
                                </Card>
                            </Col>
                        </Row>
                    </TabPane>

                    {/* Tab 3 has no Nav entry; it is opened by the bottom "view pdf" button. */}
                    <TabPane tabId="3">
                        <Row>
                            <Col>
                                <Card>
                                    <CardHeader> {translations.pdf} </CardHeader>
                                    <CardBody>
                                        <iframe style={{ width: '400px', height: '400px' }}
                                            className="embed-responsive-item" id="viewer"
                                            src={this.state.obj_url}/>
                                    </CardBody>
                                </Card>
                            </Col>
                        </Row>
                    </TabPane>
                </TabContent>

                {this.state.show_success &&
                <Alert color="primary">
                    {translations.action_completed}
                </Alert>
                }

                <BottomNavigationButtons button1_click={(e) => this.toggleTab('3')}
                    button1={{ label: translations.view_pdf }}
                    button2_click={(e) => this.triggerAction('clone_to_invoice')}
                    button2={{ label: translations.clone_to_invoice }}/>
            </React.Fragment>
        )
    }
}
|
Thehackershivu/JAVA-Programs | EID2.java | import java.io.*;
import java.lang.*;
import java.math.*;
import java.util.*;
/**
 * Decides whether money was distributed fairly among three children.
 *
 * Per the original decision table: a strictly older child must receive
 * strictly more money, and equally-aged children must receive equal
 * amounts. Input per test case: three ages followed by three amounts, in
 * the same child order. Output: "FAIR" or "NOT FAIR" per test case.
 *
 * The original hand-expanded ordering logic contained at least one broken
 * branch (ages a1 == a2 with a3 larger assigned the smallest slot from the
 * wrong child, e.g. ages 5 5 7 with money 1 1 2 wrongly printed
 * "NOT FAIR"); this version sorts the (age, money) pairs and checks
 * adjacent pairs instead.
 */
class EID2
{
	public static void main(String[]args)throws IOException
	{
		BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
		int testCases = Integer.parseInt(reader.readLine().trim());
		StringBuilder output = new StringBuilder();
		for (int t = 0; t < testCases; t++)
		{
			StringTokenizer st = new StringTokenizer(reader.readLine());
			// children[i] = { age, money } for child i.
			long[][] children = new long[3][2];
			for (int i = 0; i < 3; i++)
			{
				children[i][0] = Long.parseLong(st.nextToken());
			}
			for (int i = 0; i < 3; i++)
			{
				children[i][1] = Long.parseLong(st.nextToken());
			}
			// Order by age ascending; fairness is then a simple check on
			// each adjacent pair.
			Arrays.sort(children, (x, y) -> Long.compare(x[0], y[0]));
			boolean fair = true;
			for (int i = 1; i < 3 && fair; i++)
			{
				if (children[i][0] == children[i - 1][0])
				{
					// Same age: amounts must match exactly.
					fair = children[i][1] == children[i - 1][1];
				}
				else
				{
					// Strictly older: must receive strictly more.
					fair = children[i][1] > children[i - 1][1];
				}
			}
			output.append(fair ? "FAIR" : "NOT FAIR").append(System.lineSeparator());
		}
		System.out.print(output);
	}
}
mnemonic01/verrazzano | application-operator/test/integ/k8s/resource.go | <reponame>mnemonic01/verrazzano<filename>application-operator/test/integ/k8s/resource.go
// Copyright (C) 2020, 2021, Oracle and/or its affiliates.
// Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
package k8s
import (
"context"
"encoding/json"
"fmt"
"strings"
oamv1 "github.com/crossplane/oam-kubernetes-runtime/apis/core/v1alpha2"
"github.com/onsi/ginkgo"
clustersv1alpha1 "github.com/verrazzano/verrazzano/application-operator/apis/clusters/v1alpha1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// DoesCRDExist returns true if the given CRD exists
func (c Client) DoesCRDExist(crdName string) bool {
crds, err := c.apixClient.CustomResourceDefinitions().List(context.Background(), metav1.ListOptions{})
if err != nil {
ginkgo.Fail("ginkgo.Failed to get list of CustomResourceDefinitions")
}
for i := range crds.Items {
if strings.Compare(crds.Items[i].ObjectMeta.Name, crdName) == 0 {
return true
}
}
return false
}
// DoesClusterRoleExist returns true if the given ClusterRole exists.
// Unexpected (non-NotFound) API errors fail the ginkgo test.
func (c Client) DoesClusterRoleExist(name string) bool {
	_, err := c.clientset.RbacV1().ClusterRoles().Get(context.TODO(), name, metav1.GetOptions{})
	return procExistsStatus(err, "ClusterRole")
}

// DoesClusterRoleBindingExist returns true if the given ClusterRoleBinding exists.
func (c Client) DoesClusterRoleBindingExist(name string) bool {
	_, err := c.clientset.RbacV1().ClusterRoleBindings().Get(context.TODO(), name, metav1.GetOptions{})
	return procExistsStatus(err, "ClusterRoleBinding")
}

// DoesRoleBindingContainSubject returns true if the RoleBinding exists and it contains the
// specified subject (matched by kind and name; namespace of the subject is ignored).
func (c Client) DoesRoleBindingContainSubject(name, namespace, subjectKind, subjectName string) bool {
	rb, err := c.clientset.RbacV1().RoleBindings(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if exists := procExistsStatus(err, "RoleBinding"); !exists {
		return false
	}
	for _, s := range rb.Subjects {
		if s.Kind == subjectKind && s.Name == subjectName {
			return true
		}
	}
	return false
}
// DoesNamespaceExist returns true if the given Namespace exists.
func (c Client) DoesNamespaceExist(name string) bool {
	_, err := c.clientset.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{})
	return procExistsStatus(err, "Namespace")
}

// DoesSecretExist returns true if the given Secret exists.
func (c Client) DoesSecretExist(name string, namespace string) bool {
	_, err := c.clientset.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	return procExistsStatus(err, "Secret")
}

// DoesDaemonsetExist returns true if the given DaemonSet exists.
func (c Client) DoesDaemonsetExist(name string, namespace string) bool {
	_, err := c.clientset.AppsV1().DaemonSets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	return procExistsStatus(err, "DaemonSet")
}

// DoesDeploymentExist returns true if the given Deployment exists.
func (c Client) DoesDeploymentExist(name string, namespace string) bool {
	_, err := c.clientset.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	return procExistsStatus(err, "Deployment")
}

// IsDeploymentUpdated returns true if the given Deployment has been updated with
// a sidecar container, i.e. its pod template declares at least one container.
func (c Client) IsDeploymentUpdated(name string, namespace string) bool {
	dep, err := c.clientset.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return false
	}
	return len(dep.Spec.Template.Spec.Containers) > 0
}

// DoesPodExist returns true if a Pod whose name starts with the given prefix exists.
func (c Client) DoesPodExist(name string, namespace string) bool {
	return (c.getPod(name, namespace) != nil)
}

// DoesContainerExist returns true if a container with the given name exists
// (and is in Ready state) in any pod whose name starts with podName.
func (c Client) DoesContainerExist(namespace, podName, containerName string) bool {
	pods, err := c.clientset.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		ginkgo.Fail("Could not get list of pods" + err.Error())
		return false
	}
	for _, pod := range pods.Items {
		if strings.HasPrefix(pod.Name, podName) {
			for _, container := range pod.Status.ContainerStatuses {
				if container.Name == containerName && container.Ready {
					return true
				}
			}
		}
	}
	return false
}
// IsPodRunning returns true if a Pod with the given name prefix is in the
// Running phase, has at least one container status, and every container is Ready.
func (c Client) IsPodRunning(name string, namespace string) bool {
	pod := c.getPod(name, namespace)
	if pod != nil {
		if pod.Status.Phase == corev1.PodRunning {
			for _, c := range pod.Status.ContainerStatuses {
				if !c.Ready {
					return false
				}
			}
			return len(pod.Status.ContainerStatuses) != 0
		}
	}
	return false
}

// getPod returns the first pod in the namespace whose name starts with the
// given prefix, or nil if none matches. Indexing into pods.Items (rather
// than ranging by value) returns a pointer into the fetched slice without
// copying the Pod struct.
func (c Client) getPod(name string, namespace string) *corev1.Pod {
	pods, err := c.clientset.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		ginkgo.Fail("Could not get list of pods" + err.Error())
		return nil
	}
	for i := range pods.Items {
		if strings.HasPrefix(pods.Items[i].Name, name) {
			return &pods.Items[i]
		}
	}
	return nil
}

// DoesServiceExist returns true if the given Service exists.
func (c Client) DoesServiceExist(name string, namespace string) bool {
	_, err := c.clientset.CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	return procExistsStatus(err, "Service")
}

// DoesServiceAccountExist returns true if the given ServiceAccount exists.
func (c Client) DoesServiceAccountExist(name string, namespace string) bool {
	_, err := c.clientset.CoreV1().ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	return procExistsStatus(err, "ServiceAccount")
}

// procExistsStatus converts a Get error into an existence flag: nil means
// the object exists, NotFound means it does not, and any other error fails
// the ginkgo test.
func procExistsStatus(err error, msg string) bool {
	if err == nil {
		return true
	}
	if !errors.IsNotFound(err) {
		ginkgo.Fail(fmt.Sprintf("ginkgo.Failed calling API to get %s: %v", msg, err))
	}
	return false
}
// GetAppConfig gets OAM custom-resource ApplicationConfiguration
func (c Client) GetAppConfig(namespace, name string) (*oamv1.ApplicationConfiguration, error) {
bytes, err := c.getRaw("/apis/core.oam.dev/v1alpha2", "applicationconfigurations", namespace, name)
if err != nil {
return nil, err
}
var appConfig oamv1.ApplicationConfiguration
err = json.Unmarshal(bytes, &appConfig)
return &appConfig, err
}
// GetMultiClusterSecret gets the specified MultiClusterSecret resource
func (c Client) GetMultiClusterSecret(namespace, name string) (*clustersv1alpha1.MultiClusterSecret, error) {
bytes, err := c.getRaw("/apis/clusters.verrazzano.io/v1alpha1", "multiclustersecrets", namespace, name)
if err != nil {
return nil, err
}
var mcSecret clustersv1alpha1.MultiClusterSecret
err = json.Unmarshal(bytes, &mcSecret)
return &mcSecret, err
}
// GetSecret gets the specified K8S secret
func (c Client) GetSecret(namespace, name string) (*corev1.Secret, error) {
return c.clientset.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
}
// GetNamespace gets the specified K8S namespace
func (c Client) GetNamespace(name string) (*corev1.Namespace, error) {
return c.clientset.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{})
}
// GetMultiClusterComponent gets the specified MultiClusterComponent
func (c Client) GetMultiClusterComponent(namespace string, name string) (*clustersv1alpha1.MultiClusterComponent, error) {
bytes, err := c.getRaw("/apis/clusters.verrazzano.io/v1alpha1", "multiclustercomponents", namespace, name)
if err != nil {
return nil, err
}
var mcComp clustersv1alpha1.MultiClusterComponent
err = json.Unmarshal(bytes, &mcComp)
return &mcComp, err
}
// GetOAMComponent gets the specified OAM Component
func (c Client) GetOAMComponent(namespace string, name string) (*oamv1.Component, error) {
bytes, err := c.getRaw("/apis/core.oam.dev/v1alpha2", "components", namespace, name)
if err != nil {
return nil, err
}
var comp oamv1.Component
err = json.Unmarshal(bytes, &comp)
return &comp, err
}
// GetMultiClusterConfigMap gets the specified MultiClusterConfigMap
func (c Client) GetMultiClusterConfigMap(namespace string, name string) (*clustersv1alpha1.MultiClusterConfigMap, error) {
bytes, err := c.getRaw("/apis/clusters.verrazzano.io/v1alpha1", "multiclusterconfigmaps", namespace, name)
if err != nil {
return nil, err
}
var mcConfigMap clustersv1alpha1.MultiClusterConfigMap
err = json.Unmarshal(bytes, &mcConfigMap)
return &mcConfigMap, err
}
// GetConfigMap gets the specified K8S ConfigMap
func (c Client) GetConfigMap(namespace string, name string) (*corev1.ConfigMap, error) {
return c.clientset.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{})
}
// GetMultiClusterAppConfig gets the specified MultiClusterApplicationConfiguration
func (c Client) GetMultiClusterAppConfig(namespace string, name string) (*clustersv1alpha1.MultiClusterApplicationConfiguration, error) {
bytes, err := c.getRaw("/apis/clusters.verrazzano.io/v1alpha1", "multiclusterapplicationconfigurations", namespace, name)
if err != nil {
return nil, err
}
var mcAppConf clustersv1alpha1.MultiClusterApplicationConfiguration
err = json.Unmarshal(bytes, &mcAppConf)
return &mcAppConf, err
}
// GetVerrazzanoProject gets the specified VerrazzanoProject
func (c Client) GetVerrazzanoProject(namespace string, name string) (*clustersv1alpha1.VerrazzanoProject, error) {
bytes, err := c.getRaw("/apis/clusters.verrazzano.io/v1alpha1", "verrazzanoprojects", namespace, name)
if err != nil {
return nil, err
}
var vp clustersv1alpha1.VerrazzanoProject
err = json.Unmarshal(bytes, &vp)
return &vp, err
}
// GetOAMAppConfig fetches the OAM ApplicationConfiguration identified by
// namespace/name from the core.oam.dev/v1alpha2 API group.
func (c Client) GetOAMAppConfig(namespace string, name string) (*oamv1.ApplicationConfiguration, error) {
	raw, err := c.getRaw("/apis/core.oam.dev/v1alpha2", "applicationconfigurations", namespace, name)
	if err != nil {
		return nil, err
	}
	out := &oamv1.ApplicationConfiguration{}
	err = json.Unmarshal(raw, out)
	return out, err
}
// getRaw performs a GET against the API server for the named resource under
// the given absolute API path and returns the raw response body bytes.
func (c Client) getRaw(absPath, resource, namespace, name string) ([]byte, error) {
	req := c.clientset.RESTClient().Get().AbsPath(absPath)
	req = req.Namespace(namespace).Resource(resource).Name(name)
	return req.DoRaw(context.TODO())
}
|
egunay/GWT-OpenLayers | gwt-openlayers-client/src/main/java/org/gwtopenmaps/openlayers/client/format/format/XML.java | <reponame>egunay/GWT-OpenLayers
/*
* Copyright 2014 geoSDI.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gwtopenmaps.openlayers.client.format.format;
import com.google.gwt.dom.client.Element;
import com.google.gwt.dom.client.Node;
import com.google.gwt.dom.client.NodeList;
import org.gwtopenmaps.openlayers.client.format.Format;
import org.gwtopenmaps.openlayers.client.util.JSObject;
/**
 * GWT wrapper for the OpenLayers {@code OpenLayers.Format.XML} class: reads an
 * XML string into a DOM node, writes a DOM node back to a string, and exposes
 * the namespace-aware DOM helper methods of the underlying JS object.
 *
 * @param <E> the DOM node type produced by {@link #read(String)}
 *
 * @author <NAME> - CNR IMAA geoSDI Group
 * @email <EMAIL>
 */
public class XML<E extends Node> extends Format<E, String> {

    /** Wraps an existing underlying OpenLayers format JS object. */
    protected XML(JSObject format) {
        super(format);
    }

    /** Creates a new format with default options. */
    public XML() {
        this(XMLImpl.create());
    }

    /** Creates a new format configured with the given options. */
    public XML(XMLOptions xmlOptions) {
        this(XMLImpl.create(xmlOptions.getJSObject()));
    }

    /**
     * Sets a namespace alias and URI for the format.
     *
     * @param alias the namespace alias (prefix)
     * @param uri the namespace URI
     */
    public void setNamespace(String alias, String uri) {
        XMLImpl.setNamespace(getJSObject(), alias, uri);
    }

    /**
     * Deserializes an XML string and returns a DOM node.
     *
     * @param text an XML string
     * @return the parsed DOM node
     */
    @Override
    @SuppressWarnings("unchecked")
    public E read(String text) {
        // The JSNI bridge returns a raw Node; the cast to E is unchecked by
        // design and relies on the caller's choice of type parameter.
        return (E) XMLImpl.read(getJSObject(), text);
    }

    /**
     * Serializes a DOM node into an XML string.
     *
     * @param el a DOM node
     * @return the XML string representation of the input node
     */
    public String write(E el) {
        return XMLImpl.write(getJSObject(), el);
    }

    /**
     * Gets a list of elements on a node given the namespace URI and local
     * name. To return all nodes in a given namespace, use '*' for the name
     * argument. To return all nodes of a given (local) name, regardless of
     * namespace, use '*' for the uri argument.
     *
     * @param <T> the node type of the returned list
     * @param el node on which to search for other nodes
     * @param uri namespace URI
     * @param name local name of the tag (without the prefix)
     * @return a node list of matching elements
     */
    public <T extends Node> NodeList<T> getElementsByTagNameNS(Element el,
            String uri, String name) {
        return XMLImpl.getElementsByTagNameNS(getJSObject(), el, uri, name);
    }

    /**
     * Determines whether a node has a particular attribute matching the given
     * name and namespace.
     *
     * @param el node on which to search for an attribute
     * @param uri namespace URI
     * @param name local name of the attribute (without the prefix)
     * @return {@code true} if the node has a matching attribute
     */
    public boolean hasAttributeNS(Element el, String uri, String name) {
        return XMLImpl.hasAttributeNS(getJSObject(), el, uri, name);
    }

    /**
     * Gets an attribute node given the namespace URI and local name.
     *
     * @param el node on which to search for attribute nodes
     * @param uri namespace URI
     * @param name local name of the attribute (without the prefix)
     * @return an attribute node, or null if none found
     */
    public Element getAttributeNodeNS(Element el, String uri, String name) {
        return XMLImpl.getAttributeNodeNS(getJSObject(), el, uri, name);
    }

    /**
     * Gets an attribute value given the namespace URI and local name.
     *
     * @param el node on which to search for an attribute
     * @param uri namespace URI
     * @param name local name of the attribute (without the prefix)
     * @return the attribute value, or an empty string if none found
     */
    public String getAttributeNS(Element el, String uri, String name) {
        return XMLImpl.getAttributeNS(getJSObject(), el, uri, name);
    }

    /**
     * Creates a new element with namespace. The result can be appended to
     * another node with the standard node.appendChild method. For
     * cross-browser support, this method must be used instead of
     * document.createElementNS.
     *
     * @param uri namespace URI for the element
     * @param name qualified name of the element (prefix:localname)
     * @return a DOM element with namespace
     */
    public Element createElementNS(String uri, String name) {
        return XMLImpl.createElementNS(getJSObject(), uri, name);
    }

    /**
     * Creates a text node. The result can be appended to another node with
     * the standard node.appendChild method. For cross-browser support, this
     * method must be used instead of document.createTextNode.
     *
     * @param text the text of the node
     * @return a DOM text node
     */
    public Element createTextNode(String text) {
        return XMLImpl.createTextNode(getJSObject(), text);
    }
}
|
newnius/code4hadoop | src/main/java/com/newnius/code4hadoop/book/ch3/ListStatus.java | <filename>src/main/java/com/newnius/code4hadoop/book/ch3/ListStatus.java<gh_stars>0
package com.newnius.code4hadoop.book.ch3;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
import java.net.URI;
/**
 * Prints the paths found under each of the given HDFS paths.
 *
 * Created by newnius on 12/7/16.
 * Usage:
 *   hadoop jar code4hadoop-1.0.jar com.newnius.code4hadoop.book.ch3.ListStatus \
 *       hdfs://localhost/ hdfs://localhost/user/root
 */
public class ListStatus {
    public static void main(String[] args) throws IOException {
        // Guard against an empty argument list: args[0] below would otherwise
        // throw ArrayIndexOutOfBoundsException with no useful message.
        if (args.length == 0) {
            System.err.println("Usage: ListStatus <path> [<path> ...]");
            System.exit(1);
        }
        // The first argument doubles as the URI that selects the FileSystem.
        String uri = args[0];
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(uri), conf);
        // Every argument (including the first) is listed.
        Path[] paths = new Path[args.length];
        for (int i = 0; i < paths.length; i++) {
            paths[i] = new Path(args[i]);
        }
        FileStatus[] stats = fs.listStatus(paths);
        Path[] listedPaths = FileUtil.stat2Paths(stats);
        for (Path p : listedPaths) {
            System.out.println(p);
        }
    }
}
|
tfisher1226/ARIES | nam/nam-view/src/main/java/nam/ui/invocation/InvocationWizard.java | <reponame>tfisher1226/ARIES
package nam.ui.invocation;
import java.io.Serializable;
import javax.enterprise.context.SessionScoped;
import javax.inject.Inject;
import javax.inject.Named;
import nam.model.Project;
import nam.ui.Invocation;
import nam.ui.design.AbstractDomainElementWizard;
import nam.ui.design.SelectionContext;
import org.apache.commons.lang.StringUtils;
import org.aries.util.NameUtil;
@SessionScoped
@Named("invocationWizard")
@SuppressWarnings("serial")
public class InvocationWizard extends AbstractDomainElementWizard<Invocation> implements Serializable {

    @Inject
    private InvocationDataManager invocationDataManager;

    @Inject
    private InvocationPageManager invocationPageManager;

    @Inject
    private InvocationEventManager invocationEventManager;

    @Inject
    private SelectionContext selectionContext;

    /** Display name of the wizard, used by the framework UI. */
    @Override
    public String getName() {
        return "Invocation";
    }

    /** URL of the wizard's entry page. */
    @Override
    public String getUrlContext() {
        return invocationPageManager.getInvocationWizardPage();
    }

    /** Records the origin URL and wires up the wizard pages before editing. */
    @Override
    public void initialize(Invocation invocation) {
        setOrigin(getSelectionContext().getUrl());
        assignPages(invocationPageManager.getSections());
        super.initialize(invocation);
    }

    // The three enablement checks below are pass-throughs kept so subclasses
    // and the page framework have a single overridable hook per wizard.
    @Override
    public boolean isBackEnabled() {
        return super.isBackEnabled();
    }

    @Override
    public boolean isNextEnabled() {
        return super.isNextEnabled();
    }

    @Override
    public boolean isFinishEnabled() {
        return super.isFinishEnabled();
    }

    /** Re-shows the current page and refreshes page-manager state. */
    @Override
    public String refresh() {
        String url = super.showPage();
        selectionContext.setUrl(url);
        invocationPageManager.updateState();
        return url;
    }

    /** Navigates to the first page. */
    @Override
    public String first() {
        String url = super.first();
        invocationPageManager.updateState();
        return url;
    }

    /** Navigates one page back. */
    @Override
    public String back() {
        String url = super.back();
        invocationPageManager.updateState();
        return url;
    }

    /** Navigates one page forward. */
    @Override
    public String next() {
        String url = super.next();
        invocationPageManager.updateState();
        return url;
    }

    @Override
    public boolean isValid() {
        return super.isValid();
    }

    /** Persists the invocation, notifies listeners, and returns to the origin page. */
    @Override
    public String finish() {
        Invocation invocation = getInstance();
        invocationDataManager.saveInvocation(invocation);
        invocationEventManager.fireSavedEvent(invocation);
        String url = selectionContext.popOrigin();
        return url;
    }

    /** Fires a cancellation event and returns to the origin page. */
    @Override
    public String cancel() {
        Invocation invocation = getInstance();
        //TODO take this out soon
        if (invocation == null)
            invocation = new Invocation();
        invocationEventManager.fireCancelledEvent(invocation);
        String url = selectionContext.popOrigin();
        return url;
    }

    /**
     * Validates that the selected invocation has a name; reports an error and
     * stays on the page when it does not, otherwise proceeds to the wizard URL.
     *
     * @return the wizard URL, or null when validation fails
     */
    public String populateDefaultValues() {
        Invocation invocation = selectionContext.getSelection("invocation");
        String name = invocation.getName();
        if (StringUtils.isEmpty(name)) {
            display = getFromSession("display");
            display.setModule("invocationWizard");
            display.error("Invocation name must be specified");
            return null;
        }
        // Removed dead code: the previous version fetched the "project"
        // selection and computed capped/uncapped variants of the name
        // (NameUtil.capName / uncapName) but never used any of them.
        return getUrl();
    }
}
|
aasensio/pyiacsun | pyiacsun/linalg/rpcaRow.py | """Summary"""
from __future__ import print_function
__all__ = ['rpcaRow']
import numpy as np
def _proxLowRank(X, mu):
"""Summary
Args:
X (TYPE): Description
mu (TYPE): Description
Returns:
TYPE: Description
"""
U, w, V = np.linalg.svd(X, full_matrices=True)
ind = np.where(np.abs(w) < mu)[0]
w[ind] = 0.0
ind = np.where(np.abs(w) > mu)[0]
w[ind] = w[ind] - mu * np.sign(w[ind])
n = V.shape[0]
S = np.zeros((U.shape[1], V.shape[0]))
S[:n,:n] = np.diag(w)
return U.dot(S).dot(V)
def _proxSparse(X, mu):
"""Summary
Args:
X (TYPE): Description
mu (TYPE): Description
Returns:
TYPE: Description
"""
nRow, nCol = X.shape
out = np.copy(X)
for i in range(nRow):
colNorm = np.linalg.norm(out[i,:], 2)
if (colNorm < mu):
out[i,:] = np.zeros(nCol)
else:
out[i,:] = out[i,:] - mu / colNorm * out[i,:]
return out
def rpcaRow(X, lamb, Omega=None, tolerance=1e-5):
    """Decompose the matrix X as the sum of two components

    X = L + S

    where L is low-rank and S is row-sparse, via an accelerated
    proximal-gradient (FISTA-style) scheme with continuation on mu.
    Based on "Robust PCA via Outlier Pursuit" (Xu et al. 2012)
    http://guppy.mpe.nus.edu.sg/~mpexuh/papers/OutlierPursuit-TIT.pdf
    http://guppy.mpe.nus.edu.sg/~mpexuh/publication.html

    Example:
        nl = 50
        nobs = 200
        rank = 3
        ncorr = 12
        x = np.random.randn(nobs, rank)
        y = np.random.randn(rank, nl)
        LObs = x.dot(y)
        CObs = np.zeros_like(LObs)
        ind = np.random.permutation(nobs)
        for i in range(ncorr):
            CObs[ind[i], :] = np.random.randn(nl)
        MObs = LObs + CObs
        L, S = rpcaRow(MObs, 0.35)

    Args:
        X (ndarray): n x m matrix to decompose
        lamb (float): regularization weight of the row-sparse term
        Omega (ndarray, optional): 0/1 mask of observed entries of X.
            If not given, all entries are assumed observed.
        tolerance (float, optional): relative stopping tolerance

    Returns:
        tuple: (L, S) — the low-rank and the row-sparse components
    """
    # BUG FIX: `Omega == None` performs an elementwise comparison when Omega
    # is an ndarray and raises "truth value of an array is ambiguous";
    # identity comparison is the correct test.
    if Omega is None:
        Omega = np.ones_like(X)
    L0 = np.zeros_like(X)
    C0 = np.zeros_like(X)
    L1 = np.zeros_like(X)
    C1 = np.zeros_like(X)
    t0 = 1.0
    t1 = 1.0
    delta = 0.00001
    # Continuation: start with a large threshold and decay it towards muBar.
    mu = 0.5 * np.linalg.norm(X)
    muBar = delta * mu
    eta = 0.9
    tol = tolerance * np.linalg.norm(X, 'fro')
    norm = 1e10
    loop = 0
    while (norm > tol**2):
        # Nesterov extrapolation point from the two previous iterates.
        YL = L0 + (t1 - 1) / t0 * (L0 - L1)
        YC = C0 + (t1 - 1) / t0 * (C0 - C1)
        # Gradient of the data-fit term, restricted to observed entries.
        MDiff = (YL + YC - X) * Omega
        # Proximal steps (step size 1/2) for each component.
        GL = YL - 0.5 * MDiff
        LNew = _proxLowRank(GL, 0.5 * mu)
        GC = YC - 0.5 * MDiff
        CNew = _proxSparse(GC, 0.5 * mu * lamb)
        t1 = np.copy(t0)
        t0 = 0.5 * (1.0 + np.sqrt(4.0 * t0**2 + 1))
        L1 = np.copy(L0)
        L0 = np.copy(LNew)
        C1 = np.copy(C0)
        C0 = np.copy(CNew)
        mu = np.max([eta * mu, muBar])
        # Proximal-gradient residual used as the stopping criterion.
        SL = 2.0 * (YL - LNew) + (LNew + CNew - YL - YC)
        SC = 2.0 * (YC - CNew) + (LNew + CNew - YL - YC)
        norm = np.linalg.norm(SL, 'fro')**2 + np.linalg.norm(SC, 'fro')**2
        print("it: {0} - norm={1}".format(loop, norm))
        loop += 1
    return LNew, CNew
|
UTSAAH/Dhwani-HA_Library | src/FFT_Overlapped_F32.h | /*
* FFT_Overrlapped_F32
*
* Purpose: Encapsulate the ARM floating point FFT/IFFT functions
* in a way that naturally interfaces to my float32
* extension of the Teensy Audio Library.
*
* Provides functionality to do overlapped FFT/IFFT where
* each audio block is a fraction (1, 1/2, 1/4) of the
* totaly FFT length. This class handles all of the
* data shuffling to composite the previous data blocks
* with the current data block to provide the full FFT.
* Does similar data shuffling (overlapp-add) for IFFT.
*
* Created: <NAME> (openaudio.blogspot.com)
* Jan-Jul 2017
*
* Typical Usage as FFT:
*
* //setup the audio stuff
* float sample_rate_Hz = 44100.0; //define sample rate
* int audio_block_samples = 32; //define size of audio blocks
* AudioSettings_F32 audio_settings(sample_rate_Hz, audio_block_samples);
* // ... continue creating all of your Audio Processing Blocks ...
*
* // within a custom audio processing algorithm that you've written
* // you'd create the FFT and IFFT elements
* int NFFT = 128; //define length of FFT that you want (multiple of audio_block_samples)
* FFT_Overrlapped_F32 FFT_obj(audio_settings,NFFT); //Creare FFT object
* FFT_Overrlapped_F32 IFFT_obj(audio_settings,NFFT); //Creare IFFT object
* float complex_2N_buffer[2*NFFT]; //create buffer to hold the FFT output
*
* // within your own algorithm's "update()" function (which is what
* // is called automatically by the Teensy Audio Libarary approach
* // to audio processing), you can execute the FFT and IFFT
*
* // First, get the audio and convert to frequency-domain using an FFT
* audio_block_f32_t *in_audio_block = AudioStream_F32::receiveReadOnly_f32();
* FFT_obj.execute(in_audio_block, complex_2N_buffer); //output is in complex_2N_buffer
* AudioStream_F32::release(in_audio_block); //We just passed ownership to FFT_obj, so release it here.
*
* // Next do whatever processing you'd like on the frequency domain data
* // that is held in complex_2N_buffer
*
* // Finally, you can convert back to the time domain via IFFT
* audio_block_f32_t *out_audio_block = IFFT_obj.execute(complex_2N_buffer);
* //note that the "out_audio_block" is mananged by IFFT_obj, so don't worry about releasing it.
*
* License: MIT License
*/
#ifndef _FFT_Overlapped_F32_h
#define _FFT_Overlapped_F32_h
#include "AudioStream_F32.h"
#include <arm_math.h>
#include "FFT_F32.h"
//#include "utility/dspinst.h" //copied from analyze_fft256.cpp. Do we need this?
// set the max amount of allowed overlap...some number larger than you'll want to use
#define MAX_N_BUFF_BLOCKS 32 //32 blocks x 16 sample blocks enables NFFT = 512, if the Teensy could keep up.
// Holds the data structures for the overlapping logic shared by the FFT and
// IFFT wrappers: the ring of previous audio blocks plus the complex working
// buffer.  Doesn't care whether the subclass does an FFT or an IFFT.
class FFT_Overlapped_Base_F32 {
  public:
    FFT_Overlapped_Base_F32(void) {};
    // Virtual so that deleting a derived object through a base pointer runs
    // the derived destructor as well (this class has virtual methods and is
    // used polymorphically).
    virtual ~FFT_Overlapped_Base_F32(void) {
      if (N_BUFF_BLOCKS > 0) {
        for (int i = 0; i < N_BUFF_BLOCKS; i++) {
          if (buff_blocks[i] != NULL) AudioStream_F32::release(buff_blocks[i]);
        }
      }
      // complex_buffer was allocated with new[], so it must be released with
      // delete[] (the original plain `delete` was undefined behavior).
      if (complex_buffer != NULL) delete[] complex_buffer;
    }
    // Chooses a valid FFT length, allocates the complex buffer and the ring
    // of history blocks.  Returns the actual FFT length used, or -1 when
    // _N_FFT is not a supported size.
    virtual int setup(const AudioSettings_F32 &settings, const int _N_FFT) {
      int N_FFT;

      // reject invalid FFT lengths
      if (!FFT_F32::is_valid_N_FFT(_N_FFT)) {
        Serial.println(F("FFT_Overlapped_Base_F32: *** ERROR ***"));
        Serial.print(F("  : N_FFT ")); Serial.print(_N_FFT);
        Serial.print(F(" is not allowed.  Try a power of 2 between 16 and 2048"));
        N_FFT = -1;
        return N_FFT;
      }

      // how many audio blocks will compose each FFT?
      audio_block_samples = settings.audio_block_samples;
      N_BUFF_BLOCKS = _N_FFT / audio_block_samples; //truncates!
      N_BUFF_BLOCKS = max(1,min(MAX_N_BUFF_BLOCKS,N_BUFF_BLOCKS));

      // what does the fft length actually end up being?
      N_FFT = N_BUFF_BLOCKS * audio_block_samples;

      // If setup() is called a second time, free the previous buffer so it
      // is not leaked.  NOTE(review): history blocks from a previous setup()
      // are still leaked here because N_BUFF_BLOCKS was already overwritten;
      // callers are expected to call setup() only once per instance.
      if (complex_buffer != NULL) delete[] complex_buffer;
      complex_buffer = new float32_t[2*N_FFT]; // TODO: check allocation success

      // initialize the blocks for holding the previous data
      for (int i = 0; i < N_BUFF_BLOCKS; i++) {
        buff_blocks[i] = AudioStream_F32::allocate_f32();
        clear_audio_block(buff_blocks[i]);
      }
      return N_FFT;
    }
    virtual int getNFFT(void) = 0;
    virtual int getNBuffBlocks(void) { return N_BUFF_BLOCKS; }

  protected:
    int N_BUFF_BLOCKS = 0;          // how many audio blocks per FFT window
    int audio_block_samples;        // samples per audio block (set in setup)
    audio_block_f32_t *buff_blocks[MAX_N_BUFF_BLOCKS];  // history of blocks
    // NULL until setup() runs; the original left this uninitialized, which
    // made the destructor's delete a use of an indeterminate pointer when
    // setup() was never called.
    float32_t *complex_buffer = NULL;

    void clear_audio_block(audio_block_f32_t *block) {
      for (int i = 0; i < block->length; i++) block->data[i] = 0.f;
    }
};
// Forward (time -> frequency) overlapped FFT.  Adds an FFT_F32 engine on top
// of the base class's overlap bookkeeping.
class FFT_Overlapped_F32: public FFT_Overlapped_Base_F32
{
  public:
    // constructors; the (settings, N_FFT) form performs setup() immediately
    FFT_Overlapped_F32(void): FFT_Overlapped_Base_F32() {};
    FFT_Overlapped_F32(const AudioSettings_F32 &settings): FFT_Overlapped_Base_F32() { }
    FFT_Overlapped_F32(const AudioSettings_F32 &settings, const int _N_FFT): FFT_Overlapped_Base_F32() {
      setup(settings,_N_FFT);
    }

    // Allocates the overlap buffers (base class) and then configures the FFT
    // engine to the resulting length.  Returns the actual FFT length.
    virtual int setup(const AudioSettings_F32 &settings, const int _N_FFT) {
      int N_FFT = FFT_Overlapped_Base_F32::setup(settings, _N_FFT);

      //setup the FFT routines
      N_FFT = myFFT.setup(N_FFT);
      return N_FFT;
    }
    // Shifts `block` into the overlap history and writes the complex FFT of
    // the composited window into complex_2N_buffer (defined in the .cpp).
    virtual void execute(audio_block_f32_t *block, float *complex_2N_buffer);
    virtual int getNFFT(void) { return myFFT.getNFFT(); };
    FFT_F32* getFFTObject(void) { return &myFFT; };
    // Delegates reconstruction of the conjugate-symmetric upper half of the
    // spectrum to the FFT engine.
    virtual void rebuildNegativeFrequencySpace(float *complex_2N_buffer) { myFFT.rebuildNegativeFrequencySpace(complex_2N_buffer); }

  private:
    FFT_F32 myFFT;   // the underlying (non-overlapped) FFT engine
};
// Inverse (frequency -> time) overlapped FFT.  Mirrors FFT_Overlapped_F32
// but wraps an IFFT_F32 engine and performs overlap-add on output.
class IFFT_Overlapped_F32: public FFT_Overlapped_Base_F32
{
  public:
    // constructors; the (settings, N_FFT) form performs setup() immediately
    IFFT_Overlapped_F32(void): FFT_Overlapped_Base_F32() {};
    IFFT_Overlapped_F32(const AudioSettings_F32 &settings): FFT_Overlapped_Base_F32() { }
    IFFT_Overlapped_F32(const AudioSettings_F32 &settings, const int _N_FFT): FFT_Overlapped_Base_F32() {
      setup(settings,_N_FFT);
    }

    // Allocates the overlap buffers (base class) and then configures the
    // IFFT engine to the resulting length.  Returns the actual FFT length.
    virtual int setup(const AudioSettings_F32 &settings, const int _N_FFT) {
      int N_FFT = FFT_Overlapped_Base_F32::setup(settings, _N_FFT);

      //setup the FFT routines
      N_FFT = myIFFT.setup(N_FFT);
      return N_FFT;
    }
    // Converts the complex spectrum back to the time domain and overlap-adds
    // it with previous output; the returned block is managed by this object
    // (callers must not release it).  Defined in the .cpp.
    virtual audio_block_f32_t* execute(float *complex_2N_buffer);
    virtual int getNFFT(void) { return myIFFT.getNFFT(); };
    IFFT_F32* getFFTObject(void) { return &myIFFT; };
    IFFT_F32* getIFFTObject(void) { return &myIFFT; };

  private:
    IFFT_F32 myIFFT;   // the underlying (non-overlapped) IFFT engine
};
#endif
|
0katekate0/WxJava | weixin-java-pay/src/main/java/com/github/binarywang/wxpay/service/PayrollService.java | <gh_stars>1-10
package com.github.binarywang.wxpay.service;
import com.github.binarywang.wxpay.bean.marketing.payroll.*;
import com.github.binarywang.wxpay.exception.WxPayException;
/**
 * Payroll Card service — wraps the WeChat Pay v3 payroll-card
 * ("微工卡") partner APIs.
 *
 * @author xiaoqiang
 * @date 2021/12/7 14:26
 */
public interface PayrollService {

    /**
     * Generates an authorization token.
     * Applicable to: partners (service providers).
     * Request URL: https://api.mch.weixin.qq.com/v3/payroll-card/tokens
     * HTTP method: POST
     *
     * @param request request parameters
     * @return response data
     * @throws WxPayException the wx pay exception
     */
    TokensResult payrollCardTokens(TokensRequest request) throws WxPayException;

    /**
     * Queries the payroll-card authorization relation for a user.
     * Applicable to: partners (service providers).
     * Request URL: https://api.mch.weixin.qq.com/v3/payroll-card/relations/{openid}
     * HTTP method: GET
     *
     * @param request request parameters
     * @return response data
     * @throws WxPayException the wx pay exception
     */
    RelationsResult payrollCardRelations(RelationsRequest request) throws WxPayException;

    /**
     * Creates an identity-verification (核身) pre-order for the payroll card.
     * Applicable to: partners (service providers).
     * Request URL: https://api.mch.weixin.qq.com/v3/payroll-card/authentications/pre-order
     * HTTP method: POST
     *
     * @param request request parameters
     * @return response data
     * @throws WxPayException the wx pay exception
     */
    PreOrderResult payrollCardPreOrder(PreOrderRequest request) throws WxPayException;

    /**
     * Gets the result of an identity verification.
     * Applicable to: partners (service providers).
     * Request URL: https://api.mch.weixin.qq.com/v3/payroll-card/authentications/{authenticate_number}
     * HTTP method: GET
     *
     * @param subMchid sub-merchant number
     * @param authenticateNumber the merchant's identity-verification order number
     * @return response data
     * @throws WxPayException the wx pay exception
     */
    AuthenticationsResult payrollCardAuthenticationsNumber(String subMchid, String authenticateNumber) throws WxPayException;

    /**
     * Queries identity-verification records.
     * Applicable to: partners (service providers).
     * Request URL: https://api.mch.weixin.qq.com/v3/payroll-card/authentications
     * HTTP method: GET
     *
     * @param request request parameters
     * @return response data
     * @throws WxPayException the wx pay exception
     */
    AuthRecordResult payrollCardAuthentications(AuthRecordRequest request) throws WxPayException;

    /**
     * Creates an identity-verification pre-order with the authorization step
     * completed within the same flow.
     * Applicable to: partners (service providers).
     * Request URL: https://api.mch.weixin.qq.com/v3/payroll-card/authentications/pre-order-with-auth
     * HTTP method: POST
     *
     * @param request request parameters
     * @return response data
     * @throws WxPayException the wx pay exception
     */
    PreOrderWithAuthResult payrollCardPreOrderWithAuth(PreOrderWithAuthRequest request) throws WxPayException;

    /**
     * Downloads the daily withdrawal-exception bill file.
     * Applicable to: partners (service providers).
     * Request URL: https://api.mch.weixin.qq.com/v3/merchant/fund/withdraw/bill-type/{bill_type}
     * HTTP method: GET
     *
     * NOTE(review): the return type PreOrderWithAuthResult looks copy-pasted
     * from the previous method — confirm it against the actual bill-download
     * response model before relying on it.
     *
     * @param billType bill type.
     *                 NO_SUCC: withdrawal-exception bills, covering both failed
     *                 and returned (bounced) withdrawals.
     *                 Example: NO_SUCC
     * @param billDate bill date in YYYY-MM-DD format, identifying the day the
     *                 bill belongs to.
     *                 E.g. a withdrawal initiated on 2008-01-01 whose failure is
     *                 reported by the bank on 2008-01-03 appears in the bill
     *                 with bill_date 2008-01-03.
     *                 Example: 2019-08-17
     * @return response data
     * @throws WxPayException the wx pay exception
     */
    PreOrderWithAuthResult merchantFundWithdrawBillType(String billType, String billDate) throws WxPayException;
}
|
arc03/30-DaysOfCode-March-2021 | answers/Aditri/Day1/question1.java | import java.util.*;
/**
 * Day 1, question 1: reads a count n and prints the first n terms of the
 * series t(i) = 2*i + i^3.
 */
class ques1d1 {

    /**
     * Returns the i-th term of the series: 2*i + i^3.
     * Uses exact integer arithmetic instead of the original
     * {@code (int) Math.pow(i, 3)} cast, which goes through floating point.
     */
    public static int term(int i) {
        return 2 * i + i * i * i;
    }

    public static void main(String[] args) {
        // try-with-resources closes the Scanner (the original leaked it)
        try (Scanner sc = new Scanner(System.in)) {
            System.out.println("Enter the number of terms");
            int n = sc.nextInt();
            for (int i = 1; i <= n; i++) {
                System.out.println(term(i));
            }
        }
    }
}
|
jilen/quill | quill-core/src/main/scala/io/getquill/ast/Ast.scala | <gh_stars>0
package io.getquill.ast
import io.getquill.ast.AstShow.astShow
import io.getquill.util.Show.Shower
//************************************************************

// Root of the quotation AST.  toString renders the node through the AstShow
// type class so printed trees match the quoted representation.
sealed trait Ast {
  override def toString = {
    import io.getquill.util.Show._
    import io.getquill.ast.AstShow._
    this.show
  }
}

//************************************************************

// Query nodes: monadic query combinators over an Entity source.
sealed trait Query extends Ast

// A table/collection source, optionally renamed and with column aliases.
case class Entity(name: String, alias: Option[String] = None, properties: List[PropertyAlias] = List()) extends Query

// Maps a property name to its aliased (e.g. column) name.
case class PropertyAlias(property: String, alias: String)

case class Filter(query: Ast, alias: Ident, body: Ast) extends Query

case class Map(query: Ast, alias: Ident, body: Ast) extends Query

case class FlatMap(query: Ast, alias: Ident, body: Ast) extends Query

case class SortBy(query: Ast, alias: Ident, criterias: Ast, ordering: Ordering) extends Query

// Sort orderings; TupleOrdering carries one ordering per sort criterion.
sealed trait Ordering
case class TupleOrdering(elems: List[Ordering]) extends Ordering

sealed trait PropertyOrdering extends Ordering
case object Asc extends PropertyOrdering
case object Desc extends PropertyOrdering
case object AscNullsFirst extends PropertyOrdering
case object DescNullsFirst extends PropertyOrdering
case object AscNullsLast extends PropertyOrdering
case object DescNullsLast extends PropertyOrdering

case class GroupBy(query: Ast, alias: Ident, body: Ast) extends Query

case class Aggregation(operator: AggregationOperator, ast: Ast) extends Query

case class Take(query: Ast, n: Ast) extends Query

case class Drop(query: Ast, n: Ast) extends Query

case class Union(a: Ast, b: Ast) extends Query

case class UnionAll(a: Ast, b: Ast) extends Query

// Outer join of two sources with the join condition in `on`.
case class OuterJoin(typ: OuterJoinType, a: Ast, b: Ast, aliasA: Ident, aliasB: Ident, on: Ast) extends Query

//************************************************************

// Raw SQL fragment with interleaved AST parameters (string interpolation).
case class Infix(parts: List[String], params: List[Ast]) extends Ast

case class Function(params: List[Ident], body: Ast) extends Ast

case class Ident(name: String) extends Ast

// Property access, e.g. `person.name`.
case class Property(ast: Ast, name: String) extends Ast

// Operation over an Option value (map/forall/exists style).
case class OptionOperation(t: OptionOperationType, ast: Ast, alias: Ident, body: Ast) extends Ast

case class If(condition: Ast, `then`: Ast, `else`: Ast) extends Ast

//************************************************************

// Operator applications.
sealed trait Operation extends Ast

case class UnaryOperation(operator: UnaryOperator, ast: Ast) extends Operation
case class BinaryOperation(a: Ast, operator: BinaryOperator, b: Ast) extends Operation
case class FunctionApply(function: Ast, values: List[Ast]) extends Operation

//************************************************************

// Literal values.
sealed trait Value extends Ast

case class Constant(v: Any) extends Value

object NullValue extends Value

case class Tuple(values: List[Ast]) extends Value

//************************************************************

// Mutation actions over a query target.
sealed trait Action extends Ast

case class Update(query: Ast) extends Action
case class Insert(query: Ast) extends Action
case class Delete(query: Ast) extends Action

// An action together with explicit column assignments.
case class AssignedAction(action: Ast, assignments: List[Assignment]) extends Action

case class Assignment(input: Ident, property: String, value: Ast)

//************************************************************

// Escape hatch carrying an arbitrary (dynamically built) tree.
case class Dynamic(tree: Any) extends Ast
|
npocmaka/Windows-Server-2003 | com/netfx/src/clr/dlls/mscordbc/mscordbc.cpp | // ==++==
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// ==--==
//*****************************************************************************
// MSCorDBC.cpp
//
// COM+ Debugging Services -- Runtime Controller DLL
//
// Dll* routines for entry points.
//
//*****************************************************************************
#include "stdafx.h"
//*****************************************************************************
// The main dll entry point for this module.  Called by the OS for process and
// thread attach/detach; this DLL needs no per-reason handling.
//*****************************************************************************
BOOL WINAPI DllMain(HINSTANCE hInstance, DWORD dwReason, LPVOID lpReserved)
{
    //@todo: Should we call DisableThreadLibraryCalls?  Or does this code
    // need native thread attach/detach notifications?
    // OnUnicodeSystem() presumably probes ANSI-vs-Unicode OS support for the
    // shared wrapper layer — confirm in the common utility code.  Called on
    // every reason code, which appears intentional/harmless here.
    OnUnicodeSystem();
    return TRUE;
}
|
achilex/MgDev | Common/PlatformBase/Services/ServiceRegistry.cpp | //
// Copyright (C) 2004-2011 by Autodesk, Inc.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of version 2.1 of the GNU Lesser
// General Public License as published by the Free Software Foundation.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
//
#include "PlatformBase.h"
#ifdef _WIN32
#include "windows.h"
#else
#include <dlfcn.h>
#endif
#include "Foundation.h"
// Process-wide MgServiceRegistry
Ptr<MgServiceRegistry> MgServiceRegistry::m_serviceRegistry = (MgServiceRegistry*)NULL;
// Constructs an empty registry; service creators are added later through
// RegisterService().
MgServiceRegistry::MgServiceRegistry()
{
}
// Returns the process-wide singleton, creating it lazily.  Uses the
// double-checked locking pattern guarded by the ACE static-object lock so
// concurrent first calls construct the registry exactly once.
MgServiceRegistry* MgServiceRegistry::GetInstance()
{
    MG_TRY()

    ACE_TRACE ("MgServiceRegistry::GetInstance");

    if (MgServiceRegistry::m_serviceRegistry == NULL)
    {
        // Perform Double-Checked Locking Optimization.
        ACE_MT (ACE_GUARD_RETURN (ACE_Recursive_Thread_Mutex, ace_mon, *ACE_Static_Object_Lock::instance (), 0));
        if (MgServiceRegistry::m_serviceRegistry == NULL)
        {
            MgServiceRegistry::m_serviceRegistry = new MgServiceRegistry();
        }
    }

    MG_CATCH_AND_THROW(L"MgServiceRegistry.GetInstance")

    // To avoid overhead and maintain thread safety,
    // do not assign this returned static singleton to a Ptr object.
    return MgServiceRegistry::m_serviceRegistry;
}
// Self-destruction hook used by the Mg disposal protocol: the object deletes
// itself rather than being deleted directly by callers.
void MgServiceRegistry::Dispose()
{
    delete this;
}
// Instantiates the service registered for (connType, serviceType) by calling
// its creator function.  Throws MgServiceNotSupportedException when no
// creator was registered for that combination.
MgService* MgServiceRegistry::CreateService(INT16 serviceType, ServerConnectionType connType)
{
    // map::operator[] default-constructs missing entries, so an unregistered
    // service simply yields a NULL creator function here.
    ServiceRegistry& creators = m_serviceCreators[connType];
    const ServiceCreatorFunc& creator = creators[serviceType];

    if (NULL == creator)
    {
        throw new MgServiceNotSupportedException(L"MgServiceRegistry.CreateService", __LINE__, __WFILE__, NULL, L"", NULL);
    }

    return (*creator)();
}
// Registers a creator function for (connType, serviceType), replacing any
// previously registered creator.  A NULL creator is rejected.
void MgServiceRegistry::RegisterService(INT16 serviceType, ServiceCreatorFunc creator, ServerConnectionType connType)
{
    // Validate the creator before touching the maps.
    if (NULL == creator)
    {
        throw new MgInvalidArgumentException(L"MgServiceRegistry.RegisterClass", __LINE__, __WFILE__, NULL, L"", NULL);
    }

    ServiceRegistry& creators = m_serviceCreators[connType];
    creators[serviceType] = creator;
}
// Run InitializeStaticPlatformData() once at library load time via a
// file-scope static initializer; the value of initStatic itself is unused.
static bool InitializeStaticPlatformData();
static bool initStatic = InitializeStaticPlatformData();

// Convenience macro for registering exception-class creators with the
// class factory (expects a local `fact` in the enclosing scope).
#define EXCEPTION_CLASS_CREATOR(className) \
fact->Register(PlatformBase_Exception_##className, className::CreateObject);
// Register every serializable PlatformBase class with the process-wide
// MgClassFactory so instances can be reconstructed during deserialization.
// Invoked once at static-initialization time via the `initStatic` initializer
// above. Always returns true (the value only exists to drive the static
// initializer).
bool InitializeStaticPlatformData()
{
    MgClassFactory* fact = MgClassFactory::GetInstance();

    //put in the map any class that can be serialized

    // Collection types
    fact->Register(PlatformBase_Collection_IntCollection, MgIntCollection::CreateObject);
    fact->Register(PlatformBase_Collection_PropertyDefinitionCollection, MgPropertyDefinitionCollection::CreateObject);
    fact->Register(PlatformBase_Collection_SerializableCollection, MgSerializableCollection::CreateObject);
    fact->Register(PlatformBase_Collection_StringPropertyCollection, MgStringPropertyCollection::CreateObject);
    fact->Register(PlatformBase_Collection_ParameterCollection, MgParameterCollection::CreateObject);

    // Basic data types
    fact->Register(PlatformBase_Data_Color, MgColor::CreateObject);
    fact->Register(PlatformBase_Data_Point3D, MgPoint3D::CreateObject);
    fact->Register(PlatformBase_Data_Size2D, MgSize2D::CreateObject);
    fact->Register(PlatformBase_Data_Margin, MgMargin::CreateObject);
    fact->Register(PlatformBase_Data_Vector3D, MgVector3D::CreateObject);

    // Property types
    fact->Register(PlatformBase_Property_PropertyDefinition, MgPropertyDefinition::CreateObject);
    fact->Register(PlatformBase_Property_BlobProperty, MgBlobProperty::CreateObject);
    fact->Register(PlatformBase_Property_BooleanProperty, MgBooleanProperty::CreateObject);
    fact->Register(PlatformBase_Property_ByteProperty, MgByteProperty::CreateObject);
    fact->Register(PlatformBase_Property_ClobProperty, MgClobProperty::CreateObject);
    fact->Register(PlatformBase_Property_DateTimeProperty, MgDateTimeProperty::CreateObject);
    fact->Register(PlatformBase_Property_DoubleProperty, MgDoubleProperty::CreateObject);
    fact->Register(PlatformBase_Property_Int16Property, MgInt16Property::CreateObject);
    fact->Register(PlatformBase_Property_Int32Property, MgInt32Property::CreateObject);
    fact->Register(PlatformBase_Property_Int64Property, MgInt64Property::CreateObject);
    fact->Register(PlatformBase_Property_SingleProperty, MgSingleProperty::CreateObject);

    // Service support types
    fact->Register(PlatformBase_Service_Warning, MgWarnings::CreateObject);

    // Feature-service types
    fact->Register(PlatformBase_FeatureService_ClassDefinition, MgClassDefinition::CreateObject);
    fact->Register(PlatformBase_FeatureService_DataPropertyDefinition, MgDataPropertyDefinition::CreateObject);
    fact->Register(PlatformBase_FeatureService_GeometricPropertyDefinition, MgGeometricPropertyDefinition::CreateObject);
    fact->Register(PlatformBase_FeatureService_GeometryProperty, MgGeometryProperty::CreateObject);
    fact->Register(PlatformBase_FeatureService_ObjectPropertyDefinition, MgObjectPropertyDefinition::CreateObject);
    fact->Register(PlatformBase_FeatureService_Parameter, MgParameter::CreateObject);
    fact->Register(PlatformBase_FeatureService_SpatialContextReader, MgSpatialContextReader::CreateObject);
    fact->Register(PlatformBase_FeatureService_FeatureSet, MgFeatureSet::CreateObject);
    fact->Register(PlatformBase_FeatureService_FeatureProperty, MgFeatureProperty::CreateObject);
    fact->Register(PlatformBase_FeatureService_SpatialContextData, MgSpatialContextData::CreateObject);
    fact->Register(PlatformBase_FeatureService_LongTransactionReader, MgLongTransactionReader::CreateObject);
    fact->Register(PlatformBase_FeatureService_LongTransactionData, MgLongTransactionData::CreateObject);
    fact->Register(PlatformBase_FeatureService_RasterPropertyDefinition, MgRasterPropertyDefinition::CreateObject);
    fact->Register(PlatformBase_FeatureService_Raster, MgRaster::CreateObject);
    fact->Register(PlatformBase_FeatureService_RasterProperty, MgRasterProperty::CreateObject);
    fact->Register(PlatformBase_FeatureService_FeatureQueryOptions, MgFeatureQueryOptions::CreateObject);
    fact->Register(PlatformBase_FeatureService_FeatureAggregateOptions, MgFeatureAggregateOptions::CreateObject);
    fact->Register(PlatformBase_FeatureService_FeatureCommandCollection, MgFeatureCommandCollection::CreateObject);
    fact->Register(PlatformBase_FeatureService_DeleteFeatures, MgDeleteFeatures::CreateObject);
    fact->Register(PlatformBase_FeatureService_InsertFeatures, MgInsertFeatures::CreateObject);
    fact->Register(PlatformBase_FeatureService_UpdateFeatures, MgUpdateFeatures::CreateObject);
    fact->Register(PlatformBase_FeatureService_LockFeatures, MgLockFeatures::CreateObject);
    fact->Register(PlatformBase_FeatureService_UnlockFeatures, MgUnlockFeatures::CreateObject);
    fact->Register(PlatformBase_FeatureService_FeatureSchema, MgFeatureSchema::CreateObject);
    fact->Register(PlatformBase_FeatureService_FeatureSchemaCollection, MgFeatureSchemaCollection::CreateObject);
    fact->Register(PlatformBase_FeatureService_ClassDefinitionCollection, MgClassDefinitionCollection::CreateObject);
    fact->Register(PlatformBase_FeatureService_FileFeatureSourceParams, MgFileFeatureSourceParams::CreateObject);

    // Resource-service types
    fact->Register(PlatformBase_ResourceService_ResourceIdentifier, MgResourceIdentifier::CreateObject);

    // Map/layer types
    fact->Register(PlatformBase_MapLayer_MapBase, MgMapBase::CreateObject);
    fact->Register(PlatformBase_MapLayer_LayerGroup, MgLayerGroup::CreateObject);
    fact->Register(PlatformBase_MapLayer_LayerBase, MgLayerBase::CreateObject);
    fact->Register(PlatformBase_MapLayer_SelectionBase, MgSelectionBase::CreateObject);

    // Print-layout types
    fact->Register(PlatformBase_PrintLayoutService_PrintLayoutBase, MgPrintLayoutBase::CreateObject);
    fact->Register(PlatformBase_PrintLayoutService_PrintLayoutElementBase, MgPrintLayoutElementBase::CreateObject);
    fact->Register(PlatformBase_PrintLayoutService_MapViewportBase, MgMapViewportBase::CreateObject);
    fact->Register(PlatformBase_PrintLayoutService_MapView, MgMapView::CreateObject);
    fact->Register(PlatformBase_PrintLayoutService_PropertyMapping, MgPropertyMapping::CreateObject);
    fact->Register(PlatformBase_PrintLayoutService_PrintLayoutElementCollection, MgPrintLayoutElementCollection::CreateObject);
    fact->Register(PlatformBase_PrintLayoutService_PropertyMappingCollection, MgPropertyMappingCollection::CreateObject);

    // Serializable exception classes (macro expands to a fact->Register call).
    EXCEPTION_CLASS_CREATOR(MgArrayTypeMismatchException)
    EXCEPTION_CLASS_CREATOR(MgDuplicateResourceDataException)
    EXCEPTION_CLASS_CREATOR(MgDuplicateResourceException)
    EXCEPTION_CLASS_CREATOR(MgEmptyFeatureSetException)
    EXCEPTION_CLASS_CREATOR(MgFdoException)
    EXCEPTION_CLASS_CREATOR(MgFeatureServiceException)
    EXCEPTION_CLASS_CREATOR(MgInvalidMapDefinitionException)
    EXCEPTION_CLASS_CREATOR(MgInvalidRepositoryNameException)
    EXCEPTION_CLASS_CREATOR(MgInvalidRepositoryTypeException)
    EXCEPTION_CLASS_CREATOR(MgInvalidResourceDataNameException)
    EXCEPTION_CLASS_CREATOR(MgInvalidResourceDataTypeException)
    EXCEPTION_CLASS_CREATOR(MgInvalidResourceNameException)
    EXCEPTION_CLASS_CREATOR(MgInvalidResourcePathException)
    EXCEPTION_CLASS_CREATOR(MgInvalidResourcePreProcessingTypeException)
    EXCEPTION_CLASS_CREATOR(MgInvalidResourceTypeException)
    EXCEPTION_CLASS_CREATOR(MgLayerNotFoundException)
    EXCEPTION_CLASS_CREATOR(MgResourceBusyException)
    EXCEPTION_CLASS_CREATOR(MgResourceDataNotFoundException)
    EXCEPTION_CLASS_CREATOR(MgResourceNotFoundException)
    EXCEPTION_CLASS_CREATOR(MgServiceNotAvailableException)
    EXCEPTION_CLASS_CREATOR(MgServiceNotSupportedException)
    EXCEPTION_CLASS_CREATOR(MgUserNotFoundException)

    return true;
}
|
areyliu6/watpo-book | resources/src/components/Reservation/Alert.js | <gh_stars>0
// Presentational component rendering a line of text with an optional red
// notice appended in parentheses.
//
// Props:
//   text   - main message; "\n" sequences render as line breaks because of
//            the `white-space: pre-line` style.
//   notice - optional remark shown in red as "(notice)"; omitted when falsy.
//
// NOTE(review): `React` is referenced but not imported in this file --
// presumably injected globally by the build configuration; confirm.
export default class Alert extends React.Component{
    render(){
        return ( <p style={{whiteSpace: "pre-line"}}>{this.props.text}<span style={{color: "red" }}>{this.props.notice?"("+this.props.notice+")":""}</span></p>);
    }
}
TeacherManoj0131/HacktoberFest2020-Contributions | Algorithms/Bubble Sort/bubble_sort.c | //C program for sorting the array using bubble sort
#include <stdio.h>
#include <stdlib.h>
void accept(int a[], int n);
void display(int a[], int n);
void bubble(int a[], int n);
/*
 * Entry point: reads the element count n, then n integers, sorts them with
 * bubble sort and prints the sorted sequence.
 *
 * Fixes: non-standard `void main` changed to standard `int main(void)`;
 * removed the unused local `i`; the count read from stdin is now validated
 * so an out-of-range n can never overrun the fixed a[100] buffer.
 */
int main(void)
{
    int n, a[100];

    printf("\n Enter the no of elements");
    if (scanf("%d", &n) != 1 || n < 0 || n > 100)
    {
        /* Reject missing or out-of-range counts (buffer holds 100 ints). */
        printf("\n Invalid number of elements");
        return 1;
    }

    accept(a, n);
    bubble(a, n);
    display(a, n);
    return 0;
}
/* Print the first n elements of a, each preceded by a tab, on one line. */
void display(int a[], int n)
{
    int idx;

    for (idx = 0; idx < n; idx++)
        printf("\t%d", a[idx]);
}
/*
 * Read n integers from standard input into a.
 *
 * Note: scanf results are not checked; a non-numeric token leaves a[i]
 * uninitialized and subsequent reads will also fail.
 */
void accept(int a[], int n)
{
    int i;
    for (i = 0; i < n; i++)
    {
        scanf("%d",&a[i]);
    }
}
/*
 * bubble - sort the first n elements of a into ascending order.
 *
 * Classic bubble sort with an early-exit optimization: if a full inner pass
 * performs no swaps the array is already sorted and the remaining passes are
 * skipped (best case O(n) on already-sorted input; worst case O(n^2)).
 * Safe for n <= 1 (the outer loop body never runs).
 */
void bubble(int a[], int n)
{
    int i, j, temp, swapped;

    for (i = 0; i < n - 1; i++)
    {
        swapped = 0;
        /* After pass i, the largest i+1 elements are in final position. */
        for (j = 0; j < n - i - 1; j++)
        {
            if (a[j] > a[j + 1])
            {
                temp = a[j];
                a[j] = a[j + 1];
                a[j + 1] = temp;
                swapped = 1;
            }
        }
        if (!swapped)
        {
            break; /* No swaps this pass: array is sorted. */
        }
    }
}
|
010001111/Vx-Suites | Win32.Rose.c/pnp.h | BOOL pnp(EXINFO exinfo); |
mueller/mysql-shell | unittest/scripts/js_dev_api_examples/statement_execution/Transaction_Handling.js | <reponame>mueller/mysql-shell
// Example: explicit transaction handling with the MySQL X DevAPI.
// Three documents are inserted atomically: either all are committed
// together or, on any error, all are rolled back together.
var mysqlx = require('mysqlx');

// Connect to server
var session = mysqlx.getSession( {
    host: 'localhost', port: 33060,
    user: 'mike', password: '<PASSWORD>' } );

// Get the Schema test
var db = session.getSchema('test');

// Create a new collection
var myColl = db.createCollection('my_collection');

// Start a transaction
session.startTransaction();
try {
    myColl.add({name: 'Jack', age: 15, height: 1.76, weight: 69.4}).execute();
    myColl.add({name: 'Susanne', age: 24, height: 1.65}).execute();
    myColl.add({name: 'Mike', age: 39, height: 1.9, weight: 74.3}).execute();

    // Commit the transaction if everything went well
    session.commit();

    print('Data inserted successfully.');
}
catch (err) {
    // Rollback the transaction in case of an error
    session.rollback();

    // Printing the error message
    print('Data could not be inserted: ' + err.message);
}
Andreas237/AndroidPolicyAutomation | ExtractedJars/Apk_Extractor_com.ext.ui.apk/javafiles/android/support/graphics/drawable/ArgbEvaluator.java | // Decompiled by Jad v1.5.8g. Copyright 2001 <NAME>.
// Jad home page: http://www.kpdus.com/jad.html
// Decompiler options: packimports(3) annotate safe
package android.support.graphics.drawable;
import android.animation.TypeEvaluator;
/**
 * Gamma-correct interpolator between two ARGB colors, each packed as an
 * {@code Integer} in 0xAARRGGBB form.
 *
 * <p>RGB channels are converted from gamma-encoded to linear light
 * (exponent 2.2) before mixing and re-encoded afterwards, which avoids the
 * muddy mid-tones produced by naive per-channel linear blending. Alpha is
 * interpolated linearly.
 *
 * <p>Cleaned up from decompiler output: bytecode annotation comments
 * removed and locals given meaningful names; the arithmetic is unchanged.
 */
public class ArgbEvaluator
    implements TypeEvaluator
{
    /** Shared instance; the evaluator is stateless, so one suffices. */
    private static final ArgbEvaluator sInstance = new ArgbEvaluator();

    public ArgbEvaluator()
    {
    }

    /**
     * Returns the shared, stateless evaluator instance.
     */
    public static ArgbEvaluator getInstance()
    {
        return sInstance;
    }

    /**
     * Interpolates between {@code startValue} and {@code endValue} (both
     * {@code Integer} ARGB colors) by the given fraction in [0, 1] and
     * returns the resulting color as an {@code Integer}.
     */
    public Object evaluate(float fraction, Object startValue, Object endValue)
    {
        int startInt = ((Integer) startValue).intValue();
        float startA = ((startInt >> 24) & 0xff) / 255F;
        float startR = ((startInt >> 16) & 0xff) / 255F;
        float startG = ((startInt >> 8) & 0xff) / 255F;
        float startB = (startInt & 0xff) / 255F;

        int endInt = ((Integer) endValue).intValue();
        float endA = ((endInt >> 24) & 0xff) / 255F;
        float endR = ((endInt >> 16) & 0xff) / 255F;
        float endG = ((endInt >> 8) & 0xff) / 255F;
        float endB = (endInt & 0xff) / 255F;

        // Convert RGB from gamma-encoded to linear light (alpha stays linear).
        startR = (float) Math.pow(startR, 2.2);
        startG = (float) Math.pow(startG, 2.2);
        startB = (float) Math.pow(startB, 2.2);

        endR = (float) Math.pow(endR, 2.2);
        endG = (float) Math.pow(endG, 2.2);
        endB = (float) Math.pow(endB, 2.2);

        // Interpolate each channel in linear space, then re-apply gamma.
        float a = startA + (endA - startA) * fraction;
        float r = (float) Math.pow(startR + (endR - startR) * fraction, 1.0 / 2.2);
        float g = (float) Math.pow(startG + (endG - startG) * fraction, 1.0 / 2.2);
        float b = (float) Math.pow(startB + (endB - startB) * fraction, 1.0 / 2.2);

        // Repack as 0xAARRGGBB.
        int alpha = Math.round(a * 255F);
        int red = Math.round(r * 255F);
        int green = Math.round(g * 255F);
        int blue = Math.round(b * 255F);
        return Integer.valueOf(blue | (alpha << 24 | red << 16 | green << 8));
    }
}
|
UnderratedDev/bon-ui | Sources/Views/Navigation/Router.js | <reponame>UnderratedDev/bon-ui
//
// Router.js
// Created on 18/04/2020
//
// Copyright (c) 2020 Teplovs
// This file is under Apache License v2.0
//
// See https://www.apache.org/licenses/LICENSE-2.0 for license information
//
import { View } from "../View"
import { Route } from "./Route"
/**
 * View that is used for routing: renders the body of the first Route whose
 * path matches the current web path.
 */
export class Router extends View {
    /**
     * @param {String} path Current web path
     * @param {Route[]} routes Routes for the app
     */
    constructor(path, routes) {
        super({ path, routes })
    }

    /**
     * Returns the body of the first matching route, or null when no
     * route matches.
     */
    getBody() {
        // for...of iterates the routes array directly; the previous
        // for...in form walked string keys (including any inherited
        // enumerable properties), which is fragile for arrays.
        for (const route of this.options.routes) {
            if (route instanceof Route && route.pathMatches(this.options.path)) {
                return route.getBody()
            }
        }

        return null
    }
}
|
oliv-yu/liferay-npm-tools | packages/liferay-npm-scripts/src/scripts/lint/stylelint/isSCSS.js | <filename>packages/liferay-npm-scripts/src/scripts/lint/stylelint/isSCSS.js<gh_stars>10-100
/**
* SPDX-FileCopyrightText: © 2019 Liferay, Inc. <https://liferay.com>
* SPDX-License-Identifier: BSD-3-Clause
*/
const hasExtension = require('../../../utils/hasExtension');
const EXTENSIONS = new Set(['.scss']);
/**
 * Returns `true` when `filePath` ends with a SCSS extension (".scss").
 *
 * @param {string} filePath path to test
 * @return {boolean}
 */
function isSCSS(filePath) {
	return hasExtension(filePath, EXTENSIONS);
}
module.exports = isSCSS;
|
T-binson/fullStack | typescript/interface.js | <filename>typescript/interface.js
// Builds a one-line description of a shape, computing its area as
// width * height. Extra properties on `shape` (e.g. color) are ignored.
function area(shape) {
    var description = "The shape is " + shape.name;
    description += " having an area of " + (shape.width * shape.height) + " cm squared";
    return description;
}
console.log(area({ name: "rectangle", width: 20, height: 15 }));
console.log(area({ name: 'square', width: 20, height: 25, color: 'blue' }));
|
pniekamp/datum-studio | src/plugins/particle/curveeditor.h | <reponame>pniekamp/datum-studio
//
// Curve Editor
//
//
// Copyright (C) 2017 <NAME>
//
#pragma once
#include "particlesystem.h"
#include "ui_curveeditor.h"
#include <QDialog>
//-------------------------- CurveEditor ------------------------------------
//---------------------------------------------------------------------------
// Modal editor for a particle-system value distribution. The distribution is
// presented as one of four types (constant, uniform, curve, uniform curve)
// and edited per component by dragging curve points in the paint area.
class CurveEditor : public QDialog
{
  Q_OBJECT

  public:
    CurveEditor(QWidget *parent = nullptr);

    // Read the edited distribution back out of the dialog.
    template<typename T>
    ParticleSystemDocument::Distribution<T> distribution();

    // Seed the editor with an existing distribution and its value range.
    template<typename T>
    void set_distribution(ParticleSystemDocument::Distribution<T> const &distribution, T const &minvalue, T const &maxvalue);

  signals:

    // Emitted whenever the user modifies the distribution.
    void distribution_changed();

  protected slots:

    void on_DistributionType_activated(int index);
    void on_ComponentList_currentRowChanged(int index);
    void on_ScaleMax_valueChanged(double value);
    void on_ScaleMax_editingFinished();

  protected:

    // Distribution type selector values (presumably matching the combo box
    // item order in the .ui file -- confirm).
    enum Type
    {
      Constant,
      Uniform,
      Curve,
      UniformCurve
    };

    // Rebuild the dialog's visual state from the current data.
    void update();

    // Qt event overrides implementing point selection/dragging and repaint.
    void keyPressEvent(QKeyEvent *event);
    void mousePressEvent(QMouseEvent *event);
    void mouseMoveEvent(QMouseEvent *event);
    void mouseReleaseEvent(QMouseEvent *event);
    void resizeEvent(QResizeEvent *event);
    void paintEvent(QPaintEvent *event);

  private:

    // One editable channel of the distribution (e.g. x/y/z).
    struct Component
    {
      // A single curve sampled at (xa, ya) pairs.
      struct Curve
      {
        std::vector<float> xa;   // sample positions
        std::vector<float> ya;   // sample values

        QPolygonF points;        // cached control points (presumably widget coords -- confirm)
        QPolygonF polyline;      // cached rendered polyline

        // Sampled value at parameter u (interpolation defined in the .cpp).
        float value(float u) const;
      };

      QString name;
      QColor color;

      float minvalue;   // lower bound of the component's value range
      float maxvalue;   // upper bound of the component's value range
      float scalemax;   // current display-scale maximum chosen by the user

      std::vector<Curve> curves;

      void add_curve(std::vector<float> const &xa, std::vector<float> const &ya);
    };

    std::vector<Component> m_components;

    void add_component(QString name, QColor const &color, float minvalue, float maxvalue);

    std::vector<float> timebase() const;

  private:

    Component *m_component;   // currently selected component
    int m_selectedcurve;      // index of the curve under edit
    int m_selectedindex;      // index of the selected point within that curve

    QPoint m_mousepresspos, m_mousemovepos;

    Ui::CurveEditor ui;
};
|
Blithe66/garage-server-dev | garage-server/src/main/java/com/yixin/garage/dto/AuditResultDTO.java | package com.yixin.garage.dto;
import java.io.Serializable;
import com.yixin.garage.dto.api.QywxBaseDTO;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.experimental.Accessors;
/**
 * <p>
 * Financing project audit result payload.
 * </p>
 *
 * @author lizhongxin
 * @since 2018-12-27
 */
@Data
@EqualsAndHashCode(callSuper = false)
@Accessors(chain = true)
@ApiModel
public class AuditResultDTO implements Serializable{

    private static final long serialVersionUID = 1L;

    /**
     * Identifier of the business record being audited.
     */
    @ApiModelProperty(value="业务记录id")
    private String bussId;

    /**
     * Audit outcome: {@code true} = approved, {@code false} = rejected.
     */
    @ApiModelProperty(value="审批结果 通过 true , 驳回false")
    private Boolean auditResult;

    /**
     * Reviewer's comment on the decision.
     */
    @ApiModelProperty(value="审批意见")
    private String remark;
}
|
lordbitin/TII-2018 | APFE/src/APFE/guard_conditions/GuardCondition.java | <reponame>lordbitin/TII-2018
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package APFE.guard_conditions;
import APFE.data_log.LogEntry;
import java.util.Collection;
import java.util.function.Predicate;
/**
 * Base class for guard conditions that are evaluated against a collection of
 * log entries.
 *
 * @author victor
 */
public abstract class GuardCondition {

    /**
     * Evaluates this condition against the given log entries.
     *
     * @param logEntries entries to inspect
     * @return {@code true} when the condition holds
     */
    public abstract boolean evaluate(Collection<LogEntry> logEntries);

    /**
     * Names of the variables this condition involves.
     *
     * <p>NOTE(review): the base implementation returns {@code null} rather
     * than an empty collection, so callers must null-check; confirm no
     * caller relies on {@code null} before changing this to
     * {@code Collections.emptyList()}.
     */
    public Collection<String> getInvolvedVariables() {
        return null;
    }
}
|
scheduleonce/rudder-transformer | v0/destinations/kustomer/config.js | <gh_stars>10-100
// Destination configuration for Kustomer.
const { getMappingConfig } = require("../../util");

// Base URL for all Kustomer API requests.
const BASE_ENDPOINT = "https://api.kustomerapp.com";

// Per-event-type mapping categories; each `name` keys a mapping definition
// that getMappingConfig resolves from files in this directory.
const CONFIG_CATEGORIES = {
  IDENTIFY: { name: "KustomerIdentify" },
  PAGE: { name: "KustomerPage" },
  SCREEN: { name: "KustomerScreen" },
  TRACK: { name: "KustomerTrack" }
};

// Field mappings resolved once at module load time.
const MAPPING_CONFIG = getMappingConfig(CONFIG_CATEGORIES, __dirname);

module.exports = {
  BASE_ENDPOINT,
  CONFIG_CATEGORIES,
  MAPPING_CONFIG
};
|
jackqk/mystudy | DrvApp/StlInclude/dloadsup.h | <filename>DrvApp/StlInclude/dloadsup.h
/*++
Copyright (c) 2014 Microsoft Corporation
Module Name:
dloadsup.h
Abstract:
This module implements downlevel support for read only delay load import
tables. (From nt source: minkernel\dload\helper\dloadsup.h)
For the non-explicitly-unloading delay load helper library :
DLOAD_UNLOAD is not defined or is defined to 0.
If the host OS supports delay load natively in the loader, then
the native loader support is invoked. Otherwise, the built-in support for
resolving delay load imports is invoked.
For the explicitly-unloading delay load helper library (the "VS" version) :
DLOAD_UNLOAD is defined to 1.
If the image load config directory marks the image as having a protected
delay load section, then on each delay load resolution request, the module
delay load protection is changed for the duration of the request (and then
changed back afterwards). Local support for delay loads is used so as to
retain compatibility with programs customizing the behavior of the delay
load notification hook (and the ability to perform an explicit module
unload).
Author:
<NAME> (kejohns) 4-Jul-2014
Revision History:
--*/
#pragma once
#ifndef _DLOAD_DLOADSUP_H
#define _DLOAD_DLOADSUP_H
#pragma warning(push)
#pragma warning(disable:4714) // forceinline function 'YieldProcessor' not inlined
#pragma warning(disable:28112) // Disable Prefast warning about variables being accessed through both interlocked and non-interlocked functions
#define DLOAD_INLINE __inline
#define FAST_FAIL_DLOAD_PROTECTION_FAILURE 25
#define IMAGE_GUARD_PROTECT_DELAYLOAD_IAT 0x00001000
#if DBG
#define DLOAD_ASSERT(_exp) \
((!(_exp)) ? \
(__annotation(L"Debug", L"AssertFail", L#_exp), \
DbgRaiseAssertionFailure(), FALSE) : \
TRUE)
#else
#define DLOAD_ASSERT(_exp) ((void) 0)
#endif
#if DLOAD_UNLOAD
//++
//
// ULONG
// DLOAD_BYTE_OFFSET (
// _In_ PVOID Va,
// _In_ SIZE_T PageSize
// )
//
// Routine Description:
//
// The DLOAD_BYTE_OFFSET macro takes a virtual address and returns the byte offset
// of that address within the page.
//
// Arguments:
//
// Va - Virtual address.
//
// PageSize - System page size in bytes.
//
// Return Value:
//
// Returns the byte offset portion of the virtual address.
//
//--
#define DLOAD_BYTE_OFFSET(Va, PageSize) ((ULONG)((LONG_PTR)(Va) & (PageSize - 1)))
//++
//
// ULONG
// ADDRESS_AND_SIZE_TO_SPAN_PAGES (
// _In_ PVOID Va,
// _In_ ULONG Size,
// _In_ SIZE_T PageSize
// )
//
// Routine Description:
//
// The ADDRESS_AND_SIZE_TO_SPAN_PAGES macro takes a virtual address and
// size and returns the number of pages spanned by the size.
//
// Arguments:
//
// Va - Virtual address.
//
// Size - Size in bytes.
//
// PageSize - System page size in bytes.
//
// Return Value:
//
// Returns the number of pages spanned by the size.
//
//--
#define DLOAD_ADDRESS_AND_SIZE_TO_SPAN_PAGES(Va,Size,PageSize) \
((ULONG)((((ULONG_PTR)(Size)) / PageSize) + ((DLOAD_BYTE_OFFSET (Va, PageSize) + DLOAD_BYTE_OFFSET (Size, PageSize) + PageSize - 1) / PageSize)))
#define SRWLOCK_UNINITIALIZED ((HMODULE)0x0)
#define SRWLOCK_UNSUPPORTED ((HMODULE)0x1)
typedef ULONG_PTR SRWLOCK_TYPE;
typedef
VOID
(NTAPI *
AcquireSRWLockExclusiveProc) (
_Inout_ _Acquires_exclusive_lock_(*SRWLock) SRWLOCK_TYPE *SRWLock
);
typedef
VOID
(NTAPI *
ReleaseSRWLockExclusiveProc) (
_Inout_ _Releases_exclusive_lock_(*SRWLock) SRWLOCK_TYPE *SRWLock
);
HMODULE DloadKernel32 = SRWLOCK_UNINITIALIZED;
AcquireSRWLockExclusiveProc DloadAcquireSRWLockExclusive;
ReleaseSRWLockExclusiveProc DloadReleaseSRWLockExclusive;
SRWLOCK_TYPE DloadSrwLock = 0x0;
ULONG DloadSectionLockCount;
DWORD DloadSectionOldProtection;
ULONG DloadSectionCommitPermanent;
extern "C" IMAGE_LOAD_CONFIG_DIRECTORY _load_config_used;
extern "C" const IMAGE_DOS_HEADER __ImageBase;
#else
#define DLOAD_UNSUPPORTED ((HMODULE)0x1)
typedef
PVOID
(NTAPI *
ResolveDelayLoadedAPIProc) (
_In_ PVOID ParentModuleBase,
_In_ PCIMAGE_DELAYLOAD_DESCRIPTOR DelayloadDescriptor,
_In_opt_ PDELAYLOAD_FAILURE_DLL_CALLBACK FailureDllHook,
_In_opt_ PDELAYLOAD_FAILURE_SYSTEM_ROUTINE FailureSystemHook,
_Out_ PIMAGE_THUNK_DATA ThunkAddress,
_Reserved_ ULONG Flags
);
typedef
NTSTATUS
(NTAPI *
ResolveDelayLoadsFromDllProc) (
_In_ PVOID ParentBase,
_In_ LPCSTR TargetDllName,
_Reserved_ ULONG Flags
);
HMODULE DloadKernel32;
ResolveDelayLoadedAPIProc DloadResolveDelayLoadedAPI;
ResolveDelayLoadsFromDllProc DloadResolveDelayLoadsFromDll;
#endif
//
// The following dload support APIs are used for the explicit-unloading version
// of the delay load helper.
//
#if DLOAD_UNLOAD
DLOAD_INLINE
BOOLEAN
DloadGetSRWLockFunctionPointers (
    VOID
    )

/*++

Routine Description:

    This function obtains pointers to SRWLock Acquire and Release
    functions.

Arguments:

    None.

Return Value:

    TRUE is returned as the function value if the host OS supports SRW locks
    and the function pointers have been initialized.

--*/

{

    FARPROC FunctionPointer;
    HMODULE Kernel32;
    HMODULE OldValue;

    //
    // DloadKernel32 is a tri-state cache: SRWLOCK_UNINITIALIZED (not yet
    // probed), SRWLOCK_UNSUPPORTED (probe failed), or the kernel32 module
    // handle (probe succeeded and the function pointers are valid).
    //

    Kernel32 = (HMODULE)ReadPointerAcquire((PVOID *) &DloadKernel32);
    if (Kernel32 == SRWLOCK_UNSUPPORTED) {
        return FALSE;
    }

    if (Kernel32 != NULL) {
        return TRUE;
    }

    //
    // First call on this path: probe kernel32 for the SRW lock exports.
    // Any failure marks the cache as unsupported.
    //

    Kernel32 = GetModuleHandleW(L"KERNEL32.DLL");
    if (Kernel32 == NULL) {
        Kernel32 = SRWLOCK_UNSUPPORTED;
        goto Done;
    }

    FunctionPointer = GetProcAddress(Kernel32, "AcquireSRWLockExclusive");
    if (FunctionPointer == NULL) {
        Kernel32 = SRWLOCK_UNSUPPORTED;
        goto Done;
    }

    DloadAcquireSRWLockExclusive = (AcquireSRWLockExclusiveProc)FunctionPointer;
    FunctionPointer = GetProcAddress(Kernel32, "ReleaseSRWLockExclusive");
    if (FunctionPointer == NULL) {
        Kernel32 = SRWLOCK_UNSUPPORTED;
        goto Done;
    }

    DloadReleaseSRWLockExclusive = (ReleaseSRWLockExclusiveProc)FunctionPointer;

Done:

    //
    // Publish the probe result, but only if no other thread published one
    // first; when racing, honor whichever result won the exchange.
    //

    OldValue = (HMODULE)InterlockedCompareExchangePointer((PVOID *)&DloadKernel32,
                                                          (PVOID)Kernel32,
                                                          SRWLOCK_UNINITIALIZED);

    if (((OldValue == SRWLOCK_UNINITIALIZED) &&
         (Kernel32 == SRWLOCK_UNSUPPORTED)) ||
        (OldValue == SRWLOCK_UNSUPPORTED)) {

        return FALSE;
    }

    return TRUE;
}
DLOAD_INLINE
VOID
DloadLock (
    VOID
    )

/*++

Routine Description:

    This function obtains the delay load unload lock.

Arguments:

    None.

Return Value:

    None.

--*/

{

    //
    // Use the OS SRW lock when kernel32 exports it.
    //

    if (DloadGetSRWLockFunctionPointers() != FALSE) {
        DloadAcquireSRWLockExclusive(&DloadSrwLock);
        return;
    }

    //
    // Downlevel fallback: spin until the lock word can be atomically
    // transitioned 0 -> 1, yielding the processor while it is held.
    //

    for ( ; ; ) {
        while (ReadPointerAcquire((PVOID *)&DloadSrwLock) != 0) {
            YieldProcessor();
        }

        if (InterlockedCompareExchangePointer((PVOID *)&DloadSrwLock, (PVOID)1, 0) == 0) {
            break;
        }
    }

    return;
}
DLOAD_INLINE
VOID
DloadUnlock (
    VOID
    )

/*++

Routine Description:

    This function releases the delay load unload lock.

Arguments:

    None.

Return Value:

    None.

--*/

{

    //
    // Release must mirror the acquisition path chosen in DloadLock: SRW
    // release when supported, otherwise clear the fallback spinlock word.
    //

    if (DloadGetSRWLockFunctionPointers() != FALSE) {
        DloadReleaseSRWLockExclusive(&DloadSrwLock);

    } else {
#pragma warning(suppress:6387) // Passing 0 is valid, but WritePointerRelease isn't annotated properly
        WritePointerRelease((PVOID *)&DloadSrwLock, 0);
    }

    return;
}
_Success_(return != nullptr)
DLOAD_INLINE
PVOID
DloadObtainSection (
    _Out_ PULONG SectionSize,
    _Out_ PULONG SectionCharacteristics
    )

/*++

Routine Description:

    This function locates the delay load import table section for the current
    module.

Arguments:

    SectionSize - Receives the size, in bytes, of the delay load import
                  section.

    SectionCharacteristics - Receives the section characteristics.

Return Value:

    A pointer to the delay load section base is returned, else NULL if the
    image does not require processing.

--*/

{

    PIMAGE_DATA_DIRECTORY DataDir;
    PCIMAGE_DELAYLOAD_DESCRIPTOR DloadDesc;
    ULONG Entries;
    PUCHAR ImageBase;
    ULONG Index;
    PIMAGE_NT_HEADERS NtHeaders;
    ULONG Rva;
    PIMAGE_SECTION_HEADER SectionHeader;

    ImageBase = (PUCHAR)&__ImageBase;
    NtHeaders = (PIMAGE_NT_HEADERS)(ImageBase + __ImageBase.e_lfanew);
    Entries = NtHeaders->OptionalHeader.NumberOfRvaAndSizes;

    //
    // The image's data directory does not even have a delay-import slot.
    //

    if (IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT >= Entries) {
        return NULL;
    }

    DataDir = &NtHeaders->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT];
    Rva = DataDir->VirtualAddress;

    //
    // The slot exists but is empty: no delay-load imports in this image.
    //

    if (Rva == 0) {
        return NULL;
    }

    //
    // Walk the section table looking for the section that contains the
    // first descriptor's delay-load import address table.
    //

    DloadDesc = (PCIMAGE_DELAYLOAD_DESCRIPTOR)(ImageBase + Rva);
    SectionHeader = IMAGE_FIRST_SECTION(NtHeaders);
    Rva = DloadDesc->ImportAddressTableRVA;
    for (Index = 0;
         Index < NtHeaders->FileHeader.NumberOfSections;
         Index += 1, SectionHeader += 1) {

        if ((Rva >= SectionHeader->VirtualAddress) &&
            (Rva < SectionHeader->VirtualAddress + SectionHeader->Misc.VirtualSize)) {

            *SectionSize = SectionHeader->Misc.VirtualSize;
            *SectionCharacteristics = SectionHeader->Characteristics;
            return ImageBase + SectionHeader->VirtualAddress;
        }
    }

    return NULL;
}
DECLSPEC_NOINLINE
DLOAD_INLINE
VOID
DloadMakePermanentImageCommit (
    _Inout_updates_ (Length) PVOID BaseAddress,
    _In_ SIZE_T Length
    )

/*++

Routine Description:

    This routine takes a set of image pages that are protected as writable
    (i.e. which have commit precharged) and performs a dummy write to each
    contained page so that the commit currently charged cannot be released by
    memory management if any pages were not yet dirtied when they are
    protected to PAGE_READONLY.

    This avoids a low resources failure path during future reprotections where
    the region spanned will be repeatedly toggled between PAGE_READWRITE and
    PAGE_READONLY.

    N.B. This routine is called with delay load unload lock held and returns
         with the delay load lock held (i.e., there can be no competing call
         to change the section protection).

Arguments:

    BaseAddress - Supplies the base of the region of pages.

    Length - Supplies the length, in bytes, of the region.

Return Value:

    None.

--*/

{

    MEMORY_BASIC_INFORMATION MemoryInfo;
    PUCHAR Page;
    SIZE_T PageCount;
    SIZE_T PageSize;
    SYSTEM_INFO SystemInfo;

    //
    // Determine if the delay load section is already read only. If so, i.e.
    // the loader was not enlightened to support protected delay load
    // internally, then ensure that commit precharged for the writable section
    // is made permanent before the section is made read only. Otherwise,
    // future calls to VirtualProtect from PAGE_READONLY to PAGE_READWRITE
    // could require memory management to obtain additional commit charges -
    // which could fail in low resources conditions. In order to maintain
    // existing contractural behaviors with delay loads it is necessary to not
    // introduce additional low resources falure paths with protected delay
    // load support.
    //
    // N.B. This call to VirtualQuery cannot fail without an invalid argument
    //      which should not be possible as all arguments are sourced from
    //      the image header which is constant data.
    //

    if (VirtualQuery(BaseAddress, &MemoryInfo, sizeof(MemoryInfo)) == 0) {
        DLOAD_ASSERT(FALSE);
        __fastfail(FAST_FAIL_DLOAD_PROTECTION_FAILURE);
    }

    //
    // If the page is not writeable then the loader has already assumed
    // responsibility for charging commit. In this case, nothing must be done
    // to ensure forward progress.
    //

    if ((MemoryInfo.Protect & (PAGE_READWRITE | PAGE_EXECUTE_READWRITE)) == 0) {
        return;
    }

    GetSystemInfo(&SystemInfo);
    PageSize = SystemInfo.dwPageSize;

    //
    // The loader is not guaranteed to have charged commit. Access every page
    // in the delay import section to make the commit already precharged
    // "permanent", such that it cannot be revoked by memory management once
    // the pages are transitioned from PAGE_READWRITE to PAGE_READONLY. A
    // dummy write will dirty each page in succession so that the copy on write
    // split happens now (which precludes memory management returning the
    // commit charges when the pages are returned to PAGE_READONLY protection).
    //

    PageCount = DLOAD_ADDRESS_AND_SIZE_TO_SPAN_PAGES(BaseAddress,
                                                     Length,
                                                     PageSize);

    //
    // InterlockedOr with 0 is a no-op on the value but still counts as a
    // write, forcing the copy-on-write split for each page.
    //

    Page = (PUCHAR)((ULONG_PTR)BaseAddress & ~(PageSize - 1));
    while (PageCount != 0) {
        InterlockedOr((PLONG)Page, 0);
        Page += PageSize;
        PageCount -= 1;
    }

    return;
}
DLOAD_INLINE
VOID
DloadProtectSection (
    _In_ ULONG Protection,
    _Out_ PULONG OldProtection
    )

/*++

Routine Description:

    This function changes the protection of the delay load section for this
    module.

    N.B. This function is called with the dload protection lock held and
         returns with the dload protection lock held.

Arguments:

    Protection - Supplies the new section protection.

    OldProtection - Receives the old section protection.

Return Value:

    None. This function does not fail.

--*/

{
    ULONG Characteristics;
    PVOID Section;
    ULONG Size;

    //
    // If the image has no delay load import section there is nothing to
    // protect.  PAGE_READWRITE is reported so that the matching restore call
    // is a harmless no-op.
    //
    Section = DloadObtainSection(&Size, &Characteristics);
    if (Section == NULL) {
        *OldProtection = PAGE_READWRITE;
        return;
    }

    //
    // Ensure that commit obtained by memory management for the read/write
    // state of the delay load import section cannot be released by a
    // VirtualProtect to a read only protection, before the first transition to
    // read only. This is required in order to ensure that future calls to
    // VirtualProtect are guaranteed to support forward progress under low
    // resources.
    //
    // N.B. The once-only flag is set before the work is performed; this is
    //      safe because the dload protection lock serializes all callers of
    //      this routine (see routine header).
    //
    if (DloadSectionCommitPermanent == 0) {
        DloadSectionCommitPermanent = 1;
        if ((Characteristics & IMAGE_SCN_MEM_WRITE) == 0) {

            //
            // This delay load helper module does not support merging the delay
            // load section to a read only section because memory management
            // would not guarantee that there is commit available - and thus a
            // low memory failure path where the delay load failure hook could
            // not be safely invoked (the delay load section would still be
            // read only) might be encountered.
            //
            // It is a build time configuration problem to produce such a
            // binary so abort here and now so that the problem can be
            // identified & fixed.
            //
            __fastfail(FAST_FAIL_DLOAD_PROTECTION_FAILURE);
        }

        DloadMakePermanentImageCommit(Section, Size);
    }

    //
    // Protect the delay load import section.
    //
    // N.B. This call cannot fail unless an argument is invalid and all
    //      arguments come from the image header.
    //
    if (VirtualProtect(Section, Size, Protection, OldProtection) == FALSE) {
        DLOAD_ASSERT(FALSE);
        __fastfail(FAST_FAIL_DLOAD_PROTECTION_FAILURE);
    }

    return;
}
DLOAD_INLINE
VOID
DloadAcquireSectionWriteAccess (
    VOID
    )

/*++

Routine Description:

    This function obtains write access to the delay load section. Until a
    matched call to DloadReleaseSectionAccess is made the section is still
    considered writeable.

    N.B. Acquisitions may nest: a lock count tracks outstanding holders and
         only the first (outermost) acquisition actually changes the section
         protection, saving the prior protection for the matching release.

Arguments:

    None.

Return Value:

    None.

--*/

{
    //
    // If protected delay load is not in use, there is nothing to do.
    //
    if ((_load_config_used.GuardFlags & IMAGE_GUARD_PROTECT_DELAYLOAD_IAT) == 0) {
        return;
    }

    //
    // Acquire the Dload protection lock for this module and change protection.
    //
    // Both the lock count and the saved old protection are guarded by the
    // dload lock.
    //
    DloadLock();
    DloadSectionLockCount += 1;
    if (DloadSectionLockCount == 1) {
        DloadProtectSection(PAGE_READWRITE, &DloadSectionOldProtection);
    }
    DloadUnlock();

    return;
}
DLOAD_INLINE
VOID
DloadReleaseSectionWriteAccess (
    VOID
    )

/*++

Routine Description:

    This function relinquishes write access to the delay load section.

    N.B. Must be paired one-to-one with DloadAcquireSectionWriteAccess; only
         the last (outermost) release restores the protection that was saved
         by the first acquisition.

Arguments:

    None.

Return Value:

    None.

--*/

{
    ULONG OldProtect;

    //
    // If protected delay load is not in use, there is nothing to do.
    //
    if ((_load_config_used.GuardFlags & IMAGE_GUARD_PROTECT_DELAYLOAD_IAT) == 0) {
        return;
    }

    //
    // Acquire the Dload protection lock for this module and change protection.
    //
    // The OldProtect received here is deliberately discarded; the value that
    // matters is the protection saved at acquisition time.
    //
    DloadLock();
    DloadSectionLockCount -= 1;
    if (DloadSectionLockCount == 0) {
        DloadProtectSection(DloadSectionOldProtection, &OldProtect);
    }
    DloadUnlock();

    return;
}
#else
//
// The following dload support APIs are used for the non-explicit-unloading
// version of the delay load helper.
//
DLOAD_INLINE
BOOLEAN
DloadResolve (
    VOID
    )

/*++

Routine Description:

    This function resolves support for native loader-based delay load handling.

    N.B. The result is cached in DloadKernel32 (with DLOAD_UNSUPPORTED as the
         cached negative result).  Concurrent first-time callers may race to
         resolve, which appears benign since all writers publish identical
         values; WritePointerRelease / ReadPointerAcquire order the function
         pointer stores before the cache publication.

Arguments:

    None.

Return Value:

    TRUE is returned as the function value if the host OS supports delay load
    in the loader (in which case all linkage to the host OS native support has
    been initialized).

--*/

{
    HMODULE Kernel32;

    //
    // Fast path: a previous call already resolved (or ruled out) support.
    //
    Kernel32 = (HMODULE)ReadPointerAcquire((PVOID *) &DloadKernel32);
    if (Kernel32 == DLOAD_UNSUPPORTED) {
        return FALSE;
    }

    if (Kernel32 != NULL) {
        return TRUE;
    }

    //
    // Prefer the delayload api set; fall back to kernel32 proper.  If neither
    // module is present, native support is permanently unavailable.
    //
    Kernel32 = GetModuleHandleW(L"api-ms-win-core-delayload-l1-1-1.dll");
    if (Kernel32 == NULL) {
        Kernel32 = GetModuleHandleW(L"KERNEL32.DLL");
        if (Kernel32 == NULL) {
            Kernel32 = DLOAD_UNSUPPORTED;
            goto Done;
        }
    }

    DloadResolveDelayLoadedAPI =
        (ResolveDelayLoadedAPIProc)GetProcAddress(Kernel32,
                                                  "ResolveDelayLoadedAPI");

    if (DloadResolveDelayLoadedAPI == NULL) {
        Kernel32 = DLOAD_UNSUPPORTED;
        goto Done;
    }

    DloadResolveDelayLoadsFromDll =
        (ResolveDelayLoadsFromDllProc)GetProcAddress(Kernel32,
                                                     "ResolveDelayLoadsFromDll");

    if (DloadResolveDelayLoadsFromDll == NULL) {
        Kernel32 = DLOAD_UNSUPPORTED;
        goto Done;
    }

Done:

    //
    // Publish the (possibly negative) result only after the resolved function
    // pointers are in place.
    //
    WritePointerRelease((PVOID *)&DloadKernel32, Kernel32);
    return (Kernel32 != DLOAD_UNSUPPORTED);
}
DLOAD_INLINE
PVOID
WINAPI
Dload__delayLoadHelper2 (
    _In_ PCIMAGE_DELAYLOAD_DESCRIPTOR DelayloadDescriptor,
    _Out_ PIMAGE_THUNK_DATA ThunkAddress,
    _Out_ PBOOLEAN NativeHandled
    )

/*++

Routine Description:

    This function is a thin wrapper around the loader functionality in ntdll to
    resolve a particular delayload thunk from a delayload descriptor.

Arguments:

    DelayloadDescriptor - Supplies a pointer to a structure that describes the
        module to be loaded in order to satisfy the delayed load.

    ThunkAddress - Supplies a pointer to the thunk to be filled in with the
        appropriate target function. This thunk pointer is used to find
        the specific name table entry of the function to be imported.

    NativeHandled - Receives TRUE if the implementation was handled by the
                    native loader, else FALSE if it was not handled by the
                    native loader and the caller must satisfy the request.

Return Value:

    Address of the import, or the failure stub for it.

--*/

{
    PVOID Symbol;

    //
    // If the host OS does not expose the native resolver, report the request
    // as unhandled; the caller is expected to fall back to its own
    // implementation (the NULL return is not meaningful in that case).
    //
    if (DloadResolve() == FALSE) {
        *NativeHandled = FALSE;
        return NULL;
    }

    *NativeHandled = TRUE;

    //
    // Delegate to the native resolver, forwarding both the per-DLL failure
    // hook and the final failure hook so existing hook contracts hold.  The
    // last argument is passed as zero (presumably reserved flags -- confirm
    // against the ResolveDelayLoadedAPI contract).
    //
    Symbol = DloadResolveDelayLoadedAPI((PVOID)&__ImageBase,
                                        DelayloadDescriptor,
                                        (PDELAYLOAD_FAILURE_DLL_CALLBACK)__pfnDliFailureHook2,
                                        DelayLoadFailureHook,
                                        ThunkAddress,
                                        0);

    return Symbol;
}
DLOAD_INLINE
HRESULT
WINAPI
Dload__HrLoadAllImportsForDll (
    _In_ LPCSTR DllName,
    _Out_ PBOOLEAN NativeHandled
    )

/*++

Routine Description:

    Thin wrapper over the ntdll loader support that eagerly resolves every
    delayload thunk in the current binary targeting the named module.

Arguments:

    DllName - Supplies the case-insensitive name of the delayloaded target
        module whose imports are to be resolved.

    NativeHandled - Receives TRUE if the implementation was handled by the
                    native loader, else FALSE if it was not handled by the
                    native loader and the caller must satisfy the request.

Return Value:

    HRESULT

--*/

{
    NTSTATUS ResolveStatus;

    if (DloadResolve() == FALSE) {
        *NativeHandled = FALSE;
        return HRESULT_FROM_WIN32(ERROR_MOD_NOT_FOUND);
    }

    *NativeHandled = TRUE;

    ResolveStatus = DloadResolveDelayLoadsFromDll((PVOID)&__ImageBase,
                                                  DllName,
                                                  0);

    //
    // Only a missing target module is surfaced as a failure; every other
    // status collapses to success.
    //
    return (ResolveStatus == STATUS_DLL_NOT_FOUND)
               ? HRESULT_FROM_WIN32(ERROR_MOD_NOT_FOUND)
               : S_OK;
}
#endif
#pragma warning(pop)
#endif
|
zhenlong1987/vagrant-2018 | test/unit/plugins/communicators/winrm/config_test.rb | require File.expand_path("../../../../base", __FILE__)
require Vagrant.source_root.join("plugins/communicators/winrm/config")
describe VagrantPlugins::CommunicatorWinRM::Config do
  subject { described_class.new }

  # Validation target; only doubled, never exercised by the config itself.
  let(:machine) { double("machine") }

  it "is valid by default" do
    subject.finalize!
    # A finalized default config must produce no WinRM validation errors.
    expect(subject.validate(machine)["WinRM"]).to be_empty
  end
end
|
GYosifov88/Python-Basics | Yard Greening.py | squares = float(input())
price = squares * 7.61
discount = price * 18 / 100
final_price = price - discount
print(f'The final price is: {final_price} lv.')
print(f'The discount is: {discount} lv.') |
vishalbelsare/jina | tests/unit/math/test_distance_tf.py | import numpy as np
import pytest
import paddle
import tensorflow as tf
from jina.math.distance.numpy import cosine as cosine_numpy, sqeuclidean as sqe_numpy
from jina.math.distance.tensorflow import cosine as cosine_tf
from jina.math.distance.paddle import sqeuclidean as sqe_paddle
@pytest.fixture
def get_a_b():
    """Yield two random float matrices, shapes (7, 100) and (9, 100)."""
    first = np.random.random([7, 100])
    second = np.random.random([9, 100])
    yield first, second
def test_cosine(get_a_b):
    """TensorFlow cosine distance must match the NumPy reference."""
    a, b = get_a_b
    # Renamed from `r_torch`: this test exercises the TensorFlow backend; the
    # old name was copy-paste residue from the torch variant of this test.
    r_tf = cosine_tf(tf.constant(a), tf.constant(b))
    r_numpy = cosine_numpy(a, b)
    np.testing.assert_almost_equal(r_tf, r_numpy)
def test_sqeuclidean(get_a_b):
    """Paddle squared-Euclidean distance must match the NumPy reference."""
    a, b = get_a_b
    # Renamed from `r_torch`: this branch exercises the Paddle backend; the
    # old name was copy-paste residue from the torch variant of this test.
    # NOTE(review): `paddle.Tensor(a)` relies on the Tensor constructor
    # accepting an ndarray; `paddle.to_tensor(a)` is the documented
    # conversion API -- confirm before relying on this.
    r_paddle = sqe_paddle(paddle.Tensor(a), paddle.Tensor(b))
    r_numpy = sqe_numpy(a, b)
    np.testing.assert_almost_equal(r_paddle, r_numpy)
|
rand4711/traceleft | vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1/clusters.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/cloud/dataproc/v1/clusters.proto
package dataproc // import "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import duration "github.com/golang/protobuf/ptypes/duration"
import timestamp "github.com/golang/protobuf/ptypes/timestamp"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import longrunning "google.golang.org/genproto/googleapis/longrunning"
import field_mask "google.golang.org/genproto/protobuf/field_mask"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
// (Generated guards; the blank assignments keep the imports above alive.)
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// The cluster state.
type ClusterStatus_State int32

const (
	// The cluster state is unknown.
	ClusterStatus_UNKNOWN ClusterStatus_State = 0
	// The cluster is being created and set up. It is not ready for use.
	ClusterStatus_CREATING ClusterStatus_State = 1
	// The cluster is currently running and healthy. It is ready for use.
	ClusterStatus_RUNNING ClusterStatus_State = 2
	// The cluster encountered an error. It is not ready for use.
	ClusterStatus_ERROR ClusterStatus_State = 3
	// The cluster is being deleted. It cannot be used.
	ClusterStatus_DELETING ClusterStatus_State = 4
	// The cluster is being updated. It continues to accept and process jobs.
	ClusterStatus_UPDATING ClusterStatus_State = 5
)

// Generated bidirectional lookup tables between wire values and proto
// enum names for ClusterStatus_State.
var ClusterStatus_State_name = map[int32]string{
	0: "UNKNOWN",
	1: "CREATING",
	2: "RUNNING",
	3: "ERROR",
	4: "DELETING",
	5: "UPDATING",
}
var ClusterStatus_State_value = map[string]int32{
	"UNKNOWN":  0,
	"CREATING": 1,
	"RUNNING":  2,
	"ERROR":    3,
	"DELETING": 4,
	"UPDATING": 5,
}

// String returns the proto name of the enum value (e.g. "RUNNING").
func (x ClusterStatus_State) String() string {
	return proto.EnumName(ClusterStatus_State_name, int32(x))
}

// EnumDescriptor returns the gzipped file descriptor together with the
// path of this enum within it.
func (ClusterStatus_State) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_29f9b85b3c7e3a5f, []int{8, 0}
}
// ClusterStatus_Substate carries finer-grained status detail; per the value
// comments below, the non-zero substates apply while the cluster is RUNNING.
type ClusterStatus_Substate int32

const (
	// No substate information is reported.
	ClusterStatus_UNSPECIFIED ClusterStatus_Substate = 0
	// The cluster is known to be in an unhealthy state
	// (for example, critical daemons are not running or HDFS capacity is
	// exhausted).
	//
	// Applies to RUNNING state.
	ClusterStatus_UNHEALTHY ClusterStatus_Substate = 1
	// The agent-reported status is out of date (may occur if
	// Cloud Dataproc loses communication with Agent).
	//
	// Applies to RUNNING state.
	ClusterStatus_STALE_STATUS ClusterStatus_Substate = 2
)

// Generated bidirectional lookup tables between wire values and proto
// enum names for ClusterStatus_Substate.
var ClusterStatus_Substate_name = map[int32]string{
	0: "UNSPECIFIED",
	1: "UNHEALTHY",
	2: "STALE_STATUS",
}
var ClusterStatus_Substate_value = map[string]int32{
	"UNSPECIFIED":  0,
	"UNHEALTHY":    1,
	"STALE_STATUS": 2,
}

// String returns the proto name of the enum value (e.g. "UNHEALTHY").
func (x ClusterStatus_Substate) String() string {
	return proto.EnumName(ClusterStatus_Substate_name, int32(x))
}

// EnumDescriptor returns the gzipped file descriptor together with the
// path of this enum within it.
func (ClusterStatus_Substate) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_29f9b85b3c7e3a5f, []int{8, 1}
}
// Describes the identifying information, config, and status of
// a cluster of Google Compute Engine instances.
type Cluster struct {
	// Required. The Google Cloud Platform project ID that the cluster belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The cluster name. Cluster names within a project must be
	// unique. Names of deleted clusters can be reused.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Required. The cluster config. Note that Cloud Dataproc may set
	// default values, and values may change when clusters are updated.
	Config *ClusterConfig `protobuf:"bytes,3,opt,name=config,proto3" json:"config,omitempty"`
	// Optional. The labels to associate with this cluster.
	// Label **keys** must contain 1 to 63 characters, and must conform to
	// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
	// Label **values** may be empty, but, if present, must contain 1 to 63
	// characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
	// No more than 32 labels can be associated with a cluster.
	Labels map[string]string `protobuf:"bytes,8,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// Output-only. Cluster status.
	Status *ClusterStatus `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"`
	// Output-only. The previous cluster status.
	StatusHistory []*ClusterStatus `protobuf:"bytes,7,rep,name=status_history,json=statusHistory,proto3" json:"status_history,omitempty"`
	// Output-only. A cluster UUID (Unique Universal Identifier). Cloud Dataproc
	// generates this value when it creates the cluster.
	ClusterUuid string `protobuf:"bytes,6,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
	// Contains cluster daemon metrics such as HDFS and YARN stats.
	//
	// **Beta Feature**: This report is available for testing purposes only. It may
	// be changed before final release.
	Metrics *ClusterMetrics `protobuf:"bytes,9,opt,name=metrics,proto3" json:"metrics,omitempty"`
	// Internal proto runtime bookkeeping; not part of the API surface.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// proto.Message boilerplate (generated).
func (m *Cluster) Reset()         { *m = Cluster{} }
func (m *Cluster) String() string { return proto.CompactTextString(m) }
func (*Cluster) ProtoMessage()    {}
func (*Cluster) Descriptor() ([]byte, []int) {
	return fileDescriptor_29f9b85b3c7e3a5f, []int{0}
}

// XXX_* methods delegate (un)marshaling to the table-driven proto runtime.
func (m *Cluster) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Cluster.Unmarshal(m, b)
}
func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Cluster.Marshal(b, m, deterministic)
}
func (m *Cluster) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Cluster.Merge(m, src)
}
func (m *Cluster) XXX_Size() int {
	return xxx_messageInfo_Cluster.Size(m)
}
func (m *Cluster) XXX_DiscardUnknown() {
	xxx_messageInfo_Cluster.DiscardUnknown(m)
}

var xxx_messageInfo_Cluster proto.InternalMessageInfo

// Generated getters; each returns the field's zero value on a nil receiver.
func (m *Cluster) GetProjectId() string {
	if m != nil {
		return m.ProjectId
	}
	return ""
}

func (m *Cluster) GetClusterName() string {
	if m != nil {
		return m.ClusterName
	}
	return ""
}

func (m *Cluster) GetConfig() *ClusterConfig {
	if m != nil {
		return m.Config
	}
	return nil
}

func (m *Cluster) GetLabels() map[string]string {
	if m != nil {
		return m.Labels
	}
	return nil
}

func (m *Cluster) GetStatus() *ClusterStatus {
	if m != nil {
		return m.Status
	}
	return nil
}

func (m *Cluster) GetStatusHistory() []*ClusterStatus {
	if m != nil {
		return m.StatusHistory
	}
	return nil
}

func (m *Cluster) GetClusterUuid() string {
	if m != nil {
		return m.ClusterUuid
	}
	return ""
}

func (m *Cluster) GetMetrics() *ClusterMetrics {
	if m != nil {
		return m.Metrics
	}
	return nil
}
// The cluster config.
type ClusterConfig struct {
	// Optional. A Google Cloud Storage staging bucket used for sharing generated
	// SSH keys and config. If you do not specify a staging bucket, Cloud
	// Dataproc will determine an appropriate Cloud Storage location (US,
	// ASIA, or EU) for your cluster's staging bucket according to the Google
	// Compute Engine zone where your cluster is deployed, and then it will create
	// and manage this project-level, per-location bucket for you.
	ConfigBucket string `protobuf:"bytes,1,opt,name=config_bucket,json=configBucket,proto3" json:"config_bucket,omitempty"`
	// Required. The shared Google Compute Engine config settings for
	// all instances in a cluster.
	GceClusterConfig *GceClusterConfig `protobuf:"bytes,8,opt,name=gce_cluster_config,json=gceClusterConfig,proto3" json:"gce_cluster_config,omitempty"`
	// Optional. The Google Compute Engine config settings for
	// the master instance in a cluster.
	MasterConfig *InstanceGroupConfig `protobuf:"bytes,9,opt,name=master_config,json=masterConfig,proto3" json:"master_config,omitempty"`
	// Optional. The Google Compute Engine config settings for
	// worker instances in a cluster.
	WorkerConfig *InstanceGroupConfig `protobuf:"bytes,10,opt,name=worker_config,json=workerConfig,proto3" json:"worker_config,omitempty"`
	// Optional. The Google Compute Engine config settings for
	// additional worker instances in a cluster.
	SecondaryWorkerConfig *InstanceGroupConfig `protobuf:"bytes,12,opt,name=secondary_worker_config,json=secondaryWorkerConfig,proto3" json:"secondary_worker_config,omitempty"`
	// Optional. The config settings for software inside the cluster.
	SoftwareConfig *SoftwareConfig `protobuf:"bytes,13,opt,name=software_config,json=softwareConfig,proto3" json:"software_config,omitempty"`
	// Optional. Commands to execute on each node after config is
	// completed. By default, executables are run on master and all worker nodes.
	// You can test a node's `role` metadata to run an executable on
	// a master or worker node, as shown below using `curl` (you can also use `wget`):
	//
	//     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)
	//     if [[ "${ROLE}" == 'Master' ]]; then
	//       ... master specific actions ...
	//     else
	//       ... worker specific actions ...
	//     fi
	InitializationActions []*NodeInitializationAction `protobuf:"bytes,11,rep,name=initialization_actions,json=initializationActions,proto3" json:"initialization_actions,omitempty"`
	// Internal proto runtime bookkeeping; not part of the API surface.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// proto.Message boilerplate (generated).
func (m *ClusterConfig) Reset()         { *m = ClusterConfig{} }
func (m *ClusterConfig) String() string { return proto.CompactTextString(m) }
func (*ClusterConfig) ProtoMessage()    {}
func (*ClusterConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_29f9b85b3c7e3a5f, []int{1}
}

// XXX_* methods delegate (un)marshaling to the table-driven proto runtime.
func (m *ClusterConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ClusterConfig.Unmarshal(m, b)
}
func (m *ClusterConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ClusterConfig.Marshal(b, m, deterministic)
}
func (m *ClusterConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ClusterConfig.Merge(m, src)
}
func (m *ClusterConfig) XXX_Size() int {
	return xxx_messageInfo_ClusterConfig.Size(m)
}
func (m *ClusterConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_ClusterConfig.DiscardUnknown(m)
}

var xxx_messageInfo_ClusterConfig proto.InternalMessageInfo

// Generated getters; each returns the field's zero value on a nil receiver.
func (m *ClusterConfig) GetConfigBucket() string {
	if m != nil {
		return m.ConfigBucket
	}
	return ""
}

func (m *ClusterConfig) GetGceClusterConfig() *GceClusterConfig {
	if m != nil {
		return m.GceClusterConfig
	}
	return nil
}

func (m *ClusterConfig) GetMasterConfig() *InstanceGroupConfig {
	if m != nil {
		return m.MasterConfig
	}
	return nil
}

func (m *ClusterConfig) GetWorkerConfig() *InstanceGroupConfig {
	if m != nil {
		return m.WorkerConfig
	}
	return nil
}

func (m *ClusterConfig) GetSecondaryWorkerConfig() *InstanceGroupConfig {
	if m != nil {
		return m.SecondaryWorkerConfig
	}
	return nil
}

func (m *ClusterConfig) GetSoftwareConfig() *SoftwareConfig {
	if m != nil {
		return m.SoftwareConfig
	}
	return nil
}

func (m *ClusterConfig) GetInitializationActions() []*NodeInitializationAction {
	if m != nil {
		return m.InitializationActions
	}
	return nil
}
// Common config settings for resources of Google Compute Engine cluster
// instances, applicable to all instances in the cluster.
type GceClusterConfig struct {
	// Optional. The zone where the Google Compute Engine cluster will be located.
	// On a create request, it is required in the "global" region. If omitted
	// in a non-global Cloud Dataproc region, the service will pick a zone in the
	// corresponding Compute Engine region. On a get request, zone will
	// always be present.
	//
	// A full URL, partial URI, or short name are valid. Examples:
	//
	// * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`
	// * `projects/[project_id]/zones/[zone]`
	// * `us-central1-f`
	ZoneUri string `protobuf:"bytes,1,opt,name=zone_uri,json=zoneUri,proto3" json:"zone_uri,omitempty"`
	// Optional. The Google Compute Engine network to be used for machine
	// communications. Cannot be specified with subnetwork_uri. If neither
	// `network_uri` nor `subnetwork_uri` is specified, the "default" network of
	// the project is used, if it exists. Cannot be a "Custom Subnet Network" (see
	// [Using Subnetworks](/compute/docs/subnetworks) for more information).
	//
	// A full URL, partial URI, or short name are valid. Examples:
	//
	// * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`
	// * `projects/[project_id]/regions/global/default`
	// * `default`
	NetworkUri string `protobuf:"bytes,2,opt,name=network_uri,json=networkUri,proto3" json:"network_uri,omitempty"`
	// Optional. The Google Compute Engine subnetwork to be used for machine
	// communications. Cannot be specified with network_uri.
	//
	// A full URL, partial URI, or short name are valid. Examples:
	//
	// * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0`
	// * `projects/[project_id]/regions/us-east1/sub0`
	// * `sub0`
	SubnetworkUri string `protobuf:"bytes,6,opt,name=subnetwork_uri,json=subnetworkUri,proto3" json:"subnetwork_uri,omitempty"`
	// Optional. If true, all instances in the cluster will only have internal IP
	// addresses. By default, clusters are not restricted to internal IP addresses,
	// and will have ephemeral external IP addresses assigned to each instance.
	// This `internal_ip_only` restriction can only be enabled for subnetwork
	// enabled networks, and all off-cluster dependencies must be configured to be
	// accessible without external IP addresses.
	InternalIpOnly bool `protobuf:"varint,7,opt,name=internal_ip_only,json=internalIpOnly,proto3" json:"internal_ip_only,omitempty"`
	// Optional. The service account of the instances. Defaults to the default
	// Google Compute Engine service account. Custom service accounts need
	// permissions equivalent to the following IAM roles:
	//
	// * roles/logging.logWriter
	// * roles/storage.objectAdmin
	//
	// (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts
	// for more information).
	// Example: `[account_id]@[project_id].iam.gserviceaccount.com`
	ServiceAccount string `protobuf:"bytes,8,opt,name=service_account,json=serviceAccount,proto3" json:"service_account,omitempty"`
	// Optional. The URIs of service account scopes to be included in Google
	// Compute Engine instances. The following base set of scopes is always
	// included:
	//
	// * https://www.googleapis.com/auth/cloud.useraccounts.readonly
	// * https://www.googleapis.com/auth/devstorage.read_write
	// * https://www.googleapis.com/auth/logging.write
	//
	// If no scopes are specified, the following defaults are also provided:
	//
	// * https://www.googleapis.com/auth/bigquery
	// * https://www.googleapis.com/auth/bigtable.admin.table
	// * https://www.googleapis.com/auth/bigtable.data
	// * https://www.googleapis.com/auth/devstorage.full_control
	ServiceAccountScopes []string `protobuf:"bytes,3,rep,name=service_account_scopes,json=serviceAccountScopes,proto3" json:"service_account_scopes,omitempty"`
	// The Google Compute Engine tags to add to all instances (see
	// [Tagging instances](/compute/docs/label-or-tag-resources#tags)).
	Tags []string `protobuf:"bytes,4,rep,name=tags,proto3" json:"tags,omitempty"`
	// The Google Compute Engine metadata entries to add to all instances (see
	// [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
	Metadata map[string]string `protobuf:"bytes,5,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// Internal proto runtime bookkeeping; not part of the API surface.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// proto.Message boilerplate (generated).
func (m *GceClusterConfig) Reset()         { *m = GceClusterConfig{} }
func (m *GceClusterConfig) String() string { return proto.CompactTextString(m) }
func (*GceClusterConfig) ProtoMessage()    {}
func (*GceClusterConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_29f9b85b3c7e3a5f, []int{2}
}

// XXX_* methods delegate (un)marshaling to the table-driven proto runtime.
func (m *GceClusterConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GceClusterConfig.Unmarshal(m, b)
}
func (m *GceClusterConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GceClusterConfig.Marshal(b, m, deterministic)
}
func (m *GceClusterConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GceClusterConfig.Merge(m, src)
}
func (m *GceClusterConfig) XXX_Size() int {
	return xxx_messageInfo_GceClusterConfig.Size(m)
}
func (m *GceClusterConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_GceClusterConfig.DiscardUnknown(m)
}

var xxx_messageInfo_GceClusterConfig proto.InternalMessageInfo

// Generated getters; each returns the field's zero value on a nil receiver.
func (m *GceClusterConfig) GetZoneUri() string {
	if m != nil {
		return m.ZoneUri
	}
	return ""
}

func (m *GceClusterConfig) GetNetworkUri() string {
	if m != nil {
		return m.NetworkUri
	}
	return ""
}

func (m *GceClusterConfig) GetSubnetworkUri() string {
	if m != nil {
		return m.SubnetworkUri
	}
	return ""
}

func (m *GceClusterConfig) GetInternalIpOnly() bool {
	if m != nil {
		return m.InternalIpOnly
	}
	return false
}

func (m *GceClusterConfig) GetServiceAccount() string {
	if m != nil {
		return m.ServiceAccount
	}
	return ""
}

func (m *GceClusterConfig) GetServiceAccountScopes() []string {
	if m != nil {
		return m.ServiceAccountScopes
	}
	return nil
}

func (m *GceClusterConfig) GetTags() []string {
	if m != nil {
		return m.Tags
	}
	return nil
}

func (m *GceClusterConfig) GetMetadata() map[string]string {
	if m != nil {
		return m.Metadata
	}
	return nil
}
// Optional. The config settings for Google Compute Engine resources in
// an instance group, such as a master or worker group.
type InstanceGroupConfig struct {
	// Optional. The number of VM instances in the instance group.
	// For master instance groups, must be set to 1.
	NumInstances int32 `protobuf:"varint,1,opt,name=num_instances,json=numInstances,proto3" json:"num_instances,omitempty"`
	// Optional. The list of instance names. Cloud Dataproc derives the names from
	// `cluster_name`, `num_instances`, and the instance group if not set by user
	// (recommended practice is to let Cloud Dataproc derive the name).
	InstanceNames []string `protobuf:"bytes,2,rep,name=instance_names,json=instanceNames,proto3" json:"instance_names,omitempty"`
	// Output-only. The Google Compute Engine image resource used for cluster
	// instances. Inferred from `SoftwareConfig.image_version`.
	ImageUri string `protobuf:"bytes,3,opt,name=image_uri,json=imageUri,proto3" json:"image_uri,omitempty"`
	// Optional. The Google Compute Engine machine type used for cluster instances.
	//
	// A full URL, partial URI, or short name are valid. Examples:
	//
	// * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
	// * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
	// * `n1-standard-2`
	MachineTypeUri string `protobuf:"bytes,4,opt,name=machine_type_uri,json=machineTypeUri,proto3" json:"machine_type_uri,omitempty"`
	// Optional. Disk option config settings.
	DiskConfig *DiskConfig `protobuf:"bytes,5,opt,name=disk_config,json=diskConfig,proto3" json:"disk_config,omitempty"`
	// Optional. Specifies that this instance group contains preemptible instances.
	IsPreemptible bool `protobuf:"varint,6,opt,name=is_preemptible,json=isPreemptible,proto3" json:"is_preemptible,omitempty"`
	// Output-only. The config for Google Compute Engine Instance Group
	// Manager that manages this group.
	// This is only used for preemptible instance groups.
	ManagedGroupConfig *ManagedGroupConfig `protobuf:"bytes,7,opt,name=managed_group_config,json=managedGroupConfig,proto3" json:"managed_group_config,omitempty"`
	// Optional. The Google Compute Engine accelerator configuration for these
	// instances.
	//
	// **Beta Feature**: This feature is still under development. It may be
	// changed before final release.
	Accelerators []*AcceleratorConfig `protobuf:"bytes,8,rep,name=accelerators,proto3" json:"accelerators,omitempty"`
	// Internal proto runtime bookkeeping; not part of the API surface.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// proto.Message boilerplate (generated).
func (m *InstanceGroupConfig) Reset()         { *m = InstanceGroupConfig{} }
func (m *InstanceGroupConfig) String() string { return proto.CompactTextString(m) }
func (*InstanceGroupConfig) ProtoMessage()    {}
func (*InstanceGroupConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_29f9b85b3c7e3a5f, []int{3}
}

// XXX_* methods delegate (un)marshaling to the table-driven proto runtime.
func (m *InstanceGroupConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_InstanceGroupConfig.Unmarshal(m, b)
}
func (m *InstanceGroupConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_InstanceGroupConfig.Marshal(b, m, deterministic)
}
func (m *InstanceGroupConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_InstanceGroupConfig.Merge(m, src)
}
func (m *InstanceGroupConfig) XXX_Size() int {
	return xxx_messageInfo_InstanceGroupConfig.Size(m)
}
func (m *InstanceGroupConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_InstanceGroupConfig.DiscardUnknown(m)
}

var xxx_messageInfo_InstanceGroupConfig proto.InternalMessageInfo

// Generated getters; each returns the field's zero value on a nil receiver.
func (m *InstanceGroupConfig) GetNumInstances() int32 {
	if m != nil {
		return m.NumInstances
	}
	return 0
}

func (m *InstanceGroupConfig) GetInstanceNames() []string {
	if m != nil {
		return m.InstanceNames
	}
	return nil
}

func (m *InstanceGroupConfig) GetImageUri() string {
	if m != nil {
		return m.ImageUri
	}
	return ""
}

func (m *InstanceGroupConfig) GetMachineTypeUri() string {
	if m != nil {
		return m.MachineTypeUri
	}
	return ""
}

func (m *InstanceGroupConfig) GetDiskConfig() *DiskConfig {
	if m != nil {
		return m.DiskConfig
	}
	return nil
}

func (m *InstanceGroupConfig) GetIsPreemptible() bool {
	if m != nil {
		return m.IsPreemptible
	}
	return false
}

func (m *InstanceGroupConfig) GetManagedGroupConfig() *ManagedGroupConfig {
	if m != nil {
		return m.ManagedGroupConfig
	}
	return nil
}

func (m *InstanceGroupConfig) GetAccelerators() []*AcceleratorConfig {
	if m != nil {
		return m.Accelerators
	}
	return nil
}
// Specifies the resources used to actively manage an instance group.
type ManagedGroupConfig struct {
	// Output-only. The name of the Instance Template used for the Managed
	// Instance Group.
	InstanceTemplateName string `protobuf:"bytes,1,opt,name=instance_template_name,json=instanceTemplateName,proto3" json:"instance_template_name,omitempty"`
	// Output-only. The name of the Instance Group Manager for this group.
	InstanceGroupManagerName string `protobuf:"bytes,2,opt,name=instance_group_manager_name,json=instanceGroupManagerName,proto3" json:"instance_group_manager_name,omitempty"`
	// Internal proto runtime bookkeeping; not part of the API surface.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// proto.Message boilerplate (generated).
func (m *ManagedGroupConfig) Reset()         { *m = ManagedGroupConfig{} }
func (m *ManagedGroupConfig) String() string { return proto.CompactTextString(m) }
func (*ManagedGroupConfig) ProtoMessage()    {}
func (*ManagedGroupConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_29f9b85b3c7e3a5f, []int{4}
}

// XXX_* methods delegate (un)marshaling to the table-driven proto runtime.
func (m *ManagedGroupConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ManagedGroupConfig.Unmarshal(m, b)
}
func (m *ManagedGroupConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ManagedGroupConfig.Marshal(b, m, deterministic)
}
func (m *ManagedGroupConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ManagedGroupConfig.Merge(m, src)
}
func (m *ManagedGroupConfig) XXX_Size() int {
	return xxx_messageInfo_ManagedGroupConfig.Size(m)
}
func (m *ManagedGroupConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_ManagedGroupConfig.DiscardUnknown(m)
}

var xxx_messageInfo_ManagedGroupConfig proto.InternalMessageInfo

// Generated getters; each returns the field's zero value on a nil receiver.
func (m *ManagedGroupConfig) GetInstanceTemplateName() string {
	if m != nil {
		return m.InstanceTemplateName
	}
	return ""
}

func (m *ManagedGroupConfig) GetInstanceGroupManagerName() string {
	if m != nil {
		return m.InstanceGroupManagerName
	}
	return ""
}
// Specifies the type and number of accelerator cards attached to the instances
// of an instance group (see [GPUs on Compute Engine](/compute/docs/gpus/)).
type AcceleratorConfig struct {
	// Full URL, partial URI, or short name of the accelerator type resource to
	// expose to this instance. See [Google Compute Engine AcceleratorTypes](
	// /compute/docs/reference/beta/acceleratorTypes)
	//
	// Examples
	// * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
	// * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
	// * `nvidia-tesla-k80`
	AcceleratorTypeUri string `protobuf:"bytes,1,opt,name=accelerator_type_uri,json=acceleratorTypeUri,proto3" json:"accelerator_type_uri,omitempty"`
	// The number of the accelerator cards of this type exposed to this instance.
	AcceleratorCount int32 `protobuf:"varint,2,opt,name=accelerator_count,json=acceleratorCount,proto3" json:"accelerator_count,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset, String, and ProtoMessage implement the proto.Message interface.
func (m *AcceleratorConfig) Reset()         { *m = AcceleratorConfig{} }
func (m *AcceleratorConfig) String() string { return proto.CompactTextString(m) }
func (*AcceleratorConfig) ProtoMessage()    {}

// Descriptor returns the generated file descriptor bytes and the index path
// of AcceleratorConfig (message 5) within that file.
func (*AcceleratorConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_29f9b85b3c7e3a5f, []int{5}
}

// XXX_* methods are internal proto-runtime hooks; not part of the public API.
func (m *AcceleratorConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AcceleratorConfig.Unmarshal(m, b)
}
func (m *AcceleratorConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AcceleratorConfig.Marshal(b, m, deterministic)
}
func (m *AcceleratorConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AcceleratorConfig.Merge(m, src)
}
func (m *AcceleratorConfig) XXX_Size() int {
	return xxx_messageInfo_AcceleratorConfig.Size(m)
}
func (m *AcceleratorConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_AcceleratorConfig.DiscardUnknown(m)
}

// xxx_messageInfo_AcceleratorConfig caches (un)marshal metadata for AcceleratorConfig.
var xxx_messageInfo_AcceleratorConfig proto.InternalMessageInfo

// Nil-receiver-safe accessors: return the zero value when m is nil.
func (m *AcceleratorConfig) GetAcceleratorTypeUri() string {
	if m != nil {
		return m.AcceleratorTypeUri
	}
	return ""
}
func (m *AcceleratorConfig) GetAcceleratorCount() int32 {
	if m != nil {
		return m.AcceleratorCount
	}
	return 0
}
// Specifies the config of disk options for a group of VM instances.
type DiskConfig struct {
	// Optional. Size in GB of the boot disk (default is 500GB).
	BootDiskSizeGb int32 `protobuf:"varint,1,opt,name=boot_disk_size_gb,json=bootDiskSizeGb,proto3" json:"boot_disk_size_gb,omitempty"`
	// Optional. Number of attached SSDs, from 0 to 4 (default is 0).
	// If SSDs are not attached, the boot disk is used to store runtime logs and
	// [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data.
	// If one or more SSDs are attached, this runtime bulk
	// data is spread across them, and the boot disk contains only basic
	// config and installed binaries.
	NumLocalSsds int32 `protobuf:"varint,2,opt,name=num_local_ssds,json=numLocalSsds,proto3" json:"num_local_ssds,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset, String, and ProtoMessage implement the proto.Message interface.
func (m *DiskConfig) Reset()         { *m = DiskConfig{} }
func (m *DiskConfig) String() string { return proto.CompactTextString(m) }
func (*DiskConfig) ProtoMessage()    {}

// Descriptor returns the generated file descriptor bytes and the index path
// of DiskConfig (message 6) within that file.
func (*DiskConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_29f9b85b3c7e3a5f, []int{6}
}

// XXX_* methods are internal proto-runtime hooks; not part of the public API.
func (m *DiskConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_DiskConfig.Unmarshal(m, b)
}
func (m *DiskConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_DiskConfig.Marshal(b, m, deterministic)
}
func (m *DiskConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_DiskConfig.Merge(m, src)
}
func (m *DiskConfig) XXX_Size() int {
	return xxx_messageInfo_DiskConfig.Size(m)
}
func (m *DiskConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_DiskConfig.DiscardUnknown(m)
}

// xxx_messageInfo_DiskConfig caches (un)marshal metadata for DiskConfig.
var xxx_messageInfo_DiskConfig proto.InternalMessageInfo

// Nil-receiver-safe accessors: return 0 when m is nil.
func (m *DiskConfig) GetBootDiskSizeGb() int32 {
	if m != nil {
		return m.BootDiskSizeGb
	}
	return 0
}
func (m *DiskConfig) GetNumLocalSsds() int32 {
	if m != nil {
		return m.NumLocalSsds
	}
	return 0
}
// Specifies an executable to run on a fully configured node and a
// timeout period for executable completion.
type NodeInitializationAction struct {
	// Required. Google Cloud Storage URI of executable file.
	ExecutableFile string `protobuf:"bytes,1,opt,name=executable_file,json=executableFile,proto3" json:"executable_file,omitempty"`
	// Optional. Amount of time executable has to complete. Default is
	// 10 minutes. Cluster creation fails with an explanatory error message (the
	// name of the executable that caused the error and the exceeded timeout
	// period) if the executable is not completed at end of the timeout period.
	ExecutionTimeout *duration.Duration `protobuf:"bytes,2,opt,name=execution_timeout,json=executionTimeout,proto3" json:"execution_timeout,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset, String, and ProtoMessage implement the proto.Message interface.
func (m *NodeInitializationAction) Reset()         { *m = NodeInitializationAction{} }
func (m *NodeInitializationAction) String() string { return proto.CompactTextString(m) }
func (*NodeInitializationAction) ProtoMessage()    {}

// Descriptor returns the generated file descriptor bytes and the index path
// of NodeInitializationAction (message 7) within that file.
func (*NodeInitializationAction) Descriptor() ([]byte, []int) {
	return fileDescriptor_29f9b85b3c7e3a5f, []int{7}
}

// XXX_* methods are internal proto-runtime hooks; not part of the public API.
func (m *NodeInitializationAction) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_NodeInitializationAction.Unmarshal(m, b)
}
func (m *NodeInitializationAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_NodeInitializationAction.Marshal(b, m, deterministic)
}
func (m *NodeInitializationAction) XXX_Merge(src proto.Message) {
	xxx_messageInfo_NodeInitializationAction.Merge(m, src)
}
func (m *NodeInitializationAction) XXX_Size() int {
	return xxx_messageInfo_NodeInitializationAction.Size(m)
}
func (m *NodeInitializationAction) XXX_DiscardUnknown() {
	xxx_messageInfo_NodeInitializationAction.DiscardUnknown(m)
}

// xxx_messageInfo_NodeInitializationAction caches (un)marshal metadata for NodeInitializationAction.
var xxx_messageInfo_NodeInitializationAction proto.InternalMessageInfo

// Nil-receiver-safe accessors: return the zero value when m is nil.
func (m *NodeInitializationAction) GetExecutableFile() string {
	if m != nil {
		return m.ExecutableFile
	}
	return ""
}
func (m *NodeInitializationAction) GetExecutionTimeout() *duration.Duration {
	if m != nil {
		return m.ExecutionTimeout
	}
	return nil
}
// The status of a cluster and its instances.
type ClusterStatus struct {
	// Output-only. The cluster's state.
	State ClusterStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.cloud.dataproc.v1.ClusterStatus_State" json:"state,omitempty"`
	// Output-only. Optional details of cluster's state.
	Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"`
	// Output-only. Time when this state was entered.
	StateStartTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=state_start_time,json=stateStartTime,proto3" json:"state_start_time,omitempty"`
	// Output-only. Additional state information that includes
	// status reported by the agent.
	Substate ClusterStatus_Substate `protobuf:"varint,4,opt,name=substate,proto3,enum=google.cloud.dataproc.v1.ClusterStatus_Substate" json:"substate,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset, String, and ProtoMessage implement the proto.Message interface.
func (m *ClusterStatus) Reset()         { *m = ClusterStatus{} }
func (m *ClusterStatus) String() string { return proto.CompactTextString(m) }
func (*ClusterStatus) ProtoMessage()    {}

// Descriptor returns the generated file descriptor bytes and the index path
// of ClusterStatus (message 8) within that file.
func (*ClusterStatus) Descriptor() ([]byte, []int) {
	return fileDescriptor_29f9b85b3c7e3a5f, []int{8}
}

// XXX_* methods are internal proto-runtime hooks; not part of the public API.
func (m *ClusterStatus) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ClusterStatus.Unmarshal(m, b)
}
func (m *ClusterStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ClusterStatus.Marshal(b, m, deterministic)
}
func (m *ClusterStatus) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ClusterStatus.Merge(m, src)
}
func (m *ClusterStatus) XXX_Size() int {
	return xxx_messageInfo_ClusterStatus.Size(m)
}
func (m *ClusterStatus) XXX_DiscardUnknown() {
	xxx_messageInfo_ClusterStatus.DiscardUnknown(m)
}

// xxx_messageInfo_ClusterStatus caches (un)marshal metadata for ClusterStatus.
var xxx_messageInfo_ClusterStatus proto.InternalMessageInfo

// Nil-receiver-safe accessors: enum getters return their declared default
// (UNKNOWN / UNSPECIFIED) when m is nil.
func (m *ClusterStatus) GetState() ClusterStatus_State {
	if m != nil {
		return m.State
	}
	return ClusterStatus_UNKNOWN
}
func (m *ClusterStatus) GetDetail() string {
	if m != nil {
		return m.Detail
	}
	return ""
}
func (m *ClusterStatus) GetStateStartTime() *timestamp.Timestamp {
	if m != nil {
		return m.StateStartTime
	}
	return nil
}
func (m *ClusterStatus) GetSubstate() ClusterStatus_Substate {
	if m != nil {
		return m.Substate
	}
	return ClusterStatus_UNSPECIFIED
}
// Specifies the selection and config of software inside the cluster.
type SoftwareConfig struct {
	// Optional. The version of software inside the cluster. It must match the
	// regular expression `[0-9]+\.[0-9]+`. If unspecified, it defaults to the
	// latest version (see [Cloud Dataproc Versioning](/dataproc/versioning)).
	ImageVersion string `protobuf:"bytes,1,opt,name=image_version,json=imageVersion,proto3" json:"image_version,omitempty"`
	// Optional. The properties to set on daemon config files.
	//
	// Property keys are specified in `prefix:property` format, such as
	// `core:fs.defaultFS`. The following are supported prefixes
	// and their mappings:
	//
	// * capacity-scheduler: `capacity-scheduler.xml`
	// * core: `core-site.xml`
	// * distcp: `distcp-default.xml`
	// * hdfs: `hdfs-site.xml`
	// * hive: `hive-site.xml`
	// * mapred: `mapred-site.xml`
	// * pig: `pig.properties`
	// * spark: `spark-defaults.conf`
	// * yarn: `yarn-site.xml`
	//
	// For more information, see
	// [Cluster properties](/dataproc/docs/concepts/cluster-properties).
	Properties map[string]string `protobuf:"bytes,2,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset, String, and ProtoMessage implement the proto.Message interface.
func (m *SoftwareConfig) Reset()         { *m = SoftwareConfig{} }
func (m *SoftwareConfig) String() string { return proto.CompactTextString(m) }
func (*SoftwareConfig) ProtoMessage()    {}

// Descriptor returns the generated file descriptor bytes and the index path
// of SoftwareConfig (message 9) within that file.
func (*SoftwareConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_29f9b85b3c7e3a5f, []int{9}
}

// XXX_* methods are internal proto-runtime hooks; not part of the public API.
func (m *SoftwareConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SoftwareConfig.Unmarshal(m, b)
}
func (m *SoftwareConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SoftwareConfig.Marshal(b, m, deterministic)
}
func (m *SoftwareConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SoftwareConfig.Merge(m, src)
}
func (m *SoftwareConfig) XXX_Size() int {
	return xxx_messageInfo_SoftwareConfig.Size(m)
}
func (m *SoftwareConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_SoftwareConfig.DiscardUnknown(m)
}

// xxx_messageInfo_SoftwareConfig caches (un)marshal metadata for SoftwareConfig.
var xxx_messageInfo_SoftwareConfig proto.InternalMessageInfo

// Nil-receiver-safe accessors: return the zero value when m is nil.
func (m *SoftwareConfig) GetImageVersion() string {
	if m != nil {
		return m.ImageVersion
	}
	return ""
}
func (m *SoftwareConfig) GetProperties() map[string]string {
	if m != nil {
		return m.Properties
	}
	return nil
}
// Contains cluster daemon metrics, such as HDFS and YARN stats.
//
// **Beta Feature**: This report is available for testing purposes only. It may
// be changed before final release.
type ClusterMetrics struct {
	// The HDFS metrics.
	HdfsMetrics map[string]int64 `protobuf:"bytes,1,rep,name=hdfs_metrics,json=hdfsMetrics,proto3" json:"hdfs_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
	// The YARN metrics.
	YarnMetrics map[string]int64 `protobuf:"bytes,2,rep,name=yarn_metrics,json=yarnMetrics,proto3" json:"yarn_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset, String, and ProtoMessage implement the proto.Message interface.
func (m *ClusterMetrics) Reset()         { *m = ClusterMetrics{} }
func (m *ClusterMetrics) String() string { return proto.CompactTextString(m) }
func (*ClusterMetrics) ProtoMessage()    {}

// Descriptor returns the generated file descriptor bytes and the index path
// of ClusterMetrics (message 10) within that file.
func (*ClusterMetrics) Descriptor() ([]byte, []int) {
	return fileDescriptor_29f9b85b3c7e3a5f, []int{10}
}

// XXX_* methods are internal proto-runtime hooks; not part of the public API.
func (m *ClusterMetrics) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ClusterMetrics.Unmarshal(m, b)
}
func (m *ClusterMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ClusterMetrics.Marshal(b, m, deterministic)
}
func (m *ClusterMetrics) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ClusterMetrics.Merge(m, src)
}
func (m *ClusterMetrics) XXX_Size() int {
	return xxx_messageInfo_ClusterMetrics.Size(m)
}
func (m *ClusterMetrics) XXX_DiscardUnknown() {
	xxx_messageInfo_ClusterMetrics.DiscardUnknown(m)
}

// xxx_messageInfo_ClusterMetrics caches (un)marshal metadata for ClusterMetrics.
var xxx_messageInfo_ClusterMetrics proto.InternalMessageInfo

// Nil-receiver-safe accessors: return nil when m is nil.
func (m *ClusterMetrics) GetHdfsMetrics() map[string]int64 {
	if m != nil {
		return m.HdfsMetrics
	}
	return nil
}
func (m *ClusterMetrics) GetYarnMetrics() map[string]int64 {
	if m != nil {
		return m.YarnMetrics
	}
	return nil
}
// A request to create a cluster.
type CreateClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The cluster to create.
	Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset, String, and ProtoMessage implement the proto.Message interface.
func (m *CreateClusterRequest) Reset()         { *m = CreateClusterRequest{} }
func (m *CreateClusterRequest) String() string { return proto.CompactTextString(m) }
func (*CreateClusterRequest) ProtoMessage()    {}

// Descriptor returns the generated file descriptor bytes and the index path
// of CreateClusterRequest (message 11) within that file.
func (*CreateClusterRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_29f9b85b3c7e3a5f, []int{11}
}

// XXX_* methods are internal proto-runtime hooks; not part of the public API.
func (m *CreateClusterRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CreateClusterRequest.Unmarshal(m, b)
}
func (m *CreateClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CreateClusterRequest.Marshal(b, m, deterministic)
}
func (m *CreateClusterRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CreateClusterRequest.Merge(m, src)
}
func (m *CreateClusterRequest) XXX_Size() int {
	return xxx_messageInfo_CreateClusterRequest.Size(m)
}
func (m *CreateClusterRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_CreateClusterRequest.DiscardUnknown(m)
}

// xxx_messageInfo_CreateClusterRequest caches (un)marshal metadata for CreateClusterRequest.
var xxx_messageInfo_CreateClusterRequest proto.InternalMessageInfo

// Nil-receiver-safe accessors: return the zero value when m is nil.
func (m *CreateClusterRequest) GetProjectId() string {
	if m != nil {
		return m.ProjectId
	}
	return ""
}
func (m *CreateClusterRequest) GetRegion() string {
	if m != nil {
		return m.Region
	}
	return ""
}
func (m *CreateClusterRequest) GetCluster() *Cluster {
	if m != nil {
		return m.Cluster
	}
	return nil
}
// A request to update a cluster.
type UpdateClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project the
	// cluster belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,5,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Required. The changes to the cluster.
	Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster,proto3" json:"cluster,omitempty"`
	// Required. Specifies the path, relative to `Cluster`, of
	// the field to update. For example, to change the number of workers
	// in a cluster to 5, the `update_mask` parameter would be
	// specified as `config.worker_config.num_instances`,
	// and the `PATCH` request body would specify the new value, as follows:
	//
	//     {
	//       "config":{
	//         "workerConfig":{
	//           "numInstances":"5"
	//         }
	//       }
	//     }
	// Similarly, to change the number of preemptible workers in a cluster to 5,
	// the `update_mask` parameter would be
	// `config.secondary_worker_config.num_instances`, and the `PATCH` request
	// body would be set as follows:
	//
	//     {
	//       "config":{
	//         "secondaryWorkerConfig":{
	//           "numInstances":"5"
	//         }
	//       }
	//     }
	// <strong>Note:</strong> Currently, only the following fields can be updated:
	//
	//  <table>
	//  <tbody>
	//  <tr>
	//  <td><strong>Mask</strong></td>
	//  <td><strong>Purpose</strong></td>
	//  </tr>
	//  <tr>
	//  <td><strong><em>labels</em></strong></td>
	//  <td>Update labels</td>
	//  </tr>
	//  <tr>
	//  <td><strong><em>config.worker_config.num_instances</em></strong></td>
	//  <td>Resize primary worker group</td>
	//  </tr>
	//  <tr>
	//  <td><strong><em>config.secondary_worker_config.num_instances</em></strong></td>
	//  <td>Resize secondary worker group</td>
	//  </tr>
	//  </tbody>
	//  </table>
	UpdateMask *field_mask.FieldMask `protobuf:"bytes,4,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset, String, and ProtoMessage implement the proto.Message interface.
func (m *UpdateClusterRequest) Reset()         { *m = UpdateClusterRequest{} }
func (m *UpdateClusterRequest) String() string { return proto.CompactTextString(m) }
func (*UpdateClusterRequest) ProtoMessage()    {}

// Descriptor returns the generated file descriptor bytes and the index path
// of UpdateClusterRequest (message 12) within that file.
func (*UpdateClusterRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_29f9b85b3c7e3a5f, []int{12}
}

// XXX_* methods are internal proto-runtime hooks; not part of the public API.
func (m *UpdateClusterRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_UpdateClusterRequest.Unmarshal(m, b)
}
func (m *UpdateClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_UpdateClusterRequest.Marshal(b, m, deterministic)
}
func (m *UpdateClusterRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_UpdateClusterRequest.Merge(m, src)
}
func (m *UpdateClusterRequest) XXX_Size() int {
	return xxx_messageInfo_UpdateClusterRequest.Size(m)
}
func (m *UpdateClusterRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_UpdateClusterRequest.DiscardUnknown(m)
}

// xxx_messageInfo_UpdateClusterRequest caches (un)marshal metadata for UpdateClusterRequest.
var xxx_messageInfo_UpdateClusterRequest proto.InternalMessageInfo

// Nil-receiver-safe accessors: return the zero value when m is nil.
func (m *UpdateClusterRequest) GetProjectId() string {
	if m != nil {
		return m.ProjectId
	}
	return ""
}
func (m *UpdateClusterRequest) GetRegion() string {
	if m != nil {
		return m.Region
	}
	return ""
}
func (m *UpdateClusterRequest) GetClusterName() string {
	if m != nil {
		return m.ClusterName
	}
	return ""
}
func (m *UpdateClusterRequest) GetCluster() *Cluster {
	if m != nil {
		return m.Cluster
	}
	return nil
}
func (m *UpdateClusterRequest) GetUpdateMask() *field_mask.FieldMask {
	if m != nil {
		return m.UpdateMask
	}
	return nil
}
// A request to delete a cluster.
type DeleteClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset, String, and ProtoMessage implement the proto.Message interface.
func (m *DeleteClusterRequest) Reset()         { *m = DeleteClusterRequest{} }
func (m *DeleteClusterRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteClusterRequest) ProtoMessage()    {}

// Descriptor returns the generated file descriptor bytes and the index path
// of DeleteClusterRequest (message 13) within that file.
func (*DeleteClusterRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_29f9b85b3c7e3a5f, []int{13}
}

// XXX_* methods are internal proto-runtime hooks; not part of the public API.
func (m *DeleteClusterRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_DeleteClusterRequest.Unmarshal(m, b)
}
func (m *DeleteClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_DeleteClusterRequest.Marshal(b, m, deterministic)
}
func (m *DeleteClusterRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_DeleteClusterRequest.Merge(m, src)
}
func (m *DeleteClusterRequest) XXX_Size() int {
	return xxx_messageInfo_DeleteClusterRequest.Size(m)
}
func (m *DeleteClusterRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_DeleteClusterRequest.DiscardUnknown(m)
}

// xxx_messageInfo_DeleteClusterRequest caches (un)marshal metadata for DeleteClusterRequest.
var xxx_messageInfo_DeleteClusterRequest proto.InternalMessageInfo

// Nil-receiver-safe accessors: return "" when m is nil.
func (m *DeleteClusterRequest) GetProjectId() string {
	if m != nil {
		return m.ProjectId
	}
	return ""
}
func (m *DeleteClusterRequest) GetRegion() string {
	if m != nil {
		return m.Region
	}
	return ""
}
func (m *DeleteClusterRequest) GetClusterName() string {
	if m != nil {
		return m.ClusterName
	}
	return ""
}
// Request to get the resource representation for a cluster in a project.
type GetClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset, String, and ProtoMessage implement the proto.Message interface.
func (m *GetClusterRequest) Reset()         { *m = GetClusterRequest{} }
func (m *GetClusterRequest) String() string { return proto.CompactTextString(m) }
func (*GetClusterRequest) ProtoMessage()    {}

// Descriptor returns the generated file descriptor bytes and the index path
// of GetClusterRequest (message 14) within that file.
func (*GetClusterRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_29f9b85b3c7e3a5f, []int{14}
}

// XXX_* methods are internal proto-runtime hooks; not part of the public API.
func (m *GetClusterRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetClusterRequest.Unmarshal(m, b)
}
func (m *GetClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetClusterRequest.Marshal(b, m, deterministic)
}
func (m *GetClusterRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetClusterRequest.Merge(m, src)
}
func (m *GetClusterRequest) XXX_Size() int {
	return xxx_messageInfo_GetClusterRequest.Size(m)
}
func (m *GetClusterRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_GetClusterRequest.DiscardUnknown(m)
}

// xxx_messageInfo_GetClusterRequest caches (un)marshal metadata for GetClusterRequest.
var xxx_messageInfo_GetClusterRequest proto.InternalMessageInfo

// Nil-receiver-safe accessors: return "" when m is nil.
func (m *GetClusterRequest) GetProjectId() string {
	if m != nil {
		return m.ProjectId
	}
	return ""
}
func (m *GetClusterRequest) GetRegion() string {
	if m != nil {
		return m.Region
	}
	return ""
}
func (m *GetClusterRequest) GetClusterName() string {
	if m != nil {
		return m.ClusterName
	}
	return ""
}
// A request to list the clusters in a project.
type ListClustersRequest struct {
	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,4,opt,name=region,proto3" json:"region,omitempty"`
	// Optional. A filter constraining the clusters to list. Filters are
	// case-sensitive and have the following syntax:
	//
	// field = value [AND [field = value]] ...
	//
	// where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`,
	// and `[KEY]` is a label key. **value** can be `*` to match all values.
	// `status.state` can be one of the following: `ACTIVE`, `INACTIVE`,
	// `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE`
	// contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE`
	// contains the `DELETING` and `ERROR` states.
	// `clusterName` is the name of the cluster provided at creation time.
	// Only the logical `AND` operator is supported; space-separated items are
	// treated as having an implicit `AND` operator.
	//
	// Example filter:
	//
	// status.state = ACTIVE AND clusterName = mycluster
	// AND labels.env = staging AND labels.starred = *
	Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"`
	// Optional. The standard List page size.
	PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
	// Optional. The standard List page token.
	PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset, String, and ProtoMessage implement the proto.Message interface.
func (m *ListClustersRequest) Reset()         { *m = ListClustersRequest{} }
func (m *ListClustersRequest) String() string { return proto.CompactTextString(m) }
func (*ListClustersRequest) ProtoMessage()    {}

// Descriptor returns the generated file descriptor bytes and the index path
// of ListClustersRequest (message 15) within that file.
func (*ListClustersRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_29f9b85b3c7e3a5f, []int{15}
}

// XXX_* methods are internal proto-runtime hooks; not part of the public API.
func (m *ListClustersRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ListClustersRequest.Unmarshal(m, b)
}
func (m *ListClustersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ListClustersRequest.Marshal(b, m, deterministic)
}
func (m *ListClustersRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ListClustersRequest.Merge(m, src)
}
func (m *ListClustersRequest) XXX_Size() int {
	return xxx_messageInfo_ListClustersRequest.Size(m)
}
func (m *ListClustersRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_ListClustersRequest.DiscardUnknown(m)
}

// xxx_messageInfo_ListClustersRequest caches (un)marshal metadata for ListClustersRequest.
var xxx_messageInfo_ListClustersRequest proto.InternalMessageInfo

// Nil-receiver-safe accessors: return the zero value when m is nil.
func (m *ListClustersRequest) GetProjectId() string {
	if m != nil {
		return m.ProjectId
	}
	return ""
}
func (m *ListClustersRequest) GetRegion() string {
	if m != nil {
		return m.Region
	}
	return ""
}
func (m *ListClustersRequest) GetFilter() string {
	if m != nil {
		return m.Filter
	}
	return ""
}
func (m *ListClustersRequest) GetPageSize() int32 {
	if m != nil {
		return m.PageSize
	}
	return 0
}
func (m *ListClustersRequest) GetPageToken() string {
	if m != nil {
		return m.PageToken
	}
	return ""
}
// The list of all clusters in a project.
type ListClustersResponse struct {
	// Output-only. The clusters in the project.
	Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"`
	// Output-only. This token is included in the response if there are more
	// results to fetch. To fetch additional results, provide this value as the
	// `page_token` in a subsequent `ListClustersRequest`.
	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset, String, and ProtoMessage implement the proto.Message interface.
func (m *ListClustersResponse) Reset()         { *m = ListClustersResponse{} }
func (m *ListClustersResponse) String() string { return proto.CompactTextString(m) }
func (*ListClustersResponse) ProtoMessage()    {}

// Descriptor returns the generated file descriptor bytes and the index path
// of ListClustersResponse (message 16) within that file.
func (*ListClustersResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_29f9b85b3c7e3a5f, []int{16}
}

// XXX_* methods are internal proto-runtime hooks; not part of the public API.
func (m *ListClustersResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ListClustersResponse.Unmarshal(m, b)
}
func (m *ListClustersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ListClustersResponse.Marshal(b, m, deterministic)
}
func (m *ListClustersResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ListClustersResponse.Merge(m, src)
}
func (m *ListClustersResponse) XXX_Size() int {
	return xxx_messageInfo_ListClustersResponse.Size(m)
}
func (m *ListClustersResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_ListClustersResponse.DiscardUnknown(m)
}

// xxx_messageInfo_ListClustersResponse caches (un)marshal metadata for ListClustersResponse.
var xxx_messageInfo_ListClustersResponse proto.InternalMessageInfo

// Nil-receiver-safe accessors: return the zero value when m is nil.
func (m *ListClustersResponse) GetClusters() []*Cluster {
	if m != nil {
		return m.Clusters
	}
	return nil
}
func (m *ListClustersResponse) GetNextPageToken() string {
	if m != nil {
		return m.NextPageToken
	}
	return ""
}
// A request to collect cluster diagnostic information.
type DiagnoseClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset, String, and ProtoMessage implement the proto.Message interface.
func (m *DiagnoseClusterRequest) Reset()         { *m = DiagnoseClusterRequest{} }
func (m *DiagnoseClusterRequest) String() string { return proto.CompactTextString(m) }
func (*DiagnoseClusterRequest) ProtoMessage()    {}

// Descriptor returns the generated file descriptor bytes and the index path
// of DiagnoseClusterRequest (message 17) within that file.
func (*DiagnoseClusterRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_29f9b85b3c7e3a5f, []int{17}
}

// XXX_* methods are internal proto-runtime hooks; not part of the public API.
func (m *DiagnoseClusterRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_DiagnoseClusterRequest.Unmarshal(m, b)
}
func (m *DiagnoseClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_DiagnoseClusterRequest.Marshal(b, m, deterministic)
}
func (m *DiagnoseClusterRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_DiagnoseClusterRequest.Merge(m, src)
}
func (m *DiagnoseClusterRequest) XXX_Size() int {
	return xxx_messageInfo_DiagnoseClusterRequest.Size(m)
}
func (m *DiagnoseClusterRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_DiagnoseClusterRequest.DiscardUnknown(m)
}

// xxx_messageInfo_DiagnoseClusterRequest caches (un)marshal metadata for DiagnoseClusterRequest.
var xxx_messageInfo_DiagnoseClusterRequest proto.InternalMessageInfo

// Nil-receiver-safe accessors: return "" when m is nil.
func (m *DiagnoseClusterRequest) GetProjectId() string {
	if m != nil {
		return m.ProjectId
	}
	return ""
}
func (m *DiagnoseClusterRequest) GetRegion() string {
	if m != nil {
		return m.Region
	}
	return ""
}
func (m *DiagnoseClusterRequest) GetClusterName() string {
	if m != nil {
		return m.ClusterName
	}
	return ""
}
// The location of diagnostic output.
type DiagnoseClusterResults struct {
	// Output-only. The Google Cloud Storage URI of the diagnostic output.
	// The output report is a plain text file with a summary of collected
	// diagnostics.
	OutputUri string `protobuf:"bytes,1,opt,name=output_uri,json=outputUri,proto3" json:"output_uri,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset, String, and ProtoMessage implement the proto.Message interface.
func (m *DiagnoseClusterResults) Reset()         { *m = DiagnoseClusterResults{} }
func (m *DiagnoseClusterResults) String() string { return proto.CompactTextString(m) }
func (*DiagnoseClusterResults) ProtoMessage()    {}

// Descriptor returns the generated file descriptor bytes and the index path
// of DiagnoseClusterResults (message 18) within that file.
func (*DiagnoseClusterResults) Descriptor() ([]byte, []int) {
	return fileDescriptor_29f9b85b3c7e3a5f, []int{18}
}

// XXX_* methods are internal proto-runtime hooks; not part of the public API.
func (m *DiagnoseClusterResults) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_DiagnoseClusterResults.Unmarshal(m, b)
}
func (m *DiagnoseClusterResults) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_DiagnoseClusterResults.Marshal(b, m, deterministic)
}
func (m *DiagnoseClusterResults) XXX_Merge(src proto.Message) {
	xxx_messageInfo_DiagnoseClusterResults.Merge(m, src)
}
func (m *DiagnoseClusterResults) XXX_Size() int {
	return xxx_messageInfo_DiagnoseClusterResults.Size(m)
}
func (m *DiagnoseClusterResults) XXX_DiscardUnknown() {
	xxx_messageInfo_DiagnoseClusterResults.DiscardUnknown(m)
}

// xxx_messageInfo_DiagnoseClusterResults caches (un)marshal metadata for DiagnoseClusterResults.
var xxx_messageInfo_DiagnoseClusterResults proto.InternalMessageInfo

// GetOutputUri is nil-receiver safe: returns "" when m is nil.
func (m *DiagnoseClusterResults) GetOutputUri() string {
	if m != nil {
		return m.OutputUri
	}
	return ""
}
func init() {
proto.RegisterType((*Cluster)(nil), "google.cloud.dataproc.v1.Cluster")
proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1.Cluster.LabelsEntry")
proto.RegisterType((*ClusterConfig)(nil), "google.cloud.dataproc.v1.ClusterConfig")
proto.RegisterType((*GceClusterConfig)(nil), "google.cloud.dataproc.v1.GceClusterConfig")
proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1.GceClusterConfig.MetadataEntry")
proto.RegisterType((*InstanceGroupConfig)(nil), "google.cloud.dataproc.v1.InstanceGroupConfig")
proto.RegisterType((*ManagedGroupConfig)(nil), "google.cloud.dataproc.v1.ManagedGroupConfig")
proto.RegisterType((*AcceleratorConfig)(nil), "google.cloud.dataproc.v1.AcceleratorConfig")
proto.RegisterType((*DiskConfig)(nil), "google.cloud.dataproc.v1.DiskConfig")
proto.RegisterType((*NodeInitializationAction)(nil), "google.cloud.dataproc.v1.NodeInitializationAction")
proto.RegisterType((*ClusterStatus)(nil), "google.cloud.dataproc.v1.ClusterStatus")
proto.RegisterType((*SoftwareConfig)(nil), "google.cloud.dataproc.v1.SoftwareConfig")
proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1.SoftwareConfig.PropertiesEntry")
proto.RegisterType((*ClusterMetrics)(nil), "google.cloud.dataproc.v1.ClusterMetrics")
proto.RegisterMapType((map[string]int64)(nil), "google.cloud.dataproc.v1.ClusterMetrics.HdfsMetricsEntry")
proto.RegisterMapType((map[string]int64)(nil), "google.cloud.dataproc.v1.ClusterMetrics.YarnMetricsEntry")
proto.RegisterType((*CreateClusterRequest)(nil), "google.cloud.dataproc.v1.CreateClusterRequest")
proto.RegisterType((*UpdateClusterRequest)(nil), "google.cloud.dataproc.v1.UpdateClusterRequest")
proto.RegisterType((*DeleteClusterRequest)(nil), "google.cloud.dataproc.v1.DeleteClusterRequest")
proto.RegisterType((*GetClusterRequest)(nil), "google.cloud.dataproc.v1.GetClusterRequest")
proto.RegisterType((*ListClustersRequest)(nil), "google.cloud.dataproc.v1.ListClustersRequest")
proto.RegisterType((*ListClustersResponse)(nil), "google.cloud.dataproc.v1.ListClustersResponse")
proto.RegisterType((*DiagnoseClusterRequest)(nil), "google.cloud.dataproc.v1.DiagnoseClusterRequest")
proto.RegisterType((*DiagnoseClusterResults)(nil), "google.cloud.dataproc.v1.DiagnoseClusterResults")
proto.RegisterEnum("google.cloud.dataproc.v1.ClusterStatus_State", ClusterStatus_State_name, ClusterStatus_State_value)
proto.RegisterEnum("google.cloud.dataproc.v1.ClusterStatus_Substate", ClusterStatus_Substate_name, ClusterStatus_Substate_value)
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// ClusterControllerClient is the client API for ClusterController service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ClusterControllerClient interface {
// Creates a cluster in a project.
CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Updates a cluster in a project.
UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Deletes a cluster in a project.
DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Gets the resource representation for a cluster in a project.
GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error)
// Lists all regions/{region}/clusters in a project.
ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error)
// Gets cluster diagnostic information.
// After the operation completes, the Operation.response field
// contains `DiagnoseClusterOutputLocation`.
DiagnoseCluster(ctx context.Context, in *DiagnoseClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
}
type clusterControllerClient struct {
cc *grpc.ClientConn
}
func NewClusterControllerClient(cc *grpc.ClientConn) ClusterControllerClient {
return &clusterControllerClient{cc}
}
func (c *clusterControllerClient) CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
out := new(longrunning.Operation)
err := c.cc.Invoke(ctx, "/google.cloud.dataproc.v1.ClusterController/CreateCluster", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *clusterControllerClient) UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
out := new(longrunning.Operation)
err := c.cc.Invoke(ctx, "/google.cloud.dataproc.v1.ClusterController/UpdateCluster", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *clusterControllerClient) DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
out := new(longrunning.Operation)
err := c.cc.Invoke(ctx, "/google.cloud.dataproc.v1.ClusterController/DeleteCluster", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *clusterControllerClient) GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) {
out := new(Cluster)
err := c.cc.Invoke(ctx, "/google.cloud.dataproc.v1.ClusterController/GetCluster", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *clusterControllerClient) ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) {
out := new(ListClustersResponse)
err := c.cc.Invoke(ctx, "/google.cloud.dataproc.v1.ClusterController/ListClusters", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *clusterControllerClient) DiagnoseCluster(ctx context.Context, in *DiagnoseClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
out := new(longrunning.Operation)
err := c.cc.Invoke(ctx, "/google.cloud.dataproc.v1.ClusterController/DiagnoseCluster", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// ClusterControllerServer is the server API for ClusterController service.
type ClusterControllerServer interface {
// Creates a cluster in a project.
CreateCluster(context.Context, *CreateClusterRequest) (*longrunning.Operation, error)
// Updates a cluster in a project.
UpdateCluster(context.Context, *UpdateClusterRequest) (*longrunning.Operation, error)
// Deletes a cluster in a project.
DeleteCluster(context.Context, *DeleteClusterRequest) (*longrunning.Operation, error)
// Gets the resource representation for a cluster in a project.
GetCluster(context.Context, *GetClusterRequest) (*Cluster, error)
// Lists all regions/{region}/clusters in a project.
ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error)
// Gets cluster diagnostic information.
// After the operation completes, the Operation.response field
// contains `DiagnoseClusterOutputLocation`.
DiagnoseCluster(context.Context, *DiagnoseClusterRequest) (*longrunning.Operation, error)
}
func RegisterClusterControllerServer(s *grpc.Server, srv ClusterControllerServer) {
s.RegisterService(&_ClusterController_serviceDesc, srv)
}
func _ClusterController_CreateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreateClusterRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ClusterControllerServer).CreateCluster(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.dataproc.v1.ClusterController/CreateCluster",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ClusterControllerServer).CreateCluster(ctx, req.(*CreateClusterRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ClusterController_UpdateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UpdateClusterRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ClusterControllerServer).UpdateCluster(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.dataproc.v1.ClusterController/UpdateCluster",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ClusterControllerServer).UpdateCluster(ctx, req.(*UpdateClusterRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ClusterController_DeleteCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteClusterRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ClusterControllerServer).DeleteCluster(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.dataproc.v1.ClusterController/DeleteCluster",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ClusterControllerServer).DeleteCluster(ctx, req.(*DeleteClusterRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ClusterController_GetCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetClusterRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ClusterControllerServer).GetCluster(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.dataproc.v1.ClusterController/GetCluster",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ClusterControllerServer).GetCluster(ctx, req.(*GetClusterRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ClusterController_ListClusters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListClustersRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ClusterControllerServer).ListClusters(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.dataproc.v1.ClusterController/ListClusters",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ClusterControllerServer).ListClusters(ctx, req.(*ListClustersRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ClusterController_DiagnoseCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DiagnoseClusterRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ClusterControllerServer).DiagnoseCluster(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.dataproc.v1.ClusterController/DiagnoseCluster",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ClusterControllerServer).DiagnoseCluster(ctx, req.(*DiagnoseClusterRequest))
}
return interceptor(ctx, in, info, handler)
}
var _ClusterController_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.cloud.dataproc.v1.ClusterController",
HandlerType: (*ClusterControllerServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "CreateCluster",
Handler: _ClusterController_CreateCluster_Handler,
},
{
MethodName: "UpdateCluster",
Handler: _ClusterController_UpdateCluster_Handler,
},
{
MethodName: "DeleteCluster",
Handler: _ClusterController_DeleteCluster_Handler,
},
{
MethodName: "GetCluster",
Handler: _ClusterController_GetCluster_Handler,
},
{
MethodName: "ListClusters",
Handler: _ClusterController_ListClusters_Handler,
},
{
MethodName: "DiagnoseCluster",
Handler: _ClusterController_DiagnoseCluster_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/cloud/dataproc/v1/clusters.proto",
}
func init() {
proto.RegisterFile("google/cloud/dataproc/v1/clusters.proto", fileDescriptor_29f9b85b3c7e3a5f)
}
var fileDescriptor_29f9b85b3c7e3a5f = []byte{
// 1944 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcd, 0x73, 0x23, 0x47,
0x15, 0xcf, 0x58, 0xfe, 0x90, 0x9f, 0x3e, 0x2c, 0x77, 0x1c, 0xa3, 0x28, 0x09, 0x71, 0x26, 0x81,
0x75, 0x36, 0x20, 0xed, 0x3a, 0x50, 0x24, 0x6b, 0x12, 0xb0, 0x2d, 0xad, 0xd7, 0xc4, 0x96, 0xcd,
0x48, 0xda, 0x24, 0x14, 0x30, 0xd5, 0x9a, 0x69, 0x6b, 0x1b, 0x8f, 0x66, 0x26, 0xd3, 0x3d, 0x4e,
0xbc, 0x5b, 0x7b, 0xe1, 0x40, 0x15, 0x70, 0xa4, 0x8a, 0x33, 0x07, 0xa8, 0xa2, 0x72, 0x84, 0x1b,
0xff, 0x00, 0x17, 0x8a, 0x0b, 0x47, 0xae, 0x9c, 0xf8, 0x2b, 0xa8, 0xfe, 0x18, 0x69, 0xc6, 0x5f,
0x92, 0x97, 0xad, 0x9c, 0x34, 0xf3, 0xfa, 0xf7, 0xde, 0xfb, 0xf5, 0x7b, 0xaf, 0x5f, 0xbf, 0x11,
0xdc, 0x1a, 0x04, 0xc1, 0xc0, 0x23, 0x0d, 0xc7, 0x0b, 0x62, 0xb7, 0xe1, 0x62, 0x8e, 0xc3, 0x28,
0x70, 0x1a, 0xa7, 0x77, 0x1b, 0x8e, 0x17, 0x33, 0x4e, 0x22, 0x56, 0x0f, 0xa3, 0x80, 0x07, 0xa8,
0xaa, 0x80, 0x75, 0x09, 0xac, 0x27, 0xc0, 0xfa, 0xe9, 0xdd, 0xda, 0xab, 0xda, 0x04, 0x0e, 0x69,
0x03, 0xfb, 0x7e, 0xc0, 0x31, 0xa7, 0x81, 0xaf, 0xf5, 0x6a, 0x6f, 0x5f, 0xe9, 0x20, 0x08, 0x49,
0x94, 0x81, 0xbe, 0xa9, 0xa1, 0x5e, 0xe0, 0x0f, 0xa2, 0xd8, 0xf7, 0xa9, 0x3f, 0xb8, 0x08, 0xfa,
0xba, 0x06, 0xc9, 0xb7, 0x7e, 0x7c, 0xdc, 0x70, 0x63, 0x05, 0xd0, 0xeb, 0x6b, 0xe7, 0xd7, 0x8f,
0x29, 0xf1, 0x5c, 0x7b, 0x88, 0xd9, 0x89, 0x46, 0xbc, 0x7e, 0x1e, 0xc1, 0xe9, 0x90, 0x30, 0x8e,
0x87, 0xa1, 0x02, 0x98, 0xbf, 0x9a, 0x85, 0x85, 0x1d, 0xb5, 0x7b, 0xf4, 0x1a, 0x40, 0x18, 0x05,
0xbf, 0x20, 0x0e, 0xb7, 0xa9, 0x5b, 0x35, 0xd6, 0x8c, 0xf5, 0x45, 0x6b, 0x51, 0x4b, 0xf6, 0x5c,
0xf4, 0x06, 0x14, 0x75, 0x9c, 0x6c, 0x1f, 0x0f, 0x49, 0x75, 0x46, 0x02, 0x0a, 0x5a, 0xd6, 0xc6,
0x43, 0x82, 0x7e, 0x00, 0xf3, 0x4e, 0xe0, 0x1f, 0xd3, 0x41, 0x35, 0xb7, 0x66, 0xac, 0x17, 0x36,
0x6e, 0xd5, 0xaf, 0x8a, 0x64, 0x5d, 0x3b, 0xdd, 0x91, 0x70, 0x4b, 0xab, 0xa1, 0x16, 0xcc, 0x7b,
0xb8, 0x4f, 0x3c, 0x56, 0xcd, 0xaf, 0xe5, 0xd6, 0x0b, 0x1b, 0xdf, 0x9e, 0x68, 0xa0, 0xbe, 0x2f,
0xf1, 0x2d, 0x9f, 0x47, 0x67, 0x96, 0x56, 0x16, 0x3c, 0x18, 0xc7, 0x3c, 0x66, 0xd5, 0xd9, 0x29,
0x79, 0x74, 0x24, 0xdc, 0xd2, 0x6a, 0xa8, 0x0d, 0x65, 0xf5, 0x64, 0x3f, 0xa2, 0x8c, 0x07, 0xd1,
0x59, 0x75, 0x41, 0xf2, 0x99, 0xda, 0x50, 0x49, 0xa9, 0x3f, 0x50, 0xda, 0xe9, 0xd8, 0xc5, 0x31,
0x75, 0xab, 0xf3, 0x99, 0xd8, 0xf5, 0x62, 0xea, 0xa2, 0x6d, 0x58, 0x18, 0x12, 0x1e, 0x51, 0x87,
0x55, 0x17, 0x25, 0xe9, 0xf5, 0x89, 0xbe, 0x0e, 0x14, 0xde, 0x4a, 0x14, 0x6b, 0xef, 0x43, 0x21,
0x15, 0x0e, 0x54, 0x81, 0xdc, 0x09, 0x39, 0xd3, 0x99, 0x14, 0x8f, 0x68, 0x05, 0xe6, 0x4e, 0xb1,
0x17, 0x27, 0xc9, 0x53, 0x2f, 0xf7, 0x66, 0xde, 0x33, 0xcc, 0x7f, 0xcf, 0x42, 0x29, 0x93, 0x13,
0xf4, 0x26, 0x94, 0x54, 0x56, 0xec, 0x7e, 0xec, 0x9c, 0x10, 0xae, 0xed, 0x14, 0x95, 0x70, 0x5b,
0xca, 0xd0, 0x27, 0x80, 0x06, 0x0e, 0xb1, 0x93, 0xcd, 0xe9, 0xec, 0xe7, 0xe5, 0x06, 0x6e, 0x5f,
0xbd, 0x81, 0x5d, 0x87, 0x64, 0x0b, 0xa0, 0x32, 0x38, 0x27, 0x41, 0x16, 0x94, 0x86, 0x38, 0x6d,
0x54, 0x45, 0xe5, 0x9a, 0x8a, 0xd8, 0xf3, 0x19, 0xc7, 0xbe, 0x43, 0x76, 0xa3, 0x20, 0x0e, 0xb5,
0xdd, 0xa2, 0xb2, 0x31, 0xb6, 0xf9, 0x79, 0x10, 0x9d, 0x8c, 0x6d, 0xc2, 0x33, 0xd9, 0x54, 0x36,
0xb4, 0x4d, 0x02, 0x5f, 0x63, 0xc4, 0x09, 0x7c, 0x17, 0x47, 0x67, 0x76, 0xd6, 0x7a, 0xf1, 0x59,
0xac, 0xbf, 0x34, 0xb2, 0xf6, 0x71, 0xda, 0xcd, 0x8f, 0x61, 0x89, 0x05, 0xc7, 0xfc, 0x73, 0x1c,
0x91, 0xc4, 0x7c, 0x69, 0x52, 0x99, 0x74, 0xb4, 0x82, 0xb6, 0x5c, 0x66, 0x99, 0x77, 0x44, 0x61,
0x95, 0xfa, 0x94, 0x53, 0xec, 0xd1, 0xc7, 0xb2, 0xad, 0xd8, 0xd8, 0x91, 0xed, 0xa7, 0x5a, 0x90,
0xc5, 0xbe, 0x71, 0xb5, 0xe5, 0x76, 0xe0, 0x92, 0xbd, 0x8c, 0xee, 0x96, 0x54, 0xb5, 0x5e, 0xa2,
0x97, 0x48, 0x99, 0xf9, 0xa7, 0x1c, 0x54, 0xce, 0xe7, 0x1c, 0xbd, 0x0c, 0xf9, 0xc7, 0x81, 0x4f,
0xec, 0x38, 0xa2, 0xba, 0xb6, 0x16, 0xc4, 0x7b, 0x2f, 0xa2, 0xe8, 0x75, 0x28, 0xf8, 0x84, 0x8b,
0x68, 0xca, 0x55, 0x55, 0xad, 0xa0, 0x45, 0x02, 0xf0, 0x0d, 0x28, 0xb3, 0xb8, 0x9f, 0xc6, 0xa8,
0x23, 0x55, 0x1a, 0x4b, 0x05, 0x6c, 0x1d, 0x2a, 0xd4, 0xe7, 0x24, 0xf2, 0xb1, 0x67, 0xd3, 0xd0,
0x0e, 0x7c, 0x4f, 0x9c, 0x64, 0x63, 0x3d, 0x6f, 0x95, 0x13, 0xf9, 0x5e, 0x78, 0xe8, 0x7b, 0x67,
0xe8, 0x16, 0x2c, 0x31, 0x12, 0x9d, 0x52, 0x87, 0xd8, 0xd8, 0x71, 0x82, 0xd8, 0xe7, 0xb2, 0x8a,
0x17, 0xad, 0xb2, 0x16, 0x6f, 0x29, 0x29, 0xfa, 0x0e, 0xac, 0x9e, 0x03, 0xda, 0xcc, 0x09, 0x42,
0xc2, 0xaa, 0xb9, 0xb5, 0xdc, 0xfa, 0xa2, 0xb5, 0x92, 0xc5, 0x77, 0xe4, 0x1a, 0x42, 0x30, 0xcb,
0xf1, 0x40, 0xf4, 0x23, 0x81, 0x91, 0xcf, 0xa8, 0x0b, 0xf9, 0x21, 0xe1, 0x58, 0xc4, 0xb5, 0x3a,
0x27, 0x23, 0xfe, 0xde, 0xf4, 0x27, 0xa6, 0x7e, 0xa0, 0x55, 0x55, 0xe7, 0x1b, 0x59, 0xaa, 0x6d,
0x42, 0x29, 0xb3, 0x74, 0xa3, 0x2e, 0xf0, 0xf7, 0x1c, 0xbc, 0x78, 0x49, 0x51, 0x8a, 0x5e, 0xe0,
0xc7, 0x43, 0x9b, 0xea, 0x25, 0x26, 0xad, 0xcd, 0x59, 0x45, 0x3f, 0x1e, 0x26, 0x70, 0x26, 0x72,
0x92, 0x00, 0xe4, 0x0d, 0xc1, 0xaa, 0x33, 0x72, 0xb7, 0xa5, 0x44, 0x2a, 0xee, 0x08, 0x86, 0x5e,
0x81, 0x45, 0x3a, 0xc4, 0x03, 0x95, 0xf7, 0x9c, 0x64, 0x90, 0x97, 0x02, 0x9d, 0xb0, 0x21, 0x76,
0x1e, 0x51, 0x9f, 0xd8, 0xfc, 0x2c, 0x54, 0x98, 0x59, 0x95, 0x07, 0x2d, 0xef, 0x9e, 0x85, 0x12,
0xd9, 0x82, 0x82, 0x4b, 0xd9, 0x49, 0x72, 0x18, 0xe6, 0xe4, 0x61, 0x78, 0xeb, 0xea, 0x00, 0x36,
0x29, 0x3b, 0xd1, 0x07, 0x01, 0xdc, 0xd1, 0xb3, 0x24, 0xcd, 0xec, 0x30, 0x22, 0x64, 0x18, 0x72,
0xda, 0xf7, 0x88, 0x2c, 0xa4, 0xbc, 0x55, 0xa2, 0xec, 0x68, 0x2c, 0x44, 0x3f, 0x87, 0x95, 0x21,
0xf6, 0xf1, 0x80, 0xb8, 0xf6, 0x40, 0xc4, 0x25, 0x71, 0xbb, 0x20, 0xdd, 0x7e, 0xeb, 0x6a, 0xb7,
0x07, 0x4a, 0x2b, 0x7d, 0xc2, 0xd1, 0xf0, 0x82, 0x0c, 0x1d, 0x42, 0x11, 0x3b, 0x0e, 0xf1, 0xc4,
0x04, 0x10, 0x44, 0xc9, 0xf5, 0xf7, 0xce, 0xd5, 0x76, 0xb7, 0xc6, 0xe8, 0xa4, 0x2d, 0xa5, 0x0d,
0x98, 0xbf, 0x36, 0x00, 0x5d, 0xf4, 0x2d, 0xaa, 0x77, 0x94, 0x23, 0x4e, 0x86, 0xa1, 0x87, 0xb9,
0x4a, 0x96, 0xae, 0x8f, 0x95, 0x64, 0xb5, 0xab, 0x17, 0xe5, 0xbd, 0xfe, 0x01, 0xbc, 0x32, 0xd2,
0x52, 0xdb, 0x57, 0x5b, 0xc8, 0x4c, 0x02, 0x55, 0x9a, 0x2e, 0x1c, 0xe5, 0x5b, 0x8e, 0x05, 0x66,
0x04, 0xcb, 0x17, 0xe8, 0xa2, 0x3b, 0xb0, 0x92, 0x22, 0x3c, 0xce, 0xb6, 0xe2, 0x81, 0x52, 0x6b,
0x49, 0xc6, 0xdf, 0x81, 0xe5, 0xb4, 0x86, 0x3a, 0xa4, 0x33, 0xb2, 0x10, 0x2b, 0x38, 0x6d, 0x3f,
0xf6, 0xb9, 0xf9, 0x33, 0x80, 0x71, 0xc6, 0xd1, 0xdb, 0xb0, 0xdc, 0x0f, 0x02, 0x6e, 0xcb, 0x8a,
0x61, 0xf4, 0x31, 0xb1, 0x07, 0x7d, 0x5d, 0xc3, 0x65, 0xb1, 0x20, 0xa0, 0x1d, 0xfa, 0x98, 0xec,
0xf6, 0xd1, 0x5b, 0x50, 0x16, 0xa5, 0xee, 0x05, 0x0e, 0xf6, 0x6c, 0xc6, 0x5c, 0xa6, 0x5d, 0x88,
0x5a, 0xdf, 0x17, 0xc2, 0x0e, 0x73, 0x99, 0xf9, 0x5b, 0x03, 0xaa, 0x57, 0x35, 0x41, 0xd1, 0x4b,
0xc8, 0x17, 0xc4, 0x89, 0x39, 0xee, 0x7b, 0xc4, 0x3e, 0xa6, 0x5e, 0x12, 0xdd, 0xf2, 0x58, 0x7c,
0x9f, 0x7a, 0x04, 0xdd, 0x87, 0x65, 0x25, 0x11, 0xcd, 0x57, 0x8c, 0x66, 0x41, 0xac, 0x76, 0x54,
0xd8, 0x78, 0x39, 0x49, 0x7d, 0x32, 0xba, 0xd5, 0x9b, 0x7a, 0xf8, 0xb3, 0x2a, 0x23, 0x9d, 0xae,
0x52, 0x31, 0x7f, 0x9f, 0x1b, 0x5d, 0xde, 0x6a, 0xfe, 0x40, 0x3b, 0x30, 0x27, 0x26, 0x10, 0xe5,
0xb8, 0x3c, 0xc5, 0x1c, 0xa5, 0xf4, 0xea, 0xe2, 0x87, 0x58, 0x4a, 0x17, 0xad, 0xc2, 0xbc, 0x4b,
0x38, 0xa6, 0x9e, 0xce, 0xb0, 0x7e, 0x43, 0x4d, 0xa8, 0x48, 0x80, 0xcd, 0x38, 0x8e, 0xb8, 0x24,
0xae, 0x07, 0xbe, 0xda, 0x05, 0xd6, 0xdd, 0x64, 0xe0, 0xb4, 0xe4, 0x44, 0x45, 0x3a, 0x42, 0x45,
0x08, 0xd1, 0x3e, 0xe4, 0x59, 0xdc, 0x57, 0x2c, 0x67, 0x25, 0xcb, 0x3b, 0x53, 0xb3, 0xd4, 0x7a,
0xd6, 0xc8, 0x82, 0xf9, 0x10, 0xe6, 0x24, 0x77, 0x54, 0x80, 0x85, 0x5e, 0xfb, 0xa3, 0xf6, 0xe1,
0xc7, 0xed, 0xca, 0x0b, 0xa8, 0x08, 0xf9, 0x1d, 0xab, 0xb5, 0xd5, 0xdd, 0x6b, 0xef, 0x56, 0x0c,
0xb1, 0x64, 0xf5, 0xda, 0x6d, 0xf1, 0x32, 0x83, 0x16, 0x61, 0xae, 0x65, 0x59, 0x87, 0x56, 0x25,
0x27, 0x50, 0xcd, 0xd6, 0x7e, 0x4b, 0xa2, 0x66, 0xc5, 0x5b, 0xef, 0xa8, 0xa9, 0x74, 0xe6, 0xcc,
0xef, 0x43, 0x3e, 0xf1, 0x86, 0x96, 0xa0, 0xd0, 0x6b, 0x77, 0x8e, 0x5a, 0x3b, 0x7b, 0xf7, 0xf7,
0x5a, 0xcd, 0xca, 0x0b, 0xa8, 0x04, 0x8b, 0xbd, 0xf6, 0x83, 0xd6, 0xd6, 0x7e, 0xf7, 0xc1, 0xa7,
0x15, 0x03, 0x55, 0xa0, 0xd8, 0xe9, 0x6e, 0xed, 0xb7, 0xec, 0x4e, 0x77, 0xab, 0xdb, 0xeb, 0x54,
0x66, 0xcc, 0x7f, 0x1a, 0x50, 0xce, 0xde, 0xc2, 0xa2, 0x95, 0xaa, 0xf6, 0x77, 0x4a, 0x22, 0x46,
0x03, 0x3f, 0x19, 0xab, 0xa4, 0xf0, 0xa1, 0x92, 0xa1, 0x4f, 0xe4, 0x28, 0x1e, 0x92, 0x88, 0x53,
0xdd, 0x46, 0xaf, 0xbd, 0x1c, 0xb2, 0x2e, 0xea, 0x47, 0x23, 0x55, 0x75, 0x39, 0xa4, 0x6c, 0xd5,
0x3e, 0x80, 0xa5, 0x73, 0xcb, 0x37, 0xba, 0x20, 0xfe, 0x31, 0x03, 0xe5, 0xec, 0xf4, 0x89, 0x7e,
0x0a, 0xc5, 0x47, 0xee, 0x31, 0xb3, 0x93, 0xe9, 0xd5, 0x90, 0x6c, 0xdf, 0x9f, 0x76, 0x7a, 0xad,
0x3f, 0x70, 0x8f, 0x99, 0x7e, 0x56, 0x74, 0x0b, 0x8f, 0xc6, 0x12, 0x61, 0xfd, 0x0c, 0x47, 0xfe,
0xc8, 0xfa, 0xcc, 0x0d, 0xad, 0x7f, 0x8a, 0x23, 0x3f, 0x6b, 0xfd, 0x6c, 0x2c, 0xa9, 0x7d, 0x08,
0x95, 0xf3, 0xee, 0x27, 0x85, 0x23, 0x97, 0x0a, 0x87, 0xd0, 0x3f, 0xef, 0xe0, 0x26, 0xfa, 0xe6,
0x6f, 0x0c, 0x58, 0xd9, 0x89, 0x08, 0xe6, 0xc9, 0xe5, 0x6e, 0x91, 0xcf, 0x62, 0xc2, 0xf8, 0xa4,
0x6f, 0xb1, 0x55, 0x98, 0x8f, 0xc8, 0x40, 0x54, 0x8f, 0xba, 0x40, 0xf5, 0x1b, 0xda, 0x84, 0x05,
0x3d, 0x8a, 0xeb, 0x36, 0xf2, 0xc6, 0xc4, 0x40, 0x59, 0x89, 0x86, 0xf9, 0x5f, 0x03, 0x56, 0x7a,
0xa1, 0xfb, 0x7f, 0x90, 0x99, 0xcb, 0x90, 0x99, 0xe2, 0x83, 0x31, 0xc5, 0x37, 0x77, 0x53, 0xbe,
0x68, 0x13, 0x0a, 0xb1, 0xa4, 0x2b, 0xbf, 0x78, 0xf5, 0xa7, 0xde, 0xc5, 0x0e, 0x74, 0x5f, 0x7c,
0x14, 0x1f, 0x60, 0x76, 0x62, 0x81, 0x82, 0x8b, 0x67, 0x33, 0x84, 0x95, 0x26, 0xf1, 0xc8, 0xf3,
0x0a, 0xfc, 0xe4, 0xbd, 0x9a, 0x43, 0x58, 0xde, 0x25, 0xfc, 0x2b, 0x73, 0xf7, 0x07, 0x03, 0x5e,
0xdc, 0xa7, 0x2c, 0x71, 0xc8, 0x6e, 0xec, 0x71, 0x36, 0xe3, 0x71, 0x15, 0xe6, 0x8f, 0xa9, 0x27,
0x12, 0xa5, 0x93, 0xac, 0xde, 0xc4, 0x34, 0x17, 0x8a, 0x6e, 0x26, 0x2e, 0x55, 0x7d, 0x53, 0xe6,
0x85, 0x40, 0xdc, 0xa6, 0xd2, 0x97, 0x58, 0xe4, 0xc1, 0x09, 0x49, 0xb6, 0x20, 0xe1, 0x5d, 0x21,
0x30, 0x9f, 0xc2, 0x4a, 0x96, 0x21, 0x0b, 0x03, 0x9f, 0x89, 0x71, 0x23, 0x9f, 0xfc, 0x23, 0xa3,
0xbb, 0xc9, 0x14, 0x65, 0x31, 0x52, 0x41, 0xdf, 0x84, 0x25, 0x9f, 0x7c, 0xc1, 0xed, 0x94, 0x6b,
0x15, 0x9f, 0x92, 0x10, 0x1f, 0x8d, 0xdc, 0x47, 0xb0, 0xda, 0xa4, 0x78, 0xe0, 0x07, 0xec, 0xab,
0x2b, 0x82, 0xef, 0x5d, 0xe2, 0x93, 0xc5, 0x1e, 0x67, 0xc2, 0x67, 0x10, 0xf3, 0x30, 0xe6, 0xa9,
0x29, 0x68, 0x51, 0x49, 0x7a, 0x11, 0xdd, 0xf8, 0x4b, 0x1e, 0x96, 0xc7, 0x1f, 0x00, 0x3c, 0x0a,
0x3c, 0x8f, 0x44, 0xe8, 0x8f, 0x06, 0x94, 0x32, 0xfd, 0x03, 0xd5, 0xaf, 0x89, 0xd4, 0x25, 0x8d,
0xa6, 0xf6, 0x5a, 0x82, 0x4f, 0xfd, 0x13, 0x55, 0x3f, 0x4c, 0xfe, 0x89, 0x32, 0x9b, 0xbf, 0xfc,
0xd7, 0x7f, 0x7e, 0x37, 0xf3, 0xa1, 0xf9, 0x6e, 0xe3, 0xf4, 0x6e, 0x43, 0x47, 0x80, 0x35, 0x9e,
0x8c, 0xa3, 0xf3, 0xb4, 0xa1, 0x36, 0xcf, 0x1a, 0x4f, 0xd4, 0xc3, 0xd3, 0xd1, 0xbf, 0x69, 0xf7,
0x46, 0x27, 0xf5, 0xaf, 0x06, 0x94, 0x32, 0x9d, 0xe5, 0x3a, 0x9a, 0x97, 0xb5, 0xa0, 0x49, 0x34,
0x3b, 0x92, 0xe6, 0xc1, 0xc6, 0xf6, 0x33, 0xd0, 0x6c, 0x3c, 0x49, 0x27, 0xed, 0xe9, 0x98, 0xf5,
0x97, 0x06, 0x94, 0x32, 0x3d, 0xe2, 0x3a, 0xd6, 0x97, 0x35, 0x93, 0x49, 0xac, 0x7f, 0x24, 0x59,
0x37, 0x6f, 0x3f, 0x07, 0xd6, 0xe8, 0xcf, 0x06, 0xc0, 0xb8, 0xbd, 0xa0, 0x6b, 0xbe, 0x1c, 0x2e,
0x34, 0xa1, 0xda, 0xe4, 0xd3, 0x95, 0x50, 0x45, 0xcf, 0x83, 0xea, 0x97, 0x06, 0x14, 0xd3, 0xe7,
0x1e, 0x5d, 0x33, 0x9d, 0x5e, 0xd2, 0xc1, 0x6a, 0xf5, 0x69, 0xe1, 0xaa, 0x9d, 0x98, 0x9b, 0x92,
0xfb, 0x77, 0xd1, 0xb3, 0xd4, 0x30, 0xfa, 0x9b, 0x01, 0x4b, 0xe7, 0x4e, 0x2c, 0xba, 0x73, 0xdd,
0x57, 0xe6, 0x65, 0x0d, 0x65, 0x52, 0x21, 0x3c, 0x94, 0x0c, 0x8f, 0xcc, 0x8f, 0x9e, 0x43, 0xf9,
0xba, 0x9a, 0xc1, 0x3d, 0xe3, 0xf6, 0xf6, 0x67, 0xf0, 0xaa, 0x13, 0x0c, 0xaf, 0x64, 0xbb, 0x9d,
0x7c, 0x34, 0xb0, 0x23, 0x71, 0x59, 0x1e, 0x19, 0x3f, 0xf9, 0xa1, 0x86, 0x0e, 0x02, 0x0f, 0xfb,
0x83, 0x7a, 0x10, 0x0d, 0x1a, 0x03, 0xe2, 0xcb, 0xab, 0xb4, 0xa1, 0x96, 0x70, 0x48, 0xd9, 0xc5,
0x3f, 0xb8, 0x37, 0x93, 0xe7, 0xfe, 0xbc, 0x04, 0xbf, 0xfb, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff,
0xe5, 0xf5, 0x02, 0xd0, 0x6d, 0x17, 0x00, 0x00,
}
|
ScalablyTyped/SlinkyTyped | a/aws-sdk/src/main/scala/typingsSlinky/awsSdk/iotthingsgraphMod/EntityFilter.scala | <gh_stars>10-100
package typingsSlinky.awsSdk.iotthingsgraphMod
import org.scalablytyped.runtime.StObject
import scala.scalajs.js
import scala.scalajs.js.`|`
import scala.scalajs.js.annotation.{JSGlobalScope, JSGlobal, JSImport, JSName, JSBracketAccess}
@js.native
trait EntityFilter extends StObject {
/**
* The name of the entity search filter field. REFERENCED_ENTITY_ID filters on entities that are used by the entity in the result set. For example, you can filter on the ID of a property that is used in a state.
*/
var name: js.UndefOr[EntityFilterName] = js.native
/**
* An array of string values for the search filter field. Multiple values function as AND criteria in the search.
*/
var value: js.UndefOr[EntityFilterValues] = js.native
}
object EntityFilter {
@scala.inline
def apply(): EntityFilter = {
val __obj = js.Dynamic.literal()
__obj.asInstanceOf[EntityFilter]
}
@scala.inline
implicit class EntityFilterMutableBuilder[Self <: EntityFilter] (val x: Self) extends AnyVal {
@scala.inline
def setName(value: EntityFilterName): Self = StObject.set(x, "name", value.asInstanceOf[js.Any])
@scala.inline
def setNameUndefined: Self = StObject.set(x, "name", js.undefined)
@scala.inline
def setValue(value: EntityFilterValues): Self = StObject.set(x, "value", value.asInstanceOf[js.Any])
@scala.inline
def setValueUndefined: Self = StObject.set(x, "value", js.undefined)
@scala.inline
def setValueVarargs(value: EntityFilterValue*): Self = StObject.set(x, "value", js.Array(value :_*))
}
}
|
AjayGhanwat/AllWork | SampleFirestore/app/src/main/java/com/bridgelabz/samplefirestore/AddData.java | package com.bridgelabz.samplefirestore;
import android.os.Bundle;
import android.support.annotation.NonNull;
import android.support.v7.app.AppCompatActivity;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.util.Log;
import android.view.View;
import android.widget.Button;
import android.widget.EditText;
import android.widget.Toast;
import com.google.android.gms.tasks.OnCompleteListener;
import com.google.android.gms.tasks.OnSuccessListener;
import com.google.android.gms.tasks.Task;
import com.google.firebase.firestore.CollectionReference;
import com.google.firebase.firestore.DocumentReference;
import com.google.firebase.firestore.DocumentSnapshot;
import com.google.firebase.firestore.EventListener;
import com.google.firebase.firestore.FirebaseFirestore;
import com.google.firebase.firestore.FirebaseFirestoreException;
import com.google.firebase.firestore.Query;
import com.google.firebase.firestore.QuerySnapshot;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
public class AddData extends AppCompatActivity {
EditText addTitle, addDesc;
Button StoreData, featchData;
RecyclerView recyclerView;
ArrayList<userData> datas, user;
ViewAdapter viewAdapter, adapter;
Date date = new Date();
String fDate = new SimpleDateFormat("yyyy-MM-dd").format(date);
private CollectionReference mRef = FirebaseFirestore.getInstance().collection("UserData").document("wmvpfoUxRvF2FiuF9flY").collection("Notes");
//private CollectionReference docRef = FirebaseFirestore.getInstance().collection("UserData").document("wmvpfoUxRvF2FiuF9flY").collection("Date");
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_add_data);
datas = new ArrayList<>();
user = new ArrayList<>();
addTitle = (EditText) findViewById(R.id.addTitle);
addDesc = (EditText) findViewById(R.id.addDesc);
StoreData = (Button) findViewById(R.id.StoreData);
featchData = (Button) findViewById(R.id.FeatchData);
recyclerView = (RecyclerView) findViewById(R.id.notes);
recyclerView.setLayoutManager(new LinearLayoutManager(this));
StoreData.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
final String title = addTitle.getText().toString();
final String desc = addDesc.getText().toString();
final String key = mRef.document().getId();
Map<String, Object> user = new HashMap<String, Object>();
user.put("Title", title);
user.put("Desc", desc);
user.put("Key", key);
user.put("Date", fDate);
mRef.document(key).set(user);
addTitle.setText("");
addDesc.setText("");
}
});
featchData.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(final View view) {
user.clear();
mRef.whereEqualTo("Date", "2017-10-07").get().addOnSuccessListener(new OnSuccessListener<QuerySnapshot>() {
@Override
public void onSuccess(QuerySnapshot documentSnapshots) {
for (DocumentSnapshot document : documentSnapshots.getDocuments()) {
userData data = document.toObject(userData.class);
user.add(data);
adapter = new ViewAdapter(user);
recyclerView.setAdapter(adapter);
recyclerView.invalidate();
adapter.notifyDataSetChanged();
}
}
});
}
});
}
@Override
protected void onStart() {
super.onStart();
mRef.addSnapshotListener(new EventListener<QuerySnapshot>() {
@Override
public void onEvent(QuerySnapshot documentSnapshots, FirebaseFirestoreException e) {
user.clear();
for (DocumentSnapshot document : documentSnapshots.getDocuments()) {
userData data = document.toObject(userData.class);
user.add(data);
adapter = new ViewAdapter(user);
recyclerView.setAdapter(adapter);
recyclerView.invalidate();
adapter.notifyDataSetChanged();
}
}
});
}
}
|
bpawluk/online-fsm | web/src/common-utils.js | <reponame>bpawluk/online-fsm<filename>web/src/common-utils.js
'use strict'
export let ArrayUtils = {
remove: function (element, array) {
let success = false;
if (array && array instanceof Array) {
for (let i = 0, len = array.length; i < len; i++) {
if (array[i] === element) {
array.splice(i, 1);
success = true;
}
}
}
return success;
}
};
export let ObjectUtils = {
forEachOwnProperty: function (object, action) {
for (let key in object) {
if (object.hasOwnProperty(key))
action(key, object[key]);
}
}
}
// 2D geometry helpers. Points and vectors are plain objects of shape {x, y};
// angles are in radians.
export let MathUtils = {
    // Euclidean distance between points a and b.
    distance: function (a, b) {
        let vectorX = b.x - a.x;
        let vectorY = b.y - a.y
        return Math.sqrt(vectorX * vectorX + vectorY * vectorY);
    },
    // True when a, b, c are (approximately) collinear. The expression is twice
    // the signed area of triangle abc, compared against `tolerance`
    // (default 0.01).
    arePointsColinear: function (a, b, c, tolerance) {
        tolerance = tolerance === undefined ? 0.01 : tolerance;
        return Math.abs(a.x * (b.y - c.y) + b.x * (c.y - a.y) + c.x * (a.y - b.y)) <= tolerance;
    },
    // Strict coordinate-wise equality of two points.
    arePointsEqual: function (a, b) {
        return a.x === b.x && a.y === b.y;
    },
    // True when `point` lies within `maxDistance` (default 0.01) of the
    // segment from a to b.
    isPointCloseToSegment: function (a, b, point, maxDistance) {
        maxDistance = maxDistance === undefined ? 0.01 : maxDistance;
        let x = point.x;
        let y = point.y;
        let dx = b.x - a.x;
        let dy = b.y - a.y;
        let closestPointOnLine = {};
        if (dx === 0 && dy === 0) {
            // Degenerate segment: both endpoints coincide, so a is the
            // closest point.
            closestPointOnLine = a;
        }
        else {
            // Scalar projection of (point - a) onto (b - a), normalized by the
            // squared segment length; gives the foot of the perpendicular.
            let direction = ((x - a.x) * dx + (y - a.y) * dy) / (dx * dx + dy * dy);
            closestPointOnLine.x = a.x + direction * (b.x - a.x);
            closestPointOnLine.y = a.y + direction * (b.y - a.y);
        }
        if (!this.isPointOnSegment(a, b, closestPointOnLine)) {
            // The perpendicular foot falls outside the segment's extent.
            return false;
        }
        else {
            // Distance from the query point to its projection on the segment.
            dx = x - closestPointOnLine.x;
            dy = y - closestPointOnLine.y;
            return Math.sqrt(dx * dx + dy * dy) <= maxDistance;
        }
    },
    // Bounding-box containment test. Only meaningful for points already known
    // to lie on the line through a and b (as used by isPointCloseToSegment).
    isPointOnSegment: function (a, b, point) {
        let minX = Math.min(a.x, b.x);
        let maxX = Math.max(a.x, b.x);
        let minY = Math.min(a.y, b.y);
        let maxY = Math.max(a.y, b.y);
        return point.x >= minX && point.x <= maxX && point.y >= minY && point.y <= maxY;
    },
    // Unit vector perpendicular to segment a-b; `reversed` selects the
    // opposite of the two possible normals. Throws on a zero-length segment.
    getNormalVector: function (a, b, reversed) {
        if (this.arePointsEqual(a, b)) {
            throw new Error("Cannot create normal vector from equal points");
        }
        let dx = b.x - a.x;
        let dy = b.y - a.y;
        let length = this.distance(a, b);
        if (reversed) {
            return {
                x: -dy / length,
                y: dx / length
            }
        }
        else {
            return {
                x: dy / length,
                y: -dx / length
            }
        }
    },
    // Vector from a to b. Throws when the points are equal.
    getVector: function (a, b) {
        if (this.arePointsEqual(a, b)) {
            throw new Error("Cannot create vector from equal points");
        }
        let vector = {};
        vector.x = b.x - a.x;
        vector.y = b.y - a.y;
        return vector;
    },
    // Unit-length vector pointing from a towards b.
    getUnitVector: function (a, b) {
        let vector = this.getVector(a, b);
        let length = Math.sqrt(vector.x * vector.x + vector.y * vector.y);
        vector.x = vector.x / length;
        vector.y = vector.y / length;
        return vector;
    },
    // Translates `vector` by `point` (component-wise sum); returns a new object.
    translateVector: function (vector, point) {
        return {
            x: vector.x + point.x,
            y: vector.y + point.y
        }
    },
    // Vector-by-scalar multiplication; returns a new object.
    vecByScalMul: function (vector, scalar) {
        return {
            x: vector.x * scalar,
            y: vector.y * scalar
        }
    },
    // True when `point` lies inside or on the circle. The first two checks are
    // cheap bounding-box rejections; the taxicab check accepts points that are
    // certainly inside before falling back to the exact squared comparison.
    isPointInCircle: function (point, center, radius) {
        let dx = Math.abs(center.x - point.x);
        if (dx > radius) {
            return false;
        }
        let dy = Math.abs(center.y - point.y);
        if (dy > radius) {
            return false
        }
        if (dx + dy <= radius) {
            return true
        }
        return dx * dx + dy * dy <= radius * radius
    },
    // Point on the circle of the given center/radius at `angle` radians
    // (standard parametrization: angle 0 is along +x).
    getPointOnCircleGivenAngle: function (center, radius, angle) {
        return {
            x: center.x + radius * Math.cos(angle),
            y: center.y + radius * Math.sin(angle)
        }
    },
    // Circumcenter of the circle through a, b, c, computed in coordinates
    // relative to c. NOTE(review): `det` is 0 when the three points are
    // collinear, producing Infinity/NaN coordinates — callers presumably
    // guard with arePointsColinear first; confirm.
    centerOfCircleFrom3Points: function (a, b, c) {
        let xa = a.x - c.x;
        let ya = a.y - c.y;
        let xb = b.x - c.x;
        let yb = b.y - c.y;
        let det = 2 * (xa * yb - xb * ya);
        let tempA = xa * xa + ya * ya;
        let tempB = xb * xb + yb * yb;
        let center = {};
        center.x = ((yb * tempA - ya * tempB) / det) + c.x;
        center.y = ((xa * tempB - xb * tempA) / det) + c.y;
        return center;
    },
    // Angle halfway along an arc from `start` to `end` (radians). When the arc
    // direction (`reversed`) goes the "long way" around, the raw midpoint is
    // shifted by pi to land on the correct side. NOTE(review): assumes angles
    // are normalized to (-pi, pi] — confirm against callers.
    getMidAngleOfArc: function(start, end, reversed){
        let mid = (start + end) / 2
        if((start <= end && reversed) || (start > end && !reversed)){
            if(mid <= 0){
                return mid + Math.PI;
            } else {
                return mid - Math.PI;
            }
        }
        return mid;
    },
    // Clamps `value` into the inclusive range [min, max].
    capValue: function(min, value, max) {
        return Math.min(Math.max(value, min), max);
    }
}
as/transcode-orchestrator | provider/provider.go | <reponame>as/transcode-orchestrator<filename>provider/provider.go<gh_stars>0
package provider
import (
"context"
"errors"
"fmt"
"sort"
"github.com/cbsinteractive/transcode-orchestrator/client/transcoding/job"
"github.com/cbsinteractive/transcode-orchestrator/config"
)
// providers is the package-level registry of provider factories, keyed by
// provider name. It is populated via Register; access is not synchronized,
// so registration is expected to happen during program initialization.
var providers = map[string]Factory{}

// Sentinel errors returned by the registry and by provider implementations.
var (
	// ErrRegistered is returned by Register when the name is already taken.
	ErrRegistered = errors.New("provider is already registered")
	// ErrNotFound is returned when no provider is registered under a name.
	ErrNotFound = errors.New("provider not found")
	// ErrConfig indicates a provider rejected its configuration.
	ErrConfig = errors.New("bad provider configuration")
	// ErrPreset indicates a preset is unknown to the provider.
	ErrPreset = errors.New("preset not found in provider")
)
// Provider knows how to manage jobs for media transcoding.
type Provider interface {
	// Create submits the job for transcoding and returns its initial status.
	Create(context.Context, *job.Job) (*job.Status, error)
	// Status reports the current status of a previously created job.
	Status(context.Context, *job.Job) (*job.Status, error)
	// Cancel stops the job identified by id.
	Cancel(ctx context.Context, id string) error
	// Healthcheck reports whether the provider is currently usable.
	Healthcheck() error
	// Capabilities describes what this provider supports.
	Capabilities() Capabilities
}
// Factory is the function responsible for creating the instance of a
// provider from the service configuration. It returns an error when the
// configuration is insufficient to build the provider.
type Factory func(cfg *config.Config) (Provider, error)

// InvalidConfigError is returned if a provider could not be configured properly.
type InvalidConfigError string

// JobNotFoundError is returned if a job with a given id could not be found by the provider.
type JobNotFoundError struct {
	// ID is the identifier of the job that could not be located.
	ID string
}
// Error implements the built-in error interface; the message is simply the
// string value of the InvalidConfigError itself.
func (err InvalidConfigError) Error() string {
	message := string(err)
	return message
}
// Error implements the built-in error interface, including the missing job's
// ID in the message. Fixes the grammar of the original message
// ("could not found" -> "could not find").
func (err JobNotFoundError) Error() string {
	return fmt.Sprintf("could not find job with id: %s", err.ID)
}
// Register adds a new provider factory to the internal registry under the
// given name. It returns ErrRegistered when the name is already in use,
// leaving the existing registration untouched.
func Register(name string, provider Factory) error {
	_, exists := providers[name]
	if exists {
		return ErrRegistered
	}
	providers[name] = provider
	return nil
}
// GetFactory looks up the list of registered providers and returns the
// factory function for the given provider name, if it's available. It
// returns ErrNotFound when no provider is registered under that name.
func GetFactory(name string) (Factory, error) {
	factory, ok := providers[name]
	if !ok {
		return nil, ErrNotFound
	}
	return factory, nil
}
// List returns the names of the registered providers that can be
// instantiated with the given configuration — factories that return an
// error are silently omitted — alphabetically ordered.
func List(c *config.Config) []string {
	providerNames := make([]string, 0, len(providers))
	for name, factory := range providers {
		// Each factory is invoked only to probe configurability; the built
		// provider instance is discarded.
		if _, err := factory(c); err == nil {
			providerNames = append(providerNames, name)
		}
	}
	sort.Strings(providerNames)
	return providerNames
}
// Describe describes the given provider. It includes information about
// the provider's capabilities and its current health state. An unknown
// provider name yields an error; a provider that cannot be built from the
// given configuration is reported as disabled rather than as an error.
func Describe(name string, c *config.Config) (*Description, error) {
	factory, err := GetFactory(name)
	if err != nil {
		return nil, err
	}
	desc := Description{Name: name}
	provider, err := factory(c)
	if err != nil {
		// Not configurable under c: report it as present but disabled.
		return &desc, nil
	}
	desc.Enabled = true
	desc.Capabilities = provider.Capabilities()
	if healthErr := provider.Healthcheck(); healthErr != nil {
		desc.Health = Health{OK: false, Message: healthErr.Error()}
	} else {
		desc.Health = Health{OK: true}
	}
	return &desc, nil
}
|
YoMama84/intergalactic | website/docs/data-display/d3-chart/examples/export-in-image.js | <filename>website/docs/data-display/d3-chart/examples/export-in-image.js
import React, { useState, useEffect } from 'react';
import { scaleLinear } from 'd3-scale';
import { Line, minMax, Plot, XAxis, YAxis } from '@semcore/d3-chart';
import { Flex } from '@semcore/flex-box';
import DropdownMenu from '@semcore/dropdown-menu';
import Button from '@semcore/button';
import FileExportXS from '@semcore/icon/lib/FileExport/xs';
// Image formats offered in the export dropdown.
const EXPORTS = ['PNG', 'JPEG', 'WEBP'];

// Demo chart with an "Export" button: after mount, the rendered SVG plot is
// serialized and rasterized into a data URL per format, and each dropdown
// item becomes a direct download link.
export default () => {
  const [visible, updateVisible] = useState(false);
  // One dropdown entry per format; `download`/`href` are filled in
  // asynchronously once rasterization completes in the effect below.
  const [linkElements, updateLinkElements] = useState(
    EXPORTS.map((name) => ({ key: name, children: name })),
  );
  const svg = React.createRef();
  const download = React.createRef();
  const MARGIN = 40;
  const width = 500;
  const height = 300;
  const xScale = scaleLinear()
    .range([MARGIN, width - MARGIN])
    .domain(minMax(data, 'x'));
  const yScale = scaleLinear()
    .range([height - MARGIN, MARGIN])
    .domain([0, 10]);
  useEffect(() => {
    // Serialize the mounted SVG once, then rasterize at 2x resolution for
    // each export format. NOTE(review): `linkElements` is mutated in place
    // before being re-set via spread; it works because the spread creates a
    // fresh array identity, but confirm this is intentional.
    const svgElement = svg.current;
    const svgString = getSVGString(svgElement);
    EXPORTS.forEach((name, ind) => {
      const format = name.toLowerCase();
      svgString2Image(svgString, 2 * width, 2 * height, format, save);
      // save is invoked asynchronously with the finished data URL.
      function save(image) {
        linkElements[ind] = {
          ...linkElements[ind],
          download: `image.${format}`,
          href: image,
        };
        updateLinkElements([...linkElements]);
      }
    });
  }, []);
  return (
    <Flex>
      <Plot ref={svg} data={data} scale={[xScale, yScale]} width={width} height={height}>
        <YAxis ticks={yScale.ticks()}>
          <YAxis.Ticks />
          <YAxis.Grid />
        </YAxis>
        <XAxis ticks={xScale.ticks()}>
          <XAxis.Ticks />
        </XAxis>
        <Line x="x" y="y">
          <Line.Dots display />
        </Line>
      </Plot>
      <DropdownMenu onVisibleChange={updateVisible}>
        <DropdownMenu.Trigger tag={Button}>
          <Button.Addon tag={FileExportXS} />
          <Button.Text>Export</Button.Text>
        </DropdownMenu.Trigger>
        <DropdownMenu.Popper wMax="257px">
          <DropdownMenu.List ref={download}>
            {EXPORTS.map((name, ind) => (
              <DropdownMenu.Item tag="a" {...linkElements[ind]} />
            ))}
          </DropdownMenu.List>
        </DropdownMenu.Popper>
      </DropdownMenu>
    </Flex>
  );
};
// Demo data: 20 points with x = 0..19 and a pseudo-random integer y in [0, 10].
// Bug fix: the original `Math.random().toFixed(1) * 10` multiplied a string by
// 10 and could yield floating-point noise (e.g. 3.0000000000000004); rounding
// the scaled value gives clean integers in the same range.
const data = Array(20)
  .fill({})
  .map((d, i) => ({
    x: i,
    y: Math.round(Math.random() * 10),
  }));
// Serializes a live SVG DOM node to a standalone SVG string: relevant CSS
// rules from the document's stylesheets are inlined into a <style> element so
// the exported image keeps its appearance outside the page.
function getSVGString(svgNode) {
  svgNode.setAttribute('xlink', 'http://www.w3.org/1999/xlink');
  const cssStyleText = getCSSStyles(svgNode);
  appendCSS(cssStyleText, svgNode);
  const serializer = new XMLSerializer();
  let svgString = serializer.serializeToString(svgNode);
  svgString = svgString.replace(/(\w+)?:?xlink=/g, 'xmlns:xlink='); // Fix root xlink without namespace
  svgString = svgString.replace(/NS\d+:href/g, 'xlink:href'); // Safari NS namespace fix
  return svgString;
  // The helpers below are function declarations, so they are hoisted above
  // the return statement and usable from the top of getSVGString.
  // Collects the cssText of every document stylesheet rule whose selector
  // mentions a class or id used by parentElement or any of its descendants.
  function getCSSStyles(parentElement) {
    const selectorTextArr = [];
    for (let c = 0; c < parentElement.classList.length; c++) {
      if (!contains('.' + parentElement.classList[c], selectorTextArr))
        selectorTextArr.push('.' + parentElement.classList[c]);
    }
    // Add Children element Ids and Classes to the list
    const nodes = parentElement.getElementsByTagName('*');
    for (let i = 0; i < nodes.length; i++) {
      const id = nodes[i].id;
      if (!contains('#' + id, selectorTextArr)) selectorTextArr.push('#' + id);
      const classes = nodes[i].classList;
      for (let c = 0; c < classes.length; c++)
        if (!contains('.' + classes[c], selectorTextArr)) selectorTextArr.push('.' + classes[c]);
    }
    // Extract CSS Rules
    let extractedCSSText = '';
    for (let i = 0; i < document.styleSheets.length; i++) {
      const s = document.styleSheets[i];
      try {
        if (!s.cssRules) continue;
      } catch (e) {
        // Cross-origin stylesheets throw on cssRules access; skip them.
        if (e.name !== 'SecurityError') throw e; // for Firefox
        continue;
      }
      const cssRules = s.cssRules;
      for (let r = 0; r < cssRules.length; r++) {
        // Keep a rule when its selector text contains any collected
        // class/id token (substring match, so it may over-collect).
        if (
          cssRules[r].selectorText &&
          selectorTextArr.some((s) => cssRules[r].selectorText.includes(s))
        )
          extractedCSSText += cssRules[r].cssText;
      }
    }
    return extractedCSSText;
    // True when str is already present in arr.
    function contains(str, arr) {
      return arr.indexOf(str) === -1 ? false : true;
    }
  }
  // Inserts cssText as a <style> element at the top of `element` so the
  // serialized SVG carries its styling inline.
  function appendCSS(cssText, element) {
    const styleElement = document.createElement('style');
    styleElement.setAttribute('type', 'text/css');
    styleElement.innerHTML = cssText;
    const refNode = element.hasChildNodes() ? element.children[0] : null;
    element.insertBefore(styleElement, refNode);
  }
}
// Rasterizes an SVG string to a data URL of the given format ('png' when
// omitted) at width x height pixels. The result is delivered asynchronously
// via `callback` once the intermediate Image has loaded.
function svgString2Image(svgString, width, height, format, callback) {
  format = format ? format : 'png';
  // Base64-encode the (possibly non-ASCII) SVG markup for a data URL.
  // NOTE(review): `unescape` is deprecated; it works for this classic
  // btoa/encodeURIComponent idiom but consider a TextEncoder-based
  // replacement.
  const imgsrc = 'data:image/svg+xml;base64,' + btoa(unescape(encodeURIComponent(svgString)));
  const canvas = document.createElement('canvas');
  const context = canvas.getContext('2d');
  canvas.width = width;
  canvas.height = height;
  const image = new Image();
  image.onload = function() {
    // Draw the decoded SVG onto the canvas, then export in the requested
    // format (browsers fall back to PNG for unsupported formats).
    context.clearRect(0, 0, width, height);
    context.drawImage(image, 0, 0, width, height);
    const img = canvas.toDataURL(`image/${format}`);
    callback(img);
  };
  image.src = imgsrc;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.