repo
stringlengths 1
191
⌀ | file
stringlengths 23
351
| code
stringlengths 0
5.32M
| file_length
int64 0
5.32M
| avg_line_length
float64 0
2.9k
| max_line_length
int64 0
288k
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/janitor/JanitorEmailNotifier.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.janitor;
import com.amazonaws.services.simpleemail.AmazonSimpleEmailServiceClient;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.Resource.CleanupState;
import com.netflix.simianarmy.aws.AWSEmailNotifier;
import org.apache.commons.lang.Validate;
import org.apache.commons.lang.StringUtils;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * The email notifier implemented for Janitor Monkey. Groups all marked resources by
 * owner email address, sends one SES notification per address, and records the
 * notification time on every resource that was notified.
 */
public class JanitorEmailNotifier extends AWSEmailNotifier {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(JanitorEmailNotifier.class);

    /** Map key used when a resource has no owner email at all and no default email is configured. */
    private static final String UNKNOWN_EMAIL = "UNKNOWN";

    /**
     * If the scheduled termination date is within 2 hours of notification date + headsup days,
     * we don't need to extend the termination date.
     */
    private static final int HOURS_IN_MARGIN = 2;

    private final String region;
    private final String defaultEmail;
    private final List<String> ccEmails;
    private final JanitorResourceTracker resourceTracker;
    private final JanitorEmailBuilder emailBuilder;
    private final MonkeyCalendar calendar;
    private final int daysBeforeTermination;
    private final String sourceEmail;
    private final String ownerEmailDomain;

    // Resources whose notification could not be addressed (invalid owner email and no
    // default email configured). Rebuilt on every sendNotifications() call and exposed
    // read-only through getInvalidEmailToResources().
    private final Map<String, Collection<Resource>> invalidEmailToResources =
            new HashMap<String, Collection<Resource>>();

    /**
     * The Interface Context. Supplies all collaborators and configuration values the
     * notifier needs; implemented by the monkey context that constructs this notifier.
     */
    public interface Context {
        /**
         * Gets the Amazon Simple Email Service client.
         * @return the Amazon Simple Email Service client
         */
        AmazonSimpleEmailServiceClient sesClient();

        /**
         * Gets the source email the notifier uses to send email.
         * @return the source email
         */
        String sourceEmail();

        /**
         * Gets the default email the notifier sends to when there is no owner specified for a resource.
         * @return the default email
         */
        String defaultEmail();

        /**
         * Gets the number of days a notification is sent before the expected termination date.
         * @return the number of days a notification is sent before the expected termination date.
         */
        int daysBeforeTermination();

        /**
         * Gets the region the notifier is running in.
         * @return the region the notifier is running in.
         */
        String region();

        /** Gets the janitor resource tracker.
         * @return the janitor resource tracker
         */
        JanitorResourceTracker resourceTracker();

        /** Gets the janitor email builder.
         * @return the janitor email builder
         */
        JanitorEmailBuilder emailBuilder();

        /** Gets the calendar.
         * @return the calendar
         */
        MonkeyCalendar calendar();

        /** Gets the cc email addresses.
         * @return the cc email addresses
         */
        String[] ccEmails();

        /** Get the default domain of email addresses.
         * @return the default domain of email addresses
         */
        String ownerEmailDomain();
    }

    /**
     * Constructor.
     * @param ctx the context.
     */
    public JanitorEmailNotifier(Context ctx) {
        super(ctx.sesClient());
        this.region = ctx.region();
        this.defaultEmail = ctx.defaultEmail();
        this.daysBeforeTermination = ctx.daysBeforeTermination();
        this.resourceTracker = ctx.resourceTracker();
        this.emailBuilder = ctx.emailBuilder();
        this.calendar = ctx.calendar();
        // Copy the cc array into our own list so callers cannot mutate it afterwards.
        this.ccEmails = new ArrayList<String>();
        String[] ctxCCs = ctx.ccEmails();
        if (ctxCCs != null) {
            for (String ccEmail : ctxCCs) {
                this.ccEmails.add(ccEmail);
            }
        }
        this.sourceEmail = ctx.sourceEmail();
        this.ownerEmailDomain = ctx.ownerEmailDomain();
    }

    /**
     * Gets all the resources that are marked and no notifications have been sent. Send email notifications
     * for these resources. If there is a valid email address in the ownerEmail field of the resource, send
     * to that address. Otherwise send to the default email address.
     */
    public void sendNotifications() {
        validateEmails();
        Map<String, Collection<Resource>> emailToResources = new HashMap<String, Collection<Resource>>();
        invalidEmailToResources.clear();
        for (Resource r : getMarkedResources()) {
            if (r.isOptOutOfJanitor()) {
                LOGGER.info(String.format("Resource %s is opted out of Janitor Monkey so no notification is sent.",
                        r.getId()));
                continue;
            }
            if (canNotify(r)) {
                String email = r.getOwnerEmail();
                // A bare user name (no '@') is completed with the configured default domain.
                if (email != null && !email.contains("@")
                        && StringUtils.isNotBlank(this.ownerEmailDomain)) {
                    email = String.format("%s@%s", email, this.ownerEmailDomain);
                }
                if (!isValidEmail(email)) {
                    if (defaultEmail != null) {
                        // Fall back to the default address rather than dropping the notification.
                        LOGGER.info(String.format("Email %s is not valid, send to the default email address %s",
                                email, defaultEmail));
                        putEmailAndResource(emailToResources, defaultEmail, r);
                    } else {
                        if (email == null) {
                            email = UNKNOWN_EMAIL;
                        }
                        // No valid destination at all; remember the resource for reporting.
                        LOGGER.info(String.format("Email %s is not valid and default email is not set for resource %s",
                                email, r.getId()));
                        putEmailAndResource(invalidEmailToResources, email, r);
                    }
                } else {
                    putEmailAndResource(emailToResources, email, r);
                }
            } else {
                LOGGER.debug(String.format("Not the time to send notification for resource %s", r.getId()));
            }
        }
        emailBuilder.setEmailToResources(emailToResources);
        Date now = calendar.now().getTime();
        // One email per owner address, covering all of that owner's resources; each notified
        // resource's notification time is persisted so it is not notified again.
        for (Map.Entry<String, Collection<Resource>> entry : emailToResources.entrySet()) {
            String email = entry.getKey();
            String emailBody = emailBuilder.buildEmailBody(email);
            String subject = buildEmailSubject(email);
            sendEmail(email, subject, emailBody);
            for (Resource r : entry.getValue()) {
                LOGGER.debug(String.format("Notification is sent for resource %s", r.getId()));
                r.setNotificationTime(now);
                resourceTracker.addOrUpdate(r);
            }
            LOGGER.info(String.format("Email notification has been sent to %s for %d resources.",
                    email, entry.getValue().size()));
        }
    }

    /**
     * Gets the marked resources for notification. Allow overriding in subclasses.
     * @return the marked resources
     */
    protected Collection<Resource> getMarkedResources() {
        return resourceTracker.getResources(null, CleanupState.MARKED, region);
    }

    /** Fails fast if the configured default or cc addresses are themselves malformed. */
    private void validateEmails() {
        if (defaultEmail != null) {
            Validate.isTrue(isValidEmail(defaultEmail), String.format("Default email %s is invalid", defaultEmail));
        }
        if (ccEmails != null) {
            for (String ccEmail : ccEmails) {
                Validate.isTrue(isValidEmail(ccEmail), String.format("CC email %s is invalid", ccEmail));
            }
        }
    }

    @Override
    public String buildEmailSubject(String email) {
        return String.format("Janitor Monkey Notification for %s", email);
    }

    /**
     * Decides if it is time for sending notification for the resource. This method can be
     * overridden in subclasses so notifications can be send earlier or later.
     * @param resource the resource
     * @return true if it is OK to send notification now, otherwise false.
     */
    protected boolean canNotify(Resource resource) {
        Validate.notNull(resource);
        if (resource.getState() != CleanupState.MARKED || resource.isOptOutOfJanitor()) {
            return false;
        }
        Date notificationTime = resource.getNotificationTime();
        // We don't want to send notification too early (since things may change) or too late (we need
        // to give owners enough time to take actions). The acceptable window for the expected
        // termination time is:
        //   [businessDay(now + daysBeforeTermination) - HOURS_IN_MARGIN hours,
        //    businessDay(now + daysBeforeTermination + 1)]
        Date windowStart = new Date(new DateTime(
                calendar.getBusinessDay(calendar.now().getTime(), daysBeforeTermination).getTime())
                .minusHours(HOURS_IN_MARGIN).getMillis());
        Date windowEnd = calendar.getBusinessDay(calendar.now().getTime(), daysBeforeTermination + 1);
        Date terminationDate = resource.getExpectedTerminationTime();
        // Notify only when no notification has been sent since the resource was (re)marked:
        // a null/zero notification time means never notified, and a mark time after the
        // notification time means the resource was remarked after the last notification.
        if (notificationTime == null
                || notificationTime.getTime() == 0
                || resource.getMarkTime().after(notificationTime)) { // remarked after a notification
            if (!terminationDate.before(windowStart) && !terminationDate.after(windowEnd)) {
                // The expected termination time is close enough for sending notification
                return true;
            } else if (terminationDate.before(windowStart)) {
                // The expected termination date is too close. To give the owner time to take possible actions,
                // we extend the expected termination time here.
                LOGGER.info(String.format("It is less than %d days before the expected termination date,"
                        + " of resource %s, extending the termination time to %s.",
                        daysBeforeTermination, resource.getId(), windowStart));
                resource.setExpectedTerminationTime(windowStart);
                resourceTracker.addOrUpdate(resource);
                return true;
            } else {
                // Termination is still beyond the window; too early to notify.
                return false;
            }
        }
        return false;
    }

    /**
     * Gets the map from invalid email address to the resources that were supposed to be sent to the address.
     *
     * @return the map from invalid address to resources that failed to be delivered
     */
    public Map<String, Collection<Resource>> getInvalidEmailToResources() {
        return Collections.unmodifiableMap(invalidEmailToResources);
    }

    @Override
    public String[] getCcAddresses(String to) {
        return ccEmails.toArray(new String[ccEmails.size()]);
    }

    @Override
    public String getSourceAddress(String to) {
        return sourceEmail;
    }

    /** Adds the resource to the collection mapped to the email, creating the collection on first use. */
    private void putEmailAndResource(
            Map<String, Collection<Resource>> map, String email, Resource resource) {
        Collection<Resource> resources = map.get(email);
        if (resources == null) {
            resources = new ArrayList<Resource>();
            map.put(email, resources);
        }
        resources.add(resource);
    }
}
| 11,932
| 38.909699
| 119
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/SimpleDBRecorder.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.AmazonClientException;
import com.amazonaws.services.simpledb.AmazonSimpleDB;
import com.amazonaws.services.simpledb.model.Attribute;
import com.amazonaws.services.simpledb.model.CreateDomainRequest;
import com.amazonaws.services.simpledb.model.Item;
import com.amazonaws.services.simpledb.model.ListDomainsResult;
import com.amazonaws.services.simpledb.model.PutAttributesRequest;
import com.amazonaws.services.simpledb.model.ReplaceableAttribute;
import com.amazonaws.services.simpledb.model.SelectRequest;
import com.amazonaws.services.simpledb.model.SelectResult;
import com.netflix.simianarmy.EventType;
import com.netflix.simianarmy.MonkeyRecorder;
import com.netflix.simianarmy.MonkeyType;
import com.netflix.simianarmy.NamedType;
import com.netflix.simianarmy.basic.BasicRecorderEvent;
import com.netflix.simianarmy.client.aws.AWSClient;
/**
 * The Class SimpleDBRecorder. Records events to and fetches events from an Amazon SimpleDB
 * domain (default SIMIAN_ARMY).
 */
@SuppressWarnings("serial")
public class SimpleDBRecorder implements MonkeyRecorder {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(SimpleDBRecorder.class);

    private final AmazonSimpleDB simpleDBClient;

    private final String region;

    /** The domain. */
    private final String domain;

    /**
     * The Enum Keys. Attribute names reserved for the event's own metadata; event fields
     * whose names collide with these are skipped when recording.
     */
    private enum Keys {
        /** The event id. */
        id,
        /** The event time. */
        eventTime,
        /** The region. */
        region,
        /** The record type. */
        recordType,
        /** The monkey type. */
        monkeyType,
        /** The event type. */
        eventType;

        /** The Constant KEYSET. */
        public static final Set<String> KEYSET = Collections.unmodifiableSet(new HashSet<String>() {
            {
                for (Keys k : Keys.values()) {
                    add(k.toString());
                }
            }
        });
    }

    /**
     * Instantiates a new simple db recorder.
     *
     * @param awsClient
     *            the AWS client
     * @param domain
     *            the domain
     */
    public SimpleDBRecorder(AWSClient awsClient, String domain) {
        Validate.notNull(awsClient);
        Validate.notNull(domain);
        this.simpleDBClient = awsClient.sdbClient();
        this.region = awsClient.region();
        this.domain = domain;
    }

    /**
     * simple client. abstracted to aid testing
     *
     * @return the amazon simple db
     */
    protected AmazonSimpleDB sdbClient() {
        return simpleDBClient;
    }

    /**
     * Enum to value. Converts an enum to "name|type" string
     *
     * @param e
     *            the e
     * @return the string
     */
    public static String enumToValue(NamedType e) {
        return String.format("%s|%s", e.name(), e.getClass().getName());
    }

    /**
     * Value to enum. Converts a "name|type" string back to an enum.
     *
     * @param value
     *            the value
     * @return the enum
     * @throws RuntimeException if the value is malformed, the class is missing, is not an
     *             enum, or is not assignable to the requested type
     */
    public static <T extends NamedType> T valueToEnum(
            Class<T> type, String value) {
        // parts = [enum value, enum class type]
        String[] parts = value.split("\\|", 2);
        if (parts.length < 2) {
            throw new RuntimeException("value " + value + " does not appear to be an internal enum format");
        }
        Class<?> enumClass;
        try {
            enumClass = Class.forName(parts[1]);
        } catch (ClassNotFoundException e) {
            // Preserve the original exception as the cause for diagnosability.
            throw new RuntimeException("class for enum value " + value + " not found", e);
        }
        if (!enumClass.isEnum()) {
            throw new RuntimeException("value " + value + " does not appear to be of an enum type");
        }
        if (!type.isAssignableFrom(enumClass)) {
            throw new RuntimeException("value " + value + " cannot be assigned to a variable of this type: "
                    + type.getCanonicalName());
        }
        @SuppressWarnings("rawtypes")
        Class<? extends Enum> enumType = enumClass.asSubclass(Enum.class);
        @SuppressWarnings("unchecked")
        T enumValue = (T) Enum.valueOf(enumType, parts[0]);
        return enumValue;
    }

    /** {@inheritDoc} */
    @Override
    public Event newEvent(MonkeyType monkeyType, EventType eventType, String reg, String id) {
        return new BasicRecorderEvent(monkeyType, eventType, reg, id);
    }

    /** {@inheritDoc} */
    @Override
    public void recordEvent(Event evt) {
        String evtTime = String.valueOf(evt.eventTime().getTime());
        List<ReplaceableAttribute> attrs = new LinkedList<ReplaceableAttribute>();
        attrs.add(new ReplaceableAttribute(Keys.id.name(), evt.id(), true));
        attrs.add(new ReplaceableAttribute(Keys.eventTime.name(), evtTime, true));
        attrs.add(new ReplaceableAttribute(Keys.region.name(), evt.region(), true));
        attrs.add(new ReplaceableAttribute(Keys.recordType.name(), "MonkeyEvent", true));
        attrs.add(new ReplaceableAttribute(Keys.monkeyType.name(), enumToValue(evt.monkeyType()), true));
        attrs.add(new ReplaceableAttribute(Keys.eventType.name(), enumToValue(evt.eventType()), true));
        // Extra event fields become attributes too, unless empty or colliding with reserved keys.
        for (Map.Entry<String, String> pair : evt.fields().entrySet()) {
            if (pair.getValue() == null || pair.getValue().equals("") || Keys.KEYSET.contains(pair.getKey())) {
                continue;
            }
            attrs.add(new ReplaceableAttribute(pair.getKey(), pair.getValue(), true));
        }
        // Let pk contain the timestamp so that the same resource can have multiple events.
        String pk = String.format("%s-%s-%s-%s", evt.monkeyType().name(), evt.id(), region, evtTime);
        PutAttributesRequest putReq = new PutAttributesRequest(domain, pk, attrs);
        sdbClient().putAttributes(putReq);
    }

    /**
     * Escapes a value for embedding in a SimpleDB select expression. SimpleDB has no
     * parameterized queries; per its query syntax, a single quote inside a quoted string
     * must be doubled, otherwise the value breaks (or injects into) the query.
     *
     * @param value the raw value
     * @return the value safe to place between single quotes in a select expression
     */
    private static String escapeValue(String value) {
        return value.replace("'", "''");
    }

    /**
     * Find events.
     *
     * @param queryMap
     *            the query map; keys are attribute names (must come from trusted code,
     *            as SimpleDB cannot quote identifiers here), values are matched exactly
     * @param after
     *            the start time to query for all events after
     * @return the list
     */
    protected List<Event> findEvents(Map<String, String> queryMap, long after) {
        StringBuilder query = new StringBuilder(
                String.format("select * from `%s` where region = '%s'", domain, escapeValue(region)));
        for (Map.Entry<String, String> pair : queryMap.entrySet()) {
            query.append(String.format(" and %s = '%s'", pair.getKey(), escapeValue(pair.getValue())));
        }
        query.append(String.format(" and eventTime > '%d'", after));
        // always return with most recent record first
        query.append(" order by eventTime desc");

        List<Event> list = new LinkedList<Event>();
        SelectRequest request = new SelectRequest(query.toString());
        request.setConsistentRead(Boolean.TRUE);
        SelectResult result = new SelectResult();
        // Page through results; a null next token means the last page was consumed.
        do {
            result = sdbClient().select(request.withNextToken(result.getNextToken()));
            for (Item item : result.getItems()) {
                Map<String, String> fields = new HashMap<String, String>();
                Map<String, String> res = new HashMap<String, String>();
                // Split attributes into reserved event metadata (res) and free-form fields.
                for (Attribute attr : item.getAttributes()) {
                    if (Keys.KEYSET.contains(attr.getName())) {
                        res.put(attr.getName(), attr.getValue());
                    } else {
                        fields.put(attr.getName(), attr.getValue());
                    }
                }
                String eid = res.get(Keys.id.name());
                String ereg = res.get(Keys.region.name());
                MonkeyType monkeyType = valueToEnum(MonkeyType.class, res.get(Keys.monkeyType.name()));
                EventType eventType = valueToEnum(EventType.class, res.get(Keys.eventType.name()));
                long eventTime = Long.parseLong(res.get(Keys.eventTime.name()));
                list.add(new BasicRecorderEvent(monkeyType, eventType, ereg, eid, eventTime).addFields(fields));
            }
        } while (result.getNextToken() != null);
        return list;
    }

    /** {@inheritDoc} */
    @Override
    public List<Event> findEvents(Map<String, String> query, Date after) {
        return findEvents(query, after.getTime());
    }

    /** {@inheritDoc} */
    @Override
    public List<Event> findEvents(MonkeyType monkeyType, Map<String, String> query, Date after) {
        Map<String, String> copy = new LinkedHashMap<String, String>(query);
        copy.put(Keys.monkeyType.name(), enumToValue(monkeyType));
        return findEvents(copy, after);
    }

    /** {@inheritDoc} */
    @Override
    public List<Event> findEvents(MonkeyType monkeyType, EventType eventType, Map<String, String> query, Date after) {
        Map<String, String> copy = new LinkedHashMap<String, String>(query);
        copy.put(Keys.monkeyType.name(), enumToValue(monkeyType));
        copy.put(Keys.eventType.name(), enumToValue(eventType));
        return findEvents(copy, after);
    }

    /**
     * Creates the SimpleDB domain, if it does not already exist.
     */
    public void init() {
        try {
            if (this.region == null || this.region.equals("region-null")) {
                // This is a mock with an invalid region; avoid a slow timeout
                LOGGER.debug("Region=null; skipping SimpleDB domain creation");
                return;
            }
            ListDomainsResult listDomains = sdbClient().listDomains();
            for (String d : listDomains.getDomainNames()) {
                if (d.equals(domain)) {
                    LOGGER.debug("SimpleDB domain found: {}", domain);
                    return;
                }
            }
            LOGGER.info("Creating SimpleDB domain: {}", domain);
            CreateDomainRequest createDomainRequest = new CreateDomainRequest(
                    domain);
            sdbClient().createDomain(createDomainRequest);
        } catch (AmazonClientException e) {
            // Domain auto-creation is best-effort; a failure here should not abort startup.
            LOGGER.warn("Error while trying to auto-create SimpleDB domain", e);
        }
    }
}
| 11,108
| 37.175258
| 118
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/RDSRecorder.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws;
import com.amazonaws.AmazonClientException;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.simianarmy.EventType;
import com.netflix.simianarmy.MonkeyRecorder;
import com.netflix.simianarmy.MonkeyType;
import com.netflix.simianarmy.basic.BasicRecorderEvent;
import com.zaxxer.hikari.HikariDataSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import java.io.IOException;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
/**
 * The Class RDSRecorder. Records events to and fetches events from a RDS table (default SIMIAN_ARMY).
 */
public class RDSRecorder implements MonkeyRecorder {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(RDSRecorder.class);

    /** Shared JSON mapper; ObjectMapper is thread-safe once configured, so one instance suffices. */
    private static final ObjectMapper MAPPER = new ObjectMapper();

    private final String region;

    /** The table. */
    private final String table;

    /** the jdbcTemplate */
    JdbcTemplate jdbcTemplate = null;

    public static final String FIELD_ID = "eventId";
    public static final String FIELD_EVENT_TIME = "eventTime";
    public static final String FIELD_MONKEY_TYPE = "monkeyType";
    public static final String FIELD_EVENT_TYPE = "eventType";
    public static final String FIELD_REGION = "region";
    public static final String FIELD_DATA_JSON = "dataJson";

    /**
     * Instantiates a new RDS recorder backed by a small Hikari connection pool.
     *
     * @param dbDriver the JDBC driver class name
     * @param dbUser the database user
     * @param dbPass the database password
     * @param dbUrl the JDBC url
     * @param dbTable the table events are written to and read from
     * @param region the region this recorder records events for
     */
    public RDSRecorder(String dbDriver, String dbUser,
            String dbPass, String dbUrl, String dbTable, String region) {
        HikariDataSource dataSource = new HikariDataSource();
        dataSource.setDriverClassName(dbDriver);
        dataSource.setJdbcUrl(dbUrl);
        dataSource.setUsername(dbUser);
        dataSource.setPassword(dbPass);
        dataSource.setMaximumPoolSize(2);
        this.jdbcTemplate = new JdbcTemplate(dataSource);
        this.table = dbTable;
        this.region = region;
    }

    /**
     * Instantiates a new RDS recorder. This constructor is intended
     * for unit testing.
     *
     * @param jdbcTemplate the template (typically a mock) to run statements through
     * @param table the table name
     * @param region the region this recorder records events for
     */
    public RDSRecorder(JdbcTemplate jdbcTemplate, String table, String region) {
        this.jdbcTemplate = jdbcTemplate;
        this.table = table;
        this.region = region;
    }

    public JdbcTemplate getJdbcTemplate() {
        return jdbcTemplate;
    }

    /** {@inheritDoc} */
    @Override
    public Event newEvent(MonkeyType monkeyType, EventType eventType, String reg, String id) {
        return new BasicRecorderEvent(monkeyType, eventType, reg, id);
    }

    /** {@inheritDoc} */
    @Override
    public void recordEvent(Event evt) {
        String evtTime = String.valueOf(evt.eventTime().getTime());
        String name = String.format("%s-%s-%s-%s", evt.monkeyType().name(), evt.id(), region, evtTime);
        String json;
        try {
            // Free-form event fields are stored as one JSON column.
            json = MAPPER.writeValueAsString(evt.fields());
        } catch (JsonProcessingException e) {
            LOGGER.error("ERROR generating JSON when saving resource " + name, e);
            return;
        }
        LOGGER.debug(String.format("Saving event %s to RDS table %s", name, table));
        StringBuilder sb = new StringBuilder();
        sb.append("insert into ").append(table);
        sb.append(" (");
        sb.append(FIELD_ID).append(",");
        sb.append(FIELD_EVENT_TIME).append(",");
        sb.append(FIELD_MONKEY_TYPE).append(",");
        sb.append(FIELD_EVENT_TYPE).append(",");
        sb.append(FIELD_REGION).append(",");
        sb.append(FIELD_DATA_JSON).append(") values (?,?,?,?,?,?)");
        LOGGER.debug(String.format("Insert statement is '%s'", sb));
        int updated = this.jdbcTemplate.update(sb.toString(),
                evt.id(),
                evt.eventTime().getTime(),
                SimpleDBRecorder.enumToValue(evt.monkeyType()),
                SimpleDBRecorder.enumToValue(evt.eventType()),
                evt.region(),
                json);
        LOGGER.debug(String.format("%d rows inserted", updated));
    }

    /** {@inheritDoc} */
    @Override
    public List<Event> findEvents(Map<String, String> query, Date after) {
        return findEvents(null, null, query, after);
    }

    /** {@inheritDoc} */
    @Override
    public List<Event> findEvents(MonkeyType monkeyType, Map<String, String> query, Date after) {
        return findEvents(monkeyType, null, query, after);
    }

    /** {@inheritDoc} */
    @Override
    public List<Event> findEvents(MonkeyType monkeyType, EventType eventType, Map<String, String> query, Date after) {
        ArrayList<Object> args = new ArrayList<>();
        StringBuilder sqlquery = new StringBuilder(
                String.format("select * from %s where region = ?", table));
        args.add(region);
        if (monkeyType != null) {
            sqlquery.append(String.format(" and %s = ?", FIELD_MONKEY_TYPE));
            args.add(SimpleDBRecorder.enumToValue(monkeyType));
        }
        if (eventType != null) {
            sqlquery.append(String.format(" and %s = ?", FIELD_EVENT_TYPE));
            args.add(SimpleDBRecorder.enumToValue(eventType));
        }
        // Free-form fields live inside the JSON column, so they are matched with LIKE
        // against the key: "value" fragment the serializer produces.
        for (Map.Entry<String, String> pair : query.entrySet()) {
            sqlquery.append(String.format(" and %s like ?", FIELD_DATA_JSON));
            args.add((String.format("%s: \"%s\"", pair.getKey(), pair.getValue())));
        }
        sqlquery.append(String.format(" and %s > ? order by %s desc", FIELD_EVENT_TIME, FIELD_EVENT_TIME));
        args.add(Long.valueOf(after.getTime()));
        LOGGER.debug(String.format("Query is '%s'", sqlquery));
        List<Event> events = jdbcTemplate.query(sqlquery.toString(), args.toArray(), new RowMapper<Event>() {
            public Event mapRow(ResultSet rs, int rowNum) throws SQLException {
                return mapEvent(rs);
            }
        });
        return events;
    }

    /**
     * Builds an event from the current row, merging the deserialized JSON column back
     * into the event's fields. Returns null only if the JSON cannot be parsed before
     * the event is constructed (the parse error itself is logged, not rethrown).
     */
    private Event mapEvent(ResultSet rs) throws SQLException {
        String json = rs.getString(FIELD_DATA_JSON);
        Event event = null;
        try {
            String id = rs.getString(FIELD_ID);
            MonkeyType monkeyType = SimpleDBRecorder.valueToEnum(MonkeyType.class, rs.getString(FIELD_MONKEY_TYPE));
            EventType eventType = SimpleDBRecorder.valueToEnum(EventType.class, rs.getString(FIELD_EVENT_TYPE));
            String region = rs.getString(FIELD_REGION);
            long time = rs.getLong(FIELD_EVENT_TIME);
            event = new BasicRecorderEvent(monkeyType, eventType, region, id, time);
            TypeReference<Map<String, String>> typeRef = new TypeReference<Map<String, String>>() { };
            Map<String, String> map = MAPPER.readValue(json, typeRef);
            for (Map.Entry<String, String> entry : map.entrySet()) {
                event.addField(entry.getKey(), entry.getValue());
            }
        } catch (IOException ie) {
            LOGGER.error("Error parsing resource from json", ie);
        }
        return event;
    }

    /**
     * Creates the RDS table, if it does not already exist.
     */
    public void init() {
        try {
            if (this.region == null || this.region.equals("region-null")) {
                // This is a mock with an invalid region; avoid a slow timeout
                LOGGER.debug("Region=null; skipping RDS table creation");
                return;
            }
            LOGGER.info("Creating RDS table: {}", table);
            String sql = String.format("create table if not exists %s ("
                    + " %s varchar(255),"
                    + " %s BIGINT,"
                    + " %s varchar(255),"
                    + " %s varchar(255),"
                    + " %s varchar(255),"
                    + " %s varchar(4096) )",
                    table,
                    FIELD_ID,
                    FIELD_EVENT_TIME,
                    FIELD_MONKEY_TYPE,
                    FIELD_EVENT_TYPE,
                    FIELD_REGION,
                    FIELD_DATA_JSON);
            LOGGER.debug("Create SQL is: '{}'", sql);
            jdbcTemplate.execute(sql);
        } catch (Exception e) {
            // Table auto-creation is best-effort. Note: JdbcTemplate throws Spring's
            // DataAccessException, never AmazonClientException (which the original code
            // caught and so never actually intercepted any failure here).
            LOGGER.warn("Error while trying to auto-create RDS table", e);
        }
    }
}
| 9,320
| 37.358025
| 118
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/AWSEmailNotifier.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws;
import java.util.regex.Pattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.services.simpleemail.AmazonSimpleEmailServiceClient;
import com.amazonaws.services.simpleemail.model.Body;
import com.amazonaws.services.simpleemail.model.Content;
import com.amazonaws.services.simpleemail.model.Destination;
import com.amazonaws.services.simpleemail.model.Message;
import com.amazonaws.services.simpleemail.model.SendEmailRequest;
import com.amazonaws.services.simpleemail.model.SendEmailResult;
import com.netflix.simianarmy.MonkeyEmailNotifier;
/**
 * The class implements the monkey email notifier using AWS simple email service
 * for sending email.
 */
public abstract class AWSEmailNotifier implements MonkeyEmailNotifier {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(AWSEmailNotifier.class);

    // Local part, optional dot-separated extra local segments, then dot-separated
    // domain labels ending in an alphabetic TLD of length >= 2.
    // Fix: the first group's leading dot was previously unescaped ("(.[...]"), so it
    // matched ANY character there — e.g. "a b@x.com" passed validation.
    private static final String EMAIL_PATTERN =
            "^[_A-Za-z0-9-\\+\\.#]+(\\.[_A-Za-z0-9-#]+)*@"
            + "[A-Za-z0-9-]+(\\.[A-Za-z0-9]+)*(\\.[A-Za-z]{2,})$";

    /** Compiled once and reused; Pattern is immutable and thread-safe. */
    private final Pattern emailPattern;
    private final AmazonSimpleEmailServiceClient sesClient;

    /**
     * The constructor.
     *
     * @param sesClient the SES client used to send email; may be null only if
     *        {@link #sendEmail} is never invoked
     */
    public AWSEmailNotifier(AmazonSimpleEmailServiceClient sesClient) {
        super();
        this.sesClient = sesClient;
        this.emailPattern = Pattern.compile(EMAIL_PATTERN);
    }

    @Override
    public void sendEmail(String to, String subject, String body) {
        if (!isValidEmail(to)) {
            LOGGER.error(String.format("The destination email address %s is not valid, no email is sent.", to));
            return;
        }
        if (sesClient == null) {
            String msg = "The email client is not set.";
            LOGGER.error(msg);
            throw new RuntimeException(msg);
        }
        Destination destination = new Destination().withToAddresses(to)
                .withCcAddresses(getCcAddresses(to));
        Content subjectContent = new Content(subject);
        Content bodyContent = new Content();
        Body msgBody = new Body(bodyContent);
        // The body is sent as HTML, not plain text.
        msgBody.setHtml(new Content(body));
        Message msg = new Message(subjectContent, msgBody);
        String sourceAddress = getSourceAddress(to);
        SendEmailRequest request = new SendEmailRequest(sourceAddress, destination, msg);
        // Bounces go back to the sending address.
        request.setReturnPath(sourceAddress);
        LOGGER.debug(String.format("Sending email with subject '%s' to %s",
                subject, to));
        SendEmailResult result = null;
        try {
            result = sesClient.sendEmail(request);
        } catch (Exception e) {
            throw new RuntimeException(String.format("Failed to send email to %s", to), e);
        }
        LOGGER.info(String.format("Email to %s, result id is %s, subject is %s",
                to, result.getMessageId(), subject));
    }

    @Override
    public boolean isValidEmail(String email) {
        if (email == null) {
            return false;
        }
        if (!emailPattern.matcher(email).matches()) {
            LOGGER.error(String.format("Invalid email address: %s", email));
            return false;
        }
        // The sample/default placeholder address must never be used as a real destination.
        if (email.equals("foo@bar.com")) {
            LOGGER.error(String.format("Email address not changed from default; treating as invalid: %s", email));
            return false;
        }
        return true;
    }
}
| 4,078
| 36.768519
| 114
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/AWSResource.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws;
import java.util.*;
import org.apache.commons.lang.Validate;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import com.netflix.simianarmy.NamedType;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
/**
 * The class represents general AWS resources that are managed by janitor monkey.
 *
 * <p>A resource carries its identity (id, type, region), ownership and lifecycle
 * timestamps, plus two string maps: {@code additionalFields}, which are
 * serialized together with the resource, and AWS {@code tags}, which are not
 * (see {@link #getFieldToValueMap()}). Two resources are considered equal when
 * id, resourceType and region match.</p>
 */
public class AWSResource implements Resource {
    private String id;
    private ResourceType resourceType;
    private String region;
    private String ownerEmail;
    private String description;
    private String terminationReason;
    private CleanupState state;
    private Date expectedTerminationTime;
    private Date actualTerminationTime;
    private Date notificationTime;
    private Date launchTime;
    private Date markTime;
    private boolean optOutOfJanitor;
    private String awsResourceState;
    /** The field name for resourceId. **/
    public static final String FIELD_RESOURCE_ID = "resourceId";
    /** The field name for resourceType. **/
    public static final String FIELD_RESOURCE_TYPE = "resourceType";
    /** The field name for region. **/
    public static final String FIELD_REGION = "region";
    /** The field name for owner email. **/
    public static final String FIELD_OWNER_EMAIL = "ownerEmail";
    /** The field name for description. **/
    public static final String FIELD_DESCRIPTION = "description";
    /** The field name for state. **/
    public static final String FIELD_STATE = "state";
    /** The field name for terminationReason. **/
    public static final String FIELD_TERMINATION_REASON = "terminationReason";
    /** The field name for expectedTerminationTime. **/
    public static final String FIELD_EXPECTED_TERMINATION_TIME = "expectedTerminationTime";
    /** The field name for actualTerminationTime. **/
    public static final String FIELD_ACTUAL_TERMINATION_TIME = "actualTerminationTime";
    /** The field name for notificationTime. **/
    public static final String FIELD_NOTIFICATION_TIME = "notificationTime";
    /** The field name for launchTime. **/
    public static final String FIELD_LAUNCH_TIME = "launchTime";
    /** The field name for markTime. **/
    public static final String FIELD_MARK_TIME = "markTime";
    /** The field name for isOptOutOfJanitor. **/
    public static final String FIELD_OPT_OUT_OF_JANITOR = "optOutOfJanitor";
    /** The field name for awsResourceState. **/
    public static final String FIELD_AWS_RESOURCE_STATE = "awsResourceState";
    /** The date format used to print or parse a Date value. **/
    public static final DateTimeFormatter DATE_FORMATTER = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss");
    /** The map from name to value for additional fields used by the resource. **/
    private final Map<String, String> additionalFields = new HashMap<String, String>();
    /** The map from AWS tag key to value for the resource. **/
    private final Map<String, String> tags = new HashMap<String, String>();
    /**
     * {@inheritDoc}
     *
     * <p>Null-valued fields are omitted; dates are rendered via
     * {@link #DATE_FORMATTER}; the opt-out flag is always written.</p>
     */
    @Override
    public Map<String, String> getFieldToValueMap() {
        Map<String, String> fieldToValue = new HashMap<String, String>();
        putToMapIfNotNull(fieldToValue, FIELD_RESOURCE_ID, getId());
        putToMapIfNotNull(fieldToValue, FIELD_RESOURCE_TYPE, getResourceType());
        putToMapIfNotNull(fieldToValue, FIELD_REGION, getRegion());
        putToMapIfNotNull(fieldToValue, FIELD_OWNER_EMAIL, getOwnerEmail());
        putToMapIfNotNull(fieldToValue, FIELD_DESCRIPTION, getDescription());
        putToMapIfNotNull(fieldToValue, FIELD_STATE, getState());
        putToMapIfNotNull(fieldToValue, FIELD_TERMINATION_REASON, getTerminationReason());
        putToMapIfNotNull(fieldToValue, FIELD_EXPECTED_TERMINATION_TIME, printDate(getExpectedTerminationTime()));
        putToMapIfNotNull(fieldToValue, FIELD_ACTUAL_TERMINATION_TIME, printDate(getActualTerminationTime()));
        putToMapIfNotNull(fieldToValue, FIELD_NOTIFICATION_TIME, printDate(getNotificationTime()));
        putToMapIfNotNull(fieldToValue, FIELD_LAUNCH_TIME, printDate(getLaunchTime()));
        putToMapIfNotNull(fieldToValue, FIELD_MARK_TIME, printDate(getMarkTime()));
        putToMapIfNotNull(fieldToValue, FIELD_AWS_RESOURCE_STATE, getAWSResourceState());
        // Additional fields are serialized while tags are not. So if any tags need to be
        // serialized as well, put them to additional fields.
        fieldToValue.put(FIELD_OPT_OUT_OF_JANITOR, String.valueOf(isOptOutOfJanitor()));
        fieldToValue.putAll(additionalFields);
        return fieldToValue;
    }
    /**
     * Parse a map from field name to value to a resource.
     * Inverse of {@link #getFieldToValueMap()}: known field names populate the
     * corresponding properties; any unrecognized name becomes an additional field.
     * @param fieldToValue the map from field name to value
     * @return the resource that is de-serialized from the map
     */
    public static AWSResource parseFieldtoValueMap(Map<String, String> fieldToValue) {
        AWSResource resource = new AWSResource();
        for (Map.Entry<String, String> field : fieldToValue.entrySet()) {
            String name = field.getKey();
            String value = field.getValue();
            if (name.equals(FIELD_RESOURCE_ID)) {
                resource.setId(value);
            } else if (name.equals(FIELD_RESOURCE_TYPE)) {
                resource.setResourceType(AWSResourceType.valueOf(value));
            } else if (name.equals(FIELD_REGION)) {
                resource.setRegion(value);
            } else if (name.equals(FIELD_OWNER_EMAIL)) {
                resource.setOwnerEmail(value);
            } else if (name.equals(FIELD_DESCRIPTION)) {
                resource.setDescription(value);
            } else if (name.equals(FIELD_STATE)) {
                resource.setState(CleanupState.valueOf(value));
            } else if (name.equals(FIELD_TERMINATION_REASON)) {
                resource.setTerminationReason(value);
            } else if (name.equals(FIELD_EXPECTED_TERMINATION_TIME)) {
                resource.setExpectedTerminationTime(new Date(DATE_FORMATTER.parseDateTime(value).getMillis()));
            } else if (name.equals(FIELD_ACTUAL_TERMINATION_TIME)) {
                resource.setActualTerminationTime(new Date(DATE_FORMATTER.parseDateTime(value).getMillis()));
            } else if (name.equals(FIELD_NOTIFICATION_TIME)) {
                resource.setNotificationTime(new Date(DATE_FORMATTER.parseDateTime(value).getMillis()));
            } else if (name.equals(FIELD_LAUNCH_TIME)) {
                resource.setLaunchTime(new Date(DATE_FORMATTER.parseDateTime(value).getMillis()));
            } else if (name.equals(FIELD_MARK_TIME)) {
                resource.setMarkTime(new Date(DATE_FORMATTER.parseDateTime(value).getMillis()));
            } else if (name.equals(FIELD_AWS_RESOURCE_STATE)) {
                resource.setAWSResourceState(value);
            } else if (name.equals(FIELD_OPT_OUT_OF_JANITOR)) {
                resource.setOptOutOfJanitor("true".equals(value));
            } else {
                // put all other fields into additional fields
                resource.setAdditionalField(name, value);
            }
        }
        return resource;
    }
    /** Gets the raw state string reported by AWS for this resource, or null if unknown. */
    public String getAWSResourceState() {
        return awsResourceState;
    }
    /** Sets the raw state string reported by AWS for this resource. */
    public void setAWSResourceState(String awsState) {
        this.awsResourceState = awsState;
    }
    /** {@inheritDoc} */
    @Override
    public String getId() {
        return id;
    }
    /** {@inheritDoc} */
    @Override
    public void setId(String id) {
        this.id = id;
    }
    /** {@inheritDoc} */
    @Override
    public Resource withId(String resourceId) {
        setId(resourceId);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public ResourceType getResourceType() {
        return resourceType;
    }
    /** {@inheritDoc} */
    @Override
    public void setResourceType(ResourceType resourceType) {
        this.resourceType = resourceType;
    }
    /** {@inheritDoc} */
    @Override
    public Resource withResourceType(ResourceType type) {
        setResourceType(type);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public String getRegion() {
        return region;
    }
    /** {@inheritDoc} */
    @Override
    public void setRegion(String region) {
        this.region = region;
    }
    /** {@inheritDoc} */
    @Override
    public Resource withRegion(String resourceRegion) {
        setRegion(resourceRegion);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public String getOwnerEmail() {
        return ownerEmail;
    }
    /** {@inheritDoc} */
    @Override
    public void setOwnerEmail(String ownerEmail) {
        this.ownerEmail = ownerEmail;
    }
    /** {@inheritDoc} */
    @Override
    public Resource withOwnerEmail(String resourceOwner) {
        setOwnerEmail(resourceOwner);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public String getDescription() {
        return description;
    }
    /** {@inheritDoc} */
    @Override
    public void setDescription(String description) {
        this.description = description;
    }
    /** {@inheritDoc} */
    @Override
    public Resource withDescription(String resourceDescription) {
        setDescription(resourceDescription);
        return this;
    }
    /** {@inheritDoc} Returns a defensive copy; mutating it does not affect this resource. */
    @Override
    public Date getLaunchTime() {
        return getCopyOfDate(launchTime);
    }
    /** {@inheritDoc} Stores a defensive copy of the given date. */
    @Override
    public void setLaunchTime(Date launchTime) {
        this.launchTime = getCopyOfDate(launchTime);
    }
    /** {@inheritDoc} */
    @Override
    public Resource withLaunchTime(Date resourceLaunchTime) {
        setLaunchTime(resourceLaunchTime);
        return this;
    }
    /** {@inheritDoc} Returns a defensive copy; mutating it does not affect this resource. */
    @Override
    public Date getMarkTime() {
        return getCopyOfDate(markTime);
    }
    /** {@inheritDoc} Stores a defensive copy of the given date. */
    @Override
    public void setMarkTime(Date markTime) {
        this.markTime = getCopyOfDate(markTime);
    }
    /** {@inheritDoc} */
    @Override
    public Resource withMarkTime(Date resourceMarkTime) {
        setMarkTime(resourceMarkTime);
        return this;
    }
    /** {@inheritDoc} Returns a defensive copy; mutating it does not affect this resource. */
    @Override
    public Date getExpectedTerminationTime() {
        return getCopyOfDate(expectedTerminationTime);
    }
    /** {@inheritDoc} Stores a defensive copy of the given date. */
    @Override
    public void setExpectedTerminationTime(Date expectedTerminationTime) {
        this.expectedTerminationTime = getCopyOfDate(expectedTerminationTime);
    }
    /** {@inheritDoc} */
    @Override
    public Resource withExpectedTerminationTime(Date resourceExpectedTerminationTime) {
        setExpectedTerminationTime(resourceExpectedTerminationTime);
        return this;
    }
    /** {@inheritDoc} Returns a defensive copy; mutating it does not affect this resource. */
    @Override
    public Date getActualTerminationTime() {
        return getCopyOfDate(actualTerminationTime);
    }
    /** {@inheritDoc} Stores a defensive copy of the given date. */
    @Override
    public void setActualTerminationTime(Date actualTerminationTime) {
        this.actualTerminationTime = getCopyOfDate(actualTerminationTime);
    }
    /** {@inheritDoc} */
    @Override
    public Resource withActualTerminationTime(Date resourceActualTerminationTime) {
        setActualTerminationTime(resourceActualTerminationTime);
        return this;
    }
    /** {@inheritDoc} Returns a defensive copy; mutating it does not affect this resource. */
    @Override
    public Date getNotificationTime() {
        return getCopyOfDate(notificationTime);
    }
    /** {@inheritDoc} Stores a defensive copy of the given date. */
    @Override
    public void setNotificationTime(Date notificationTime) {
        this.notificationTime = getCopyOfDate(notificationTime);
    }
    /** {@inheritDoc} The method name (including its misspelling) is fixed by the Resource interface. */
    @Override
    public Resource withNnotificationTime(Date resourceNotificationTime) {
        setNotificationTime(resourceNotificationTime);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public CleanupState getState() {
        return state;
    }
    /** {@inheritDoc} */
    @Override
    public void setState(CleanupState state) {
        this.state = state;
    }
    /** {@inheritDoc} */
    @Override
    public Resource withState(CleanupState resourceState) {
        setState(resourceState);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public String getTerminationReason() {
        return terminationReason;
    }
    /** {@inheritDoc} */
    @Override
    public void setTerminationReason(String terminationReason) {
        this.terminationReason = terminationReason;
    }
    /** {@inheritDoc} */
    @Override
    public Resource withTerminationReason(String resourceTerminationReason) {
        setTerminationReason(resourceTerminationReason);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public boolean isOptOutOfJanitor() {
        return optOutOfJanitor;
    }
    /** {@inheritDoc} */
    @Override
    public void setOptOutOfJanitor(boolean optOutOfJanitor) {
        this.optOutOfJanitor = optOutOfJanitor;
    }
    /** {@inheritDoc} */
    @Override
    public Resource withOptOutOfJanitor(boolean optOut) {
        setOptOutOfJanitor(optOut);
        return this;
    }
    /** Null-safe defensive copy of a Date, so internal state cannot be mutated by callers. */
    private static Date getCopyOfDate(Date date) {
        if (date == null) {
            return null;
        }
        return new Date(date.getTime());
    }
    /** Puts the string value into the map only when it is non-null; key and map must not be null. */
    private static void putToMapIfNotNull(Map<String, String> map, String key, String value) {
        Validate.notNull(map);
        Validate.notNull(key);
        if (value != null) {
            map.put(key, value);
        }
    }
    /** Overload for enum values; stores the enum's name() so it round-trips via valueOf(). */
    private static void putToMapIfNotNull(Map<String, String> map, String key, Enum<?> value) {
        Validate.notNull(map);
        Validate.notNull(key);
        if (value != null) {
            map.put(key, value.name());
        }
    }
    /** Overload for NamedType values; stores the type's name(). */
    private static void putToMapIfNotNull(Map<String, String> map, String key, NamedType value) {
        Validate.notNull(map);
        Validate.notNull(key);
        if (value != null) {
            map.put(key, value.name());
        }
    }
    /** Formats a date using {@link #DATE_FORMATTER}; returns null for a null date. */
    private static String printDate(Date date) {
        if (date == null) {
            return null;
        }
        return DATE_FORMATTER.print(date.getTime());
    }
    /** Sets an additional (serialized) field; both name and value must be non-null. */
    @Override
    public Resource setAdditionalField(String fieldName, String fieldValue) {
        Validate.notNull(fieldName);
        Validate.notNull(fieldValue);
        putToMapIfNotNull(additionalFields, fieldName, fieldValue);
        return this;
    }
    @Override
    public String getAdditionalField(String fieldName) {
        return additionalFields.get(fieldName);
    }
    // NOTE(review): returns the live key set backed by the internal map; mutating
    // it would alter this resource — confirm callers treat it as read-only.
    @Override
    public Collection<String> getAdditionalFieldNames() {
        return additionalFields.keySet();
    }
    /**
     * Creates a copy of this resource: all simple properties, the AWS state,
     * additional fields and tags are copied into a new AWSResource instance.
     */
    @Override
    public Resource cloneResource() {
        Resource clone = new AWSResource()
        .withActualTerminationTime(getActualTerminationTime())
        .withDescription(getDescription())
        .withExpectedTerminationTime(getExpectedTerminationTime())
        .withId(getId())
        .withLaunchTime(getLaunchTime())
        .withMarkTime(getMarkTime())
        .withNnotificationTime(getNotificationTime())
        .withOwnerEmail(getOwnerEmail())
        .withRegion(getRegion())
        .withResourceType(getResourceType())
        .withState(getState())
        .withTerminationReason(getTerminationReason())
        .withOptOutOfJanitor(isOptOutOfJanitor());
        ((AWSResource) clone).setAWSResourceState(awsResourceState);
        ((AWSResource) clone).additionalFields.putAll(additionalFields);
        for (String key : this.getAllTagKeys()) {
            clone.setTag(key, this.getTag(key));
        }
        return clone;
    }
    /** {@inheritDoc} */
    @Override
    public void setTag(String key, String value) {
        tags.put(key, value);
    }
    /** {@inheritDoc} */
    @Override
    public String getTag(String key) {
        return tags.get(key);
    }
    /** {@inheritDoc} */
    // NOTE(review): returns the live key set backed by the internal tag map;
    // mutating it would alter this resource — confirm callers treat it as read-only.
    @Override
    public Collection<String> getAllTagKeys() {
        return tags.keySet();
    }
    @Override
    public String toString() {
        return "AWSResource{" +
                "id='" + id + '\'' +
                ", resourceType=" + resourceType +
                ", region='" + region + '\'' +
                ", ownerEmail='" + ownerEmail + '\'' +
                ", description='" + description + '\'' +
                ", terminationReason='" + terminationReason + '\'' +
                ", state=" + state +
                ", expectedTerminationTime=" + expectedTerminationTime +
                ", actualTerminationTime=" + actualTerminationTime +
                ", notificationTime=" + notificationTime +
                ", launchTime=" + launchTime +
                ", markTime=" + markTime +
                ", optOutOfJanitor=" + optOutOfJanitor +
                ", awsResourceState='" + awsResourceState + '\'' +
                ", additionalFields=" + additionalFields +
                ", tags=" + tags +
                '}';
    }
    @Override
    public boolean equals(Object o) {
        // consider two resources to be equivalent if id, resourceType and region match
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        AWSResource that = (AWSResource) o;
        return Objects.equals(id, that.id) &&
                Objects.equals(resourceType, that.resourceType) &&
                Objects.equals(region, that.region);
    }
    @Override
    public int hashCode() {
        // consider two resources to be equivalent if id, resourceType and region match
        return Objects.hash(id, resourceType, region);
    }
}
| 18,329
| 31.849462
| 114
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/STSAssumeRoleSessionCredentialsProvider.java
|
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws;
import java.util.Date;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.AWSSessionCredentials;
import com.amazonaws.auth.BasicSessionCredentials;
import com.amazonaws.services.securitytoken.AWSSecurityTokenService;
import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClient;
import com.amazonaws.services.securitytoken.model.AssumeRoleRequest;
import com.amazonaws.services.securitytoken.model.AssumeRoleResult;
import com.amazonaws.services.securitytoken.model.Credentials;
/**
 * AWSCredentialsProvider implementation that uses the AWS Security Token
 * Service to assume a Role and create temporary, short-lived sessions to use
 * for authentication.
 *
 * <p>Thread-safety: {@link #getCredentials()} and {@link #refresh()} are
 * synchronized so that concurrent callers never observe a half-initialized
 * credentials/expiration pair (previously a second thread could see a non-null
 * {@code sessionCredentials} while {@code sessionCredentialsExpiration} was
 * still null, causing an NPE in {@code needsNewSession()}).</p>
 */
public class STSAssumeRoleSessionCredentialsProvider implements AWSCredentialsProvider {
    /** Default duration for started sessions. */
    public static final int DEFAULT_DURATION_SECONDS = 900;
    /** Time before expiry within which credentials will be renewed. */
    private static final int EXPIRY_TIME_MILLIS = 60 * 1000;
    /** The client for starting STS sessions. */
    private final AWSSecurityTokenService securityTokenService;
    /** The current session credentials. Guarded by {@code this}. */
    private AWSSessionCredentials sessionCredentials;
    /** The expiration time for the current session credentials. Guarded by {@code this}. */
    private Date sessionCredentialsExpiration;
    /** The arn of the role to be assumed. */
    private final String roleArn;
    /**
     * Constructs a new STSAssumeRoleSessionCredentialsProvider, which makes a
     * request to the AWS Security Token Service (STS), uses the provided
     * {@link #roleArn} to assume a role and then request short lived session
     * credentials, which will then be returned by this class's
     * {@link #getCredentials()} method.
     * @param roleArn
     *            The AWS ARN of the Role to be assumed.
     */
    public STSAssumeRoleSessionCredentialsProvider(String roleArn) {
        this.roleArn = roleArn;
        securityTokenService = new AWSSecurityTokenServiceClient();
    }
    /**
     * Constructs a new STSAssumeRoleSessionCredentialsProvider, which makes a
     * request to the AWS Security Token Service (STS), uses the provided
     * {@link #roleArn} to assume a role and then request short lived session
     * credentials, which will then be returned by this class's
     * {@link #getCredentials()} method.
     * @param roleArn
     *            The AWS ARN of the Role to be assumed.
     * @param clientConfiguration
     *            The AWS ClientConfiguration to use when making AWS API requests.
     */
    public STSAssumeRoleSessionCredentialsProvider(String roleArn, ClientConfiguration clientConfiguration) {
        this.roleArn = roleArn;
        securityTokenService = new AWSSecurityTokenServiceClient(clientConfiguration);
    }
    /**
     * Constructs a new STSAssumeRoleSessionCredentialsProvider, which will use
     * the specified long lived AWS credentials to make a request to the AWS
     * Security Token Service (STS), uses the provided {@link #roleArn} to
     * assume a role and then request short lived session credentials, which
     * will then be returned by this class's {@link #getCredentials()} method.
     * @param longLivedCredentials
     *            The main AWS credentials for a user's account.
     * @param roleArn
     *            The AWS ARN of the Role to be assumed.
     */
    public STSAssumeRoleSessionCredentialsProvider(AWSCredentials longLivedCredentials, String roleArn) {
        this(longLivedCredentials, roleArn, new ClientConfiguration());
    }
    /**
     * Constructs a new STSAssumeRoleSessionCredentialsProvider, which will use
     * the specified long lived AWS credentials to make a request to the AWS
     * Security Token Service (STS), uses the provided {@link #roleArn} to
     * assume a role and then request short lived session credentials, which
     * will then be returned by this class's {@link #getCredentials()} method.
     * @param longLivedCredentials
     *            The main AWS credentials for a user's account.
     * @param roleArn
     *            The AWS ARN of the Role to be assumed.
     * @param clientConfiguration
     *            Client configuration connection parameters.
     */
    public STSAssumeRoleSessionCredentialsProvider(AWSCredentials longLivedCredentials, String roleArn,
            ClientConfiguration clientConfiguration) {
        this.roleArn = roleArn;
        securityTokenService = new AWSSecurityTokenServiceClient(longLivedCredentials, clientConfiguration);
    }
    /**
     * Constructs a new STSAssumeRoleSessionCredentialsProvider, which will use
     * the specified credentials provider (which vends long lived AWS
     * credentials) to make a request to the AWS Security Token Service (STS),
     * uses the provided {@link #roleArn} to assume a role and then request
     * short lived session credentials, which will then be returned by this
     * class's {@link #getCredentials()} method.
     * @param longLivedCredentialsProvider
     *            Credentials provider for the main AWS credentials for a user's
     *            account.
     * @param roleArn
     *            The AWS ARN of the Role to be assumed.
     */
    public STSAssumeRoleSessionCredentialsProvider(AWSCredentialsProvider longLivedCredentialsProvider,
            String roleArn) {
        this.roleArn = roleArn;
        securityTokenService = new AWSSecurityTokenServiceClient(longLivedCredentialsProvider);
    }
    /**
     * Constructs a new STSAssumeRoleSessionCredentialsProvider, which will use
     * the specified credentials provider (which vends long lived AWS
     * credentials) to make a request to the AWS Security Token Service (STS),
     * uses the provided {@link #roleArn} to assume a role and then request
     * short lived session credentials, which will then be returned by this
     * class's {@link #getCredentials()} method.
     * @param longLivedCredentialsProvider
     *            Credentials provider for the main AWS credentials for a user's
     *            account.
     * @param roleArn
     *            The AWS ARN of the Role to be assumed.
     * @param clientConfiguration
     *            Client configuration connection parameters.
     */
    public STSAssumeRoleSessionCredentialsProvider(AWSCredentialsProvider longLivedCredentialsProvider, String roleArn,
            ClientConfiguration clientConfiguration) {
        this.roleArn = roleArn;
        securityTokenService = new AWSSecurityTokenServiceClient(longLivedCredentialsProvider, clientConfiguration);
    }
    /**
     * {@inheritDoc}
     * Starts a new STS session if none exists yet or the current one is close
     * to expiring. Synchronized to keep the credentials/expiration pair consistent.
     */
    @Override
    public synchronized AWSCredentials getCredentials() {
        if (needsNewSession()) {
            startSession();
        }
        return sessionCredentials;
    }
    /** {@inheritDoc} Forces a new STS session to be started. */
    @Override
    public synchronized void refresh() {
        startSession();
    }
    /**
     * Starts a new session by sending a request to the AWS Security Token
     * Service (STS) to assume a Role using the long lived AWS credentials. This
     * class then vends the short lived session credentials for the assumed Role
     * sent back from STS. Must be called while holding this provider's lock.
     */
    private void startSession() {
        AssumeRoleResult assumeRoleResult = securityTokenService.assumeRole(new AssumeRoleRequest()
                .withRoleArn(roleArn).withDurationSeconds(DEFAULT_DURATION_SECONDS).withRoleSessionName("SimianArmy"));
        Credentials stsCredentials = assumeRoleResult.getCredentials();
        sessionCredentials = new BasicSessionCredentials(stsCredentials.getAccessKeyId(),
                stsCredentials.getSecretAccessKey(), stsCredentials.getSessionToken());
        sessionCredentialsExpiration = stsCredentials.getExpiration();
    }
    /**
     * Returns true if a new STS session needs to be started. A new STS session
     * is needed when no session has been started yet, or if the last session is
     * within {@link #EXPIRY_TIME_MILLIS} milliseconds of expiring.
     * Must be called while holding this provider's lock.
     * @return True if a new STS session needs to be started.
     */
    private boolean needsNewSession() {
        if (sessionCredentials == null) {
            return true;
        }
        long timeRemaining = sessionCredentialsExpiration.getTime() - System.currentTimeMillis();
        return timeRemaining < EXPIRY_TIME_MILLIS;
    }
}
| 9,080
| 43.297561
| 119
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/AWSResourceType.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws;
import com.netflix.simianarmy.ResourceType;
/**
 * The enum of resource types of AWS.
 *
 * <p>Constants are persisted by their name() (see AWSResource's serialization
 * via valueOf/name), so renaming a constant breaks previously stored records.</p>
 */
public enum AWSResourceType implements ResourceType {
    /** AWS instance. */
    INSTANCE,
    /** AWS EBS volume. */
    EBS_VOLUME,
    /** AWS EBS snapshot. */
    EBS_SNAPSHOT,
    /** AWS auto scaling group. */
    ASG,
    /** AWS launch configuration. */
    LAUNCH_CONFIG,
    /** AWS S3 bucket. */
    S3_BUCKET,
    /** AWS security group. */
    SECURITY_GROUP,
    /** AWS Amazon Machine Image. **/
    IMAGE,
    /** AWS Elastic Load Balancer. **/
    ELB
}
| 1,242
| 26.021739
| 79
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/ImageJanitor.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.janitor.AbstractJanitor;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * The Janitor responsible for image (AMI) cleanup.
 * (The previous javadoc said "launch configuration cleanup", apparently
 * copied from another janitor; this janitor deletes images.)
 */
public class ImageJanitor extends AbstractJanitor {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(ImageJanitor.class);
    /** The AWS client used to delete images. */
    private final AWSClient awsClient;
    /**
     * Constructor.
     * @param awsClient the AWS client
     * @param ctx the context
     */
    public ImageJanitor(AWSClient awsClient, AbstractJanitor.Context ctx) {
        super(ctx, AWSResourceType.IMAGE);
        Validate.notNull(awsClient);
        this.awsClient = awsClient;
    }
    /** No post-mark action is needed for images. */
    @Override
    protected void postMark(Resource resource) {
    }
    /** Deletes the marked image via the AWS client. */
    @Override
    protected void cleanup(Resource resource) {
        LOGGER.info(String.format("Deleting image %s", resource.getId()));
        awsClient.deleteImage(resource.getId());
    }
    /** No post-cleanup action is needed for images. */
    @Override
    protected void postCleanup(Resource resource) {
    }
}
| 1,906
| 28.338462
| 85
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/InstanceJanitor.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.janitor.AbstractJanitor;
/**
 * The Janitor responsible for auto scaling instance cleanup.
 */
public class InstanceJanitor extends AbstractJanitor {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(InstanceJanitor.class);

    /** The AWS client used to terminate instances. */
    private final AWSClient awsClient;

    /**
     * Constructor.
     * @param awsClient the AWS client
     * @param ctx the context
     */
    public InstanceJanitor(AWSClient awsClient, AbstractJanitor.Context ctx) {
        super(ctx, AWSResourceType.INSTANCE);
        Validate.notNull(awsClient);
        this.awsClient = awsClient;
    }

    /** Nothing to do after marking an instance. */
    @Override
    protected void postMark(Resource resource) {
    }

    /** Terminates the marked instance via the AWS client. */
    @Override
    protected void cleanup(Resource resource) {
        final String instanceId = resource.getId();
        LOGGER.info(String.format("Terminating instance %s", instanceId));
        awsClient.terminateInstance(instanceId);
    }

    /** Nothing to do after terminating an instance. */
    @Override
    protected void postCleanup(Resource resource) {
    }
}
| 1,931
| 28.723077
| 88
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/ELBJanitor.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.janitor.AbstractJanitor;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * The Janitor responsible for elastic load balancer cleanup.
 */
public class ELBJanitor extends AbstractJanitor {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(ELBJanitor.class);

    /** The AWS client used to delete load balancers and DNS records. */
    private final AWSClient awsClient;

    /**
     * Constructor.
     * @param awsClient the AWS client
     * @param ctx the context
     */
    public ELBJanitor(AWSClient awsClient, Context ctx) {
        super(ctx, AWSResourceType.ELB);
        Validate.notNull(awsClient);
        this.awsClient = awsClient;
    }

    /** No post-mark action is needed for ELBs. */
    @Override
    protected void postMark(Resource resource) {
    }

    /**
     * Deletes the ELB, then any DNS records referencing it. The DNS names,
     * record types and hosted zones are carried as parallel comma-separated
     * additional fields; if the three lists disagree in length the DNS delete
     * is aborted (the ELB itself has already been deleted).
     */
    @Override
    protected void cleanup(Resource resource) {
        LOGGER.info(String.format("Deleting ELB %s", resource.getId()));
        awsClient.deleteElasticLoadBalancer(resource.getId());
        // delete any DNS records attached to this ELB
        String dnsNames = resource.getAdditionalField("referencedDNS");
        String dnsTypes = resource.getAdditionalField("referencedDNSTypes");
        String dnsZones = resource.getAdditionalField("referencedDNSZones");
        if (StringUtils.isNotBlank(dnsNames) && StringUtils.isNotBlank(dnsTypes) && StringUtils.isNotBlank(dnsZones)) {
            String[] dnsNamesSplit = StringUtils.split(dnsNames, ',');
            String[] dnsTypesSplit = StringUtils.split(dnsTypes, ',');
            String[] dnsZonesSplit = StringUtils.split(dnsZones, ',');
            // BUG FIX: the format arguments used to be passed to LOGGER.error()
            // *outside* String.format(...), so String.format("...%s") had no
            // argument and threw MissingFormatArgumentException whenever these
            // error paths were hit, instead of logging.
            if (dnsNamesSplit.length != dnsTypesSplit.length) {
                LOGGER.error(String.format("DNS Name count does not match DNS Type count, aborting DNS delete for ELB %s",
                        resource.getId()));
                LOGGER.error(String.format("DNS Names found but not deleted: %s for ELB %s", dnsNames, resource.getId()));
                return;
            }
            if (dnsNamesSplit.length != dnsZonesSplit.length) {
                LOGGER.error(String.format("DNS Name count does not match DNS Zone count, aborting DNS delete for ELB %s",
                        resource.getId()));
                LOGGER.error(String.format("DNS Names found but not deleted: %s for ELB %s", dnsNames, resource.getId()));
                return;
            }
            for (int i = 0; i < dnsNamesSplit.length; i++) {
                LOGGER.info(String.format("Deleting DNS Record %s for ELB %s of type %s in zone %s",
                        dnsNamesSplit[i], resource.getId(), dnsTypesSplit[i], dnsZonesSplit[i]));
                awsClient.deleteDNSRecord(dnsNamesSplit[i], dnsTypesSplit[i], dnsZonesSplit[i]);
            }
        }
    }

    /** Sleeps briefly after cleanup; restores the interrupt flag if interrupted. */
    @Override
    protected void postCleanup(Resource resource) {
        try {
            Thread.sleep(5000);
        } catch (InterruptedException e) {
            LOGGER.warn("Post-cleanup sleep was interrupted", e);
            // Re-interrupt so callers up the stack can observe the interruption.
            Thread.currentThread().interrupt();
        }
    }
}
| 3,807
| 37.857143
| 174
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/SimpleDBJanitorResourceTracker.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor;
import com.amazonaws.services.simpledb.AmazonSimpleDB;
import com.amazonaws.services.simpledb.model.*;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.Resource.CleanupState;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.janitor.JanitorResourceTracker;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * The JanitorResourceTracker implementation in SimpleDB.
 */
public class SimpleDBJanitorResourceTracker implements JanitorResourceTracker {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(SimpleDBJanitorResourceTracker.class);
    /** The SimpleDB domain that holds the tracked resources. */
    private final String domain;
    /** The SimpleDB client. */
    private final AmazonSimpleDB simpleDBClient;
    /**
     * Instantiates a new simple db resource tracker.
     *
     * @param awsClient
     *            the AWS Client
     * @param domain
     *            the domain
     */
    public SimpleDBJanitorResourceTracker(AWSClient awsClient, String domain) {
        this.domain = domain;
        this.simpleDBClient = awsClient.sdbClient();
    }
    /**
     * Gets the SimpleDB client.
     * @return the SimpleDB client
     */
    protected AmazonSimpleDB getSimpleDBClient() {
        return simpleDBClient;
    }
    /** {@inheritDoc} */
    @Override
    public void addOrUpdate(Resource resource) {
        List<ReplaceableAttribute> attrs = new ArrayList<ReplaceableAttribute>();
        Map<String, String> fieldToValueMap = resource.getFieldToValueMap();
        for (Map.Entry<String, String> entry : fieldToValueMap.entrySet()) {
            // The replace flag is true so an existing attribute value is overwritten.
            attrs.add(new ReplaceableAttribute(entry.getKey(), entry.getValue(), true));
        }
        PutAttributesRequest putRequest = new PutAttributesRequest(domain, getSimpleDBItemName(resource), attrs);
        LOGGER.debug(String.format("Saving resource %s to SimpleDB domain %s",
                resource.getId(), domain));
        this.simpleDBClient.putAttributes(putRequest);
        LOGGER.debug("Successfully saved.");
    }
    /**
     * Returns a list of AWSResource objects. You need to override this method if more
     * specific resource types (e.g. subtypes of AWSResource) need to be obtained from
     * the SimpleDB.
     */
    @Override
    public List<Resource> getResources(ResourceType resourceType, CleanupState state, String resourceRegion) {
        Validate.notEmpty(resourceRegion);
        List<Resource> resources = new ArrayList<Resource>();
        StringBuilder query = new StringBuilder();
        query.append(String.format("select * from `%s` where ", domain));
        if (resourceType != null) {
            // Enum name, no quoting hazard.
            query.append(String.format("resourceType='%s' and ", resourceType));
        }
        if (state != null) {
            query.append(String.format("state='%s' and ", state));
        }
        query.append(String.format("region='%s'", escapeValue(resourceRegion)));
        LOGGER.debug(String.format("Query is '%s'", query));
        List<Item> items = querySimpleDBItems(query.toString());
        for (Item item : items) {
            try {
                resources.add(parseResource(item));
            } catch (Exception e) {
                // Ignore the item that cannot be parsed, but keep the cause in the log.
                LOGGER.error(String.format("SimpleDB item %s cannot be parsed into a resource.", item), e);
            }
        }
        LOGGER.info(String.format("Retrieved %d resources from SimpleDB in domain %s for resource type %s"
                + " and state %s and region %s",
                resources.size(), domain, resourceType, state, resourceRegion));
        return resources;
    }
    /** {@inheritDoc} */
    @Override
    public Resource getResource(String resourceId) {
        Validate.notEmpty(resourceId);
        StringBuilder query = new StringBuilder();
        query.append(String.format("select * from `%s` where resourceId = '%s'", domain, escapeValue(resourceId)));
        LOGGER.debug(String.format("Query is '%s'", query));
        List<Item> items = querySimpleDBItems(query.toString());
        Validate.isTrue(items.size() <= 1);
        if (items.size() == 0) {
            LOGGER.info(String.format("Not found resource with id %s", resourceId));
            return null;
        } else {
            Resource resource = null;
            try {
                resource = parseResource(items.get(0));
            } catch (Exception e) {
                // Ignore the item that cannot be parsed, but keep the cause in the log.
                LOGGER.error(String.format("SimpleDB item %s cannot be parsed into a resource.", items.get(0)), e);
            }
            return resource;
        }
    }
    /** {@inheritDoc} */
    @Override
    public Resource getResource(String resourceId, String region) {
        Validate.notEmpty(resourceId);
        Validate.notEmpty(region);
        StringBuilder query = new StringBuilder();
        query.append(String.format("select * from `%s` where resourceId = '%s' and region = '%s'",
                domain, escapeValue(resourceId), escapeValue(region)));
        LOGGER.debug(String.format("Query is '%s'", query));
        List<Item> items = querySimpleDBItems(query.toString());
        Validate.isTrue(items.size() <= 1);
        if (items.size() == 0) {
            LOGGER.info(String.format("Not found resource with id %s and region %s", resourceId, region));
            return null;
        } else {
            Resource resource = null;
            try {
                resource = parseResource(items.get(0));
            } catch (Exception e) {
                // Ignore the item that cannot be parsed, but keep the cause in the log.
                LOGGER.error(String.format("SimpleDB item %s cannot be parsed into a resource.", items.get(0)), e);
            }
            return resource;
        }
    }
    /**
     * Parses a SimpleDB item into an AWS resource.
     * @param item the item from SimpleDB
     * @return the AWSResource object for the SimpleDB item
     */
    protected Resource parseResource(Item item) {
        Map<String, String> fieldToValue = new HashMap<String, String>();
        for (Attribute attr : item.getAttributes()) {
            String name = attr.getName();
            String value = attr.getValue();
            if (name != null && value != null) {
                fieldToValue.put(name, value);
            }
        }
        return AWSResource.parseFieldtoValueMap(fieldToValue);
    }
    /**
     * Gets the unique SimpleDB item name for a resource. The subclass can override this
     * method to generate the item name differently.
     * @param resource
     * @return the SimpleDB item name for the resource
     */
    protected String getSimpleDBItemName(Resource resource) {
        return String.format("%s-%s-%s", resource.getResourceType().name(), resource.getId(), resource.getRegion());
    }
    /**
     * Escapes a value for use inside a single-quoted SimpleDB select expression.
     * Per the SimpleDB select syntax, an embedded quote is escaped by doubling it.
     * @param value the raw value
     * @return the value safe to embed between single quotes
     */
    private static String escapeValue(String value) {
        return value.replace("'", "''");
    }
    /**
     * Runs a select query against SimpleDB, following the pagination token until
     * all matching items are retrieved.
     * @param query the select expression
     * @return all items matching the query
     */
    private List<Item> querySimpleDBItems(String query) {
        Validate.notNull(query);
        String nextToken = null;
        List<Item> items = new ArrayList<Item>();
        do {
            SelectRequest request = new SelectRequest(query);
            request.setNextToken(nextToken);
            request.setConsistentRead(Boolean.TRUE);
            SelectResult result = this.simpleDBClient.select(request);
            items.addAll(result.getItems());
            nextToken = result.getNextToken();
        } while (nextToken != null);
        return items;
    }
}
| 8,256
| 37.050691
| 128
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/VolumeTaggingMonkey.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor;
import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.google.common.collect.Maps;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.services.ec2.model.Instance;
import com.amazonaws.services.ec2.model.Tag;
import com.amazonaws.services.ec2.model.Volume;
import com.amazonaws.services.ec2.model.VolumeAttachment;
import com.netflix.simianarmy.EventType;
import com.netflix.simianarmy.Monkey;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.MonkeyConfiguration;
import com.netflix.simianarmy.MonkeyRecorder.Event;
import com.netflix.simianarmy.MonkeyType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.janitor.JanitorMonkey;
/**
 * A companion monkey of Janitor Monkey for tagging EBS volumes with the last attachment information.
 * In many scenarios, EBS volumes generated by applications remain unattached to instances. Amazon
 * does not keep track of last unattached time, which makes it difficult to determine its usage.
 * To solve this, this monkey will tag all EBS volumes with last owner and instance to which they are attached
 * and the time they got detached from instance. The monkey will poll and monitor EBS volumes hourly (by default).
 *
 */
public class VolumeTaggingMonkey extends Monkey {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(VolumeTaggingMonkey.class);
    /**
     * The Interface Context.
     */
    public interface Context extends Monkey.Context {
        /**
         * Configuration.
         *
         * @return the monkey configuration
         */
        @Override
        MonkeyConfiguration configuration();
        /**
         * AWS clients. Using a collection of clients for supporting running one monkey for multiple regions.
         *
         * @return the collection of AWS clients
         */
        Collection<AWSClient> awsClients();
    }
    /** The monkey configuration. */
    private final MonkeyConfiguration config;
    /** The AWS clients, one per covered region. */
    private final Collection<AWSClient> awsClients;
    /** The calendar used to obtain the current time. */
    private final MonkeyCalendar calendar;
    /** We cache the global map from instance id to its owner when starting the monkey. */
    private final Map<AWSClient, Map<String, String>> awsClientToInstanceToOwner;
    /**
     * The constructor.
     * @param ctx the context
     */
    public VolumeTaggingMonkey(Context ctx) {
        super(ctx);
        this.config = ctx.configuration();
        this.awsClients = ctx.awsClients();
        this.calendar = ctx.calendar();
        awsClientToInstanceToOwner = Maps.newHashMap();
        // Cache the owner tag of every instance up front so that tagging volumes
        // later does not need to describe instances repeatedly.
        for (AWSClient awsClient : awsClients) {
            Map<String, String> instanceToOwner = Maps.newHashMap();
            awsClientToInstanceToOwner.put(awsClient, instanceToOwner);
            for (Instance instance : awsClient.describeInstances()) {
                for (Tag tag : instance.getTags()) {
                    if (tag.getKey().equals(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY)) {
                        instanceToOwner.put(instance.getInstanceId(), tag.getValue());
                    }
                }
            }
        }
    }
    /**
     * The monkey Type.
     */
    public enum Type implements MonkeyType {
        /** Volume tagging monkey. */
        VOLUME_TAGGING
    }
    /**
     * The event types that this monkey causes.
     */
    public enum EventTypes implements EventType {
        /** The event type for tagging the volume with Janitor meta data information. */
        TAGGING_JANITOR
    }
    /** {@inheritDoc} */
    @Override
    public Type type() {
        return Type.VOLUME_TAGGING;
    }
    /** {@inheritDoc} */
    @Override
    public void doMonkeyBusiness() {
        String prop = "simianarmy.volumeTagging.enabled";
        if (config.getBoolOrElse(prop, false)) {
            for (AWSClient awsClient : awsClients) {
                tagVolumesWithLatestAttachment(awsClient);
            }
        } else {
            LOGGER.info(String.format("Volume tagging monkey is not enabled. You can set %s to true to enable it.",
                    prop));
        }
    }
    /**
     * Tags every EBS volume in the client's region with the Janitor meta-data tag
     * recording the last attached instance, its owner and the last detach time.
     * @param awsClient the AWS client of the region to process
     */
    private void tagVolumesWithLatestAttachment(AWSClient awsClient) {
        List<Volume> volumes = awsClient.describeVolumes();
        LOGGER.info(String.format("Trying to tag %d volumes for Janitor Monkey meta data.",
                volumes.size()));
        Date now = calendar.now().getTime();
        for (Volume volume : volumes) {
            String owner = null, instanceId = null;
            Date lastDetachTime = null;
            List<VolumeAttachment> attachments = volume.getAttachments();
            List<Tag> tags = volume.getTags();
            // The volume can have a special tag if it does not want to be changed/tagged
            // by Janitor monkey.
            if ("donotmark".equals(getTagValue(JanitorMonkey.JANITOR_TAG, tags))) {
                LOGGER.info(String.format("The volume %s is tagged as not handled by Janitor",
                        volume.getVolumeId()));
                continue;
            }
            Map<String, String> janitorMetadata = parseJanitorTag(tags);
            // finding the instance attached most recently.
            VolumeAttachment latest = null;
            for (VolumeAttachment attachment : attachments) {
                if (latest == null || latest.getAttachTime().before(attachment.getAttachTime())) {
                    latest = attachment;
                }
            }
            if (latest != null) {
                instanceId = latest.getInstanceId();
                owner = getOwnerEmail(instanceId, janitorMetadata, tags, awsClient);
            }
            if (latest == null || "detached".equals(latest.getState())) {
                if (janitorMetadata.get(JanitorMonkey.DETACH_TIME_TAG_KEY) == null) {
                    // There is no attached instance and the last detached time is not set.
                    // Use the current time as the last detached time.
                    LOGGER.info(String.format("Setting the last detached time to %s for volume %s",
                            now, volume.getVolumeId()));
                    lastDetachTime = now;
                } else {
                    LOGGER.debug(String.format("The volume %s was already marked as detached at time %s",
                            volume.getVolumeId(), janitorMetadata.get(JanitorMonkey.DETACH_TIME_TAG_KEY)));
                }
            } else {
                // The volume is currently attached to an instance
                lastDetachTime = null;
            }
            String existingOwner = janitorMetadata.get(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY);
            if (owner == null && existingOwner != null) {
                // Save the current owner in the tag when we are not able to find a owner.
                owner = existingOwner;
            }
            if (needsUpdate(janitorMetadata, owner, instanceId, lastDetachTime)) {
                Event evt = updateJanitorMetaTag(volume, instanceId, owner, lastDetachTime, awsClient);
                if (evt != null) {
                    context().recorder().recordEvent(evt);
                }
            }
        }
    }
    /**
     * Resolves the owner email of a volume: first from the cached instance-to-owner map,
     * then from the existing Janitor meta tag, finally from the volume's own owner tag.
     * A bare user name is turned into an email with the configured domain.
     */
    private String getOwnerEmail(String instanceId, Map<String, String> janitorMetadata,
                                 List<Tag> tags, AWSClient awsClient) {
        // The owner of the volume is set as the owner of the last instance attached to it.
        String owner = awsClientToInstanceToOwner.get(awsClient).get(instanceId);
        if (owner == null) {
            owner = janitorMetadata.get(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY);
        }
        if (owner == null) {
            owner = getTagValue(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY, tags);
        }
        String emailDomain = getOwnerEmailDomain();
        if (owner != null && !owner.contains("@")
                && StringUtils.isNotBlank(emailDomain)) {
            owner = String.format("%s@%s", owner, emailDomain);
        }
        return owner;
    }
    /**
     * Parses the Janitor meta tag set by this monkey and gets a map from key
     * to value for the tag values.
     * @param tags the tags of the volumes
     * @return the map from the Janitor meta tag key to value
     */
    private static Map<String, String> parseJanitorTag(List<Tag> tags) {
        String janitorTag = getTagValue(JanitorMonkey.JANITOR_META_TAG, tags);
        return parseJanitorMetaTag(janitorTag);
    }
    /**
     * Parses the string of Janitor meta-data tag value to get a key value map.
     * @param janitorMetaTag the value of the Janitor meta-data tag
     * @return the key value map in the Janitor meta-data tag
     */
    public static Map<String, String> parseJanitorMetaTag(String janitorMetaTag) {
        Map<String, String> metadata = new HashMap<String, String>();
        if (janitorMetaTag != null) {
            for (String keyValue : janitorMetaTag.split(";")) {
                String[] meta = keyValue.split("=");
                // Entries that are not exactly key=value are silently skipped.
                if (meta.length == 2) {
                    metadata.put(meta[0], meta[1]);
                }
            }
        }
        return metadata;
    }
    /** Gets the domain name for the owner email. The method can be overridden in subclasses.
     *
     * @return the domain name for the owner email.
     */
    protected String getOwnerEmailDomain() {
        return config.getStrOrElse("simianarmy.volumeTagging.ownerEmailDomain", "");
    }
    /**
     * Writes the Janitor meta tag to the volume (unless the monkey is leashed) and
     * returns the recorded event, or null when nothing was changed.
     */
    private Event updateJanitorMetaTag(Volume volume, String instance, String owner, Date lastDetachTime,
                                       AWSClient awsClient) {
        String meta = makeMetaTag(instance, owner, lastDetachTime);
        Map<String, String> janitorTags = new HashMap<String, String>();
        janitorTags.put(JanitorMonkey.JANITOR_META_TAG, meta);
        LOGGER.info(String.format("Setting tag %s to '%s' for volume %s",
                JanitorMonkey.JANITOR_META_TAG, meta, volume.getVolumeId()));
        String prop = "simianarmy.volumeTagging.leashed";
        Event evt = null;
        if (config.getBoolOrElse(prop, true)) {
            LOGGER.info("Volume tagging monkey is leashed. No real change is made to the volume.");
        } else {
            try {
                awsClient.createTagsForResources(janitorTags, volume.getVolumeId());
                evt = context().recorder().newEvent(type(), EventTypes.TAGGING_JANITOR,
                        awsClient.region(), volume.getVolumeId());
                evt.addField(JanitorMonkey.JANITOR_META_TAG, meta);
            } catch (Exception e) {
                // Keep the exception in the log so tagging failures can be diagnosed.
                LOGGER.error(String.format("Failed to update the tag for volume %s", volume.getVolumeId()), e);
            }
        }
        return evt;
    }
    /**
     * Makes the Janitor meta tag for volumes to track the last attachment/detachment information.
     * The method is intentionally made public for testing.
     * @param instance the last attached instance
     * @param owner the last owner
     * @param lastDetachTime the detach time
     * @return the meta tag of Janitor Monkey
     */
    public static String makeMetaTag(String instance, String owner, Date lastDetachTime) {
        StringBuilder meta = new StringBuilder();
        meta.append(String.format("%s=%s;",
                JanitorMonkey.INSTANCE_TAG_KEY, instance == null ? "" : instance));
        meta.append(String.format("%s=%s;", BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY, owner == null ? "" : owner));
        meta.append(String.format("%s=%s", JanitorMonkey.DETACH_TIME_TAG_KEY,
                lastDetachTime == null ? "" : AWSResource.DATE_FORMATTER.print(lastDetachTime.getTime())));
        return meta.toString();
    }
    /**
     * Returns the value of the tag with the given key, or null when absent.
     */
    private static String getTagValue(String key, List<Tag> tags) {
        for (Tag tag : tags) {
            if (tag.getKey().equals(key)) {
                return tag.getValue();
            }
        }
        return null;
    }
    /** Needs to update tags of the volume if
     * 1) owner or instance attached changed or
     * 2) the last detached status is changed.
     */
    private static boolean needsUpdate(Map<String, String> metadata,
                                       String owner, String instance, Date lastDetachTime) {
        return (owner != null && !StringUtils.equals(metadata.get(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY), owner))
                || (instance != null && !StringUtils.equals(metadata.get(JanitorMonkey.INSTANCE_TAG_KEY), instance))
                || lastDetachTime != null;
    }
}
| 13,435
| 40.597523
| 118
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/EBSVolumeJanitor.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.janitor.AbstractJanitor;
/**
 * The Janitor responsible for EBS volume cleanup.
 */
public class EBSVolumeJanitor extends AbstractJanitor {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(EBSVolumeJanitor.class);

    /** Client used to issue the EC2 delete-volume call. */
    private final AWSClient awsClient;

    /**
     * Constructor.
     * @param awsClient the AWS client
     * @param ctx the context
     */
    public EBSVolumeJanitor(AWSClient awsClient, AbstractJanitor.Context ctx) {
        super(ctx, AWSResourceType.EBS_VOLUME);
        Validate.notNull(awsClient);
        this.awsClient = awsClient;
    }

    /** No additional work is required after a volume is marked. */
    @Override
    protected void postMark(Resource resource) {
    }

    /** Deletes the marked EBS volume through the AWS client. */
    @Override
    protected void cleanup(Resource resource) {
        final String volumeId = resource.getId();
        LOGGER.info(String.format("Deleting EBS volume %s", volumeId));
        awsClient.deleteVolume(volumeId);
    }

    /** No additional work is required after a volume is deleted. */
    @Override
    protected void postCleanup(Resource resource) {
    }
}
| 1,919
| 28.538462
| 89
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/EBSSnapshotJanitor.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.janitor.AbstractJanitor;
/**
 * The Janitor responsible for EBS snapshot cleanup.
 */
public class EBSSnapshotJanitor extends AbstractJanitor {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(EBSSnapshotJanitor.class);

    /** Client used to issue the EC2 delete-snapshot call. */
    private final AWSClient awsClient;

    /**
     * Constructor.
     * @param awsClient the AWS client
     * @param ctx the context
     */
    public EBSSnapshotJanitor(AWSClient awsClient, AbstractJanitor.Context ctx) {
        super(ctx, AWSResourceType.EBS_SNAPSHOT);
        Validate.notNull(awsClient);
        this.awsClient = awsClient;
    }

    /** No additional work is required after a snapshot is marked. */
    @Override
    protected void postMark(Resource resource) {
    }

    /** Deletes the marked EBS snapshot through the AWS client. */
    @Override
    protected void cleanup(Resource resource) {
        final String snapshotId = resource.getId();
        LOGGER.info(String.format("Deleting EBS snapshot %s", snapshotId));
        awsClient.deleteSnapshot(snapshotId);
    }

    /** No additional work is required after a snapshot is deleted. */
    @Override
    protected void postCleanup(Resource resource) {
    }
}
| 1,933
| 28.753846
| 91
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/RDSJanitorResourceTracker.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor;
import com.amazonaws.AmazonClientException;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.Resource.CleanupState;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.janitor.JanitorResourceTracker;
import com.zaxxer.hikari.HikariDataSource;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import java.io.IOException;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Types;
import java.util.*;
/**
* The JanitorResourceTracker implementation in AWS RDS.
*/
public class RDSJanitorResourceTracker implements JanitorResourceTracker {
/** The Constant LOGGER. */
public static final Logger LOGGER = LoggerFactory.getLogger(RDSJanitorResourceTracker.class);
/** The table. */
private final String table;
/** the jdbcTemplate */
JdbcTemplate jdbcTemplate = null;
/**
* Instantiates a new RDS janitor resource tracker.
*
*/
public RDSJanitorResourceTracker(String dbDriver, String dbUser,
String dbPass, String dbUrl, String dbTable) {
HikariDataSource dataSource = new HikariDataSource();
dataSource.setDriverClassName(dbDriver);
dataSource.setJdbcUrl(dbUrl);
dataSource.setUsername(dbUser);
dataSource.setPassword(dbPass);
dataSource.setMaximumPoolSize(2);
this.jdbcTemplate = new JdbcTemplate(dataSource);
this.table = dbTable;
}
/**
* Instantiates a new RDS janitor resource tracker. This constructor is intended
* for unit testing.
*
*/
public RDSJanitorResourceTracker(JdbcTemplate jdbcTemplate, String table) {
this.jdbcTemplate = jdbcTemplate;
this.table = table;
}
public JdbcTemplate getJdbcTemplate() {
return jdbcTemplate;
}
public Object value(String value) {
return value == null ? Types.NULL : value;
}
public Object value(Date value) {
return value == null ? Types.NULL : value.getTime();
}
public Object value(boolean value) {
return new Boolean(value).toString();
}
public Object emailValue(String email) {
if (StringUtils.isBlank(email)) return Types.NULL;
if (email.equals("0")) return Types.NULL;
return email;
}
/** {@inheritDoc} */
@Override
public void addOrUpdate(Resource resource) {
Resource orig = getResource(resource.getId(), resource.getRegion());
LOGGER.debug(String.format("Saving resource %s to RDB table %s in region %s", resource.getId(), table, resource.getRegion()));
String json;
try {
json = new ObjectMapper().writeValueAsString(additionalFieldsAsMap(resource));
} catch (JsonProcessingException e) {
LOGGER.error("ERROR generating additional field JSON when saving resource " + resource.getId(), e);
return;
}
if (orig == null) {
StringBuilder sb = new StringBuilder();
sb.append("insert into ").append(table);
sb.append(" (");
sb.append(AWSResource.FIELD_RESOURCE_ID).append(",");
sb.append(AWSResource.FIELD_RESOURCE_TYPE).append(",");
sb.append(AWSResource.FIELD_REGION).append(",");
sb.append(AWSResource.FIELD_OWNER_EMAIL).append(",");
sb.append(AWSResource.FIELD_DESCRIPTION).append(",");
sb.append(AWSResource.FIELD_STATE).append(",");
sb.append(AWSResource.FIELD_TERMINATION_REASON).append(",");
sb.append(AWSResource.FIELD_EXPECTED_TERMINATION_TIME).append(",");
sb.append(AWSResource.FIELD_ACTUAL_TERMINATION_TIME).append(",");
sb.append(AWSResource.FIELD_NOTIFICATION_TIME).append(",");
sb.append(AWSResource.FIELD_LAUNCH_TIME).append(",");
sb.append(AWSResource.FIELD_MARK_TIME).append(",");
sb.append(AWSResource.FIELD_OPT_OUT_OF_JANITOR).append(",");
sb.append("additionalFields").append(") values (?,?,?,?,?,?,?,?,?,?,?,?,?,?)");
LOGGER.debug(String.format("Insert statement is '%s'", sb));
int updated = this.jdbcTemplate.update(sb.toString(),
resource.getId(),
value(resource.getResourceType().toString()),
value(resource.getRegion()),
emailValue(resource.getOwnerEmail()),
value(resource.getDescription()),
value(resource.getState().toString()),
value(resource.getTerminationReason()),
value(resource.getExpectedTerminationTime()),
value(resource.getActualTerminationTime()),
value(resource.getNotificationTime()),
value(resource.getLaunchTime()),
value(resource.getMarkTime()),
value(resource.isOptOutOfJanitor()),
json);
LOGGER.debug(String.format("%d rows inserted", updated));
} else {
StringBuilder sb = new StringBuilder();
sb.append("update ").append(table).append(" set ");
sb.append(AWSResource.FIELD_RESOURCE_TYPE).append("=?,");
sb.append(AWSResource.FIELD_REGION).append("=?,");
sb.append(AWSResource.FIELD_OWNER_EMAIL).append("=?,");
sb.append(AWSResource.FIELD_DESCRIPTION).append("=?,");
sb.append(AWSResource.FIELD_STATE).append("=?,");
sb.append(AWSResource.FIELD_TERMINATION_REASON).append("=?,");
sb.append(AWSResource.FIELD_EXPECTED_TERMINATION_TIME).append("=?,");
sb.append(AWSResource.FIELD_ACTUAL_TERMINATION_TIME).append("=?,");
sb.append(AWSResource.FIELD_NOTIFICATION_TIME).append("=?,");
sb.append(AWSResource.FIELD_LAUNCH_TIME).append("=?,");
sb.append(AWSResource.FIELD_MARK_TIME).append("=?,");
sb.append(AWSResource.FIELD_OPT_OUT_OF_JANITOR).append("=?,");
sb.append("additionalFields").append("=? where ");
sb.append(AWSResource.FIELD_RESOURCE_ID).append("=? and ");
sb.append(AWSResource.FIELD_REGION).append("=?");
LOGGER.debug(String.format("Update statement is '%s'", sb));
int updated = this.jdbcTemplate.update(sb.toString(),
resource.getResourceType().toString(),
value(resource.getRegion()),
emailValue(resource.getOwnerEmail()),
value(resource.getDescription()),
value(resource.getState().toString()),
value(resource.getTerminationReason()),
value(resource.getExpectedTerminationTime()),
value(resource.getActualTerminationTime()),
value(resource.getNotificationTime()),
value(resource.getLaunchTime()),
value(resource.getMarkTime()),
value(resource.isOptOutOfJanitor()),
json,
resource.getId(),
resource.getRegion());
LOGGER.debug(String.format("%d rows updated", updated));
}
LOGGER.debug("Successfully saved.");
}
/**
* Returns a list of AWSResource objects. You need to override this method if more
* specific resource types (e.g. subtypes of AWSResource) need to be obtained from
* the Database.
*/
@Override
public List<Resource> getResources(ResourceType resourceType, CleanupState state, String resourceRegion) {
Validate.notEmpty(resourceRegion);
StringBuilder query = new StringBuilder();
ArrayList<String> args = new ArrayList<>();
query.append(String.format("select * from %s where ", table));
if (resourceType != null) {
query.append("resourceType=? and ");
args.add(resourceType.toString());
}
if (state != null) {
query.append("state=? and ");
args.add(state.toString());
}
query.append("region=?");
args.add(resourceRegion);
LOGGER.debug(String.format("Query is '%s'", query));
List<Resource> resources = jdbcTemplate.query(query.toString(), args.toArray(), new RowMapper<Resource>() {
public Resource mapRow(ResultSet rs, int rowNum) throws SQLException {
return mapResource(rs);
}
});
return resources;
}
private Resource mapResource(ResultSet rs) throws SQLException {
String json = rs.getString("additionalFields");
Resource resource = null;
try {
// put additional fields
Map<String, String> map = new HashMap<>();
if (json != null) {
TypeReference<HashMap<String,String>> typeRef = new TypeReference<HashMap<String,String>>() {};
map = new ObjectMapper().readValue(json, typeRef);
}
// put everything else
map.put(AWSResource.FIELD_RESOURCE_ID, rs.getString(AWSResource.FIELD_RESOURCE_ID));
map.put(AWSResource.FIELD_RESOURCE_TYPE, rs.getString(AWSResource.FIELD_RESOURCE_TYPE));
map.put(AWSResource.FIELD_REGION, rs.getString(AWSResource.FIELD_REGION));
map.put(AWSResource.FIELD_DESCRIPTION, rs.getString(AWSResource.FIELD_DESCRIPTION));
map.put(AWSResource.FIELD_STATE, rs.getString(AWSResource.FIELD_STATE));
map.put(AWSResource.FIELD_TERMINATION_REASON, rs.getString(AWSResource.FIELD_TERMINATION_REASON));
map.put(AWSResource.FIELD_OPT_OUT_OF_JANITOR, rs.getString(AWSResource.FIELD_OPT_OUT_OF_JANITOR));
String email = rs.getString(AWSResource.FIELD_OWNER_EMAIL);
if (StringUtils.isBlank(email) || email.equals("0")) {
email = null;
}
map.put(AWSResource.FIELD_OWNER_EMAIL, email);
String expectedTerminationTime = millisToFormattedDate(rs.getString(AWSResource.FIELD_EXPECTED_TERMINATION_TIME));
String actualTerminationTime = millisToFormattedDate(rs.getString(AWSResource.FIELD_ACTUAL_TERMINATION_TIME));
String notificationTime = millisToFormattedDate(rs.getString(AWSResource.FIELD_NOTIFICATION_TIME));
String launchTime = millisToFormattedDate(rs.getString(AWSResource.FIELD_LAUNCH_TIME));
String markTime = millisToFormattedDate(rs.getString(AWSResource.FIELD_MARK_TIME));
if (expectedTerminationTime != null) {
map.put(AWSResource.FIELD_EXPECTED_TERMINATION_TIME, expectedTerminationTime);
}
if (actualTerminationTime != null) {
map.put(AWSResource.FIELD_ACTUAL_TERMINATION_TIME, actualTerminationTime);
}
if (notificationTime != null) {
map.put(AWSResource.FIELD_NOTIFICATION_TIME, notificationTime);
}
if (launchTime != null) {
map.put(AWSResource.FIELD_LAUNCH_TIME, launchTime);
}
if (markTime != null) {
map.put(AWSResource.FIELD_MARK_TIME, markTime);
}
resource = AWSResource.parseFieldtoValueMap(map);
}catch(IOException ie) {
String msg = "Error parsing resource from result set";
LOGGER.error(msg, ie);
throw new SQLException(msg);
}
return resource;
}
private String millisToFormattedDate(String millisStr) {
String datetime = null;
try {
long millis = Long.parseLong(millisStr);
datetime = AWSResource.DATE_FORMATTER.print(millis);
} catch(NumberFormatException nfe) {
LOGGER.error(String.format("Error parsing datetime %s when reading from RDS", millisStr));
}
return datetime;
}
@Override
public Resource getResource(String resourceId) {
Validate.notEmpty(resourceId);
StringBuilder query = new StringBuilder();
query.append(String.format("select * from %s where resourceId=?", table));
LOGGER.debug(String.format("Query is '%s'", query));
List<Resource> resources = jdbcTemplate.query(query.toString(), new String[]{resourceId}, new RowMapper<Resource>() {
public Resource mapRow(ResultSet rs, int rowNum) throws SQLException {
return mapResource(rs);
}
});
Resource resource = null;
Validate.isTrue(resources.size() <= 1);
if (resources.size() == 0) {
LOGGER.info(String.format("Not found resource with id %s", resourceId));
} else {
resource = resources.get(0);
}
return resource;
}
@Override
public Resource getResource(String resourceId, String region) {
Validate.notEmpty(resourceId);
Validate.notEmpty(region);
StringBuilder query = new StringBuilder();
query.append(String.format("select * from %s where resourceId=? and region=?", table));
LOGGER.debug(String.format("Query is '%s'", query));
List<Resource> resources = jdbcTemplate.query(query.toString(), new String[]{resourceId,region}, new RowMapper<Resource>() {
public Resource mapRow(ResultSet rs, int rowNum) throws SQLException {
return mapResource(rs);
}
});
Resource resource = null;
Validate.isTrue(resources.size() <= 1);
if (resources.size() == 0) {
LOGGER.info(String.format("Not found resource with id %s", resourceId));
} else {
resource = resources.get(0);
}
return resource;
}
    /**
     * Creates the RDS table used to track janitor resources, if it does not already exist.
     * Safe to call repeatedly: the DDL uses "create table if not exists".
     */
    public void init() {
        try {
            LOGGER.info("Creating RDS table: {}", table);
            // One varchar column per simple field, BIGINT columns for the epoch-millisecond
            // timestamps, and a wide varchar for the serialized additional fields.
            String sql = String.format("create table if not exists %s ("
                         + " %s varchar(255), "
                         + " %s varchar(255), "
                         + " %s varchar(25), "
                         + " %s varchar(255), "
                         + " %s varchar(255), "
                         + " %s varchar(25), "
                         + " %s varchar(255), "
                         + " %s BIGINT, "
                         + " %s BIGINT, "
                         + " %s BIGINT, "
                         + " %s BIGINT, "
                         + " %s BIGINT, "
                         + " %s varchar(8), "
                         + " %s varchar(4096) )",
                         table,
                         AWSResource.FIELD_RESOURCE_ID,
                         AWSResource.FIELD_RESOURCE_TYPE,
                         AWSResource.FIELD_REGION,
                         AWSResource.FIELD_OWNER_EMAIL,
                         AWSResource.FIELD_DESCRIPTION,
                         AWSResource.FIELD_STATE,
                         AWSResource.FIELD_TERMINATION_REASON,
                         AWSResource.FIELD_EXPECTED_TERMINATION_TIME,
                         AWSResource.FIELD_ACTUAL_TERMINATION_TIME,
                         AWSResource.FIELD_NOTIFICATION_TIME,
                         AWSResource.FIELD_LAUNCH_TIME,
                         AWSResource.FIELD_MARK_TIME,
                         AWSResource.FIELD_OPT_OUT_OF_JANITOR,
                         "additionalFields");
            LOGGER.debug("Create SQL is: '{}'", sql);
            jdbcTemplate.execute(sql);
        } catch (AmazonClientException e) {
            // NOTE(review): jdbcTemplate failures surface as Spring DataAccessException, not
            // AmazonClientException — confirm this catch actually guards the intended failure
            // mode; as written, a JDBC failure would propagate out of init().
            LOGGER.warn("Error while trying to auto-create RDS table", e);
        }
    }
private HashMap<String, String> additionalFieldsAsMap(Resource resource) {
HashMap<String, String> fields = new HashMap<>();
for(String key : resource.getAdditionalFieldNames()) {
fields.put(key, resource.getAdditionalField(key));
}
return fields;
}
}
| 16,376
| 40.671756
| 134
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/ASGJanitor.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.janitor.AbstractJanitor;
/**
 * The Janitor responsible for ASG cleanup.
 */
public class ASGJanitor extends AbstractJanitor {

    /** The Constant LOGGER. Fixed: previously created with AbstractJanitor.class, which
     *  made this janitor's log lines appear under the wrong logger name. */
    private static final Logger LOGGER = LoggerFactory.getLogger(ASGJanitor.class);

    /** The AWS client used to delete auto scaling groups. */
    private final AWSClient awsClient;

    /**
     * Constructor.
     * @param awsClient the AWS client
     * @param ctx the context
     */
    public ASGJanitor(AWSClient awsClient, AbstractJanitor.Context ctx) {
        super(ctx, AWSResourceType.ASG);
        Validate.notNull(awsClient);
        this.awsClient = awsClient;
    }

    /** No post-mark action is needed for ASGs. */
    @Override
    protected void postMark(Resource resource) {
    }

    /** Deletes the auto scaling group identified by the resource id. */
    @Override
    protected void cleanup(Resource resource) {
        LOGGER.info(String.format("Deleting ASG %s", resource.getId()));
        awsClient.deleteAutoScalingGroup(resource.getId());
    }

    /** No post-cleanup action is needed for ASGs. */
    @Override
    protected void postCleanup(Resource resource) {
    }
}
| 1,895
| 28.169231
| 88
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/LaunchConfigJanitor.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.janitor.AbstractJanitor;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * The Janitor responsible for launch configuration cleanup.
 */
public class LaunchConfigJanitor extends AbstractJanitor {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(LaunchConfigJanitor.class);

    /** Client used to issue the launch configuration deletion call. */
    private final AWSClient awsClient;

    /**
     * Constructor.
     * @param awsClient the AWS client
     * @param ctx the context
     */
    public LaunchConfigJanitor(AWSClient awsClient, AbstractJanitor.Context ctx) {
        super(ctx, AWSResourceType.LAUNCH_CONFIG);
        Validate.notNull(awsClient);
        this.awsClient = awsClient;
    }

    /** Launch configurations need no extra work after marking. */
    @Override
    protected void postMark(Resource resource) {
    }

    /** Deletes the launch configuration named by the resource id. */
    @Override
    protected void cleanup(Resource resource) {
        LOGGER.info("Deleting launch configuration {}", resource.getId());
        awsClient.deleteLaunchConfiguration(resource.getId());
    }

    /** Launch configurations need no extra work after cleanup. */
    @Override
    protected void postCleanup(Resource resource) {
    }
}
| 1,964
| 29.230769
| 92
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/rule/instance/OrphanedInstanceRule.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.instance;
import java.util.Date;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.janitor.crawler.InstanceJanitorCrawler;
import com.netflix.simianarmy.janitor.Rule;
/**
 * The rule for checking the orphaned instances that do not belong to any ASGs and
 * launched for certain days.
 */
public class OrphanedInstanceRule implements Rule {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(OrphanedInstanceRule.class);

    private static final String TERMINATION_REASON = "No ASG is associated with this instance";
    private static final String ASG_OR_OPSWORKS_TERMINATION_REASON = "No ASG or OpsWorks stack is associated with this instance";

    private final MonkeyCalendar calendar;
    private final int instanceAgeThreshold;
    private final int retentionDaysWithOwner;
    private final int retentionDaysWithoutOwner;
    private final boolean respectOpsWorksParentage;

    /**
     * Constructor for OrphanedInstanceRule.
     *
     * @param calendar
     *            The calendar used to calculate the termination time
     * @param instanceAgeThreshold
     *            The number of days that an instance is considered as orphaned since it is launched
     * @param retentionDaysWithOwner
     *            The number of days that the orphaned instance is retained before being terminated
     *            when the instance has an owner specified
     * @param retentionDaysWithoutOwner
     *            The number of days that the orphaned instance is retained before being terminated
     *            when the instance has no owner specified
     * @param respectOpsWorksParentage
     *            If true, don't consider members of an OpsWorks stack as orphans
     */
    public OrphanedInstanceRule(MonkeyCalendar calendar,
            int instanceAgeThreshold, int retentionDaysWithOwner, int retentionDaysWithoutOwner,
            boolean respectOpsWorksParentage) {
        Validate.notNull(calendar);
        Validate.isTrue(instanceAgeThreshold >= 0);
        Validate.isTrue(retentionDaysWithOwner >= 0);
        Validate.isTrue(retentionDaysWithoutOwner >= 0);
        this.calendar = calendar;
        this.instanceAgeThreshold = instanceAgeThreshold;
        this.retentionDaysWithOwner = retentionDaysWithOwner;
        this.retentionDaysWithoutOwner = retentionDaysWithoutOwner;
        this.respectOpsWorksParentage = respectOpsWorksParentage;
    }

    /**
     * Constructor for OrphanedInstanceRule that does not treat OpsWorks stack membership
     * as parentage (an instance in an OpsWorks stack but no ASG is still an orphan).
     *
     * @param calendar
     *            The calendar used to calculate the termination time
     * @param instanceAgeThreshold
     *            The number of days that an instance is considered as orphaned since it is launched
     * @param retentionDaysWithOwner
     *            Retention days when the instance has an owner specified
     * @param retentionDaysWithoutOwner
     *            Retention days when the instance has no owner specified
     */
    public OrphanedInstanceRule(MonkeyCalendar calendar,
            int instanceAgeThreshold, int retentionDaysWithOwner, int retentionDaysWithoutOwner) {
        this(calendar, instanceAgeThreshold, retentionDaysWithOwner, retentionDaysWithoutOwner, false);
    }

    @Override
    public boolean isValid(Resource resource) {
        Validate.notNull(resource);
        if (!resource.getResourceType().name().equals("INSTANCE")) {
            // The rule is supposed to only work on AWS instances. If a non-instance resource
            // is passed to the rule, the rule simply ignores it and considers it as a valid
            // resource not for cleanup.
            return true;
        }
        String awsStatus = ((AWSResource) resource).getAWSResourceState();
        // Only running instances are orphan candidates. The original condition also tested
        // "pending", but that clause was dead code: a pending instance already fails the
        // "running" check. Behavior is unchanged.
        if (!"running".equals(awsStatus)) {
            return true;
        }
        AWSResource instanceResource = (AWSResource) resource;
        String asgName = instanceResource.getAdditionalField(InstanceJanitorCrawler.INSTANCE_FIELD_ASG_NAME);
        String opsworkStackName =
                instanceResource.getAdditionalField(InstanceJanitorCrawler.INSTANCE_FIELD_OPSWORKS_STACK_NAME);
        // If there is no ASG AND it isn't an OpsWorks stack (or OpsWorks isn't respected
        // as a parent), we have an orphan
        if (StringUtils.isEmpty(asgName) && (!respectOpsWorksParentage || StringUtils.isEmpty(opsworkStackName))) {
            if (resource.getLaunchTime() == null) {
                LOGGER.error(String.format("The instance %s has no launch time.", resource.getId()));
                return true;
            }
            DateTime launchTime = new DateTime(resource.getLaunchTime().getTime());
            DateTime now = new DateTime(calendar.now().getTimeInMillis());
            if (now.isBefore(launchTime.plusDays(instanceAgeThreshold))) {
                LOGGER.info(String.format("The orphaned instance %s has not launched for more than %d days",
                        resource.getId(), instanceAgeThreshold));
                return true;
            }
            LOGGER.info(String.format("The orphaned instance %s has launched for more than %d days",
                    resource.getId(), instanceAgeThreshold));
            if (resource.getExpectedTerminationTime() == null) {
                // Owned resources get the with-owner retention window.
                int retentionDays =
                        resource.getOwnerEmail() != null ? retentionDaysWithOwner : retentionDaysWithoutOwner;
                Date terminationTime = calendar.getBusinessDay(new Date(now.getMillis()), retentionDays);
                resource.setExpectedTerminationTime(terminationTime);
                resource.setTerminationReason(
                        respectOpsWorksParentage ? ASG_OR_OPSWORKS_TERMINATION_REASON : TERMINATION_REASON);
            }
            return false;
        }
        return true;
    }
}
| 6,405
| 45.759124
| 136
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/rule/generic/UntaggedRule.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.generic;
import java.util.Date;
import java.util.Set;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.janitor.Rule;
/**
 * The rule for checking that a resource carries every tag in a required set,
 * marking resources that are missing any required tag for cleanup.
 */
public class UntaggedRule implements Rule {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(UntaggedRule.class);

    private final MonkeyCalendar calendar;
    private final Set<String> tagNames;
    private final int retentionDaysWithOwner;
    private final int retentionDaysWithoutOwner;

    /**
     * Constructor for UntaggedRule.
     *
     * @param calendar
     *            The calendar used to calculate the termination time
     * @param tagNames
     *            Set of tags that needs to be set
     * @param retentionDaysWithOwner
     *            Days an untagged resource with an owner is retained before termination
     * @param retentionDaysWithoutOwner
     *            Days an untagged resource without an owner is retained before termination
     */
    public UntaggedRule(MonkeyCalendar calendar, Set<String> tagNames, int retentionDaysWithOwner,
            int retentionDaysWithoutOwner) {
        Validate.notNull(calendar);
        Validate.notNull(tagNames);
        this.calendar = calendar;
        this.tagNames = tagNames;
        this.retentionDaysWithOwner = retentionDaysWithOwner;
        this.retentionDaysWithoutOwner = retentionDaysWithoutOwner;
    }

    @Override
    public boolean isValid(Resource resource) {
        Validate.notNull(resource);
        for (String tagName : this.tagNames) {
            if (((AWSResource) resource).getTag(tagName) == null) {
                // No leading space: the reason is both stored on the resource and embedded in the
                // log line below (the original produced a double space and a reason starting with
                // a space).
                String terminationReason = String.format("does not have the required tag %s", tagName);
                LOGGER.error(String.format("The resource %s %s", resource.getId(), terminationReason));
                DateTime now = new DateTime(calendar.now().getTimeInMillis());
                if (resource.getExpectedTerminationTime() == null) {
                    int retentionDays = retentionDaysWithoutOwner;
                    if (resource.getOwnerEmail() != null) {
                        retentionDays = retentionDaysWithOwner;
                    }
                    Date terminationTime = calendar.getBusinessDay(new Date(now.getMillis()), retentionDays);
                    resource.setExpectedTerminationTime(terminationTime);
                    resource.setTerminationReason(terminationReason);
                }
                return false;
            } else {
                LOGGER.debug(String.format("The resource %s has the required tag %s", resource.getId(), tagName));
            }
        }
        LOGGER.info(String.format("The resource %s has all required tags", resource.getId()));
        return true;
    }
}
| 3,678
| 36.540816
| 131
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/rule/generic/TagValueExclusionRule.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.generic;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.janitor.Rule;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.HashMap;
import java.util.Map;
/**
 * A rule for excluding resources that contain the provided tags (name and value).
 *
 * If a resource contains the tag and the appropriate value, it will be excluded from any
 * other janitor rules and will not be cleaned.
 *
 */
public class TagValueExclusionRule implements Rule {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(TagValueExclusionRule.class);

    /** Tag name to required value; a resource matching any entry is excluded. */
    private final Map<String, String> tags;

    /**
     * Constructor for TagValueExclusionRule.
     *
     * @param tags
     *            Set of tags and values to match for exclusion
     */
    public TagValueExclusionRule(Map<String, String> tags) {
        this.tags = tags;
    }

    /**
     * Constructor for TagValueExclusionRule. Use this constructor to pass names and values as separate args.
     * This is intended for convenience when specifying tag names/values in property files.
     *
     * Each tag[i] = (name[i], value[i])
     *
     * @param names
     *            Set of names to match for exclusion. Size of names must match size of values.
     * @param values
     *            Set of values to match for exclusion. Size of names must match size of values.
     */
    public TagValueExclusionRule(String[] names, String[] values) {
        Validate.notNull(names);
        Validate.notNull(values);
        // Fail fast with a clear message instead of an ArrayIndexOutOfBoundsException when the
        // property file supplies mismatched lists.
        Validate.isTrue(names.length == values.length, "names and values must have the same length");
        tags = new HashMap<String, String>();
        for (int i = 0; i < names.length; i++) {
            tags.put(names[i], values[i]);
        }
    }

    @Override
    public boolean isValid(Resource resource) {
        Validate.notNull(resource);
        // Iterate entries directly instead of keySet + get.
        for (Map.Entry<String, String> tag : tags.entrySet()) {
            String resourceValue = resource.getTag(tag.getKey());
            if (resourceValue != null && resourceValue.equals(tag.getValue())) {
                LOGGER.debug(String.format("The resource %s has the exclusion tag %s with value %s",
                        resource.getId(), tag.getKey(), resourceValue));
                return true;
            }
        }
        return false;
    }
}
| 2,905
| 32.790698
| 144
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/rule/volume/DeleteOnTerminationRule.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.volume;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.janitor.crawler.edda.EddaEBSVolumeJanitorCrawler;
import com.netflix.simianarmy.janitor.JanitorMonkey;
import com.netflix.simianarmy.janitor.Rule;
import org.apache.commons.lang.Validate;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Date;
/**
 * The rule is for checking whether an EBS volume is not attached to any instance and had the
 * DeleteOnTermination flag set in the previous attachment. This is an error case that AWS didn't
 * handle. The volume should have been deleted as soon as it was detached.
 *
 * NOTE: since the information came from the history, the rule will work only if Edda is enabled
 * for Janitor Monkey.
 */
public class DeleteOnTerminationRule implements Rule {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(DeleteOnTerminationRule.class);

    private final MonkeyCalendar calendar;
    private final int retentionDays;

    /** The date format used to print or parse the user specified termination date. **/
    private static final DateTimeFormatter TERMINATION_DATE_FORMATTER = DateTimeFormat.forPattern("yyyy-MM-dd");

    /**
     * The termination reason for the DeleteOnTerminationRule.
     */
    public static final String TERMINATION_REASON = "Not attached and DeleteOnTerminate flag was set";

    /**
     * Constructor.
     *
     * @param calendar
     *            The calendar used to calculate the termination time
     * @param retentionDays
     *            The number of days that the volume is retained before being terminated after being marked
     *            as cleanup candidate
     */
    public DeleteOnTerminationRule(MonkeyCalendar calendar, int retentionDays) {
        Validate.notNull(calendar);
        Validate.isTrue(retentionDays >= 0);
        this.calendar = calendar;
        this.retentionDays = retentionDays;
    }

    @Override
    public boolean isValid(Resource resource) {
        Validate.notNull(resource);
        // Non-volume resources are ignored and treated as valid.
        if (!resource.getResourceType().name().equals("EBS_VOLUME")) {
            return true;
        }
        // The state of the volume being "available" means that it is not attached to any instance.
        if (!"available".equals(((AWSResource) resource).getAWSResourceState())) {
            return true;
        }
        String janitorTag = resource.getTag(JanitorMonkey.JANITOR_TAG);
        if (janitorTag != null) {
            if ("donotmark".equals(janitorTag)) {
                LOGGER.info(String.format("The volume %s is tagged as not handled by Janitor",
                        resource.getId()));
                return true;
            }
            try {
                // Owners can tag the volume with a termination date in the "janitor" tag.
                Date userSpecifiedDate = new Date(
                        TERMINATION_DATE_FORMATTER.parseDateTime(janitorTag).getMillis());
                resource.setExpectedTerminationTime(userSpecifiedDate);
                resource.setTerminationReason(String.format("User specified termination date %s", janitorTag));
                return false;
            } catch (Exception e) {
                // Pass the exception to the logger so the cause of the parse failure is not lost.
                LOGGER.error(String.format("The janitor tag is not a user specified date: %s", janitorTag), e);
            }
        }
        if ("true".equals(resource.getAdditionalField(EddaEBSVolumeJanitorCrawler.DELETE_ON_TERMINATION))) {
            if (resource.getExpectedTerminationTime() == null) {
                Date terminationTime = calendar.getBusinessDay(calendar.now().getTime(), retentionDays);
                resource.setExpectedTerminationTime(terminationTime);
                resource.setTerminationReason(TERMINATION_REASON);
                LOGGER.info(String.format(
                        "Volume %s is marked to be cleaned at %s as it is detached and DeleteOnTermination was set",
                        resource.getId(), resource.getExpectedTerminationTime()));
            } else {
                LOGGER.info(String.format("Resource %s is already marked.", resource.getId()));
            }
            return false;
        }
        return true;
    }
}
| 5,085
| 41.033058
| 116
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/rule/volume/OldDetachedVolumeRule.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.volume;
import java.util.Date;
import java.util.Map;
import org.apache.commons.lang.Validate;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.janitor.VolumeTaggingMonkey;
import com.netflix.simianarmy.janitor.JanitorMonkey;
import com.netflix.simianarmy.janitor.Rule;
/**
 * The rule is for checking whether an EBS volume is detached for more than
 * certain days. The rule mostly relies on tags on the volume to decide if
 * the volume should be marked.
 */
public class OldDetachedVolumeRule implements Rule {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(OldDetachedVolumeRule.class);

    private final MonkeyCalendar calendar;
    private final int detachDaysThreshold;
    private final int retentionDays;

    /** The date format used to print or parse the user specified termination date. **/
    public static final DateTimeFormatter TERMINATION_DATE_FORMATTER = DateTimeFormat.forPattern("yyyy-MM-dd");

    /**
     * Constructor.
     *
     * @param calendar
     *            The calendar used to calculate the termination time
     * @param detachDaysThreshold
     *            The number of days that a volume is considered as cleanup candidate since it is detached
     * @param retentionDays
     *            The number of days that the volume is retained before being terminated after being marked
     *            as cleanup candidate
     */
    public OldDetachedVolumeRule(MonkeyCalendar calendar, int detachDaysThreshold, int retentionDays) {
        Validate.notNull(calendar);
        Validate.isTrue(detachDaysThreshold >= 0);
        Validate.isTrue(retentionDays >= 0);
        this.calendar = calendar;
        this.detachDaysThreshold = detachDaysThreshold;
        this.retentionDays = retentionDays;
    }

    @Override
    public boolean isValid(Resource resource) {
        Validate.notNull(resource);
        // Non-volume resources are ignored and treated as valid.
        if (!resource.getResourceType().name().equals("EBS_VOLUME")) {
            return true;
        }
        // "available" means the volume is not attached to any instance.
        if (!"available".equals(((AWSResource) resource).getAWSResourceState())) {
            return true;
        }
        String janitorTag = resource.getTag(JanitorMonkey.JANITOR_TAG);
        if (janitorTag != null) {
            if ("donotmark".equals(janitorTag)) {
                LOGGER.info(String.format("The volume %s is tagged as not handled by Janitor",
                        resource.getId()));
                return true;
            }
            try {
                // Owners can tag the volume with a termination date in the "janitor" tag.
                Date userSpecifiedDate = new Date(
                        TERMINATION_DATE_FORMATTER.parseDateTime(janitorTag).getMillis());
                resource.setExpectedTerminationTime(userSpecifiedDate);
                resource.setTerminationReason(String.format("User specified termination date %s", janitorTag));
                return false;
            } catch (Exception e) {
                // Pass the exception to the logger so the cause of the parse failure is not lost.
                LOGGER.error(String.format("The janitor tag is not a user specified date: %s", janitorTag), e);
            }
        }
        String janitorMetaTag = resource.getTag(JanitorMonkey.JANITOR_META_TAG);
        if (janitorMetaTag == null) {
            LOGGER.info(String.format("Volume %s is not tagged with the Janitor meta information, ignore.",
                    resource.getId()));
            return true;
        }
        Map<String, String> metadata = VolumeTaggingMonkey.parseJanitorMetaTag(janitorMetaTag);
        String detachTimeTag = metadata.get(JanitorMonkey.DETACH_TIME_TAG_KEY);
        if (detachTimeTag == null) {
            return true;
        }
        DateTime detachTime;
        try {
            detachTime = AWSResource.DATE_FORMATTER.parseDateTime(detachTimeTag);
        } catch (Exception e) {
            // Log the raw tag value (the original logged the parsed value, which is always null
            // when parsing fails) and include the exception for diagnosis.
            LOGGER.error(String.format("Detach time in the JANITOR_META tag of %s is not in the valid format: %s",
                    resource.getId(), detachTimeTag), e);
            return true;
        }
        DateTime now = new DateTime(calendar.now().getTimeInMillis());
        // detachTime is guaranteed non-null here: parsing either succeeded or returned above.
        if (detachTime.plusDays(detachDaysThreshold).isBefore(now)) {
            if (resource.getExpectedTerminationTime() == null) {
                Date terminationTime = calendar.getBusinessDay(new Date(now.getMillis()), retentionDays);
                resource.setExpectedTerminationTime(terminationTime);
                resource.setTerminationReason(String.format("Volume not attached for %d days",
                        detachDaysThreshold + retentionDays));
                LOGGER.info(String.format(
                        "Volume %s is marked to be cleaned at %s as it is detached for more than %d days",
                        resource.getId(), resource.getExpectedTerminationTime(), detachDaysThreshold));
            } else {
                LOGGER.info(String.format("Resource %s is already marked.", resource.getId()));
            }
            return false;
        }
        return true;
    }
}
| 6,012
| 41.048951
| 114
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/rule/ami/UnusedImageRule.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.ami;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.janitor.crawler.edda.EddaImageJanitorCrawler;
import com.netflix.simianarmy.janitor.Rule;
import org.apache.commons.lang.Validate;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Date;
/**
 * The rule class to clean up images that are not used.
 */
public class UnusedImageRule implements Rule {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(UnusedImageRule.class);

    private final MonkeyCalendar calendar;
    private final int retentionDays;
    private final int lastReferenceDaysThreshold;

    /**
     * Constructor.
     *
     * @param calendar
     *            The calendar used to calculate the termination time
     * @param retentionDays
     *            The number of days that the marked image is retained before being terminated
     * @param lastReferenceDaysThreshold
     *            The number of days that the image has not been referenced that makes the image be
     *            considered obsolete
     */
    public UnusedImageRule(MonkeyCalendar calendar, int retentionDays, int lastReferenceDaysThreshold) {
        Validate.notNull(calendar);
        Validate.isTrue(retentionDays >= 0);
        Validate.isTrue(lastReferenceDaysThreshold >= 0);
        this.calendar = calendar;
        this.retentionDays = retentionDays;
        this.lastReferenceDaysThreshold = lastReferenceDaysThreshold;
    }

    @Override
    public boolean isValid(Resource resource) {
        Validate.notNull(resource);
        if (!"IMAGE".equals(resource.getResourceType().name())) {
            return true;
        }
        if (!"available".equals(((AWSResource) resource).getAWSResourceState())) {
            return true;
        }
        if ("true".equals(resource.getAdditionalField(EddaImageJanitorCrawler.AMI_FIELD_BASE_IMAGE))) {
            LOGGER.info(String.format("Image %s is a base image that is used to create other images",
                    resource.getId()));
            return true;
        }
        long instanceRefTime = getRefTimeInMillis(resource, EddaImageJanitorCrawler.AMI_FIELD_LAST_INSTANCE_REF_TIME);
        long lcRefTime = getRefTimeInMillis(resource, EddaImageJanitorCrawler.AMI_FIELD_LAST_LC_REF_TIME);
        Date now = calendar.now().getTime();
        // The image is unused when neither instances nor launch configs referenced it within
        // the threshold window ending now.
        long windowStart = new DateTime(now.getTime()).minusDays(lastReferenceDaysThreshold).getMillis();
        if (instanceRefTime < windowStart && lcRefTime < windowStart) {
            if (resource.getExpectedTerminationTime() == null) {
                Date terminationTime = calendar.getBusinessDay(now, retentionDays);
                resource.setExpectedTerminationTime(terminationTime);
                resource.setTerminationReason(String.format("Image not referenced for %d days",
                        lastReferenceDaysThreshold + retentionDays));
                // Fixed a missing space between "referenced" and "for" in the log message.
                LOGGER.info(String.format(
                        "Image %s in region %s is marked to be cleaned at %s as it is not referenced "
                        + "for more than %d days",
                        resource.getId(), resource.getRegion(), resource.getExpectedTerminationTime(),
                        lastReferenceDaysThreshold));
            } else {
                LOGGER.info(String.format("Resource %s is already marked.", resource.getId()));
            }
            return false;
        }
        return true;
    }

    /**
     * Tries to get the long value from the provided field. If the field does not exist, falls back
     * to the creation (launch) time; if that is also absent, uses the current time so the image is
     * treated as recently referenced.
     *
     * @param resource the image resource
     * @param field the additional field holding the reference time in epoch milliseconds
     * @return the reference time in epoch milliseconds
     */
    private long getRefTimeInMillis(Resource resource, String field) {
        String fieldValue = resource.getAdditionalField(field);
        long refTime;
        if (fieldValue != null) {
            refTime = Long.parseLong(fieldValue);
        } else if (resource.getLaunchTime() != null) {
            LOGGER.info(String.format("No value in field %s is found, use the creation time %s as the ref time of %s",
                    field, resource.getLaunchTime(), resource.getId()));
            refTime = resource.getLaunchTime().getTime();
        } else {
            // When there is no creation time or ref time is found, we consider the image is referenced.
            LOGGER.info(String.format("Use the current time as the ref time of %s", resource.getId()));
            refTime = DateTime.now().getMillis();
        }
        return refTime;
    }
}
| 5,339
| 42.414634
| 118
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/rule/asg/SuspendedASGRule.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.asg;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.janitor.crawler.ASGJanitorCrawler;
import com.netflix.simianarmy.janitor.Rule;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Date;
/**
 * The rule for detecting the ASGs that 1) have been suspended from ELB traffic
 * for longer than a threshold and 2) do not have any instances or all instances
 * are inactive in Eureka. Such ASGs are marked as cleanup candidates.
 */
public class SuspendedASGRule implements Rule {

    /** The calendar used to calculate the termination time. */
    private final MonkeyCalendar calendar;

    /** The number of days a marked ASG is retained before being terminated. */
    private final int retentionDays;

    /** Days of ELB suspension after which the ASG is a cleanup candidate. */
    private final int suspensionAgeThreshold;

    /** Validator that decides whether the ASG still has active instances. */
    private final ASGInstanceValidator instanceValidator;

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(SuspendedASGRule.class);

    /**
     * Constructor.
     *
     * @param calendar
     *            The calendar used to calculate the termination time
     * @param suspensionAgeThreshold
     *            The number of days that the ASG has been suspended from ELB that makes the ASG be
     *            considered a cleanup candidate
     * @param retentionDays
     *            The number of days that the marked ASG is retained before being terminated after
     *            being marked
     * @param instanceValidator
     *            The instance validator to check if an instance is active
     */
    public SuspendedASGRule(MonkeyCalendar calendar, int suspensionAgeThreshold, int retentionDays,
            ASGInstanceValidator instanceValidator) {
        Validate.notNull(calendar);
        Validate.isTrue(retentionDays >= 0);
        Validate.isTrue(suspensionAgeThreshold >= 0);
        Validate.notNull(instanceValidator);
        this.calendar = calendar;
        this.retentionDays = retentionDays;
        this.suspensionAgeThreshold = suspensionAgeThreshold;
        this.instanceValidator = instanceValidator;
    }

    /** {@inheritDoc} */
    @Override
    public boolean isValid(Resource resource) {
        Validate.notNull(resource);
        if (!"ASG".equals(resource.getResourceType().name())) {
            // Only ASG resources are handled by this rule.
            return true;
        }
        if (instanceValidator.hasActiveInstance(resource)) {
            // An ASG with any active instance is never a cleanup candidate.
            return true;
        }
        String suspensionTimeStr = resource.getAdditionalField(ASGJanitorCrawler.ASG_FIELD_SUSPENSION_TIME);
        if (StringUtils.isEmpty(suspensionTimeStr)) {
            LOGGER.info(String.format("ASG %s is not suspended from ELB.", resource.getId()));
            return true;
        }
        // The parsed value is the time the ASG was suspended from ELB traffic
        // (extracted by the crawler from the AWS suspension reason).
        DateTime suspensionTime = ASGJanitorCrawler.SUSPENSION_TIME_FORMATTER.parseDateTime(suspensionTimeStr);
        DateTime now = new DateTime(calendar.now().getTimeInMillis());
        if (now.isBefore(suspensionTime.plusDays(suspensionAgeThreshold))) {
            LOGGER.info(String.format("The ASG %s has not been suspended for more than %d days",
                    resource.getId(), suspensionAgeThreshold));
            return true;
        }
        LOGGER.info(String.format("The ASG %s has been suspended for more than %d days",
                resource.getId(), suspensionAgeThreshold));
        if (resource.getExpectedTerminationTime() == null) {
            Date terminationTime = calendar.getBusinessDay(new Date(now.getMillis()), retentionDays);
            resource.setExpectedTerminationTime(terminationTime);
            resource.setTerminationReason(String.format(
                    "ASG has been disabled for more than %d days and all instances are out of service in Discovery",
                    suspensionAgeThreshold + retentionDays));
        }
        return false;
    }
}
| 4,582
| 40.288288
| 120
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/rule/asg/OldEmptyASGRule.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.asg;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.janitor.crawler.ASGJanitorCrawler;
import com.netflix.simianarmy.aws.janitor.crawler.edda.EddaASGJanitorCrawler;
import com.netflix.simianarmy.janitor.Rule;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Date;
/**
 * The rule for detecting the ASGs that 1) have old launch configurations and
 * 2) do not have any instances or all instances are inactive in Eureka.
 * 3) are not fronted with any ELBs.
 */
public class OldEmptyASGRule implements Rule {
    // Calendar used to compute the scheduled (business-day) termination time.
    private final MonkeyCalendar calendar;
    // Days a marked ASG is retained before being terminated.
    private final int retentionDays;
    // Age (in days) of the launch config beyond which the ASG is considered obsolete.
    private final int launchConfigAgeThreshold;
    // Optional: the ASG must also be unchanged for this many days; null disables the check.
    private final Integer lastChangeDaysThreshold;
    // Validator that decides whether the ASG still has active instances.
    private final ASGInstanceValidator instanceValidator;
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(OldEmptyASGRule.class);
    /**
     * Constructor. Equivalent to the full constructor with the last-change-days
     * check disabled (lastChangeDaysThreshold = null).
     *
     * @param calendar
     *            The calendar used to calculate the termination time
     * @param retentionDays
     *            The number of days that the marked ASG is retained before being terminated
     * @param launchConfigAgeThreshold
     *            The number of days that the launch configuration for the ASG has been created that makes the ASG be
     *            considered obsolete
     * @param instanceValidator
     *            The instance validator to check if an instance is active
     */
    public OldEmptyASGRule(MonkeyCalendar calendar, int launchConfigAgeThreshold,
            int retentionDays, ASGInstanceValidator instanceValidator) {
        this(calendar, launchConfigAgeThreshold, null, retentionDays, instanceValidator);
    }
    /**
     * Constructor.
     *
     * @param calendar
     *            The calendar used to calculate the termination time
     * @param retentionDays
     *            The number of days that the marked ASG is retained before being terminated
     * @param launchConfigAgeThreshold
     *            The number of days that the launch configuration for the ASG has been created that makes the ASG be
     *            considered obsolete
     * @param lastChangeDaysThreshold
     *            The number of days that the launch configuration has not been changed. An ASG is considered as a
     *            cleanup candidate only if it has no change during the last n days. The parameter can be null.
     * @param instanceValidator
     *            The instance validator to check if an instance is active
     */
    public OldEmptyASGRule(MonkeyCalendar calendar, int launchConfigAgeThreshold, Integer lastChangeDaysThreshold,
            int retentionDays, ASGInstanceValidator instanceValidator) {
        Validate.notNull(calendar);
        Validate.isTrue(retentionDays >= 0);
        Validate.isTrue(launchConfigAgeThreshold >= 0);
        Validate.isTrue(lastChangeDaysThreshold == null || lastChangeDaysThreshold >= 0);
        Validate.notNull(instanceValidator);
        this.calendar = calendar;
        this.retentionDays = retentionDays;
        this.launchConfigAgeThreshold = launchConfigAgeThreshold;
        this.lastChangeDaysThreshold = lastChangeDaysThreshold;
        this.instanceValidator = instanceValidator;
    }
    /** {@inheritDoc} */
    @Override
    public boolean isValid(Resource resource) {
        Validate.notNull(resource);
        if (!"ASG".equals(resource.getResourceType().name())) {
            // Only ASG resources are handled by this rule.
            return true;
        }
        if (StringUtils.isNotEmpty(resource.getAdditionalField(ASGJanitorCrawler.ASG_FIELD_ELBS))) {
            LOGGER.info(String.format("ASG %s has ELBs.", resource.getId()));
            return true;
        }
        if (instanceValidator.hasActiveInstance(resource)) {
            LOGGER.info(String.format("ASG %s has active instance.", resource.getId()));
            return true;
        }
        String lcName = resource.getAdditionalField(ASGJanitorCrawler.ASG_FIELD_LC_NAME);
        DateTime now = new DateTime(calendar.now().getTimeInMillis());
        if (StringUtils.isEmpty(lcName)) {
            // NOTE(review): an ASG with no launch configuration at all is marked
            // immediately, while a launch config with a missing creation time (below)
            // is left unmarked — confirm this asymmetry is intended.
            LOGGER.error(String.format("Failed to find launch configuration for ASG %s", resource.getId()));
            markResource(resource, now);
            return false;
        }
        String lcCreationTime = resource.getAdditionalField(ASGJanitorCrawler.ASG_FIELD_LC_CREATION_TIME);
        if (StringUtils.isEmpty(lcCreationTime)) {
            LOGGER.error(String.format("Failed to find creation time for launch configuration %s", lcName));
            return true;
        }
        // The creation time field holds epoch milliseconds.
        DateTime createTime = new DateTime(Long.parseLong(lcCreationTime));
        if (now.isBefore(createTime.plusDays(launchConfigAgeThreshold))) {
            LOGGER.info(String.format("The launch configuration %s has not been created for more than %d days",
                    lcName, launchConfigAgeThreshold));
            return true;
        }
        LOGGER.info(String.format("The launch configuration %s has been created for more than %d days",
                lcName, launchConfigAgeThreshold));
        if (lastChangeDaysThreshold != null) {
            // Optional extra guard: skip ASGs that changed recently (field only
            // populated by the Edda-based crawler).
            String lastChangeTimeField = resource.getAdditionalField(EddaASGJanitorCrawler.ASG_FIELD_LAST_CHANGE_TIME);
            if (StringUtils.isNotBlank(lastChangeTimeField)) {
                DateTime lastChangeTime = new DateTime(Long.parseLong(lastChangeTimeField));
                if (lastChangeTime.plusDays(lastChangeDaysThreshold).isAfter(now)) {
                    LOGGER.info(String.format("ASG %s had change during the last %d days",
                            resource.getId(), lastChangeDaysThreshold));
                    return true;
                }
            }
        }
        markResource(resource, now);
        return false;
    }
    /**
     * Marks the resource as a cleanup candidate by setting its expected termination
     * time (retentionDays business days from now) and reason, unless already marked.
     */
    private void markResource(Resource resource, DateTime now) {
        if (resource.getExpectedTerminationTime() == null) {
            Date terminationTime = calendar.getBusinessDay(new Date(now.getMillis()), retentionDays);
            resource.setExpectedTerminationTime(terminationTime);
            resource.setTerminationReason(String.format(
                    "Launch config older than %d days. Not in Discovery. No ELB.",
                    launchConfigAgeThreshold + retentionDays));
        } else {
            LOGGER.info(String.format("Resource %s is already marked as cleanup candidate.", resource.getId()));
        }
    }
}
| 7,356
| 43.053892
| 119
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/rule/asg/DiscoveryASGInstanceValidator.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.asg;
import java.util.List;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.discovery.DiscoveryClient;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.janitor.crawler.ASGJanitorCrawler;
/**
 * The class is for checking whether an ASG has any active instance using Discovery/Eureka.
 * If Discovery/Eureka is enabled, it uses its service to check if the instances in the ASG are
 * registered and up there.
 */
public class DiscoveryASGInstanceValidator implements ASGInstanceValidator {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(DiscoveryASGInstanceValidator.class);

    private final DiscoveryClient discoveryClient;

    /**
     * Constructor.
     * @param discoveryClient
     *          the client to access the Discovery/Eureka service for checking the status of instances.
     */
    public DiscoveryASGInstanceValidator(DiscoveryClient discoveryClient) {
        Validate.notNull(discoveryClient);
        this.discoveryClient = discoveryClient;
    }

    /** {@inheritDoc} */
    @Override
    public boolean hasActiveInstance(Resource resource) {
        String idList = resource.getAdditionalField(ASGJanitorCrawler.ASG_FIELD_INSTANCES);
        String maxSize = resource.getAdditionalField(ASGJanitorCrawler.ASG_FIELD_MAX_SIZE);
        if (StringUtils.isBlank(idList)) {
            // The ASG is empty when it has no instance and the max size of the ASG is 0.
            // If the max size is not 0, the ASG could probably be in the process of starting new instances.
            boolean emptyAndCapped = maxSize != null && Integer.parseInt(maxSize) == 0;
            if (emptyAndCapped) {
                LOGGER.info(String.format("ASG %s is empty.", resource.getId()));
                return false;
            }
            LOGGER.info(String.format("ASG %s does not have instances but the max size is %s",
                    resource.getId(), maxSize));
            return true;
        }
        String[] ids = StringUtils.split(idList, ",");
        LOGGER.debug(String.format("Checking if the %d instances in ASG %s are active.",
                ids.length, resource.getId()));
        for (String id : ids) {
            if (isActiveInstance(id)) {
                LOGGER.info(String.format("ASG %s has active instance.", resource.getId()));
                return true;
            }
        }
        LOGGER.info(String.format("ASG %s has no active instance.", resource.getId()));
        return false;
    }

    /**
     * Returns true if the instance is registered in Eureka/Discovery.
     * @param instanceId the instance id
     * @return true if the instance is active, false otherwise
     */
    private boolean isActiveInstance(String instanceId) {
        Validate.notNull(instanceId);
        LOGGER.debug(String.format("Checking if instance %s is active", instanceId));
        for (InstanceInfo info : discoveryClient.getInstancesById(instanceId)) {
            InstanceStatus status = info.getStatus();
            if (status == InstanceStatus.UP || status == InstanceStatus.STARTING) {
                LOGGER.debug(String.format("Instance %s is active in Discovery.", instanceId));
                return true;
            }
        }
        LOGGER.debug(String.format("Instance %s is not active in Discovery.", instanceId));
        return false;
    }
}
| 4,393
| 41.660194
| 112
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/rule/asg/ASGInstanceValidator.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.asg;
import com.netflix.simianarmy.Resource;
/**
 * The interface is for checking whether an ASG has any active instance.
 * Implementations decide what "active" means (e.g. registered and up in
 * Eureka/Discovery, or simply listed in the ASG's instance field).
 */
public interface ASGInstanceValidator {
    /**
     * Checks whether an ASG resource contains any active instances.
     * @param resource the ASG resource
     * @return true if the ASG contains any active instances, false otherwise.
     */
    boolean hasActiveInstance(Resource resource);
}
| 1,112
| 32.727273
| 79
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/rule/asg/DummyASGInstanceValidator.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.asg;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.janitor.crawler.ASGJanitorCrawler;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A dummy implementation of ASGInstanceValidator that considers every instance as active.
 */
public class DummyASGInstanceValidator implements ASGInstanceValidator {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(DummyASGInstanceValidator.class);

    /** {@inheritDoc} */
    @Override
    public boolean hasActiveInstance(Resource resource) {
        String idList = resource.getAdditionalField(ASGJanitorCrawler.ASG_FIELD_INSTANCES);
        String maxSize = resource.getAdditionalField(ASGJanitorCrawler.ASG_FIELD_MAX_SIZE);
        if (!StringUtils.isBlank(idList)) {
            // Every listed instance counts as active; the ASG is active as soon as the
            // comma-separated list splits into at least one id.
            return StringUtils.split(idList, ",").length > 0;
        }
        // The ASG is empty when it has no instance and the max size of the ASG is 0.
        // If the max size is not 0, the ASG could probably be in the process of starting new instances.
        if (maxSize != null && Integer.parseInt(maxSize) == 0) {
            LOGGER.info(String.format("ASG %s is empty.", resource.getId()));
            return false;
        }
        LOGGER.info(String.format("ASG %s does not have instances but the max size is %s",
                resource.getId(), maxSize));
        return true;
    }
}
| 2,280
| 41.240741
| 112
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/rule/launchconfig/OldUnusedLaunchConfigRule.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.launchconfig;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.janitor.crawler.LaunchConfigJanitorCrawler;
import com.netflix.simianarmy.janitor.Rule;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Date;
/**
 * The rule for detecting the launch configurations that
 * 1) have been created for certain days and
 * 2) are not used by any auto scaling groups.
 */
public class OldUnusedLaunchConfigRule implements Rule {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(OldUnusedLaunchConfigRule.class);

    private static final String TERMINATION_REASON = "Launch config is not used by any ASG";

    /** The calendar used to calculate the termination time. */
    private final MonkeyCalendar calendar;

    /** Age (in days) after creation at which an unused launch config becomes a candidate. */
    private final int ageThreshold;

    /** Days a marked launch config is retained before being terminated. */
    private final int retentionDays;

    /**
     * Constructor for OldUnusedLaunchConfigRule.
     *
     * @param calendar
     *            The calendar used to calculate the termination time
     * @param ageThreshold
     *            The number of days that a launch configuration is considered as a cleanup candidate
     *            since it is created
     * @param retentionDays
     *            The number of days that the unused launch configuration is retained before being terminated
     */
    public OldUnusedLaunchConfigRule(MonkeyCalendar calendar, int ageThreshold, int retentionDays) {
        Validate.notNull(calendar);
        Validate.isTrue(ageThreshold >= 0);
        Validate.isTrue(retentionDays >= 0);
        this.calendar = calendar;
        this.ageThreshold = ageThreshold;
        this.retentionDays = retentionDays;
    }

    /** {@inheritDoc} */
    @Override
    public boolean isValid(Resource resource) {
        Validate.notNull(resource);
        if (!"LAUNCH_CONFIG".equals(resource.getResourceType().name())) {
            // Only launch configuration resources are handled by this rule.
            return true;
        }
        String usedByASG = ((AWSResource) resource)
                .getAdditionalField(LaunchConfigJanitorCrawler.LAUNCH_CONFIG_FIELD_USED_BY_ASG);
        // Only launch configs explicitly flagged as unused ("false") are candidates;
        // a missing flag is treated as "in use".
        if (StringUtils.isEmpty(usedByASG) || Boolean.parseBoolean(usedByASG)) {
            return true;
        }
        if (resource.getLaunchTime() == null) {
            LOGGER.error(String.format("The launch config %s has no creation time.", resource.getId()));
            return true;
        }
        DateTime launchTime = new DateTime(resource.getLaunchTime().getTime());
        DateTime now = new DateTime(calendar.now().getTimeInMillis());
        if (now.isBefore(launchTime.plusDays(ageThreshold))) {
            LOGGER.info(String.format("The unused launch config %s has not been created for more than %d days",
                    resource.getId(), ageThreshold));
            return true;
        }
        LOGGER.info(String.format("The unused launch config %s has been created for more than %d days",
                resource.getId(), ageThreshold));
        if (resource.getExpectedTerminationTime() == null) {
            Date terminationTime = calendar.getBusinessDay(new Date(now.getMillis()), retentionDays);
            resource.setExpectedTerminationTime(terminationTime);
            resource.setTerminationReason(TERMINATION_REASON);
        }
        return false;
    }
}
| 4,280
| 39.771429
| 119
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/rule/elb/OrphanedELBRule.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.elb;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.janitor.Rule;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.apache.commons.lang.math.NumberUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Date;
/**
 * The rule for detecting orphaned ELBs, i.e. load balancers that have no
 * instances attached and are not referenced by any ASG.
 */
public class OrphanedELBRule implements Rule {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(OrphanedELBRule.class);

    private static final String TERMINATION_REASON = "ELB has no instances and is not referenced by any ASG";

    /** The calendar used to calculate the termination time. */
    private final MonkeyCalendar calendar;

    /** The number of days a marked ELB is retained before being terminated. */
    private final int retentionDays;

    /**
     * Constructor for OrphanedELBRule.
     *
     * @param calendar
     *            The calendar used to calculate the termination time
     * @param retentionDays
     *            The number of days that the marked ELB is retained before being terminated
     */
    public OrphanedELBRule(MonkeyCalendar calendar, int retentionDays) {
        Validate.notNull(calendar);
        Validate.isTrue(retentionDays >= 0);
        this.calendar = calendar;
        this.retentionDays = retentionDays;
    }

    /** {@inheritDoc} */
    @Override
    public boolean isValid(Resource resource) {
        Validate.notNull(resource);
        // Constant-first comparison for null safety, consistent with the other rules.
        if (!"ELB".equals(resource.getResourceType().name())) {
            return true;
        }
        String instanceCountStr = resource.getAdditionalField("instanceCount");
        String refASGCountStr = resource.getAdditionalField("referencedASGCount");
        if (StringUtils.isBlank(instanceCountStr)) {
            LOGGER.info(String.format("Resource %s is missing instance count, not marked as a cleanup candidate.", resource.getId()));
            return true;
        }
        if (StringUtils.isBlank(refASGCountStr)) {
            LOGGER.info(String.format("Resource %s is missing referenced ASG count, not marked as a cleanup candidate.", resource.getId()));
            return true;
        }
        int instanceCount = NumberUtils.toInt(instanceCountStr);
        int refASGCount = NumberUtils.toInt(refASGCountStr);
        if (instanceCount == 0 && refASGCount == 0) {
            LOGGER.info(String.format("Resource %s is marked as cleanup candidate with 0 instances and 0 referenced ASGs (owner: %s).", resource.getId(), resource.getOwnerEmail()));
            markResource(resource);
            return false;
        }
        LOGGER.info(String.format("Resource %s is not marked as cleanup candidate with %d instances and %d referenced ASGs.", resource.getId(), instanceCount, refASGCount));
        return true;
    }

    /**
     * Marks the resource as a cleanup candidate by setting its expected termination
     * time (retentionDays business days from now) and reason, unless already marked.
     */
    private void markResource(Resource resource) {
        if (resource.getExpectedTerminationTime() == null) {
            Date terminationTime = calendar.getBusinessDay(new Date(), retentionDays);
            resource.setExpectedTerminationTime(terminationTime);
            resource.setTerminationReason(TERMINATION_REASON);
        } else {
            LOGGER.info(String.format("Resource %s is already marked as cleanup candidate.", resource.getId()));
        }
    }
}
| 3,989
| 38.50495
| 181
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/rule/snapshot/NoGeneratedAMIRule.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.snapshot;
import java.util.Date;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.janitor.crawler.EBSSnapshotJanitorCrawler;
import com.netflix.simianarmy.janitor.JanitorMonkey;
import com.netflix.simianarmy.janitor.Rule;
/**
 * The rule is for checking whether an EBS snapshot has any AMIs generated from it.
 * If there are no AMIs generated using the snapshot and the snapshot is created
 * for certain days, it is marked as a cleanup candidate by this rule.
 */
public class NoGeneratedAMIRule implements Rule {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(NoGeneratedAMIRule.class);
    // When non-null, notifications go to this address instead of the resource owner.
    private String ownerEmailOverride = null;
    private static final String TERMINATION_REASON = "No AMI is generated for this snapshot";
    // Calendar used to compute the scheduled (business-day) termination time.
    private final MonkeyCalendar calendar;
    // Age (in days) of a snapshot beyond which it becomes a cleanup candidate.
    private final int ageThreshold;
    // Days a marked snapshot is retained before being terminated.
    private final int retentionDays;
    /** The date format used to print or parse the user specified termination date. **/
    public static final DateTimeFormatter TERMINATION_DATE_FORMATTER = DateTimeFormat.forPattern("yyyy-MM-dd");
    /**
     * Constructor. Notifications go to the resource owner (no override).
     *
     * @param calendar
     *            The calendar used to calculate the termination time
     * @param ageThreshold
     *            The number of days that a snapshot is considered as cleanup candidate since it is created
     * @param retentionDays
     *            The number of days that the snapshot is retained before being terminated after being marked
     *            as cleanup candidate
     */
    public NoGeneratedAMIRule(MonkeyCalendar calendar, int ageThreshold, int retentionDays) {
        this(calendar, ageThreshold, retentionDays, null);
    }
    /**
     * Constructor.
     *
     * @param calendar
     *            The calendar used to calculate the termination time
     * @param ageThreshold
     *            The number of days that a snapshot is considered as cleanup candidate since it is created
     * @param retentionDays
     *            The number of days that the snapshot is retained before being terminated after being marked
     *            as cleanup candidate
     * @param ownerEmailOverride
     *            If null, send notifications to the resource owner.
     *            If not null, send notifications to the provided owner email address instead of the resource owner.
     */
    public NoGeneratedAMIRule(MonkeyCalendar calendar, int ageThreshold, int retentionDays, String ownerEmailOverride) {
        Validate.notNull(calendar);
        Validate.isTrue(ageThreshold >= 0);
        Validate.isTrue(retentionDays >= 0);
        this.calendar = calendar;
        this.ageThreshold = ageThreshold;
        this.retentionDays = retentionDays;
        this.ownerEmailOverride = ownerEmailOverride;
    }
    /** {@inheritDoc} */
    @Override
    public boolean isValid(Resource resource) {
        Validate.notNull(resource);
        if (!resource.getResourceType().name().equals("EBS_SNAPSHOT")) {
            // Only EBS snapshot resources are handled by this rule.
            return true;
        }
        if (!"completed".equals(((AWSResource) resource).getAWSResourceState())) {
            // In-progress or failed snapshots are never marked.
            return true;
        }
        String janitorTag = resource.getTag(JanitorMonkey.JANITOR_TAG);
        if (janitorTag != null) {
            if ("donotmark".equals(janitorTag)) {
                // Owners can opt a snapshot out of Janitor handling with this tag value.
                LOGGER.info(String.format("The snapshot %s is tagged as not handled by Janitor",
                        resource.getId()));
                return true;
            }
            try {
                // Owners can tag the snapshot with a termination date in the "janitor" tag.
                Date userSpecifiedDate = new Date(TERMINATION_DATE_FORMATTER.parseDateTime(janitorTag).getMillis());
                resource.setExpectedTerminationTime(userSpecifiedDate);
                resource.setTerminationReason(String.format("User specified termination date %s", janitorTag));
                if (ownerEmailOverride != null) {
                    resource.setOwnerEmail(ownerEmailOverride);
                }
                return false;
            } catch (Exception e) {
                // The tag is neither "donotmark" nor a parsable date; fall through to
                // the normal age-based checks below.
                LOGGER.error(String.format("The janitor tag is not a user specified date: %s", janitorTag));
            }
        }
        if (hasGeneratedImage(resource)) {
            // Snapshots that back at least one AMI are still in use.
            return true;
        }
        if (resource.getLaunchTime() == null) {
            LOGGER.error(String.format("Snapshot %s does not have a creation time.", resource.getId()));
            return true;
        }
        DateTime launchTime = new DateTime(resource.getLaunchTime().getTime());
        DateTime now = new DateTime(calendar.now().getTimeInMillis());
        if (launchTime.plusDays(ageThreshold).isBefore(now)) {
            if (ownerEmailOverride != null) {
                resource.setOwnerEmail(ownerEmailOverride);
            }
            if (resource.getExpectedTerminationTime() == null) {
                Date terminationTime = calendar.getBusinessDay(new Date(now.getMillis()), retentionDays);
                resource.setExpectedTerminationTime(terminationTime);
                resource.setTerminationReason(TERMINATION_REASON);
                LOGGER.info(String.format(
                        "Snapshot %s is marked to be cleaned at %s as there is no AMI generated using it",
                        resource.getId(), resource.getExpectedTerminationTime()));
            } else {
                LOGGER.info(String.format("Resource %s is already marked.", resource.getId()));
            }
            return false;
        }
        return true;
    }
    /**
     * Gets the AMI created using the snapshot. This method can be overridden by subclasses
     * if they use a different way to check this.
     * @param resource the snapshot resource
     * @return true if there are AMIs that are created using the snapshot, false otherwise
     */
    protected boolean hasGeneratedImage(Resource resource) {
        return StringUtils.isNotEmpty(resource.getAdditionalField(EBSSnapshotJanitorCrawler.SNAPSHOT_FIELD_AMIS));
    }
}
| 7,095
| 40.741176
| 120
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/ASGJanitorCrawler.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler;
import java.util.ArrayList;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.lang.StringUtils;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.services.autoscaling.model.AutoScalingGroup;
import com.amazonaws.services.autoscaling.model.Instance;
import com.amazonaws.services.autoscaling.model.LaunchConfiguration;
import com.amazonaws.services.autoscaling.model.SuspendedProcess;
import com.amazonaws.services.autoscaling.model.TagDescription;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.aws.AWSClient;
/**
* The crawler to crawl AWS auto scaling groups for janitor monkey.
*/
public class ASGJanitorCrawler extends AbstractAWSJanitorCrawler {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(ASGJanitorCrawler.class);

    /** The name representing the additional field name of instance ids. */
    public static final String ASG_FIELD_INSTANCES = "INSTANCES";

    /** The name representing the additional field name of max ASG size. */
    public static final String ASG_FIELD_MAX_SIZE = "MAX_SIZE";

    /** The name representing the additional field name of ELB names. */
    public static final String ASG_FIELD_ELBS = "ELBS";

    /** The name representing the additional field name of launch configuration name. */
    public static final String ASG_FIELD_LC_NAME = "LAUNCH_CONFIGURATION_NAME";

    /** The name representing the additional field name of launch configuration creation time. */
    public static final String ASG_FIELD_LC_CREATION_TIME = "LAUNCH_CONFIGURATION_CREATION_TIME";

    /** The name representing the additional field name of ASG suspension time from ELB. */
    public static final String ASG_FIELD_SUSPENSION_TIME = "ASG_SUSPENSION_TIME";

    /** Cache from launch configuration name to launch configuration, rebuilt on every crawl. */
    private final Map<String, LaunchConfiguration> nameToLaunchConfig = new HashMap<String, LaunchConfiguration>();

    /** The regular expression pattern below is for the termination reason added by AWS when
     * an ASG is suspended from ELB's traffic.
     */
    private static final Pattern SUSPENSION_REASON_PATTERN =
            Pattern.compile("User suspended at (\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}).*");

    /** The date format used to print or parse the suspension time value. **/
    public static final DateTimeFormatter SUSPENSION_TIME_FORMATTER =
            DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss");

    /**
     * Instantiates a new basic ASG crawler.
     * @param awsClient
     *            the aws client
     */
    public ASGJanitorCrawler(AWSClient awsClient) {
        super(awsClient);
    }

    @Override
    public EnumSet<? extends ResourceType> resourceTypes() {
        return EnumSet.of(AWSResourceType.ASG);
    }

    @Override
    public List<Resource> resources(ResourceType resourceType) {
        if ("ASG".equals(resourceType.name())) {
            return getASGResources();
        }
        return Collections.emptyList();
    }

    @Override
    public List<Resource> resources(String... asgNames) {
        return getASGResources(asgNames);
    }

    /**
     * Crawls auto scaling groups, attaching instance ids, ELB names, launch configuration
     * details and the ELB suspension time as additional fields on each resource.
     *
     * @param asgNames the ASG names to crawl; all ASGs in the region when empty
     * @return the list of ASG resources
     */
    private List<Resource> getASGResources(String... asgNames) {
        AWSClient awsClient = getAWSClient();
        // BUG FIX: clear the cache before refilling it so launch configurations deleted
        // since a previous crawl do not linger as stale entries across invocations.
        nameToLaunchConfig.clear();
        for (LaunchConfiguration lc : awsClient.describeLaunchConfigurations()) {
            nameToLaunchConfig.put(lc.getLaunchConfigurationName(), lc);
        }
        List<Resource> resources = new LinkedList<Resource>();
        for (AutoScalingGroup asg : awsClient.describeAutoScalingGroups(asgNames)) {
            Resource asgResource = new AWSResource().withId(asg.getAutoScalingGroupName())
                    .withResourceType(AWSResourceType.ASG).withRegion(awsClient.region())
                    .withLaunchTime(asg.getCreatedTime());
            for (TagDescription tag : asg.getTags()) {
                asgResource.setTag(tag.getKey(), tag.getValue());
            }
            asgResource.setDescription(String.format("%d instances", asg.getInstances().size()));
            asgResource.setOwnerEmail(getOwnerEmailForResource(asgResource));
            if (asg.getStatus() != null) {
                ((AWSResource) asgResource).setAWSResourceState(asg.getStatus());
            }
            Integer maxSize = asg.getMaxSize();
            if (maxSize != null) {
                asgResource.setAdditionalField(ASG_FIELD_MAX_SIZE, String.valueOf(maxSize));
            }
            // Adds instances and ELBs as additional fields.
            List<String> instances = new ArrayList<String>();
            for (Instance instance : asg.getInstances()) {
                instances.add(instance.getInstanceId());
            }
            asgResource.setAdditionalField(ASG_FIELD_INSTANCES, StringUtils.join(instances, ","));
            asgResource.setAdditionalField(ASG_FIELD_ELBS,
                    StringUtils.join(asg.getLoadBalancerNames(), ","));
            String lcName = asg.getLaunchConfigurationName();
            LaunchConfiguration lc = nameToLaunchConfig.get(lcName);
            // Merged the two previously separate null checks on the launch configuration.
            if (lc != null) {
                asgResource.setAdditionalField(ASG_FIELD_LC_NAME, lcName);
                if (lc.getCreatedTime() != null) {
                    asgResource.setAdditionalField(ASG_FIELD_LC_CREATION_TIME,
                            String.valueOf(lc.getCreatedTime().getTime()));
                }
            }
            // sets the field for the time when the ASG's traffic is suspended from ELB
            for (SuspendedProcess sp : asg.getSuspendedProcesses()) {
                if ("AddToLoadBalancer".equals(sp.getProcessName())) {
                    String suspensionTime = getSuspensionTimeString(sp.getSuspensionReason());
                    if (suspensionTime != null) {
                        LOGGER.info(String.format("Suspension time of ASG %s is %s",
                                asg.getAutoScalingGroupName(), suspensionTime));
                        asgResource.setAdditionalField(ASG_FIELD_SUSPENSION_TIME, suspensionTime);
                        break;
                    }
                }
            }
            resources.add(asgResource);
        }
        return resources;
    }

    /**
     * Extracts the timestamp from an AWS suspension reason string.
     *
     * @param suspensionReason the suspension reason text, may be null
     * @return the timestamp portion of the reason, or null when the reason does not match
     */
    private String getSuspensionTimeString(String suspensionReason) {
        if (suspensionReason == null) {
            return null;
        }
        Matcher matcher = SUSPENSION_REASON_PATTERN.matcher(suspensionReason);
        if (matcher.matches()) {
            return matcher.group(1);
        }
        return null;
    }
}
| 7,710
| 41.136612
| 115
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/AbstractAWSJanitorCrawler.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import org.apache.commons.lang.Validate;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.janitor.JanitorCrawler;
/**
* The abstract class for crawler of AWS resources.
*/
public abstract class AbstractAWSJanitorCrawler implements JanitorCrawler {

    /** The AWS client shared by all concrete AWS crawlers. */
    private final AWSClient awsClient;

    /**
     * The constructor.
     * @param awsClient the AWS client used by the crawler.
     */
    public AbstractAWSJanitorCrawler(AWSClient awsClient) {
        Validate.notNull(awsClient);
        this.awsClient = awsClient;
    }

    /**
     * Gets the AWS client used by the crawler.
     * @return the AWS client used by the crawler.
     */
    protected AWSClient getAWSClient() {
        return awsClient;
    }

    /**
     * Gets the owner email from the resource's tag key set in GLOBAL_OWNER_TAGKEY.
     * @param resource the resource
     * @return the owner email specified in the resource's tags
     */
    @Override
    public String getOwnerEmailForResource(Resource resource) {
        Validate.notNull(resource);
        return resource.getTag(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY);
    }
}
| 1,973
| 30.333333
| 83
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/ELBJanitorCrawler.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler;
import com.amazonaws.services.autoscaling.model.AutoScalingGroup;
import com.amazonaws.services.elasticloadbalancing.model.Instance;
import com.amazonaws.services.elasticloadbalancing.model.LoadBalancerDescription;
import com.amazonaws.services.elasticloadbalancing.model.Tag;
import com.amazonaws.services.elasticloadbalancing.model.TagDescription;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.aws.AWSClient;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
/**
* The crawler to crawl AWS instances for janitor monkey.
*/
public class ELBJanitorCrawler extends AbstractAWSJanitorCrawler {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(ELBJanitorCrawler.class);

    /**
     * Instantiates a new basic instance crawler.
     * @param awsClient
     *            the aws client
     */
    public ELBJanitorCrawler(AWSClient awsClient) {
        super(awsClient);
    }

    @Override
    public EnumSet<? extends ResourceType> resourceTypes() {
        return EnumSet.of(AWSResourceType.ELB);
    }

    @Override
    public List<Resource> resources(ResourceType resourceType) {
        if ("ELB".equals(resourceType.name())) {
            return getELBResources();
        }
        return Collections.emptyList();
    }

    @Override
    public List<Resource> resources(String... resourceIds) {
        return getELBResources(resourceIds);
    }

    /**
     * Crawls ELBs, recording per-ELB instance membership, tags, and the ASGs that
     * reference each ELB.
     *
     * @param elbNames the ELB names to crawl; all ELBs in the region when empty
     * @return the list of ELB resources
     */
    private List<Resource> getELBResources(String... elbNames) {
        List<Resource> resources = new LinkedList<Resource>();
        AWSClient awsClient = getAWSClient();
        for (LoadBalancerDescription elb : awsClient.describeElasticLoadBalancers(elbNames)) {
            Resource resource = new AWSResource().withId(elb.getLoadBalancerName())
                    .withRegion(getAWSClient().region()).withResourceType(AWSResourceType.ELB)
                    .withLaunchTime(elb.getCreatedTime());
            resource.setOwnerEmail(getOwnerEmailForResource(resource));
            resources.add(resource);
            List<Instance> instances = elb.getInstances();
            if (instances == null || instances.isEmpty()) {
                resource.setAdditionalField("instanceCount", "0");
                resource.setDescription("instances=none");
                LOGGER.debug(String.format("No instances found for ELB %s", resource.getId()));
            } else {
                resource.setAdditionalField("instanceCount", String.valueOf(instances.size()));
                ArrayList<String> instanceList = new ArrayList<String>(instances.size());
                LOGGER.debug(String.format("Found %d instances for ELB %s", instances.size(), resource.getId()));
                for (Instance instance : instances) {
                    instanceList.add(instance.getInstanceId());
                }
                String instancesStr = StringUtils.join(instanceList, ",");
                // BUG FIX: the description previously interpolated the raw List<Instance>
                // (its toString) rather than the comma-joined instance ids in instancesStr.
                resource.setDescription(String.format("instances=%s", instancesStr));
                LOGGER.debug(String.format("Resource ELB %s has instances %s", resource.getId(), instancesStr));
            }
            for (TagDescription tagDescription : awsClient.describeElasticLoadBalancerTags(resource.getId())) {
                for (Tag tag : tagDescription.getTags()) {
                    LOGGER.debug(String.format("Adding tag %s = %s to resource %s",
                            tag.getKey(), tag.getValue(), resource.getId()));
                    resource.setTag(tag.getKey(), tag.getValue());
                }
            }
        }
        // Annotate every crawled ELB with the ASGs that reference it.
        Map<String, List<String>> elbtoASGMap = buildELBtoASGMap();
        for (Resource resource : resources) {
            List<String> asgList = elbtoASGMap.get(resource.getId());
            if (asgList != null && !asgList.isEmpty()) {
                resource.setAdditionalField("referencedASGCount", String.valueOf(asgList.size()));
                String asgStr = StringUtils.join(asgList, ",");
                resource.setDescription(resource.getDescription() + ", ASGS=" + asgStr);
                LOGGER.debug(String.format("Resource ELB %s is referenced by ASGs %s", resource.getId(), asgStr));
            } else {
                resource.setAdditionalField("referencedASGCount", "0");
                resource.setDescription(resource.getDescription() + ", ASGS=none");
                LOGGER.debug(String.format("No ASGs found for ELB %s", resource.getId()));
            }
        }
        return resources;
    }

    /**
     * Builds a map from ELB name to the names of the auto scaling groups referencing it.
     * @return the map from ELB name to referencing ASG names
     */
    private Map<String, List<String>> buildELBtoASGMap() {
        AWSClient awsClient = getAWSClient();
        LOGGER.info(String.format("Getting all ELBs associated with ASGs in region %s", awsClient.region()));
        List<AutoScalingGroup> autoScalingGroupList = awsClient.describeAutoScalingGroups();
        HashMap<String, List<String>> asgMap = new HashMap<>();
        for (AutoScalingGroup asg : autoScalingGroupList) {
            String asgName = asg.getAutoScalingGroupName();
            if (asg.getLoadBalancerNames() != null) {
                for (String elbName : asg.getLoadBalancerNames()) {
                    List<String> asgList = asgMap.get(elbName);
                    if (asgList == null) {
                        asgList = new ArrayList<>();
                        asgMap.put(elbName, asgList);
                    }
                    asgList.add(asgName);
                    LOGGER.debug(String.format("Found ASG %s associated with ELB %s", asgName, elbName));
                }
            }
        }
        return asgMap;
    }
}
| 6,526
| 42.225166
| 114
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/InstanceJanitorCrawler.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.services.autoscaling.model.AutoScalingInstanceDetails;
import com.amazonaws.services.ec2.model.Instance;
import com.amazonaws.services.ec2.model.Tag;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.aws.AWSClient;
/**
* The crawler to crawl AWS instances for janitor monkey.
*/
public class InstanceJanitorCrawler extends AbstractAWSJanitorCrawler {

    /** The name representing the additional field name of ASG's name. */
    public static final String INSTANCE_FIELD_ASG_NAME = "ASG_NAME";

    /** The name representing the additional field name of the OpsWork stack name. */
    public static final String INSTANCE_FIELD_OPSWORKS_STACK_NAME = "OPSWORKS_STACK_NAME";

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(InstanceJanitorCrawler.class);

    /**
     * Instantiates a new basic instance crawler.
     * @param awsClient
     *            the aws client
     */
    public InstanceJanitorCrawler(AWSClient awsClient) {
        super(awsClient);
    }

    @Override
    public EnumSet<? extends ResourceType> resourceTypes() {
        return EnumSet.of(AWSResourceType.INSTANCE);
    }

    @Override
    public List<Resource> resources(ResourceType resourceType) {
        return "INSTANCE".equals(resourceType.name())
                ? getInstanceResources() : Collections.<Resource>emptyList();
    }

    @Override
    public List<Resource> resources(String... resourceIds) {
        return getInstanceResources(resourceIds);
    }

    /**
     * Crawls EC2 instances, recording tags, description, owner, ASG membership,
     * OpsWorks stack membership and instance state.
     *
     * @param instanceIds the instance ids to crawl; all instances in the region when empty
     * @return the list of instance resources
     */
    private List<Resource> getInstanceResources(String... instanceIds) {
        AWSClient awsClient = getAWSClient();
        // Index the auto scaling details by instance id so each instance lookup is O(1).
        Map<String, AutoScalingInstanceDetails> asgDetailsById =
                new HashMap<String, AutoScalingInstanceDetails>();
        for (AutoScalingInstanceDetails details : awsClient.describeAutoScalingInstances(instanceIds)) {
            asgDetailsById.put(details.getInstanceId(), details);
        }
        List<Resource> resources = new LinkedList<Resource>();
        for (Instance instance : awsClient.describeInstances(instanceIds)) {
            resources.add(buildInstanceResource(instance, asgDetailsById));
        }
        return resources;
    }

    /** Builds the janitor resource for a single EC2 instance. */
    private Resource buildInstanceResource(Instance instance,
            Map<String, AutoScalingInstanceDetails> asgDetailsById) {
        Resource instanceResource = new AWSResource().withId(instance.getInstanceId())
                .withRegion(getAWSClient().region()).withResourceType(AWSResourceType.INSTANCE)
                .withLaunchTime(instance.getLaunchTime());
        for (Tag tag : instance.getTags()) {
            instanceResource.setTag(tag.getKey(), tag.getValue());
        }
        String host = instance.getPublicDnsName() == null ? "" : instance.getPublicDnsName();
        instanceResource.setDescription(String.format("type=%s; host=%s", instance.getInstanceType(), host));
        instanceResource.setOwnerEmail(getOwnerEmailForResource(instanceResource));
        String asgName = getAsgName(instanceResource, asgDetailsById);
        if (asgName != null) {
            instanceResource.setAdditionalField(INSTANCE_FIELD_ASG_NAME, asgName);
            LOGGER.info(String.format("instance %s has a ASG tag name %s.", instanceResource.getId(), asgName));
        }
        String opsworksStackName = getOpsWorksStackName(instanceResource);
        if (opsworksStackName != null) {
            instanceResource.setAdditionalField(INSTANCE_FIELD_OPSWORKS_STACK_NAME, opsworksStackName);
            LOGGER.info(String.format("instance %s is part of an OpsWorks stack named %s.", instanceResource.getId(), opsworksStackName));
        }
        if (instance.getState() != null) {
            ((AWSResource) instanceResource).setAWSResourceState(instance.getState().getName());
        }
        return instanceResource;
    }

    /**
     * Resolves the ASG an instance belongs to. The aws:autoscaling:groupName tag
     * normally carries the ASG name, but mistagged instances can still be resolved
     * through the AutoScaling service details.
     */
    private String getAsgName(Resource instanceResource, Map<String, AutoScalingInstanceDetails> idToASGInstance) {
        String asgName = instanceResource.getTag("aws:autoscaling:groupName");
        if (asgName != null) {
            return asgName;
        }
        AutoScalingInstanceDetails details = idToASGInstance.get(instanceResource.getId());
        return details == null ? null : details.getAutoScalingGroupName();
    }

    /** Reads the OpsWorks stack name from the instance's opsworks:stack tag, if any. */
    private String getOpsWorksStackName(Resource instanceResource) {
        return instanceResource.getTag("opsworks:stack");
    }
}
| 5,754
| 40.702899
| 142
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/EBSSnapshotJanitorCrawler.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.services.ec2.model.BlockDeviceMapping;
import com.amazonaws.services.ec2.model.EbsBlockDevice;
import com.amazonaws.services.ec2.model.Image;
import com.amazonaws.services.ec2.model.Snapshot;
import com.amazonaws.services.ec2.model.Tag;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.aws.AWSClient;
/**
* The crawler to crawl AWS EBS snapshots for janitor monkey.
*/
public class EBSSnapshotJanitorCrawler extends AbstractAWSJanitorCrawler {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(EBSSnapshotJanitorCrawler.class);

    /** The name representing the additional field name of AMIs generated using the snapshot. */
    public static final String SNAPSHOT_FIELD_AMIS = "AMIs";

    /** The map from snapshot id to the AMI ids that are generated using the snapshot. */
    private final Map<String, Collection<String>> snapshotToAMIs =
            new HashMap<String, Collection<String>>();

    /**
     * The constructor.
     * @param awsClient the AWS client
     */
    public EBSSnapshotJanitorCrawler(AWSClient awsClient) {
        super(awsClient);
    }

    @Override
    public EnumSet<? extends ResourceType> resourceTypes() {
        return EnumSet.of(AWSResourceType.EBS_SNAPSHOT);
    }

    @Override
    public List<Resource> resources(ResourceType resourceType) {
        if (!"EBS_SNAPSHOT".equals(resourceType.name())) {
            return Collections.emptyList();
        }
        return getSnapshotResources();
    }

    @Override
    public List<Resource> resources(String... resourceIds) {
        return getSnapshotResources(resourceIds);
    }

    /**
     * Crawls EBS snapshots, recording tags, state, owner and the AMIs generated
     * from each snapshot.
     *
     * @param snapshotIds the snapshot ids to crawl; all snapshots in the region when empty
     * @return the list of snapshot resources
     */
    private List<Resource> getSnapshotResources(String... snapshotIds) {
        refreshSnapshotToAMIs();
        AWSClient awsClient = getAWSClient();
        List<Resource> resources = new LinkedList<Resource>();
        for (Snapshot snapshot : awsClient.describeSnapshots(snapshotIds)) {
            resources.add(buildSnapshotResource(snapshot));
        }
        return resources;
    }

    /** Converts a single EBS snapshot into a janitor resource. */
    private Resource buildSnapshotResource(Snapshot snapshot) {
        Resource snapshotResource = new AWSResource().withId(snapshot.getSnapshotId())
                .withRegion(getAWSClient().region()).withResourceType(AWSResourceType.EBS_SNAPSHOT)
                .withLaunchTime(snapshot.getStartTime()).withDescription(snapshot.getDescription());
        for (Tag tag : snapshot.getTags()) {
            LOGGER.debug(String.format("Adding tag %s = %s to resource %s",
                    tag.getKey(), tag.getValue(), snapshotResource.getId()));
            snapshotResource.setTag(tag.getKey(), tag.getValue());
        }
        snapshotResource.setOwnerEmail(getOwnerEmailForResource(snapshotResource));
        ((AWSResource) snapshotResource).setAWSResourceState(snapshot.getState());
        Collection<String> amis = snapshotToAMIs.get(snapshotResource.getId());
        if (amis != null) {
            snapshotResource.setAdditionalField(SNAPSHOT_FIELD_AMIS, StringUtils.join(amis, ","));
        }
        return snapshotResource;
    }

    @Override
    public String getOwnerEmailForResource(Resource resource) {
        Validate.notNull(resource);
        String owner = resource.getTag(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY);
        return owner != null ? owner : super.getOwnerEmailForResource(resource);
    }

    /**
     * Gets the collection of AMIs that are created using a specific snapshot.
     * @param snapshotId the snapshot id
     */
    protected Collection<String> getAMIsForSnapshot(String snapshotId) {
        Collection<String> amis = snapshotToAMIs.get(snapshotId);
        return amis == null
                ? Collections.<String>emptyList()
                : Collections.unmodifiableCollection(amis);
    }

    /** Rebuilds the snapshot-to-AMIs index from the current set of images. */
    private void refreshSnapshotToAMIs() {
        snapshotToAMIs.clear();
        for (Image image : getAWSClient().describeImages()) {
            for (BlockDeviceMapping bdm : image.getBlockDeviceMappings()) {
                EbsBlockDevice ebd = bdm.getEbs();
                if (ebd == null || ebd.getSnapshotId() == null) {
                    continue;
                }
                LOGGER.debug(String.format("Snapshot %s is used to generate AMI %s",
                        ebd.getSnapshotId(), image.getImageId()));
                Collection<String> amis = snapshotToAMIs.get(ebd.getSnapshotId());
                if (amis == null) {
                    amis = new ArrayList<String>();
                    snapshotToAMIs.put(ebd.getSnapshotId(), amis);
                }
                amis.add(image.getImageId());
            }
        }
    }
}
| 5,956
| 37.185897
| 104
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/LaunchConfigJanitorCrawler.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler;
import com.amazonaws.services.autoscaling.model.AutoScalingGroup;
import com.amazonaws.services.autoscaling.model.LaunchConfiguration;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.aws.AWSClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Set;
/**
* The crawler to crawl AWS launch configurations for janitor monkey.
*/
public class LaunchConfigJanitorCrawler extends AbstractAWSJanitorCrawler {

    /** The name representing the additional field name of a flag indicating if the launch config
     * if used by an auto scaling group. */
    public static final String LAUNCH_CONFIG_FIELD_USED_BY_ASG = "USED_BY_ASG";

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(LaunchConfigJanitorCrawler.class);

    /**
     * Instantiates a new basic launch configuration crawler.
     * @param awsClient
     *            the aws client
     */
    public LaunchConfigJanitorCrawler(AWSClient awsClient) {
        super(awsClient);
    }

    @Override
    public EnumSet<? extends ResourceType> resourceTypes() {
        return EnumSet.of(AWSResourceType.LAUNCH_CONFIG);
    }

    @Override
    public List<Resource> resources(ResourceType resourceType) {
        if (!"LAUNCH_CONFIG".equals(resourceType.name())) {
            return Collections.emptyList();
        }
        return getLaunchConfigResources();
    }

    @Override
    public List<Resource> resources(String... resourceIds) {
        return getLaunchConfigResources(resourceIds);
    }

    /**
     * Crawls launch configurations and flags each one with whether any auto
     * scaling group currently references it.
     *
     * @param launchConfigNames the launch configuration names to crawl;
     *        all launch configurations in the region when empty
     * @return the list of launch configuration resources
     */
    private List<Resource> getLaunchConfigResources(String... launchConfigNames) {
        AWSClient awsClient = getAWSClient();
        // Names of launch configurations referenced by at least one ASG.
        Set<String> namesInUse = Sets.newHashSet();
        for (AutoScalingGroup group : awsClient.describeAutoScalingGroups()) {
            namesInUse.add(group.getLaunchConfigurationName());
        }
        List<Resource> resources = Lists.newArrayList();
        for (LaunchConfiguration config : awsClient.describeLaunchConfigurations(launchConfigNames)) {
            String name = config.getLaunchConfigurationName();
            Resource lcResource = new AWSResource().withId(name)
                    .withRegion(getAWSClient().region()).withResourceType(AWSResourceType.LAUNCH_CONFIG)
                    .withLaunchTime(config.getCreatedTime());
            lcResource.setOwnerEmail(getOwnerEmailForResource(lcResource));
            lcResource.setAdditionalField(LAUNCH_CONFIG_FIELD_USED_BY_ASG,
                    String.valueOf(namesInUse.contains(name)));
            resources.add(lcResource);
        }
        return resources;
    }
}
| 3,664
| 35.287129
| 117
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/EBSVolumeJanitorCrawler.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler;
import java.util.Collections;
import java.util.EnumSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.services.ec2.model.Tag;
import com.amazonaws.services.ec2.model.Volume;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.aws.janitor.VolumeTaggingMonkey;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.janitor.JanitorMonkey;
/**
* The crawler to crawl AWS EBS volumes for janitor monkey.
*/
public class EBSVolumeJanitorCrawler extends AbstractAWSJanitorCrawler {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(EBSVolumeJanitorCrawler.class);

    /**
     * The constructor.
     * @param awsClient the AWS client
     */
    public EBSVolumeJanitorCrawler(AWSClient awsClient) {
        super(awsClient);
    }

    @Override
    public EnumSet<? extends ResourceType> resourceTypes() {
        return EnumSet.of(AWSResourceType.EBS_VOLUME);
    }

    @Override
    public List<Resource> resources(ResourceType resourceType) {
        if ("EBS_VOLUME".equals(resourceType.name())) {
            return getVolumeResources();
        }
        return Collections.emptyList();
    }

    @Override
    public List<Resource> resources(String... resourceIds) {
        return getVolumeResources(resourceIds);
    }

    /**
     * Crawls EBS volumes, copying tags and recording owner, description and volume state.
     *
     * @param volumeIds the volume ids to crawl; all volumes in the region when empty
     * @return the list of volume resources
     */
    private List<Resource> getVolumeResources(String... volumeIds) {
        List<Resource> resources = new LinkedList<Resource>();
        AWSClient awsClient = getAWSClient();
        for (Volume volume : awsClient.describeVolumes(volumeIds)) {
            Resource volumeResource = new AWSResource().withId(volume.getVolumeId())
                    .withRegion(getAWSClient().region()).withResourceType(AWSResourceType.EBS_VOLUME)
                    .withLaunchTime(volume.getCreateTime());
            for (Tag tag : volume.getTags()) {
                // CONSISTENCY FIX: the sibling crawlers (ELB, EBS snapshot) log this
                // same per-tag message at debug; info level floods logs during a crawl.
                LOGGER.debug(String.format("Adding tag %s = %s to resource %s",
                        tag.getKey(), tag.getValue(), volumeResource.getId()));
                volumeResource.setTag(tag.getKey(), tag.getValue());
            }
            volumeResource.setOwnerEmail(getOwnerEmailForResource(volumeResource));
            volumeResource.setDescription(getVolumeDescription(volume));
            ((AWSResource) volumeResource).setAWSResourceState(volume.getState());
            resources.add(volumeResource);
        }
        return resources;
    }

    /** Builds a human-readable description of a volume: its size followed by its tags. */
    private String getVolumeDescription(Volume volume) {
        StringBuilder description = new StringBuilder();
        Integer size = volume.getSize();
        description.append(String.format("size=%s", size == null ? "unknown" : size));
        for (Tag tag : volume.getTags()) {
            description.append(String.format("; %s=%s", tag.getKey(), tag.getValue()));
        }
        return description.toString();
    }

    /**
     * Gets the owner email, falling back to the owner recorded in the Janitor Metadata
     * tag set by the volume tagging monkey when the regular owner tag is missing.
     */
    @Override
    public String getOwnerEmailForResource(Resource resource) {
        String owner = super.getOwnerEmailForResource(resource);
        if (owner == null) {
            // try to find the owner from Janitor Metadata tag set by the volume tagging monkey.
            Map<String, String> janitorTag = VolumeTaggingMonkey.parseJanitorMetaTag(resource.getTag(
                    JanitorMonkey.JANITOR_META_TAG));
            owner = janitorTag.get(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY);
        }
        return owner;
    }
}
| 4,416
| 35.808333
| 101
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/edda/EddaEBSVolumeJanitorCrawler.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler.edda;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import com.netflix.simianarmy.client.edda.EddaClient;
import com.netflix.simianarmy.janitor.JanitorCrawler;
import com.netflix.simianarmy.janitor.JanitorMonkey;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.codehaus.jackson.JsonNode;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.*;
/**
* The crawler to crawl AWS EBS volumes for Janitor monkey using Edda.
*/
public class EddaEBSVolumeJanitorCrawler implements JanitorCrawler {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(EddaEBSVolumeJanitorCrawler.class);

    /** Formatter for Edda timestamps. NOTE(review): currently unreferenced in this class. */
    private static final DateTimeFormatter TIME_FORMATTER = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.S'Z'");

    /** Number of volumes looked up per batch request to Edda. */
    private static final int BATCH_SIZE = 50;

    // The value below specifies how many days we want to look back in Edda to find the owner of old instances.
    // In case of Edda keeps too much history data, without a reasonable date range, the query may fail.
    private static final int LOOKBACK_DAYS = 90;

    /**
     * The field name for purpose.
     */
    public static final String PURPOSE = "purpose";

    /**
     * The field name for deleteOnTermination.
     */
    public static final String DELETE_ON_TERMINATION = "deleteOnTermination";

    /**
     * The field name for detach time.
     */
    public static final String DETACH_TIME = "detachTime";

    private final EddaClient eddaClient;
    private final List<String> regions = Lists.newArrayList();

    /** Mapping from instance id to its owner tag value, populated once at construction time. */
    private final Map<String, String> instanceToOwner = Maps.newHashMap();

    /**
     * The constructor.
     * @param eddaClient
     *            the Edda client
     * @param regions
     *            the regions the crawler will crawl resources for
     */
    public EddaEBSVolumeJanitorCrawler(EddaClient eddaClient, String... regions) {
        Validate.notNull(eddaClient);
        this.eddaClient = eddaClient;
        Validate.notNull(regions);
        for (String region : regions) {
            this.regions.add(region);
            updateInstanceToOwner(region);
        }
        LOGGER.info(String.format("Found owner for %d instances in %s", instanceToOwner.size(), this.regions));
    }

    /**
     * Queries Edda for running instances (seen within the last {@code LOOKBACK_DAYS} days) that
     * carry the global owner tag, and records the instance-id to owner mapping.
     * @param region the region to query
     */
    private void updateInstanceToOwner(String region) {
        LOGGER.info(String.format("Getting owners for all instances in region %s", region));
        long startTime = DateTime.now().minusDays(LOOKBACK_DAYS).getMillis();
        String url = String.format("%1$s/view/instances;_since=%2$d;state.name=running;tags.key=%3$s;"
                + "_expand:(instanceId,tags:(key,value))",
                eddaClient.getBaseUrl(region), startTime, BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY);
        JsonNode jsonNode = null;
        try {
            jsonNode = eddaClient.getJsonNodeFromUrl(url);
        } catch (Exception e) {
            // BUGFIX: message said "Jason node".
            LOGGER.error(String.format(
                    "Failed to get JSON node from edda for instance owners in region %s.", region), e);
        }
        if (jsonNode == null || !jsonNode.isArray()) {
            throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
        }
        for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
            JsonNode elem = it.next();
            String instanceId = elem.get("instanceId").getTextValue();
            JsonNode tags = elem.get("tags");
            if (tags == null || !tags.isArray() || tags.size() == 0) {
                continue;
            }
            for (Iterator<JsonNode> tagsIt = tags.getElements(); tagsIt.hasNext();) {
                JsonNode tag = tagsIt.next();
                String tagKey = tag.get("key").getTextValue();
                if (BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY.equals(tagKey)) {
                    instanceToOwner.put(instanceId, tag.get("value").getTextValue());
                    break;
                }
            }
        }
    }

    /** This crawler handles only EBS volume resources. */
    @Override
    public EnumSet<? extends ResourceType> resourceTypes() {
        return EnumSet.of(AWSResourceType.EBS_VOLUME);
    }

    @Override
    public List<Resource> resources(ResourceType resourceType) {
        if ("EBS_VOLUME".equals(resourceType.name())) {
            return getVolumeResources();
        }
        return Collections.emptyList();
    }

    @Override
    public List<Resource> resources(String... resourceIds) {
        return getVolumeResources(resourceIds);
    }

    /**
     * Returns the owner email stored in the resource's global owner tag, or null when absent.
     */
    @Override
    public String getOwnerEmailForResource(Resource resource) {
        Validate.notNull(resource);
        return resource.getTag(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY);
    }

    private List<Resource> getVolumeResources(String... volumeIds) {
        List<Resource> resources = Lists.newArrayList();
        for (String region : regions) {
            resources.addAll(getUnattachedVolumeResourcesInRegion(region, volumeIds));
        }
        // BUGFIX: this call used to sit inside the region loop and was handed the whole
        // accumulated list, so every earlier region's volumes were re-processed once per
        // additional region. addLastAttachmentInfo groups by region internally, so a single
        // call on the complete list is sufficient and correct.
        addLastAttachmentInfo(resources);
        return resources;
    }

    /**
     * Gets all volumes that are not attached to any instance. Janitor Monkey only considers unattached volumes
     * as cleanup candidates, so there is no need to get volumes that are in-use.
     * @param region the region to query
     * @param volumeIds optional explicit volume ids; empty means all unattached volumes in the region
     * @return list of resources that are not attached to any instance
     */
    private List<Resource> getUnattachedVolumeResourcesInRegion(String region, String... volumeIds) {
        String url = eddaClient.getBaseUrl(region) + "/aws/volumes;";
        if (volumeIds != null && volumeIds.length != 0) {
            // NOTE(review): the ids are appended directly after the ';' matrix separator; confirm
            // this is the id-list form Edda expects (the snapshot crawler uses "/aws/snapshots/<ids>").
            url += StringUtils.join(volumeIds, ',');
            LOGGER.info(String.format("Getting volumes in region %s for %d ids", region, volumeIds.length));
        } else {
            LOGGER.info(String.format("Getting all unattached volumes in region %s", region));
        }
        url += ";state=available;_expand:(volumeId,createTime,size,state,tags)";
        JsonNode jsonNode = null;
        try {
            jsonNode = eddaClient.getJsonNodeFromUrl(url);
        } catch (Exception e) {
            // BUGFIX: message said "Jason node".
            LOGGER.error(String.format(
                    "Failed to get JSON node from edda for unattached volumes in region %s.", region), e);
        }
        if (jsonNode == null || !jsonNode.isArray()) {
            throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
        }
        List<Resource> resources = Lists.newArrayList();
        for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
            resources.add(parseJsonElementToVolumeResource(region, it.next()));
        }
        return resources;
    }

    /**
     * Converts one Edda volume document into an AWSResource: copies tags into both the resource
     * tags and the description, and captures the optional "purpose" tag as an additional field.
     */
    private Resource parseJsonElementToVolumeResource(String region, JsonNode jsonNode) {
        Validate.notNull(jsonNode);
        long createTime = jsonNode.get("createTime").asLong();
        Resource resource = new AWSResource().withId(jsonNode.get("volumeId").getTextValue()).withRegion(region)
                .withResourceType(AWSResourceType.EBS_VOLUME)
                .withLaunchTime(new Date(createTime));
        JsonNode tags = jsonNode.get("tags");
        StringBuilder description = new StringBuilder();
        JsonNode size = jsonNode.get("size");
        description.append(String.format("size=%s", size == null ? "unknown" : size.getIntValue()));
        if (tags == null || !tags.isArray() || tags.size() == 0) {
            LOGGER.debug(String.format("No tags is found for %s", resource.getId()));
        } else {
            for (Iterator<JsonNode> it = tags.getElements(); it.hasNext();) {
                JsonNode tag = it.next();
                String key = tag.get("key").getTextValue();
                String value = tag.get("value").getTextValue();
                description.append(String.format("; %s=%s", key, value));
                resource.setTag(key, value);
                if (key.equals(PURPOSE)) {
                    resource.setAdditionalField(PURPOSE, value);
                }
            }
        }
        // BUGFIX: the description was previously only set inside the tagged branch, so untagged
        // volumes lost the size information that had already been built.
        resource.setDescription(description.toString());
        ((AWSResource) resource).setAWSResourceState(jsonNode.get("state").getTextValue());
        return resource;
    }

    /**
     * Adds information of last attachment to the resources. To be compatible with the AWS implementation of
     * the same crawler, add the information to the JANITOR_META tag. It always uses the latest information
     * to update the tag in this resource (not writing back to AWS) no matter if the tag exists.
     * @param resources the volume resources
     */
    private void addLastAttachmentInfo(List<Resource> resources) {
        Validate.notNull(resources);
        LOGGER.info(String.format("Updating the latest attachment info for %d resources", resources.size()));
        Map<String, List<Resource>> regionToResources = Maps.newHashMap();
        for (Resource resource : resources) {
            List<Resource> regionalList = regionToResources.get(resource.getRegion());
            if (regionalList == null) {
                regionalList = Lists.newArrayList();
                regionToResources.put(resource.getRegion(), regionalList);
            }
            regionalList.add(resource);
        }
        for (Map.Entry<String, List<Resource>> entry : regionToResources.entrySet()) {
            // BUGFIX: this log line previously reported the total resource count instead of the
            // per-region count.
            LOGGER.info(String.format("Updating the latest attachment info for %d resources in region %s",
                    entry.getValue().size(), entry.getKey()));
            for (List<Resource> batch : Lists.partition(entry.getValue(), BATCH_SIZE)) {
                LOGGER.info(String.format("Processing batch of size %d", batch.size()));
                String batchUrl = getBatchUrl(entry.getKey(), batch);
                JsonNode batchResult = null;
                try {
                    batchResult = eddaClient.getJsonNodeFromUrl(batchUrl);
                } catch (IOException e) {
                    LOGGER.error("Failed to get response for the batch.", e);
                }
                Map<String, Resource> idToResource = Maps.newHashMap();
                for (Resource resource : batch) {
                    idToResource.put(resource.getId(), resource);
                }
                if (batchResult == null || !batchResult.isArray()) {
                    throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s",
                            batchUrl, batchResult));
                }
                Set<String> processedIds = Sets.newHashSet();
                for (Iterator<JsonNode> it = batchResult.getElements(); it.hasNext();) {
                    JsonNode elem = it.next();
                    JsonNode data = elem.get("data");
                    String volumeId = data.get("volumeId").getTextValue();
                    Resource resource = idToResource.get(volumeId);
                    JsonNode attachments = data.get("attachments");
                    if (!(attachments.isArray() && attachments.size() > 0)) {
                        continue;
                    }
                    JsonNode attachment = attachments.get(0);
                    // ltime is the time the historical record was terminated in Edda, i.e. when
                    // the volume left the in-use state.
                    JsonNode ltime = elem.get("ltime");
                    if (ltime == null || ltime.isNull()) {
                        continue;
                    }
                    DateTime detachTime = new DateTime(ltime.asLong());
                    processedIds.add(volumeId);
                    setAttachmentInfo(volumeId, attachment, detachTime, resource);
                }
                for (Map.Entry<String, Resource> volumeEntry : idToResource.entrySet()) {
                    String id = volumeEntry.getKey();
                    if (!processedIds.contains(id)) {
                        // A volume with no in-use history was never attached; treat its creation
                        // time as the moment it became detached.
                        Resource resource = volumeEntry.getValue();
                        LOGGER.info(String.format("Volume %s never was attached, use createTime %s as the detachTime",
                                id, resource.getLaunchTime()));
                        setAttachmentInfo(id, null, new DateTime(resource.getLaunchTime().getTime()), resource);
                    }
                }
            }
        }
    }

    /**
     * Records the last-attachment details on the resource: the deleteOnTermination flag, the
     * owner (falling back to the owner of the last attached instance), the JANITOR_META tag,
     * and the detach time as an additional field.
     */
    private void setAttachmentInfo(String volumeId, JsonNode attachment, DateTime detachTime, Resource resource) {
        String instanceId = null;
        if (attachment != null) {
            boolean deleteOnTermination = attachment.get(DELETE_ON_TERMINATION).getBooleanValue();
            if (deleteOnTermination) {
                LOGGER.info(String.format(
                        "Volume %s had set the deleteOnTermination flag as true", volumeId));
            }
            resource.setAdditionalField(DELETE_ON_TERMINATION, String.valueOf(deleteOnTermination));
            instanceId = attachment.get("instanceId").getTextValue();
        }
        // The subclass can customize the way to get the owner for a volume
        String owner = getOwnerEmailForResource(resource);
        if (owner == null && instanceId != null) {
            owner = instanceToOwner.get(instanceId);
        }
        resource.setOwnerEmail(owner);
        String metaTag = makeMetaTag(instanceId, owner, detachTime);
        LOGGER.info(String.format("Setting Janitor Metatag as %s for volume %s", metaTag, volumeId));
        resource.setTag(JanitorMonkey.JANITOR_META_TAG, metaTag);
        LOGGER.info(String.format("The last detach time of volume %s is %s", volumeId, detachTime));
        resource.setAdditionalField(DETACH_TIME, String.valueOf(detachTime.getMillis()));
    }

    /**
     * Builds the JANITOR_META tag value ("instance=...;owner=...;detachTime=...") with empty
     * strings for unknown fields, matching the AWS crawler's format.
     */
    private String makeMetaTag(String instance, String owner, DateTime lastDetachTime) {
        StringBuilder meta = new StringBuilder();
        meta.append(String.format("%s=%s;",
                JanitorMonkey.INSTANCE_TAG_KEY, instance == null ? "" : instance));
        meta.append(String.format("%s=%s;", BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY, owner == null ? "" : owner));
        meta.append(String.format("%s=%s", JanitorMonkey.DETACH_TIME_TAG_KEY,
                lastDetachTime == null ? "" : AWSResource.DATE_FORMATTER.print(lastDetachTime)));
        return meta.toString();
    }

    /**
     * Builds the batched Edda history URL asking, for each volume in the batch, for the
     * termination time (ltime) of its most recent in-use record plus the attachment details.
     */
    private String getBatchUrl(String region, List<Resource> batch) {
        StringBuilder batchUrl = new StringBuilder(eddaClient.getBaseUrl(region) + "/aws/volumes/");
        boolean isFirst = true;
        for (Resource resource : batch) {
            if (!isFirst) {
                batchUrl.append(',');
            } else {
                isFirst = false;
            }
            batchUrl.append(resource.getId());
        }
        batchUrl.append(";data.state=in-use;_since=0;_expand;_meta:"
                + "(ltime,data:(volumeId,attachments:(deleteOnTermination,instanceId)))");
        return batchUrl.toString();
    }
}
| 16,004
| 43.33518
| 118
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/edda/EddaEBSSnapshotJanitorCrawler.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler.edda;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import com.netflix.simianarmy.client.edda.EddaClient;
import com.netflix.simianarmy.janitor.JanitorCrawler;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.codehaus.jackson.JsonNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
/**
* The crawler to crawl AWS EBS snapshots for janitor monkey using Edda.
*/
public class EddaEBSSnapshotJanitorCrawler implements JanitorCrawler {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(EddaEBSSnapshotJanitorCrawler.class);

    /** The name representing the additional field name of AMIs generated using the snapshot. */
    public static final String SNAPSHOT_FIELD_AMIS = "AMIs";

    /** The map from snapshot id to the AMI ids that are generated using the snapshot. */
    private final Map<String, Collection<String>> snapshotToAMIs = Maps.newHashMap();

    private final EddaClient eddaClient;
    private final List<String> regions = Lists.newArrayList();
    private final String defaultOwnerId;

    /**
     * The constructor.
     * @param defaultOwnerId
     *            the default owner id that snapshots need to have for being crawled, null means no filtering is
     *            needed
     * @param eddaClient
     *            the Edda client
     * @param regions
     *            the regions the crawler will crawl resources for
     */
    public EddaEBSSnapshotJanitorCrawler(String defaultOwnerId, EddaClient eddaClient, String... regions) {
        this.defaultOwnerId = defaultOwnerId;
        Validate.notNull(eddaClient);
        this.eddaClient = eddaClient;
        Validate.notNull(regions);
        for (String region : regions) {
            this.regions.add(region);
        }
    }

    /** This crawler handles only EBS snapshot resources. */
    @Override
    public EnumSet<? extends ResourceType> resourceTypes() {
        return EnumSet.of(AWSResourceType.EBS_SNAPSHOT);
    }

    @Override
    public List<Resource> resources(ResourceType resourceType) {
        if ("EBS_SNAPSHOT".equals(resourceType.name())) {
            return getSnapshotResources();
        }
        return Collections.emptyList();
    }

    @Override
    public List<Resource> resources(String... resourceIds) {
        return getSnapshotResources(resourceIds);
    }

    private List<Resource> getSnapshotResources(String... snapshotIds) {
        List<Resource> resources = Lists.newArrayList();
        for (String region : regions) {
            resources.addAll(getSnapshotResourcesInRegion(region, snapshotIds));
        }
        return resources;
    }

    /**
     * Crawls completed snapshots in one region, filtering out shared snapshots whose owner id
     * differs from the configured default owner id (when one is set).
     */
    private List<Resource> getSnapshotResourcesInRegion(String region, String... snapshotIds) {
        // Refresh the snapshot->AMI mapping first so parsed resources can record the AMIs field.
        refreshSnapshotToAMIs(region);
        String url = eddaClient.getBaseUrl(region) + "/aws/snapshots/";
        if (snapshotIds != null && snapshotIds.length != 0) {
            url += StringUtils.join(snapshotIds, ',');
            LOGGER.info(String.format("Getting snapshots in region %s for %d ids", region, snapshotIds.length));
        } else {
            LOGGER.info(String.format("Getting all snapshots in region %s", region));
        }
        url += ";state=completed;_expand:(snapshotId,state,description,startTime,tags,ownerId)";
        JsonNode jsonNode = null;
        try {
            jsonNode = eddaClient.getJsonNodeFromUrl(url);
        } catch (Exception e) {
            // BUGFIX: message said "Jason node".
            LOGGER.error(String.format(
                    "Failed to get JSON node from edda for snapshots in region %s.", region), e);
        }
        if (jsonNode == null || !jsonNode.isArray()) {
            throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
        }
        List<Resource> resources = Lists.newArrayList();
        for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
            JsonNode elem = it.next();
            // Filter out shared snapshots that do not have the specified owner id.
            String ownerId = elem.get("ownerId").getTextValue();
            if (defaultOwnerId != null && !defaultOwnerId.equals(ownerId)) {
                LOGGER.info(String.format("Ignoring snapshotIds %s since it does not have the specified ownerId.",
                        elem.get("snapshotId").getTextValue()));
            } else {
                resources.add(parseJsonElementToSnapshotResource(region, elem));
            }
        }
        return resources;
    }

    /**
     * Converts one Edda snapshot document into an AWSResource, copying tags, description,
     * state, the AMIs generated from the snapshot, and the owner email from the owner tag.
     */
    private Resource parseJsonElementToSnapshotResource(String region, JsonNode jsonNode) {
        Validate.notNull(jsonNode);
        long startTime = jsonNode.get("startTime").asLong();
        Resource resource = new AWSResource().withId(jsonNode.get("snapshotId").getTextValue()).withRegion(region)
                .withResourceType(AWSResourceType.EBS_SNAPSHOT)
                .withLaunchTime(new Date(startTime));
        JsonNode tags = jsonNode.get("tags");
        if (tags == null || !tags.isArray() || tags.size() == 0) {
            LOGGER.debug(String.format("No tags is found for %s", resource.getId()));
        } else {
            for (Iterator<JsonNode> it = tags.getElements(); it.hasNext();) {
                JsonNode tag = it.next();
                String key = tag.get("key").getTextValue();
                String value = tag.get("value").getTextValue();
                resource.setTag(key, value);
            }
        }
        JsonNode description = jsonNode.get("description");
        if (description != null) {
            resource.setDescription(description.getTextValue());
        }
        ((AWSResource) resource).setAWSResourceState(jsonNode.get("state").getTextValue());
        Collection<String> amis = snapshotToAMIs.get(resource.getId());
        if (amis != null) {
            resource.setAdditionalField(SNAPSHOT_FIELD_AMIS, StringUtils.join(amis, ","));
        }
        resource.setOwnerEmail(getOwnerEmailForResource(resource));
        return resource;
    }

    /**
     * Returns the owner email stored in the resource's global owner tag, or null when absent.
     */
    @Override
    public String getOwnerEmailForResource(Resource resource) {
        Validate.notNull(resource);
        return resource.getTag(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY);
    }

    /**
     * Gets the collection of AMIs that are created using a specific snapshot.
     * @param snapshotId the snapshot id
     */
    protected Collection<String> getAMIsForSnapshot(String snapshotId) {
        Collection<String> amis = snapshotToAMIs.get(snapshotId);
        if (amis != null) {
            return Collections.unmodifiableCollection(amis);
        } else {
            return Collections.emptyList();
        }
    }

    /**
     * Rebuilds the snapshot-id to AMI-ids map for one region by scanning the EBS block device
     * mappings of every image known to Edda.
     */
    private void refreshSnapshotToAMIs(String region) {
        snapshotToAMIs.clear();
        LOGGER.info(String.format("Getting mapping from snapshot to AMIs in region %s", region));
        String url = eddaClient.getBaseUrl(region) + "/aws/images/"
                + ";_expand:(imageId,blockDeviceMappings:(ebs:(snapshotId)))";
        JsonNode jsonNode = null;
        try {
            jsonNode = eddaClient.getJsonNodeFromUrl(url);
        } catch (Exception e) {
            // BUGFIX: message said "Jason node".
            LOGGER.error(String.format(
                    "Failed to get JSON node from edda for AMI mapping in region %s.", region), e);
        }
        if (jsonNode == null || !jsonNode.isArray()) {
            throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
        }
        for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
            JsonNode elem = it.next();
            String imageId = elem.get("imageId").getTextValue();
            JsonNode blockMappings = elem.get("blockDeviceMappings");
            if (blockMappings == null || !blockMappings.isArray() || blockMappings.size() == 0) {
                continue;
            }
            for (Iterator<JsonNode> blockMappingsIt = blockMappings.getElements(); blockMappingsIt.hasNext();) {
                JsonNode blockMappingNode = blockMappingsIt.next();
                JsonNode ebs = blockMappingNode.get("ebs");
                if (ebs == null) {
                    continue;
                }
                JsonNode snapshotIdNode = ebs.get("snapshotId");
                // BUGFIX: an "ebs" node without a snapshotId (or with a JSON null) previously
                // caused a NullPointerException here; skip such mappings instead.
                if (snapshotIdNode == null || snapshotIdNode.isNull()) {
                    continue;
                }
                String snapshotId = snapshotIdNode.getTextValue();
                LOGGER.debug(String.format("Snapshot %s is used to generate AMI %s", snapshotId, imageId));
                Collection<String> amis = snapshotToAMIs.get(snapshotId);
                if (amis == null) {
                    amis = Lists.newArrayList();
                    snapshotToAMIs.put(snapshotId, amis);
                }
                amis.add(imageId);
            }
        }
    }
}
| 9,865
| 39.105691
| 118
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/edda/EddaImageJanitorCrawler.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler.edda;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import com.netflix.simianarmy.client.edda.EddaClient;
import com.netflix.simianarmy.janitor.JanitorCrawler;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.codehaus.jackson.JsonNode;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* The crawler to crawl AWS AMIs for janitor monkey using Edda. Only images that are not currently referenced
* by any existing instances or launch configurations are returned.
*/
public class EddaImageJanitorCrawler implements JanitorCrawler {
/** The Constant LOGGER. */
private static final Logger LOGGER = LoggerFactory.getLogger(EddaImageJanitorCrawler.class);
/** The name representing the additional field name for the last reference time by instance. */
public static final String AMI_FIELD_LAST_INSTANCE_REF_TIME = "Last_Instance_Reference_Time";
/** The name representing the additional field name for the last reference time by launch config. */
public static final String AMI_FIELD_LAST_LC_REF_TIME = "Last_Launch_Config_Reference_Time";
/** The name representing the additional field name for whether the image is a base image. **/
public static final String AMI_FIELD_BASE_IMAGE = "Base_Image";
private static final int BATCH_SIZE = 100;
private final EddaClient eddaClient;
private final List<String> regions = Lists.newArrayList();
private final Set<String> usedByInstance = Sets.newHashSet();
private final Set<String> usedByLaunchConfig = Sets.newHashSet();
private final Set<String> usedNames = Sets.newHashSet();
protected final Map<String, String> imageIdToName = Maps.newHashMap();
private final Map<String, Long> imageIdToCreationTime = Maps.newHashMap();
private final Set<String> ancestorImageIds = Sets.newHashSet();
private String ownerId;
private final int daysBack;
private static final String IMAGE_ID = "ami-[a-z0-9]{8}";
private static final Pattern BASE_AMI_ID_PATTERN = Pattern.compile("^.*?base_ami_id=(" + IMAGE_ID + ").*?");
private static final Pattern ANCESTOR_ID_PATTERN = Pattern.compile("^.*?ancestor_id=(" + IMAGE_ID + ").*?$");
/**
* Instantiates a new basic AMI crawler.
* @param eddaClient
* the Edda client
* @param daysBack
* the number of days that the crawler checks back in history stored in Edda
* @param regions
* the regions the crawler will crawl resources for
*/
public EddaImageJanitorCrawler(EddaClient eddaClient, String ownerId, int daysBack, String... regions) {
Validate.notNull(eddaClient);
this.eddaClient = eddaClient;
this.ownerId = ownerId;
Validate.isTrue(daysBack >= 0);
this.daysBack = daysBack;
Validate.notNull(regions);
for (String region : regions) {
this.regions.add(region);
}
}
    /** This crawler handles only AMI (image) resources. */
    @Override
    public EnumSet<? extends ResourceType> resourceTypes() {
        return EnumSet.of(AWSResourceType.IMAGE);
    }
@Override
public List<Resource> resources(ResourceType resourceType) {
if ("IMAGE".equals(resourceType.name())) {
return getAMIResources();
}
return Collections.emptyList();
}
    /** Crawls only the images with the given ids. */
    @Override
    public List<Resource> resources(String... imageIds) {
        return getAMIResources(imageIds);
    }
    /**
     * Returns the owner email stored in the resource's global owner tag,
     * or null when the tag is absent.
     */
    @Override
    public String getOwnerEmailForResource(Resource resource) {
        Validate.notNull(resource);
        return resource.getTag(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY);
    }
private List<Resource> getAMIResources(String... imageIds) {
refreshIdToNameMap();
refreshAMIsUsedByInstance();
refreshAMIsUsedByLC();
refreshIdToCreationTime();
for (String excludedId : getExcludedImageIds()) {
String name = imageIdToName.get(excludedId);
usedNames.add(name);
}
LOGGER.info(String.format("%d image names are used across the %d regions.",
usedNames.size(), regions.size()));
Collection<String> excludedImageIds = getExcludedImageIds();
List<Resource> resources = Lists.newArrayList();
for (String region : regions) {
try {
resources.addAll(getAMIResourcesInRegion(region, excludedImageIds, imageIds));
} catch (Exception e) {
LOGGER.error("AMI look up failed for {} in {}", imageIds, region, e);
}
}
return resources;
}
/**
* The method allows users to put their own logic to exclude a set of images from being
* cleaned up by Janitor Monkey. In some cases, images are not used but still need to be
* kept longer.
* @return a collection of image ids that need to be excluded from Janitor Monkey
*/
    protected Collection<String> getExcludedImageIds() {
        // Default: exclude nothing. Returns a fresh mutable set so overriding subclasses
        // (and the base implementation) never share state between calls.
        return Sets.newHashSet();
    }
private JsonNode getImagesInJson(String region, String... imageIds) {
String url = eddaClient.getBaseUrl(region) + "/aws/images";
if (imageIds != null && imageIds.length != 0) {
url += "/" + StringUtils.join(imageIds, ',');
if (imageIds.length == 1) {
url +=","; // Edda will return a non-array if passing exactly one imageId which will fail the crawler
}
LOGGER.info(String.format("Getting unreferenced AMIs in region %s for %d ids", region, imageIds.length));
} else {
LOGGER.info(String.format("Getting all unreferenced AMIs in region %s", region));
if (StringUtils.isNotBlank(ownerId)) {
url += ";ownerId=" + ownerId;
}
}
url += ";_expand:(imageId,name,description,state,tags:(key,value))";
JsonNode jsonNode = null;
try {
jsonNode = eddaClient.getJsonNodeFromUrl(url);
} catch (Exception e) {
LOGGER.error(String.format(
"Failed to get Jason node from edda for AMIs in region %s.", region), e);
}
if (jsonNode == null || !jsonNode.isArray()) {
throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
}
return jsonNode;
}
private void refreshIdToNameMap() {
imageIdToName.clear();
for (String region : regions) {
JsonNode jsonNode = getImagesInJson(region);
for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
JsonNode ami = it.next();
String imageId = ami.get("imageId").getTextValue();
String name = ami.get("name").getTextValue();
imageIdToName.put(imageId, name);
}
}
LOGGER.info(String.format("Got mapping from image id to name for %d ids", imageIdToName.size()));
}
/**
* AWS doesn't provide creation time for images. We use the ctime (the creation time of the image record in Edda)
* to approximate the creation time of the image.
*/
private void refreshIdToCreationTime() {
for (String region : regions) {
String url = eddaClient.getBaseUrl(region) + "/aws/images";
LOGGER.info(String.format("Getting the creation time for all AMIs in region %s", region));
if (StringUtils.isNotBlank(ownerId)) {
url += ";data.ownerId=" + ownerId;
}
url += ";_expand;_meta:(ctime,data:(imageId))";
JsonNode jsonNode = null;
try {
jsonNode = eddaClient.getJsonNodeFromUrl(url);
} catch (Exception e) {
LOGGER.error(String.format(
"Failed to get Jason node from edda for creation time of AMIs in region %s.", region), e);
}
if (jsonNode == null || !jsonNode.isArray()) {
throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s",
url, jsonNode));
}
for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
JsonNode elem = it.next();
JsonNode data = elem.get("data");
String imageId = data.get("imageId").getTextValue();
JsonNode ctimeNode = elem.get("ctime");
if (ctimeNode != null && !ctimeNode.isNull()) {
long ctime = ctimeNode.asLong();
LOGGER.debug(String.format("The image record of %s was created in Edda at %s",
imageId, new DateTime(ctime)));
imageIdToCreationTime.put(imageId, ctime);
}
}
}
LOGGER.info(String.format("Got creation time for %d images", imageIdToCreationTime.size()));
}
    /**
     * Crawls one region for AMI candidates: parses every image document, then keeps only images
     * that are not excluded, not referenced by any instance or launch configuration, and whose
     * name is not used in another region. Finally annotates last-reference info and marks base
     * images.
     */
    private List<Resource> getAMIResourcesInRegion(String region,
            Collection<String> excludedImageIds,
            String... imageIds) {
        JsonNode jsonNode = getImagesInJson(region, imageIds);
        List<Resource> resources = Lists.newArrayList();
        for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
            JsonNode ami = it.next();
            String imageId = ami.get("imageId").getTextValue();
            // Parse even images that end up excluded/skipped: parsing also records any
            // ancestor image id found in the description, which the base-image marking
            // loop below depends on.
            Resource resource = parseJsonElementToresource(region, ami);
            String name = ami.get("name").getTextValue();
            if (excludedImageIds.contains(imageId)) {
                LOGGER.info(String.format("Image %s is excluded from being managed by Janitor Monkey, ignore.",
                        imageId));
                continue;
            }
            if (usedByInstance.contains(imageId) || usedByLaunchConfig.contains(imageId)) {
                LOGGER.info(String.format("AMI %s is referenced by existing instance or launch configuration.",
                        imageId));
            } else {
                LOGGER.info(String.format("AMI %s is not referenced by existing instance or launch configuration.",
                        imageId));
                if (usedNames.contains(name)) {
                    LOGGER.info(String.format("The same AMI name %s is used in another region", name));
                } else {
                    resources.add(resource);
                }
            }
        }
        // Only look back a bounded window of Edda history for last-reference times.
        long since = DateTime.now().minusDays(daysBack).getMillis();
        addLastReferenceInfo(resources, since);
        // Mark the base AMIs that are used as the ancestor of other images
        for (Resource resource : resources) {
            if (ancestorImageIds.contains(resource.getId())) {
                resource.setAdditionalField(AMI_FIELD_BASE_IMAGE, "true");
            }
        }
        return resources;
    }
    /**
     * Converts one Edda image document into an AWSResource. Side effect: any base-AMI id found
     * in the image description is added to {@code ancestorImageIds}. Launch time is taken from
     * the cached Edda record creation time when available.
     */
    private Resource parseJsonElementToresource(String region, JsonNode jsonNode) {
        Validate.notNull(jsonNode);
        String imageId = jsonNode.get("imageId").getTextValue();
        Resource resource = new AWSResource().withId(imageId).withRegion(region)
                .withResourceType(AWSResourceType.IMAGE);
        Long creationTime = imageIdToCreationTime.get(imageId);
        if (creationTime != null) {
            resource.setLaunchTime(new Date(creationTime));
        }
        JsonNode tags = jsonNode.get("tags");
        if (tags == null || !tags.isArray() || tags.size() == 0) {
            LOGGER.debug(String.format("No tags is found for %s", resource.getId()));
        } else {
            for (Iterator<JsonNode> it = tags.getElements(); it.hasNext();) {
                JsonNode tag = it.next();
                String key = tag.get("key").getTextValue();
                String value = tag.get("value").getTextValue();
                resource.setTag(key, value);
            }
        }
        JsonNode descNode = jsonNode.get("description");
        if (descNode != null && !descNode.isNull()) {
            String description = descNode.getTextValue();
            resource.setDescription(description);
            // Record the ancestor (base) AMI id embedded in the description, if any.
            String ancestorImageId = getBaseAmiIdFromDescription(description);
            if (ancestorImageId != null && !ancestorImageIds.contains(ancestorImageId)) {
                LOGGER.info(String.format("Found base AMI id %s from description '%s'", ancestorImageId, description));
                ancestorImageIds.add(ancestorImageId);
            }
        }
        ((AWSResource) resource).setAWSResourceState(jsonNode.get("state").getTextValue());
        String owner = getOwnerEmailForResource(resource);
        if (owner != null) {
            resource.setOwnerEmail(owner);
        }
        return resource;
    }
private void refreshAMIsUsedByInstance() {
usedByInstance.clear();
for (String region : regions) {
LOGGER.info(String.format("Getting AMIs used by instances in region %s", region));
String url = eddaClient.getBaseUrl(region) + "/view/instances/;_expand:(imageId)";
JsonNode jsonNode = null;
try {
jsonNode = eddaClient.getJsonNodeFromUrl(url);
} catch (Exception e) {
LOGGER.error(String.format(
"Failed to get Jason node from edda for AMIs used by instances in region %s.", region), e);
}
if (jsonNode == null || !jsonNode.isArray()) {
throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s",
url, jsonNode));
}
for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
JsonNode img = it.next();
String id = img.get("imageId").getTextValue();
usedByInstance.add(id);
usedNames.add(imageIdToName.get(id));
}
}
LOGGER.info(String.format("Found %d image ids used by instance from Edda", usedByInstance.size()));
}
/**
 * Rebuilds the set of AMI ids currently referenced by launch configurations by
 * querying Edda in every configured region; the matching image names are
 * recorded in {@code usedNames} as a side effect.
 */
private void refreshAMIsUsedByLC() {
    usedByLaunchConfig.clear();
    for (String region : regions) {
        LOGGER.info(String.format("Getting AMIs used by launch configs in region %s", region));
        String query = eddaClient.getBaseUrl(region) + "/aws/launchConfigurations;_expand:(imageId)";
        JsonNode doc = null;
        try {
            doc = eddaClient.getJsonNodeFromUrl(query);
        } catch (Exception e) {
            LOGGER.error(String.format(
                    "Failed to get Jason node from edda for AMIs used by launch configs in region %s.", region), e);
        }
        if (doc == null || !doc.isArray()) {
            throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s",
                    query, doc));
        }
        Iterator<JsonNode> iter = doc.getElements();
        while (iter.hasNext()) {
            String imageId = iter.next().get("imageId").getTextValue();
            usedByLaunchConfig.add(imageId);
            usedNames.add(imageIdToName.get(imageId));
        }
    }
    LOGGER.info(String.format("Found %d image ids used by launch config from Edda", usedByLaunchConfig.size()));
}
/**
 * Updates the last-reference-time fields for the given AMI resources.
 * Resources are grouped by region and processed in batches of
 * {@code BATCH_SIZE} so the generated Edda query URLs stay bounded in length.
 *
 * @param resources the image resources to update
 * @param since only references after this time (epoch millis) are considered
 */
private void addLastReferenceInfo(List<Resource> resources, long since) {
    Validate.notNull(resources);
    LOGGER.info(String.format("Updating the latest reference info for %d images", resources.size()));
    Map<String, List<Resource>> regionToResources = Maps.newHashMap();
    for (Resource resource : resources) {
        List<Resource> regionalList = regionToResources.get(resource.getRegion());
        if (regionalList == null) {
            regionalList = Lists.newArrayList();
            regionToResources.put(resource.getRegion(), regionalList);
        }
        regionalList.add(resource);
    }
    for (Map.Entry<String, List<Resource>> entry : regionToResources.entrySet()) {
        String region = entry.getKey();
        // BUG FIX: previously logged the overall count (resources.size()) instead of
        // the count for this region.
        LOGGER.info(String.format("Updating the latest reference info for %d images in region %s",
                entry.getValue().size(), region));
        for (List<Resource> batch : Lists.partition(entry.getValue(), BATCH_SIZE)) {
            LOGGER.info(String.format("Processing batch of size %d", batch.size()));
            updateReferenceTimeByInstance(region, batch, since);
            updateReferenceTimeByLaunchConfig(region, batch, since);
        }
    }
}
/**
 * Queries Edda for terminated instances that referenced any AMI in the batch
 * and records, per image, the most recent such reference time in the
 * additional field {@code AMI_FIELD_LAST_INSTANCE_REF_TIME}.
 *
 * @param region the region to query
 * @param batch the image resources to update (expected to all be in this region)
 * @param since only consider references after this time (epoch millis)
 */
private void updateReferenceTimeByInstance(String region, List<Resource> batch, long since) {
    LOGGER.info(String.format("Getting the last reference time by instance for batch of size %d", batch.size()));
    String batchUrl = getInstanceBatchUrl(region, batch, since);
    JsonNode batchResult = null;
    // Index the batch by image id so query results can be matched back to resources.
    Map<String, Resource> idToResource = Maps.newHashMap();
    for (Resource resource : batch) {
        idToResource.put(resource.getId(), resource);
    }
    try {
        batchResult = eddaClient.getJsonNodeFromUrl(batchUrl);
    } catch (IOException e) {
        // A null batchResult falls through to the RuntimeException below.
        LOGGER.error("Failed to get response for the batch.", e);
    }
    if (batchResult == null || !batchResult.isArray()) {
        throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s",
                batchUrl, batchResult));
    }
    for (Iterator<JsonNode> it = batchResult.getElements(); it.hasNext();) {
        JsonNode elem = it.next();
        JsonNode data = elem.get("data");
        String imageId = data.get("imageId").getTextValue();
        String instanceId = data.get("instanceId").getTextValue();
        // "ltime" — presumably the record's last-update/termination time in Edda;
        // TODO(review): confirm against the Edda _meta documentation.
        JsonNode ltimeNode = elem.get("ltime");
        if (ltimeNode != null && !ltimeNode.isNull()) {
            long ltime = ltimeNode.asLong();
            Resource ami = idToResource.get(imageId);
            String lastRefTimeByInstance = ami.getAdditionalField(
                    AMI_FIELD_LAST_INSTANCE_REF_TIME);
            // Keep only the most recent reference time seen across all matches.
            if (lastRefTimeByInstance == null || Long.parseLong(lastRefTimeByInstance) < ltime) {
                LOGGER.info(String.format("The last time that the image %s was referenced by instance %s is %d",
                        imageId, instanceId, ltime));
                ami.setAdditionalField(AMI_FIELD_LAST_INSTANCE_REF_TIME, String.valueOf(ltime));
            }
        }
    }
}
/**
 * Queries Edda for launch configurations that referenced any AMI in the batch
 * and records, per image, the most recent such reference time in the
 * additional field {@code AMI_FIELD_LAST_LC_REF_TIME}.
 *
 * @param region the region to query
 * @param batch the image resources to update (expected to all be in this region)
 * @param since only consider references after this time (epoch millis)
 */
private void updateReferenceTimeByLaunchConfig(String region, List<Resource> batch, long since) {
    LOGGER.info(String.format("Getting the last reference time by launch config for batch of size %d",
            batch.size()));
    String batchUrl = getLaunchConfigBatchUrl(region, batch, since);
    JsonNode batchResult = null;
    // Index the batch by image id so query results can be matched back to resources.
    Map<String, Resource> idToResource = Maps.newHashMap();
    for (Resource resource : batch) {
        idToResource.put(resource.getId(), resource);
    }
    try {
        batchResult = eddaClient.getJsonNodeFromUrl(batchUrl);
    } catch (IOException e) {
        // A null batchResult falls through to the RuntimeException below.
        LOGGER.error("Failed to get response for the batch.", e);
    }
    if (batchResult == null || !batchResult.isArray()) {
        throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s",
                batchUrl, batchResult));
    }
    for (Iterator<JsonNode> it = batchResult.getElements(); it.hasNext();) {
        JsonNode elem = it.next();
        JsonNode data = elem.get("data");
        String imageId = data.get("imageId").getTextValue();
        String launchConfigurationName = data.get("launchConfigurationName").getTextValue();
        // "ltime" — presumably the record's last-update time in Edda;
        // TODO(review): confirm against the Edda _meta documentation.
        JsonNode ltimeNode = elem.get("ltime");
        if (ltimeNode != null && !ltimeNode.isNull()) {
            long ltime = ltimeNode.asLong();
            Resource ami = idToResource.get(imageId);
            String lastRefTimeByLC = ami.getAdditionalField(AMI_FIELD_LAST_LC_REF_TIME);
            // Keep only the most recent reference time seen across all matches.
            if (lastRefTimeByLC == null || Long.parseLong(lastRefTimeByLC) < ltime) {
                LOGGER.info(String.format(
                        "The last time that the image %s was referenced by launch config %s is %d",
                        imageId, launchConfigurationName, ltime));
                ami.setAdditionalField(AMI_FIELD_LAST_LC_REF_TIME, String.valueOf(ltime));
            }
        }
    }
}
/**
 * Builds the Edda query URL that matches terminated instances referencing any
 * AMI in the batch, expanding only the meta fields needed downstream
 * (ltime, imageId, instanceId).
 */
private String getInstanceBatchUrl(String region, List<Resource> batch, long since) {
    String filterSuffix = String.format(";data.state.name=terminated;_since=%d;_expand;_meta:"
            + "(ltime,data:(imageId,instanceId))", since);
    return eddaClient.getBaseUrl(region) + "/view/instances/;data.imageId="
            + getImageIdsString(batch) + filterSuffix;
}
/**
 * Builds the Edda query URL that matches launch configurations referencing any
 * AMI in the batch, expanding only the meta fields needed downstream
 * (ltime, imageId, launchConfigurationName).
 */
private String getLaunchConfigBatchUrl(String region, List<Resource> batch, long since) {
    String filterSuffix = String.format(
            ";_since=%d;_expand;_meta:(ltime,data:(imageId,launchConfigurationName))", since);
    return eddaClient.getBaseUrl(region) + "/aws/launchConfigurations/;data.imageId="
            + getImageIdsString(batch) + filterSuffix;
}
/** Joins the ids of the given resources into a single comma-separated string. */
private String getImageIdsString(List<Resource> resources) {
    StringBuilder joined = new StringBuilder();
    for (Resource resource : resources) {
        // Prepend the separator for every element after the first.
        if (joined.length() > 0) {
            joined.append(',');
        }
        joined.append(resource.getId());
    }
    return joined.toString();
}
/**
 * Extracts the base/ancestor AMI id from an image description, trying each of
 * the two known description formats in turn:
 * base_ami_id=ami-1eb75c77,base_ami_name=servicenet-roku-qadd.dc.81210.10.44
 * store=ebs,ancestor_name=ebs-centosbase-x86_64-20101124,ancestor_id=ami-7b4eb912
 *
 * @return the extracted AMI id, or null when neither format matches
 */
private static String getBaseAmiIdFromDescription(String imageDescription) {
    for (Pattern pattern : new Pattern[] {BASE_AMI_ID_PATTERN, ANCESTOR_ID_PATTERN}) {
        Matcher matcher = pattern.matcher(imageDescription);
        if (matcher.matches()) {
            return matcher.group(1);
        }
    }
    return null;
}
}
| 23,864
| 43.524254
| 120
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/edda/EddaELBJanitorCrawler.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler.edda;
import com.google.common.collect.Lists;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import com.netflix.simianarmy.client.edda.EddaClient;
import com.netflix.simianarmy.janitor.JanitorCrawler;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.codehaus.jackson.JsonNode;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
/**
* The crawler to crawl AWS instances for janitor monkey using Edda.
*/
public class EddaELBJanitorCrawler implements JanitorCrawler {

    /**
     * Value holder for a DNS record that points at an ELB. Declared static since
     * it never touches the enclosing crawler's state (avoids the hidden
     * enclosing-instance reference of a non-static inner class).
     */
    static class DNSEntry {
        String dnsName;
        String dnsType;
        String hostedZoneId;
    }

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(EddaELBJanitorCrawler.class);

    // NOTE(review): appears unused in this class — candidate for removal.
    private static final DateTimeFormatter TIME_FORMATTER = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.S'Z'");

    private final EddaClient eddaClient;
    private final List<String> regions = Lists.newArrayList();
    private final boolean useEddaApplicationOwner;
    private final String fallbackOwnerEmail;

    // Maps application name to its owner's email; populated from Edda when
    // useEddaApplicationOwner is enabled.
    private Map<String, String> applicationToOwner = new HashMap<String, String>();

    /**
     * Instantiates a new ELB janitor crawler.
     *
     * @param eddaClient the Edda client
     * @param fallbackOwnerEmail owner email used when no owner can be resolved
     * @param useEddaApplicationOwner if true, resolve owners from Edda application
     *        metadata instead of the owner tag on the resource
     * @param regions the regions the crawler will crawl resources for
     */
    public EddaELBJanitorCrawler(EddaClient eddaClient, String fallbackOwnerEmail, boolean useEddaApplicationOwner,
            String... regions) {
        Validate.notNull(eddaClient);
        Validate.notNull(regions);
        this.useEddaApplicationOwner = useEddaApplicationOwner;
        this.fallbackOwnerEmail = fallbackOwnerEmail;
        this.eddaClient = eddaClient;
        for (String region : regions) {
            this.regions.add(region);
        }
    }

    @Override
    public EnumSet<? extends ResourceType> resourceTypes() {
        return EnumSet.of(AWSResourceType.ELB);
    }

    @Override
    public List<Resource> resources(ResourceType resourceType) {
        if ("ELB".equals(resourceType.name())) {
            return getELBResources();
        }
        return Collections.emptyList();
    }

    @Override
    public List<Resource> resources(String... resourceIds) {
        return getELBResources(resourceIds);
    }

    /**
     * Resolves the owner email for an ELB: either by matching the ELB name
     * against known application names (Edda mode) or by reading the owner tag.
     * Falls back to {@code fallbackOwnerEmail} when nothing matches.
     */
    @Override
    public String getOwnerEmailForResource(Resource resource) {
        Validate.notNull(resource);
        String ownerEmail = null;
        if (useEddaApplicationOwner) {
            // assumes applicationToOwner keys are lower case — TODO(review) confirm in EddaUtils
            for (String app : applicationToOwner.keySet()) {
                if (resource.getId().toLowerCase().startsWith(app)) {
                    ownerEmail = applicationToOwner.get(app);
                    break;
                }
            }
        } else {
            ownerEmail = resource.getTag(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY);
        }
        if (ownerEmail == null) {
            ownerEmail = fallbackOwnerEmail;
        }
        return ownerEmail;
    }

    private List<Resource> getELBResources(String... elbNames) {
        if (useEddaApplicationOwner) {
            applicationToOwner = EddaUtils.getAllApplicationOwnerEmails(eddaClient);
        }
        List<Resource> resources = Lists.newArrayList();
        for (String region : regions) {
            resources.addAll(getELBResourcesInRegion(region, elbNames));
        }
        return resources;
    }

    /**
     * Crawls the ELBs in one region (optionally restricted to the given names)
     * and annotates each resource with the ASGs and DNS records referencing it.
     */
    private List<Resource> getELBResourcesInRegion(String region, String... elbNames) {
        String url = eddaClient.getBaseUrl(region) + "/aws/loadBalancers";
        if (elbNames != null && elbNames.length != 0) {
            // BUG FIX: a separator was missing before the name list, producing URLs
            // like ".../loadBalancersname1,name2". Use the "/<id1>,<id2>" form that
            // the other Edda crawlers use.
            url += "/" + StringUtils.join(elbNames, ',');
            LOGGER.info(String.format("Getting ELBs in region %s for %d names", region, elbNames.length));
        } else {
            LOGGER.info(String.format("Getting all ELBs in region %s", region));
        }
        url += ";_expand:(loadBalancerName,createdTime,DNSName,instances,tags:(key,value))";
        JsonNode jsonNode = null;
        try {
            jsonNode = eddaClient.getJsonNodeFromUrl(url);
        } catch (Exception e) {
            LOGGER.error(String.format(
                    "Failed to get Jason node from edda for ELBs in region %s.", region), e);
        }
        if (jsonNode == null || !jsonNode.isArray()) {
            throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
        }
        List<Resource> resources = Lists.newArrayList();
        for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
            resources.add(parseJsonElementToELBResource(region, it.next()));
        }
        // Annotate each ELB with the ASGs that reference it.
        Map<String, List<String>> elBtoASGMap = buildELBtoASGMap(region);
        for (Resource resource : resources) {
            List<String> asgList = elBtoASGMap.get(resource.getId());
            if (asgList != null && asgList.size() > 0) {
                resource.setAdditionalField("referencedASGCount", "" + asgList.size());
                String asgStr = StringUtils.join(asgList, ",");
                resource.setDescription(resource.getDescription() + ", ASGS=" + asgStr);
                LOGGER.debug(String.format("Resource ELB %s is referenced by ASGs %s", resource.getId(), asgStr));
            } else {
                resource.setAdditionalField("referencedASGCount", "0");
                resource.setDescription(resource.getDescription() + ", ASGS=none");
                LOGGER.debug(String.format("No ASGs found for ELB %s", resource.getId()));
            }
        }
        // Annotate each ELB with the Route53 records that resolve to it.
        Map<String, List<DNSEntry>> elBtoDNSMap = buildELBtoDNSMap(region);
        for (Resource resource : resources) {
            List<DNSEntry> dnsEntryList = elBtoDNSMap.get(resource.getAdditionalField("DNSName"));
            if (dnsEntryList != null && dnsEntryList.size() > 0) {
                ArrayList<String> dnsNames = new ArrayList<>();
                ArrayList<String> dnsTypes = new ArrayList<>();
                ArrayList<String> hostedZoneIds = new ArrayList<>();
                for (DNSEntry dnsEntry : dnsEntryList) {
                    dnsNames.add(dnsEntry.dnsName);
                    dnsTypes.add(dnsEntry.dnsType);
                    hostedZoneIds.add(dnsEntry.hostedZoneId);
                }
                resource.setAdditionalField("referencedDNS", StringUtils.join(dnsNames, ","));
                resource.setAdditionalField("referencedDNSTypes", StringUtils.join(dnsTypes, ","));
                resource.setAdditionalField("referencedDNSZones", StringUtils.join(hostedZoneIds, ","));
                resource.setDescription(resource.getDescription() + ", DNS="
                        + resource.getAdditionalField("referencedDNS"));
                LOGGER.debug(String.format("Resource ELB %s is referenced by DNS %s", resource.getId(),
                        resource.getAdditionalField("referencedDNS")));
            } else {
                resource.setAdditionalField("referencedDNS", "");
                resource.setDescription(resource.getDescription() + ", DNS=none");
                LOGGER.debug(String.format("No DNS found for ELB %s", resource.getId()));
            }
        }
        return resources;
    }

    /**
     * Builds a map from ELB name to the list of ASG names referencing that ELB,
     * based on the loadBalancerNames of every ASG in the region.
     */
    private Map<String, List<String>> buildELBtoASGMap(String region) {
        String url = eddaClient.getBaseUrl(region)
                + "/aws/autoScalingGroups;_expand:(autoScalingGroupName,loadBalancerNames)";
        LOGGER.info(String.format("Getting all ELBs associated with ASGs in region %s", region));
        JsonNode jsonNode = null;
        try {
            jsonNode = eddaClient.getJsonNodeFromUrl(url);
        } catch (Exception e) {
            LOGGER.error(String.format(
                    "Failed to get JSON node from edda for ASG ELBs in region %s.", region), e);
        }
        if (jsonNode == null || !jsonNode.isArray()) {
            throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
        }
        HashMap<String, List<String>> asgMap = new HashMap<>();
        for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
            JsonNode asgNode = it.next();
            String asgName = asgNode.get("autoScalingGroupName").getTextValue();
            JsonNode elbs = asgNode.get("loadBalancerNames");
            if (elbs == null || !elbs.isArray() || elbs.size() == 0) {
                continue;
            }
            for (Iterator<JsonNode> elbNode = elbs.getElements(); elbNode.hasNext();) {
                String elbName = elbNode.next().getTextValue();
                List<String> asgList = asgMap.get(elbName);
                if (asgList == null) {
                    asgList = new ArrayList<>();
                    asgMap.put(elbName, asgList);
                }
                asgList.add(asgName);
                LOGGER.debug(String.format("Found ASG %s associated with ELB %s", asgName, elbName));
            }
        }
        return asgMap;
    }

    /**
     * Converts one Edda loadBalancer JSON document into an AWSResource,
     * copying tags, owner and the list of attached instance ids.
     */
    private Resource parseJsonElementToELBResource(String region, JsonNode jsonNode) {
        Validate.notNull(jsonNode);
        String elbName = jsonNode.get("loadBalancerName").getTextValue();
        long launchTime = jsonNode.get("createdTime").getLongValue();
        Resource resource = new AWSResource().withId(elbName).withRegion(region)
                .withResourceType(AWSResourceType.ELB)
                .withLaunchTime(new Date(launchTime));
        String dnsName = jsonNode.get("DNSName").getTextValue();
        resource.setAdditionalField("DNSName", dnsName);
        JsonNode tags = jsonNode.get("tags");
        if (tags == null || !tags.isArray() || tags.size() == 0) {
            LOGGER.debug(String.format("No tags is found for %s", resource.getId()));
        } else {
            for (Iterator<JsonNode> it = tags.getElements(); it.hasNext();) {
                JsonNode tag = it.next();
                String key = tag.get("key").getTextValue();
                String value = tag.get("value").getTextValue();
                resource.setTag(key, value);
            }
        }
        String owner = getOwnerEmailForResource(resource);
        if (owner != null) {
            resource.setOwnerEmail(owner);
        }
        LOGGER.debug(String.format("Owner of ELB Resource %s (ELB DNS: %s) is %s", resource.getId(),
                resource.getAdditionalField("DNSName"), resource.getOwnerEmail()));
        JsonNode instances = jsonNode.get("instances");
        if (instances == null || !instances.isArray() || instances.size() == 0) {
            resource.setAdditionalField("instanceCount", "0");
            resource.setDescription("instances=none");
            LOGGER.debug(String.format("No instances found for ELB %s", resource.getId()));
        } else {
            resource.setAdditionalField("instanceCount", "" + instances.size());
            ArrayList<String> instanceList = new ArrayList<String>(instances.size());
            LOGGER.debug(String.format("Found %d instances for ELB %s", instances.size(), resource.getId()));
            for (Iterator<JsonNode> it = instances.getElements(); it.hasNext();) {
                JsonNode instance = it.next();
                String instanceId = instance.get("instanceId").getTextValue();
                instanceList.add(instanceId);
            }
            String instancesStr = StringUtils.join(instanceList, ",");
            // BUG FIX: the description previously formatted the raw JSON node
            // ("instances") instead of the joined instance-id list.
            resource.setDescription(String.format("instances=%s", instancesStr));
            LOGGER.debug(String.format("Resource ELB %s has instances %s", resource.getId(), instancesStr));
        }
        return resource;
    }

    /**
     * Builds a map from an ELB's DNS name to the Route53 records (plain or
     * alias) that resolve to it. Only records containing ".elb." are kept.
     */
    private Map<String, List<DNSEntry>> buildELBtoDNSMap(String region) {
        String url = eddaClient.getBaseUrl(region)
                + "/aws/hostedRecords;_expand:(name,type,aliasTarget,resourceRecords:(value),zone:(id))";
        LOGGER.info(String.format("Getting all ELBs associated with DNSs in region %s", region));
        JsonNode jsonNode = null;
        try {
            jsonNode = eddaClient.getJsonNodeFromUrl(url);
        } catch (Exception e) {
            LOGGER.error(String.format(
                    "Failed to get JSON node from edda for DNS ELBs in region %s.", region), e);
        }
        if (jsonNode == null || !jsonNode.isArray()) {
            throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
        }
        HashMap<String, List<DNSEntry>> dnsMap = new HashMap<>();
        for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
            JsonNode dnsNode = it.next();
            String dnsName = dnsNode.get("name").getTextValue();
            String dnsType = dnsNode.get("type").getTextValue();
            String hostedZoneId = null;
            JsonNode hostedZoneNode = dnsNode.get("zone");
            if (hostedZoneNode != null) {
                JsonNode hostedZoneIdNode = hostedZoneNode.get("id");
                if (hostedZoneIdNode != null) {
                    hostedZoneId = hostedZoneIdNode.getTextValue();
                }
            }
            // Alias records carry the ELB DNS name in the aliasTarget.
            JsonNode aliasTarget = dnsNode.get("aliasTarget");
            if (aliasTarget != null) {
                JsonNode aliasTargetDnsNameNode = aliasTarget.get("DNSName");
                if (aliasTargetDnsNameNode != null) {
                    String aliasTargetDnsName = aliasTargetDnsNameNode.getTextValue();
                    if (aliasTargetDnsName != null && aliasTargetDnsName.contains(".elb.")) {
                        DNSEntry dnsEntry = new DNSEntry();
                        dnsEntry.dnsName = dnsName;
                        dnsEntry.dnsType = dnsType;
                        dnsEntry.hostedZoneId = hostedZoneId;
                        // Strip the trailing dot of a fully-qualified DNS name so the
                        // key matches the ELB's DNSName field.
                        if (aliasTargetDnsName.endsWith(".")) {
                            aliasTargetDnsName = aliasTargetDnsName.substring(0, aliasTargetDnsName.length() - 1);
                        }
                        List<DNSEntry> dnsEntryList = dnsMap.get(aliasTargetDnsName);
                        if (dnsEntryList == null) {
                            dnsEntryList = new ArrayList<>();
                            dnsMap.put(aliasTargetDnsName, dnsEntryList);
                        }
                        dnsEntryList.add(dnsEntry);
                        LOGGER.debug(String.format("Found DNS %s (alias) associated with ELB DNS %s, type %s, zone %s",
                                dnsName, aliasTargetDnsName, dnsType, hostedZoneId));
                    }
                }
            }
            // Plain records carry the ELB DNS name in resourceRecords values.
            JsonNode records = dnsNode.get("resourceRecords");
            if (records == null || !records.isArray() || records.size() == 0) {
                continue;
            }
            for (Iterator<JsonNode> recordNode = records.getElements(); recordNode.hasNext();) {
                JsonNode record = recordNode.next();
                String elbDNS = record.get("value").getTextValue();
                if (elbDNS.contains(".elb.")) {
                    DNSEntry dnsEntry = new DNSEntry();
                    dnsEntry.dnsName = dnsName;
                    dnsEntry.dnsType = dnsType;
                    dnsEntry.hostedZoneId = hostedZoneId;
                    List<DNSEntry> dnsEntryList = dnsMap.get(elbDNS);
                    if (dnsEntryList == null) {
                        dnsEntryList = new ArrayList<>();
                        dnsMap.put(elbDNS, dnsEntryList);
                    }
                    dnsEntryList.add(dnsEntry);
                    LOGGER.debug(String.format("Found DNS %s associated with ELB DNS %s, type %s, zone %s",
                            dnsName, elbDNS, dnsType, hostedZoneId));
                }
            }
        }
        return dnsMap;
    }
}
| 16,875
| 43.293963
| 173
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/edda/EddaInstanceJanitorCrawler.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler.edda;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.aws.janitor.crawler.InstanceJanitorCrawler;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import com.netflix.simianarmy.client.edda.EddaClient;
import com.netflix.simianarmy.janitor.JanitorCrawler;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.codehaus.jackson.JsonNode;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.HashSet;
import java.util.HashMap;
/**
* The crawler to crawl AWS instances for janitor monkey using Edda.
*/
public class EddaInstanceJanitorCrawler implements JanitorCrawler {
/** The Constant LOGGER. */
private static final Logger LOGGER = LoggerFactory.getLogger(EddaInstanceJanitorCrawler.class);
// NOTE(review): appears unused in this class — confirm before removing.
private static final DateTimeFormatter TIME_FORMATTER = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.S'Z'");
private final EddaClient eddaClient;
private final List<String> regions = Lists.newArrayList();
// Maps instance id to its ASG name; rebuilt by refreshAsgInstances() on each crawl.
private final Map<String, String> instanceToAsg = Maps.newHashMap();
/** Max image ids per Edda Query */
private static final int MAX_IMAGE_IDS_PER_QUERY = 40;
/**
 * Instantiates a new basic instance crawler.
 * @param eddaClient
 *            the Edda client
 * @param regions
 *            the regions the crawler will crawl resources for
 */
public EddaInstanceJanitorCrawler(EddaClient eddaClient, String... regions) {
    Validate.notNull(eddaClient);
    this.eddaClient = eddaClient;
    Validate.notNull(regions);
    for (String region : regions) {
        this.regions.add(region);
    }
}
@Override
public EnumSet<? extends ResourceType> resourceTypes() {
    return EnumSet.of(AWSResourceType.INSTANCE);
}
@Override
public List<Resource> resources(ResourceType resourceType) {
    // Only INSTANCE resources are handled by this crawler.
    if ("INSTANCE".equals(resourceType.name())) {
        return getInstanceResources();
    }
    return Collections.emptyList();
}
@Override
public List<Resource> resources(String... resourceIds) {
    return getInstanceResources(resourceIds);
}
/** Returns the owner tag of the resource, or null when it has no owner tag. */
@Override
public String getOwnerEmailForResource(Resource resource) {
    Validate.notNull(resource);
    return resource.getTag(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY);
}
// Aggregates the instance resources of all configured regions,
// optionally restricted to the given instance ids.
private List<Resource> getInstanceResources(String... instanceIds) {
    List<Resource> resources = Lists.newArrayList();
    for (String region : regions) {
        resources.addAll(getInstanceResourcesInRegion(region, instanceIds));
    }
    return resources;
}
/**
 * Crawls the running instances of one region from Edda, then fills in any
 * missing owner emails from the owner tag of the instances' AMIs.
 */
private List<Resource> getInstanceResourcesInRegion(String region, String... instanceIds) {
    // NOTE(review): refreshes ASG mappings for ALL regions on each regional call —
    // confirm this repetition is intended.
    refreshAsgInstances();
    String url = eddaClient.getBaseUrl(region) + "/view/instances;";
    if (instanceIds != null && instanceIds.length != 0) {
        url += StringUtils.join(instanceIds, ',');
        LOGGER.info(String.format("Getting instances in region %s for %d ids", region, instanceIds.length));
    } else {
        LOGGER.info(String.format("Getting all instances in region %s", region));
    }
    // Only running instances; expand just the fields used below.
    url += ";state.name=running;_expand:(instanceId,launchTime,state:(name),instanceType,imageId"
            + ",publicDnsName,tags:(key,value))";
    JsonNode jsonNode = null;
    try {
        jsonNode = eddaClient.getJsonNodeFromUrl(url);
    } catch (Exception e) {
        LOGGER.error(String.format(
                "Failed to get Jason node from edda for instances in region %s.", region), e);
    }
    if (jsonNode == null || !jsonNode.isArray()) {
        throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
    }
    List<Resource> resources = Lists.newArrayList();
    for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
        resources.add(parseJsonElementToInstanceResource(region, it.next()));
    }
    refreshOwnerByImage(region, resources);
    return resources;
}
/**
 * Converts one Edda instance JSON document into an AWSResource, copying tags,
 * ASG association, AWS state and image id.
 */
private Resource parseJsonElementToInstanceResource(String region, JsonNode jsonNode) {
    Validate.notNull(jsonNode);
    String instanceId = jsonNode.get("instanceId").getTextValue();
    long launchTime = jsonNode.get("launchTime").getLongValue();
    Resource resource = new AWSResource().withId(instanceId).withRegion(region)
            .withResourceType(AWSResourceType.INSTANCE)
            .withLaunchTime(new Date(launchTime));
    JsonNode publicDnsName = jsonNode.get("publicDnsName");
    String description = String.format("type=%s; host=%s",
            jsonNode.get("instanceType").getTextValue(),
            publicDnsName == null ? "" : publicDnsName.getTextValue());
    resource.setDescription(description);
    // NOTE(review): no tags have been copied onto the resource yet, so this lookup
    // always yields null here; the owner is actually picked up in the tag loop below.
    String owner = getOwnerEmailForResource(resource);
    resource.setOwnerEmail(owner);
    JsonNode tags = jsonNode.get("tags");
    String asgName = null;
    if (tags == null || !tags.isArray() || tags.size() == 0) {
        LOGGER.debug(String.format("No tags is found for %s", resource.getId()));
    } else {
        for (Iterator<JsonNode> it = tags.getElements(); it.hasNext();) {
            JsonNode tag = it.next();
            String key = tag.get("key").getTextValue();
            String value = tag.get("value").getTextValue();
            resource.setTag(key, value);
            if ("aws:autoscaling:groupName".equals(key)) {
                asgName = value;
            } else if (owner == null && BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY.equals(key)) {
                resource.setOwnerEmail(value);
            }
        }
        // NOTE(review): re-sets the same description string — appears redundant.
        resource.setDescription(description.toString());
    }
    // If we cannot find ASG name in tags, use the map for the ASG name
    if (asgName == null) {
        asgName = instanceToAsg.get(instanceId);
        if (asgName != null) {
            LOGGER.debug(String.format("Failed to find ASG name in tags of %s, use the ASG name %s from map",
                    instanceId, asgName));
        }
    }
    if (asgName != null) {
        resource.setAdditionalField(InstanceJanitorCrawler.INSTANCE_FIELD_ASG_NAME, asgName);
    }
    ((AWSResource) resource).setAWSResourceState(jsonNode.get("state").get("name").getTextValue());
    String imageId = jsonNode.get("imageId").getTextValue();
    resource.setAdditionalField("imageId", imageId);
    return resource;
}
/**
 * Rebuilds the instance-id to ASG-name map from the ASG membership data in
 * Edda, for every configured region.
 */
private void refreshAsgInstances() {
    instanceToAsg.clear();
    for (String region : regions) {
        LOGGER.info(String.format("Getting ASG instances in region %s", region));
        String url = eddaClient.getBaseUrl(region) + "/aws/autoScalingGroups"
                + ";_expand:(autoScalingGroupName,instances:(instanceId))";
        JsonNode jsonNode = null;
        try {
            jsonNode = eddaClient.getJsonNodeFromUrl(url);
        } catch (Exception e) {
            LOGGER.error(String.format(
                    "Failed to get Jason node from edda for ASGs in region %s.", region), e);
        }
        if (jsonNode == null || !jsonNode.isArray()) {
            throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s",
                    url, jsonNode));
        }
        for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
            JsonNode asg = it.next();
            String asgName = asg.get("autoScalingGroupName").getTextValue();
            JsonNode instances = asg.get("instances");
            if (instances == null || instances.isNull() || !instances.isArray() || instances.size() == 0) {
                continue;
            }
            for (Iterator<JsonNode> instanceIt = instances.getElements(); instanceIt.hasNext();) {
                JsonNode instance = instanceIt.next();
                instanceToAsg.put(instance.get("instanceId").getTextValue(), asgName);
            }
        }
    }
}
/**
 * For resources that still have no owner, looks up the owner tag of their AMI
 * in Edda (in batches of MAX_IMAGE_IDS_PER_QUERY to bound URL length) and
 * copies it onto the resource.
 */
private void refreshOwnerByImage(String region, List<Resource> resources) {
    // Collect the image ids of all owner-less resources.
    HashSet<String> imageIds = new HashSet<>();
    for (Resource resource: resources) {
        if (resource.getOwnerEmail() == null) {
            imageIds.add(resource.getAdditionalField("imageId"));
        }
    }
    if (imageIds.size() > 0) {
        HashMap<String, String> imageToOwner = new HashMap<>();
        String baseurl = eddaClient.getBaseUrl(region) + "/aws/images/";
        Iterator<String> itr = imageIds.iterator();
        long leftToQuery = imageIds.size();
        while (leftToQuery > 0) {
            // Query at most MAX_IMAGE_IDS_PER_QUERY ids at a time.
            long batchcount = leftToQuery > MAX_IMAGE_IDS_PER_QUERY ? MAX_IMAGE_IDS_PER_QUERY : leftToQuery;
            leftToQuery -= batchcount;
            ArrayList<String> batch = new ArrayList<>();
            for(int i=0;i<batchcount; i++) {
                batch.add(itr.next());
            }
            String url = baseurl;
            url += StringUtils.join(batch, ',');
            // Only private images that carry an owner tag; expand just imageId and owner.
            url += ";tags.key=owner;public=false;_expand:(imageId,tags:(owner))";
            JsonNode imageJsonNode = null;
            try {
                imageJsonNode = eddaClient.getJsonNodeFromUrl(url);
            } catch (Exception e) {
                // Best effort: a failed batch simply leaves those owners unresolved.
                LOGGER.error(String.format(
                        "Failed to get Json node from edda for AMIs in region %s.", region), e);
            }
            if (imageJsonNode != null) {
                for (Iterator<JsonNode> it = imageJsonNode.getElements(); it.hasNext();) {
                    JsonNode image = it.next();
                    String imageId = image.get("imageId").getTextValue();
                    JsonNode tags = image.get("tags");
                    for (Iterator<JsonNode> tagIt = tags.getElements(); tagIt.hasNext();) {
                        JsonNode tag = tagIt.next();
                        if (tag.get(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY) != null) {
                            imageToOwner.put(imageId, tag.get(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY).getTextValue());
                            break;
                        }
                    }
                }
            }
        }
        if (imageToOwner.size() > 0) {
            // Copy the resolved owners back onto the owner-less resources.
            for (Resource resource: resources) {
                if (resource.getOwnerEmail() == null
                        && imageToOwner.get(resource.getAdditionalField("imageId")) != null) {
                    resource.setOwnerEmail(imageToOwner.get(resource.getAdditionalField("imageId")));
                    LOGGER.info(String.format("Found owner %s for instance %s in AMI %s",
                            resource.getOwnerEmail(), resource.getId(), resource.getAdditionalField("imageId")));
                }
            }
        }
    }
}
}
| 12,583
| 41.086957
| 126
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/edda/EddaLaunchConfigJanitorCrawler.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler.edda;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.edda.EddaClient;
import com.netflix.simianarmy.janitor.JanitorCrawler;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.codehaus.jackson.JsonNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collections;
import java.util.Date;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
/**
* The crawler to crawl AWS launch configurations for janitor monkey using Edda.
*/
public class EddaLaunchConfigJanitorCrawler implements JanitorCrawler {

    /** The name representing the additional field name of a flag indicating if the launch config
     * is used by an auto scaling group. */
    public static final String LAUNCH_CONFIG_FIELD_USED_BY_ASG = "USED_BY_ASG";

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(EddaLaunchConfigJanitorCrawler.class);

    private final EddaClient eddaClient;
    private final List<String> regions = Lists.newArrayList();

    /**
     * Instantiates a new basic launch configuration crawler.
     *
     * @param eddaClient
     *            the Edda client used to query AWS resource state
     * @param regions
     *            the regions the crawler will crawl resources for
     */
    public EddaLaunchConfigJanitorCrawler(EddaClient eddaClient, String... regions) {
        Validate.notNull(eddaClient);
        this.eddaClient = eddaClient;
        Validate.notNull(regions);
        Collections.addAll(this.regions, regions);
    }

    /** {@inheritDoc} */
    @Override
    public EnumSet<? extends ResourceType> resourceTypes() {
        return EnumSet.of(AWSResourceType.LAUNCH_CONFIG);
    }

    /** {@inheritDoc} */
    @Override
    public List<Resource> resources(ResourceType resourceType) {
        if ("LAUNCH_CONFIG".equals(resourceType.name())) {
            return getLaunchConfigResources();
        }
        return Collections.emptyList();
    }

    /** {@inheritDoc} */
    @Override
    public List<Resource> resources(String... resourceIds) {
        return getLaunchConfigResources(resourceIds);
    }

    /** {@inheritDoc} */
    @Override
    public String getOwnerEmailForResource(Resource resource) {
        // Launch configurations do not support tags, so no owner can be determined.
        return null;
    }

    /**
     * Crawls launch configuration resources across all configured regions.
     *
     * @param launchConfigNames optional launch config names; when empty, all are crawled
     * @return the crawled resources
     */
    private List<Resource> getLaunchConfigResources(String... launchConfigNames) {
        List<Resource> resources = Lists.newArrayList();
        for (String region : regions) {
            resources.addAll(getLaunchConfigResourcesInRegion(region, launchConfigNames));
        }
        return resources;
    }

    /**
     * Crawls launch configuration resources in a single region via Edda.
     *
     * @param region the region to query
     * @param launchConfigNames optional launch config names to restrict the query to
     * @return the crawled resources in the region
     */
    private List<Resource> getLaunchConfigResourcesInRegion(String region, String... launchConfigNames) {
        // Edda matrix-parameter syntax: requested ids follow the trailing ';'.
        String url = eddaClient.getBaseUrl(region) + "/aws/launchConfigurations;";
        if (launchConfigNames != null && launchConfigNames.length != 0) {
            url += StringUtils.join(launchConfigNames, ',');
            LOGGER.info(String.format("Getting launch configurations in region %s for %d ids",
                    region, launchConfigNames.length));
        } else {
            LOGGER.info(String.format("Getting all launch configurations in region %s", region));
        }
        // Only request the fields this crawler actually consumes.
        url += ";_expand:(launchConfigurationName,createdTime)";

        JsonNode jsonNode = null;
        try {
            jsonNode = eddaClient.getJsonNodeFromUrl(url);
        } catch (Exception e) {
            LOGGER.error(String.format(
                    "Failed to get JSON node from edda for instances in region %s.", region), e);
        }

        if (jsonNode == null || !jsonNode.isArray()) {
            throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
        }

        List<Resource> resources = Lists.newArrayList();
        Set<String> usedLCs = getLaunchConfigsInUse(region);
        for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
            JsonNode launchConfiguration = it.next();
            String lcName = launchConfiguration.get("launchConfigurationName").getTextValue();
            Resource lcResource = new AWSResource().withId(lcName)
                    .withRegion(region).withResourceType(AWSResourceType.LAUNCH_CONFIG)
                    .withLaunchTime(new Date(launchConfiguration.get("createdTime").getLongValue()));
            lcResource.setOwnerEmail(getOwnerEmailForResource(lcResource));
            // Mark whether any ASG currently references this launch config; the janitor
            // rules use this flag to decide whether the config is an orphan.
            lcResource.setAdditionalField(LAUNCH_CONFIG_FIELD_USED_BY_ASG, String.valueOf(usedLCs.contains(lcName)));
            resources.add(lcResource);
        }
        return resources;
    }

    /**
     * Gets the launch configs that are currently in use by at least one ASG in a region.
     *
     * @param region the region
     * @return the set of launch config names
     */
    private Set<String> getLaunchConfigsInUse(String region) {
        LOGGER.info(String.format("Getting all launch configurations in use in region %s", region));
        String url = eddaClient.getBaseUrl(region) + "/aws/autoScalingGroups;_expand:(launchConfigurationName)";
        JsonNode jsonNode = null;
        try {
            jsonNode = eddaClient.getJsonNodeFromUrl(url);
        } catch (Exception e) {
            LOGGER.error(String.format(
                    "Failed to get JSON node from edda for launch configs in use in region %s.", region), e);
        }

        if (jsonNode == null || !jsonNode.isArray()) {
            throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
        }

        Set<String> launchConfigs = Sets.newHashSet();
        for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
            launchConfigs.add(it.next().get("launchConfigurationName").getTextValue());
        }
        return launchConfigs;
    }
}
| 6,765
| 37.443182
| 118
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/edda/EddaUtils.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler.edda;
import com.netflix.simianarmy.client.edda.EddaClient;
import org.codehaus.jackson.JsonNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.UnknownHostException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
/**
* Misc common Edda Utilities
*/
public class EddaUtils {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(EddaUtils.class);

    /** Utility class; not meant to be instantiated. */
    private EddaUtils() {
    }

    /**
     * Gets the owner email for every application registered in Edda.
     * Application metadata is queried from a single region (us-east-1).
     *
     * @param eddaClient the Edda client to query with
     * @return a map from lower-cased application name to owner email; empty when
     *         the Edda endpoint host cannot be resolved
     */
    public static Map<String, String> getAllApplicationOwnerEmails(EddaClient eddaClient) {
        String region = "us-east-1";
        LOGGER.info(String.format("Getting all application names and emails in region %s.", region));
        String url = eddaClient.getBaseUrl(region) + "/netflix/applications/;_expand:(name,email)";
        JsonNode jsonNode = null;
        try {
            jsonNode = eddaClient.getJsonNodeFromUrl(url);
        } catch (UnknownHostException e) {
            // Treat a missing Edda endpoint as "no data" rather than a hard failure.
            LOGGER.warn(String.format("Edda endpoint is not available in region %s", region));
            return Collections.emptyMap();
        } catch (Exception e) {
            throw new RuntimeException(String.format("Failed to get Json node from url: %s", url), e);
        }
        if (jsonNode == null || !jsonNode.isArray()) {
            throw new RuntimeException(String.format("failed to get valid document from %s, got: %s", url, jsonNode));
        }
        Iterator<JsonNode> it = jsonNode.getElements();
        Map<String, String> appToOwner = new HashMap<String, String>();
        while (it.hasNext()) {
            JsonNode node = it.next();
            // Null-check BEFORE lower-casing: getTextValue() may return null for a
            // missing field, and the original toLowerCase() call would then NPE.
            String appName = node.get("name").getTextValue();
            String owner = node.get("email").getTextValue();
            if (appName != null && owner != null) {
                appToOwner.put(appName.toLowerCase(), owner);
            }
        }
        return appToOwner;
    }
}
| 2,618
| 35.375
| 118
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/edda/EddaASGJanitorCrawler.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler.edda;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import com.netflix.simianarmy.client.edda.EddaClient;
import com.netflix.simianarmy.janitor.JanitorCrawler;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.codehaus.jackson.JsonNode;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collections;
import java.util.Date;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* The crawler to crawl AWS auto scaling groups for janitor monkey using Edda.
*/
public class EddaASGJanitorCrawler implements JanitorCrawler {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(EddaASGJanitorCrawler.class);

    /** The name representing the additional field name of instance ids. */
    public static final String ASG_FIELD_INSTANCES = "INSTANCES";

    /** The name representing the additional field name of max ASG size. */
    public static final String ASG_FIELD_MAX_SIZE = "MAX_SIZE";

    /** The name representing the additional field name of ELB names. */
    public static final String ASG_FIELD_ELBS = "ELBS";

    /** The name representing the additional field name of launch configuration name. */
    public static final String ASG_FIELD_LC_NAME = "LAUNCH_CONFIGURATION_NAME";

    /** The name representing the additional field name of launch configuration creation time. */
    public static final String ASG_FIELD_LC_CREATION_TIME = "LAUNCH_CONFIGURATION_CREATION_TIME";

    /** The name representing the additional field name of ASG suspension time from ELB. */
    public static final String ASG_FIELD_SUSPENSION_TIME = "ASG_SUSPENSION_TIME";

    /** The name representing the additional field name of ASG's last change/activity time. */
    public static final String ASG_FIELD_LAST_CHANGE_TIME = "ASG_LAST_CHANGE_TIME";

    /** The regular expression pattern below is for the termination reason added by AWS when
     * an ASG is suspended from ELB's traffic.
     */
    private static final Pattern SUSPENSION_REASON_PATTERN =
            Pattern.compile("User suspended at (\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}).*");

    private final EddaClient eddaClient;
    private final List<String> regions = Lists.newArrayList();

    /** Cache of region -> ASG name -> last change time, rebuilt on every crawl. */
    private final Map<String, Map<String, Long>> regionToAsgToLastChangeTime = Maps.newHashMap();

    /**
     * Instantiates a new basic ASG crawler.
     *
     * @param eddaClient
     *            the Edda client used to query AWS resource state
     * @param regions
     *            the regions the crawler will crawl resources for
     */
    public EddaASGJanitorCrawler(EddaClient eddaClient, String... regions) {
        Validate.notNull(eddaClient);
        this.eddaClient = eddaClient;
        Validate.notNull(regions);
        Collections.addAll(this.regions, regions);
    }

    /** {@inheritDoc} */
    @Override
    public EnumSet<? extends ResourceType> resourceTypes() {
        return EnumSet.of(AWSResourceType.ASG);
    }

    /** {@inheritDoc} */
    @Override
    public List<Resource> resources(ResourceType resourceType) {
        if ("ASG".equals(resourceType.name())) {
            return getASGResources();
        }
        return Collections.emptyList();
    }

    /** {@inheritDoc} */
    @Override
    public List<Resource> resources(String... asgNames) {
        return getASGResources(asgNames);
    }

    /** {@inheritDoc} */
    @Override
    public String getOwnerEmailForResource(Resource resource) {
        Validate.notNull(resource);
        // The owner is taken from the globally configured owner tag on the ASG.
        return resource.getTag(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY);
    }

    /**
     * Crawls ASG resources across all configured regions, refreshing the
     * last-change-time cache first.
     *
     * @param asgNames optional ASG names; when empty, all ASGs are crawled
     * @return the crawled resources
     */
    private List<Resource> getASGResources(String... asgNames) {
        refreshAsgLastChangeTime();
        List<Resource> resources = Lists.newArrayList();
        for (String region : regions) {
            resources.addAll(getASGResourcesInRegion(region, asgNames));
        }
        return resources;
    }

    /**
     * Crawls ASG resources in a single region via Edda.
     *
     * @param region the region to query
     * @param asgNames optional ASG names to restrict the query to
     * @return the crawled resources in the region
     */
    private List<Resource> getASGResourcesInRegion(String region, String... asgNames) {
        String url = eddaClient.getBaseUrl(region) + "/aws/autoScalingGroups;";
        if (asgNames != null && asgNames.length != 0) {
            url += StringUtils.join(asgNames, ',');
            LOGGER.info(String.format("Getting ASGs in region %s for %d ids", region, asgNames.length));
        } else {
            LOGGER.info(String.format("Getting all ASGs in region %s", region));
        }
        // Only request the fields this crawler actually consumes.
        url += ";_expand:(autoScalingGroupName,createdTime,maxSize,suspendedProcesses:(processName,suspensionReason),"
                + "tags:(key,value),instances:(instanceId),loadBalancerNames,launchConfigurationName)";

        JsonNode jsonNode = null;
        try {
            jsonNode = eddaClient.getJsonNodeFromUrl(url);
        } catch (Exception e) {
            LOGGER.error(String.format(
                    "Failed to get JSON node from edda for ASGs in region %s.", region), e);
        }

        if (jsonNode == null || !jsonNode.isArray()) {
            throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
        }

        Map<String, Long> lcNameToCreationTime = getLaunchConfigCreationTimes(region);
        List<Resource> resources = Lists.newArrayList();
        for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
            resources.add(parseJsonElementToResource(region, it.next(), lcNameToCreationTime));
        }
        return resources;
    }

    /**
     * Converts one Edda ASG document into a janitor {@link Resource}, populating
     * the additional fields the ASG janitor rules depend on.
     *
     * @param region the region the ASG belongs to
     * @param jsonNode the Edda document for the ASG
     * @param lcNameToCreationTime map from launch config name to its creation time
     * @return the populated resource
     */
    private Resource parseJsonElementToResource(String region, JsonNode jsonNode
            , Map<String, Long> lcNameToCreationTime) {
        Validate.notNull(jsonNode);

        String asgName = jsonNode.get("autoScalingGroupName").getTextValue();
        long createdTime = jsonNode.get("createdTime").getLongValue();

        Resource resource = new AWSResource().withId(asgName).withRegion(region)
                .withResourceType(AWSResourceType.ASG)
                .withLaunchTime(new Date(createdTime));

        JsonNode tags = jsonNode.get("tags");
        if (tags == null || !tags.isArray() || tags.size() == 0) {
            LOGGER.debug(String.format("No tags is found for %s", resource.getId()));
        } else {
            for (Iterator<JsonNode> it = tags.getElements(); it.hasNext();) {
                JsonNode tag = it.next();
                String key = tag.get("key").getTextValue();
                String value = tag.get("value").getTextValue();
                resource.setTag(key, value);
            }
        }
        // The owner tag must be copied after all tags are set on the resource.
        String owner = getOwnerEmailForResource(resource);
        if (owner != null) {
            resource.setOwnerEmail(owner);
        }

        JsonNode maxSize = jsonNode.get("maxSize");
        if (maxSize != null) {
            resource.setAdditionalField(ASG_FIELD_MAX_SIZE, String.valueOf(maxSize.getIntValue()));
        }

        // Adds instances and ELBs as additional fields.
        JsonNode instances = jsonNode.get("instances");
        resource.setDescription(String.format("%d instances", instances.size()));
        List<String> instanceIds = Lists.newArrayList();
        for (Iterator<JsonNode> it = instances.getElements(); it.hasNext();) {
            instanceIds.add(it.next().get("instanceId").getTextValue());
        }
        resource.setAdditionalField(ASG_FIELD_INSTANCES, StringUtils.join(instanceIds, ","));

        JsonNode elbs = jsonNode.get("loadBalancerNames");
        List<String> elbNames = Lists.newArrayList();
        for (Iterator<JsonNode> it = elbs.getElements(); it.hasNext();) {
            elbNames.add(it.next().getTextValue());
        }
        resource.setAdditionalField(ASG_FIELD_ELBS, StringUtils.join(elbNames, ","));

        JsonNode lc = jsonNode.get("launchConfigurationName");
        if (lc != null) {
            String lcName = lc.getTextValue();
            Long lcCreationTime = lcNameToCreationTime.get(lcName);
            if (lcName != null) {
                resource.setAdditionalField(ASG_FIELD_LC_NAME, lcName);
            }
            if (lcCreationTime != null) {
                resource.setAdditionalField(ASG_FIELD_LC_CREATION_TIME, String.valueOf(lcCreationTime));
            }
        }

        // sets the field for the time when the ASG's traffic is suspended from ELB
        JsonNode suspendedProcesses = jsonNode.get("suspendedProcesses");
        for (Iterator<JsonNode> it = suspendedProcesses.getElements(); it.hasNext();) {
            JsonNode sp = it.next();
            if ("AddToLoadBalancer".equals(sp.get("processName").getTextValue())) {
                String suspensionTime = getSuspensionTimeString(sp.get("suspensionReason").getTextValue());
                if (suspensionTime != null) {
                    LOGGER.info(String.format("Suspension time of ASG %s is %s",
                            asgName, suspensionTime));
                    resource.setAdditionalField(ASG_FIELD_SUSPENSION_TIME, suspensionTime);
                    break;
                }
            }
        }

        Long lastChangeTime = regionToAsgToLastChangeTime.get(region).get(asgName);
        if (lastChangeTime != null) {
            resource.setAdditionalField(ASG_FIELD_LAST_CHANGE_TIME, String.valueOf(lastChangeTime));
        }
        return resource;
    }

    /**
     * Gets the creation time of every launch configuration in a region.
     *
     * @param region the region to query
     * @return map from launch config name to creation time in millis
     */
    private Map<String, Long> getLaunchConfigCreationTimes(String region) {
        LOGGER.info(String.format("Getting launch configuration creation times in region %s", region));

        String url = eddaClient.getBaseUrl(region)
                + "/aws/launchConfigurations;_expand:(launchConfigurationName,createdTime)";

        JsonNode jsonNode = null;
        try {
            jsonNode = eddaClient.getJsonNodeFromUrl(url);
        } catch (Exception e) {
            LOGGER.error(String.format(
                    "Failed to get JSON node from edda for lc creation times in region %s.", region), e);
        }

        if (jsonNode == null || !jsonNode.isArray()) {
            throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
        }

        Map<String, Long> nameToCreationTime = Maps.newHashMap();
        for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
            JsonNode elem = it.next();
            nameToCreationTime.put(elem.get("launchConfigurationName").getTextValue(),
                    elem.get("createdTime").getLongValue());
        }
        return nameToCreationTime;
    }

    /**
     * Extracts the suspension timestamp from the AWS suspension reason string,
     * or null when the reason does not match the expected pattern.
     */
    private String getSuspensionTimeString(String suspensionReason) {
        if (suspensionReason == null) {
            return null;
        }
        Matcher matcher = SUSPENSION_REASON_PATTERN.matcher(suspensionReason);
        if (matcher.matches()) {
            return matcher.group(1);
        }
        return null;
    }

    /**
     * Rebuilds the per-region cache of each ASG's last change time, using the
     * Edda record's stime metadata.
     */
    private void refreshAsgLastChangeTime() {
        regionToAsgToLastChangeTime.clear();
        for (String region : regions) {
            LOGGER.info(String.format("Getting ASG last change time in region %s", region));
            // The cache was just cleared, so a fresh map is always needed per region.
            Map<String, Long> asgToLastChangeTime = Maps.newHashMap();
            regionToAsgToLastChangeTime.put(region, asgToLastChangeTime);

            String url = eddaClient.getBaseUrl(region) + "/aws/autoScalingGroups;"
                    + ";_expand;_meta:(stime,data:(autoScalingGroupName))";
            JsonNode jsonNode = null;
            try {
                jsonNode = eddaClient.getJsonNodeFromUrl(url);
            } catch (Exception e) {
                LOGGER.error(String.format(
                        "Failed to get JSON node from edda for ASG last change time in region %s.", region), e);
            }

            if (jsonNode == null || !jsonNode.isArray()) {
                throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s",
                        url, jsonNode));
            }

            for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
                JsonNode asg = it.next();
                String asgName = asg.get("data").get("autoScalingGroupName").getTextValue();
                Long lastChangeTime = asg.get("stime").asLong();
                LOGGER.debug(String.format("The last change time of ASG %s is %s", asgName,
                        new DateTime(lastChangeTime)));
                asgToLastChangeTime.put(asgName, lastChangeTime);
            }
        }
    }
}
| 13,576
| 41.561129
| 118
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/conformity/SimpleDBConformityClusterTracker.java
|
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity;
import com.amazonaws.services.simpledb.AmazonSimpleDB;
import com.amazonaws.services.simpledb.model.Attribute;
import com.amazonaws.services.simpledb.model.DeleteAttributesRequest;
import com.amazonaws.services.simpledb.model.Item;
import com.amazonaws.services.simpledb.model.PutAttributesRequest;
import com.amazonaws.services.simpledb.model.ReplaceableAttribute;
import com.amazonaws.services.simpledb.model.SelectRequest;
import com.amazonaws.services.simpledb.model.SelectResult;
import com.google.common.collect.Lists;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.conformity.Cluster;
import com.netflix.simianarmy.conformity.ConformityClusterTracker;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* The ConformityResourceTracker implementation in SimpleDB.
*/
public class SimpleDBConformityClusterTracker implements ConformityClusterTracker {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(SimpleDBConformityClusterTracker.class);

    /** The domain. */
    private final String domain;

    /** The SimpleDB client. */
    private final AmazonSimpleDB simpleDBClient;

    /** SimpleDB limits attribute values to 1024 bytes; longer values are truncated. */
    private static final int MAX_ATTR_SIZE = 1024;

    /**
     * Instantiates a new simple db cluster tracker for conformity monkey.
     *
     * @param awsClient
     *            the AWS Client
     * @param domain
     *            the domain
     */
    public SimpleDBConformityClusterTracker(AWSClient awsClient, String domain) {
        Validate.notNull(awsClient);
        Validate.notNull(domain);
        this.domain = domain;
        this.simpleDBClient = awsClient.sdbClient();
    }

    /**
     * Gets the SimpleDB client.
     * @return the SimpleDB client
     */
    protected AmazonSimpleDB getSimpleDBClient() {
        return simpleDBClient;
    }

    /** {@inheritDoc} */
    @Override
    public void addOrUpdate(Cluster cluster) {
        List<ReplaceableAttribute> attrs = new ArrayList<ReplaceableAttribute>();
        Map<String, String> fieldToValueMap = cluster.getFieldToValueMap();
        for (Map.Entry<String, String> entry : fieldToValueMap.entrySet()) {
            // Truncate to the SimpleDB attribute size limit and replace any existing value.
            attrs.add(new ReplaceableAttribute(entry.getKey(), StringUtils.left(entry.getValue(), MAX_ATTR_SIZE),
                    true));
        }
        PutAttributesRequest putRequest = new PutAttributesRequest(domain, getSimpleDBItemName(cluster), attrs);
        LOGGER.debug(String.format("Saving cluster %s to SimpleDB domain %s",
                cluster.getName(), domain));
        this.simpleDBClient.putAttributes(putRequest);
        LOGGER.debug("Successfully saved.");
    }

    /**
     * Gets the clusters for a list of regions. If the regions parameter is empty, returns the clusters
     * for all regions.
     */
    @Override
    public List<Cluster> getAllClusters(String... regions) {
        return getClusters(null, regions);
    }

    /** {@inheritDoc} */
    @Override
    public List<Cluster> getNonconformingClusters(String... regions) {
        return getClusters(false, regions);
    }

    /** {@inheritDoc} */
    @Override
    public Cluster getCluster(String clusterName, String region) {
        Validate.notEmpty(clusterName);
        Validate.notEmpty(region);
        StringBuilder query = new StringBuilder();
        query.append(String.format("select * from `%s` where cluster = '%s' and region = '%s'",
                domain, clusterName, region));

        LOGGER.info(String.format("Query is to get the cluster is '%s'", query));

        List<Item> items = querySimpleDBItems(query.toString());
        // The item name is unique per (cluster, region), so at most one row can match.
        Validate.isTrue(items.size() <= 1);
        if (items.isEmpty()) {
            LOGGER.info(String.format("Not found cluster with name %s in region %s", clusterName, region));
            return null;
        } else {
            Cluster cluster = null;
            try {
                cluster = parseCluster(items.get(0));
            } catch (Exception e) {
                // Ignore the item that cannot be parsed.
                LOGGER.error(String.format("SimpleDB item %s cannot be parsed into a cluster.", items.get(0)));
            }
            return cluster;
        }
    }

    /** {@inheritDoc} */
    @Override
    public void deleteClusters(Cluster... clusters) {
        Validate.notNull(clusters);
        LOGGER.info(String.format("Deleting %d clusters", clusters.length));
        for (Cluster cluster : clusters) {
            LOGGER.info(String.format("Deleting cluster %s", cluster.getName()));
            simpleDBClient.deleteAttributes(new DeleteAttributesRequest(domain, getSimpleDBItemName(cluster)));
            LOGGER.info(String.format("Successfully deleted cluster %s", cluster.getName()));
        }
    }

    /**
     * Queries clusters, optionally filtered by conformity state and regions.
     *
     * @param conforming the conformity state to filter on, or null for no filter
     * @param regions the regions to filter on; empty means all regions
     * @return the matching clusters
     */
    private List<Cluster> getClusters(Boolean conforming, String... regions) {
        Validate.notNull(regions);
        List<Cluster> clusters = Lists.newArrayList();
        StringBuilder query = new StringBuilder();
        // Build the where-clause incrementally so no dangling "and" is left when
        // both filters are absent (the previous version produced invalid SQL then).
        query.append(String.format("select * from `%s` where cluster is not null", domain));
        if (regions.length != 0) {
            query.append(String.format(" and region in ('%s')", StringUtils.join(regions, "','")));
        }
        if (conforming != null) {
            query.append(String.format(" and isConforming = '%s'", conforming));
        }

        LOGGER.info(String.format("Query to retrieve clusters for regions %s is '%s'",
                StringUtils.join(regions, "','"), query.toString()));

        List<Item> items = querySimpleDBItems(query.toString());
        for (Item item : items) {
            try {
                clusters.add(parseCluster(item));
            } catch (Exception e) {
                // Ignore the item that cannot be parsed.
                LOGGER.error(String.format("SimpleDB item %s cannot be parsed into a cluster.", item), e);
            }
        }
        LOGGER.info(String.format("Retrieved %d clusters from SimpleDB in domain %s and regions %s",
                clusters.size(), domain, StringUtils.join(regions, "','")));
        return clusters;
    }

    /**
     * Parses a SimpleDB item into a cluster.
     * @param item the item from SimpleDB
     * @return the cluster for the SimpleDB item
     */
    protected Cluster parseCluster(Item item) {
        Map<String, String> fieldToValue = new HashMap<String, String>();
        for (Attribute attr : item.getAttributes()) {
            String name = attr.getName();
            String value = attr.getValue();
            if (name != null && value != null) {
                fieldToValue.put(name, value);
            }
        }
        return Cluster.parseFieldToValueMap(fieldToValue);
    }

    /**
     * Gets the unique SimpleDB item name for a cluster. The subclass can override this
     * method to generate the item name differently.
     * @param cluster the cluster to derive the item name from
     * @return the SimpleDB item name for the cluster
     */
    protected String getSimpleDBItemName(Cluster cluster) {
        return String.format("%s-%s", cluster.getName(), cluster.getRegion());
    }

    /**
     * Runs a SimpleDB select, following pagination tokens until all items are read.
     *
     * @param query the select expression
     * @return all matching items
     */
    private List<Item> querySimpleDBItems(String query) {
        Validate.notNull(query);
        String nextToken = null;
        List<Item> items = new ArrayList<Item>();
        do {
            SelectRequest request = new SelectRequest(query);
            request.setNextToken(nextToken);
            // Consistent reads so a just-written cluster is visible immediately.
            request.setConsistentRead(Boolean.TRUE);
            SelectResult result = this.simpleDBClient.select(request);
            items.addAll(result.getItems());
            nextToken = result.getNextToken();
        } while (nextToken != null);
        return items;
    }
}
| 8,622
| 36.820175
| 113
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/conformity/RDSConformityClusterTracker.java
|
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity;
import com.amazonaws.AmazonClientException;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.conformity.Cluster;
import com.netflix.simianarmy.conformity.Conformity;
import com.netflix.simianarmy.conformity.ConformityClusterTracker;
import com.zaxxer.hikari.HikariDataSource;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import java.io.IOException;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Types;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* The RDSConformityClusterTracker implementation in RDS (relational database).
*/
public class RDSConformityClusterTracker implements ConformityClusterTracker {
/** The Constant LOGGER. */
private static final Logger LOGGER = LoggerFactory.getLogger(RDSConformityClusterTracker.class);
/** The table. */
private final String table;
/** the jdbcTemplate */
JdbcTemplate jdbcTemplate = null;
    /**
     * Instantiates a new RDS db resource tracker.
     *
     * @param dbDriver the JDBC driver class name
     * @param dbUser the database user
     * @param dbPass the database password
     * @param dbUrl the JDBC connection URL
     * @param dbTable the table that stores the conformity clusters
     */
    public RDSConformityClusterTracker(String dbDriver, String dbUser,
                                    String dbPass, String dbUrl, String dbTable) {
        // Small Hikari pool (2 connections) backing the Spring JdbcTemplate.
        HikariDataSource dataSource = new HikariDataSource();
        dataSource.setDriverClassName(dbDriver);
        dataSource.setJdbcUrl(dbUrl);
        dataSource.setUsername(dbUser);
        dataSource.setPassword(dbPass);
        dataSource.setMaximumPoolSize(2);
        this.jdbcTemplate = new JdbcTemplate(dataSource);
        this.table = dbTable;
    }
    /**
     * Instantiates a new RDS conformity cluster tracker. This constructor is intended
     * for unit testing.
     *
     * @param jdbcTemplate the template wrapping the (test) data source
     * @param table the table that stores the conformity clusters
     */
    public RDSConformityClusterTracker(JdbcTemplate jdbcTemplate, String table) {
        this.jdbcTemplate = jdbcTemplate;
        this.table = table;
    }
    /**
     * Gets the JDBC template used to access the database.
     *
     * @return the JDBC template
     */
    public JdbcTemplate getJdbcTemplate() {
        return jdbcTemplate;
    }
public Object value(String value) {
return value == null ? Types.NULL : value;
}
public Object value(Date value) {
return value == null ? Types.NULL : value.getTime();
}
public Object value(boolean value) {
return Boolean.toString(value);
}
public Object emailValue(String email) {
if (StringUtils.isBlank(email)) return Types.NULL;
if (email.equals("0")) return Types.NULL;
return email;
}
/** {@inheritDoc} */
@Override
public void addOrUpdate(Cluster cluster) {
Cluster orig = getCluster(cluster.getName(), cluster.getRegion());
LOGGER.debug(String.format("Saving cluster %s to RDB table %s in region %s", cluster.getName(), cluster.getRegion(), table));
Map<String, String> map = cluster.getFieldToValueMap();
String conformityJson;
try {
conformityJson = new ObjectMapper().writeValueAsString(conformitiesAsMap(cluster));
} catch (JsonProcessingException e) {
LOGGER.error("ERROR generating conformities JSON when saving cluster " + cluster.getName() + ", " + cluster.getRegion(), e);
return;
}
if (orig == null) {
StringBuilder sb = new StringBuilder();
sb.append("insert into ").append(table);
sb.append(" (");
sb.append(Cluster.CLUSTER).append(",");
sb.append(Cluster.REGION).append(",");
sb.append(Cluster.OWNER_EMAIL).append(",");
sb.append(Cluster.IS_CONFORMING).append(",");
sb.append(Cluster.IS_OPTEDOUT).append(",");
sb.append(Cluster.UPDATE_TIMESTAMP).append(",");
sb.append(Cluster.EXCLUDED_RULES).append(",");
sb.append("conformities").append(",");
sb.append(Cluster.CONFORMITY_RULES);
sb.append(") values (?,?,?,?,?,?,?,?,?)");
LOGGER.debug(String.format("Insert statement is '%s'", sb));
this.jdbcTemplate.update(sb.toString(),
value(map.get(Cluster.CLUSTER)),
value(map.get(Cluster.REGION)),
emailValue(map.get(Cluster.OWNER_EMAIL)),
value(map.get(Cluster.IS_CONFORMING)),
value(map.get(Cluster.IS_OPTEDOUT)),
value(cluster.getUpdateTime()),
value(map.get(Cluster.EXCLUDED_RULES)),
value(conformityJson),
value(map.get(Cluster.CONFORMITY_RULES)));
} else {
StringBuilder sb = new StringBuilder();
sb.append("update ").append(table).append(" set ");
sb.append(Cluster.OWNER_EMAIL).append("=?,");
sb.append(Cluster.IS_CONFORMING).append("=?,");
sb.append(Cluster.IS_OPTEDOUT).append("=?,");
sb.append(Cluster.UPDATE_TIMESTAMP).append("=?,");
sb.append(Cluster.EXCLUDED_RULES).append("=?,");
sb.append("conformities").append("=?,");
sb.append(Cluster.CONFORMITY_RULES).append("=? where ");
sb.append(Cluster.CLUSTER).append("=? and ");
sb.append(Cluster.REGION).append("=?");
LOGGER.debug(String.format("Update statement is '%s'", sb));
this.jdbcTemplate.update(sb.toString(),
emailValue(map.get(Cluster.OWNER_EMAIL)),
value(map.get(Cluster.IS_CONFORMING)),
value(map.get(Cluster.IS_OPTEDOUT)),
value(cluster.getUpdateTime()),
value(map.get(Cluster.EXCLUDED_RULES)),
value(conformityJson),
value(map.get(Cluster.CONFORMITY_RULES)),
value(cluster.getName()),
value(cluster.getRegion()));
}
LOGGER.debug("Successfully saved.");
}
private HashMap<String,String> conformitiesAsMap(Cluster cluster) {
HashMap<String,String> map = new HashMap<>();
for(Conformity conformity : cluster.getConformties()) {
map.put(conformity.getRuleId(), StringUtils.join(conformity.getFailedComponents(), ","));
}
return map;
}
    /**
     * Gets the clusters for a list of regions. If the regions parameter is empty, returns the clusters
     * for all regions.
     */
    @Override
    public List<Cluster> getAllClusters(String... regions) {
        // A null conforming flag means "do not filter on conformity state".
        return getClusters(null, regions);
    }
    /** Gets only the clusters whose conformity flag is false, optionally filtered by region. */
    @Override
    public List<Cluster> getNonconformingClusters(String... regions) {
        return getClusters(false, regions);
    }
@Override
public Cluster getCluster(String clusterName, String region) {
Validate.notEmpty(clusterName);
Validate.notEmpty(region);
StringBuilder query = new StringBuilder();
query.append(String.format("select * from %s where cluster = ? and region = ?", table));
LOGGER.info(String.format("Query is '%s'", query));
List<Cluster> clusters = jdbcTemplate.query(query.toString(), new String[] {clusterName, region}, new RowMapper<Cluster>() {
public Cluster mapRow(ResultSet rs, int rowNum) throws SQLException {
return mapResource(rs);
}
});
Validate.isTrue(clusters.size() <= 1);
if (clusters.size() == 0) {
LOGGER.info(String.format("Not found cluster with name %s in region %s", clusterName, region));
return null;
} else {
Cluster cluster = clusters.get(0);
return cluster;
}
}
private Cluster mapResource(ResultSet rs) throws SQLException {
Map<String, String> map = conformityMapFromJson(rs.getString("conformities"));
map.put(Cluster.CLUSTER, rs.getString(Cluster.CLUSTER));
map.put(Cluster.REGION, rs.getString(Cluster.REGION));
map.put(Cluster.IS_CONFORMING, rs.getString(Cluster.IS_CONFORMING));
map.put(Cluster.IS_OPTEDOUT, rs.getString(Cluster.IS_OPTEDOUT));
String email = rs.getString(Cluster.OWNER_EMAIL);
if (StringUtils.isBlank(email) || email.equals("0")) {
email = null;
}
map.put(Cluster.OWNER_EMAIL, email);
String updatedTimestamp = millisToFormattedDate(rs.getString(Cluster.UPDATE_TIMESTAMP));
if (updatedTimestamp != null) {
map.put(Cluster.UPDATE_TIMESTAMP, updatedTimestamp);
}
map.put(Cluster.EXCLUDED_RULES, rs.getString(Cluster.EXCLUDED_RULES));
map.put(Cluster.CONFORMITY_RULES, rs.getString(Cluster.CONFORMITY_RULES));
return Cluster.parseFieldToValueMap(map);
}
private String millisToFormattedDate(String millisStr) {
String datetime = null;
try {
long millis = Long.parseLong(millisStr);
datetime = AWSResource.DATE_FORMATTER.print(millis);
} catch(NumberFormatException nfe) {
LOGGER.error(String.format("Error parsing datetime %s when reading from RDS", millisStr));
}
return datetime;
}
private HashMap<String,String> conformityMapFromJson(String json) throws SQLException {
HashMap<String,String> map = new HashMap<>();
if (json != null) {
TypeReference<HashMap<String,String>> typeRef = new TypeReference<HashMap<String,String>>() {};
try {
ObjectMapper mapper = new ObjectMapper();
map = mapper.readValue(json, typeRef);
}catch(IOException ie) {
String msg = "Error parsing conformities from result set";
LOGGER.error(msg, ie);
throw new SQLException(msg);
}
}
return map;
}
@Override
public void deleteClusters(Cluster... clusters) {
Validate.notNull(clusters);
LOGGER.info(String.format("Deleting %d clusters", clusters.length));
for (Cluster cluster : clusters) {
LOGGER.info(String.format("Deleting cluster %s", cluster.getName()));
String stmt = String.format("delete from %s where %s=? and %s=?", table, Cluster.CLUSTER, Cluster.REGION);
jdbcTemplate.update(stmt, cluster.getName(), cluster.getRegion());
LOGGER.info(String.format("Successfully deleted cluster %s", cluster.getName()));
}
}
private List<Cluster> getClusters(Boolean conforming, String... regions) {
Validate.notNull(regions);
StringBuilder query = new StringBuilder();
query.append(String.format("select * from %s where cluster is not null and ", table));
boolean needsAnd = false;
if (regions.length != 0) {
query.append(String.format("region in ('%s') ", StringUtils.join(regions, "','")));
needsAnd = true;
}
if (conforming != null) {
if (needsAnd) {
query.append(" and ");
}
query.append(String.format("isConforming = '%s'", conforming));
}
LOGGER.info(String.format("Query to retrieve clusters for regions %s is '%s'",
StringUtils.join(regions, "','"), query.toString()));
List<Cluster> clusters = jdbcTemplate.query(query.toString(), new RowMapper<Cluster>() {
public Cluster mapRow(ResultSet rs, int rowNum) throws SQLException {
return mapResource(rs);
}
});
LOGGER.info(String.format("Retrieved %d clusters from RDS DB in table %s and regions %s",
clusters.size(), table, StringUtils.join(regions, "','")));
return clusters;
}
    /**
     * Creates the RDS table, if it does not already exist.
     *
     * Table creation failures are logged and swallowed so that startup can proceed
     * when the table was provisioned out-of-band or the credentials lack DDL rights.
     */
    public void init() {
        try {
            LOGGER.info("Creating RDS table: {}", table);
            // Column order must match the insert/update statements used elsewhere in
            // this class: cluster, region, ownerEmail, isConforming, isOptedOut,
            // updateTimestamp (epoch millis), excludedRules, conformities (JSON blob),
            // conformityRules. The 4096-char columns hold comma-separated rule lists
            // and the serialized conformity results.
            String sql = String.format("create table if not exists %s ("
                 + " %s varchar(255),"
                 + " %s varchar(25),"
                 + " %s varchar(255),"
                 + " %s varchar(10),"
                 + " %s varchar(10),"
                 + " %s BIGINT,"
                 + " %s varchar(4096),"
                 + " %s varchar(4096),"
                 + " %s varchar(4096) )",
                 table,
                 Cluster.CLUSTER,
                 Cluster.REGION,
                 Cluster.OWNER_EMAIL,
                 Cluster.IS_CONFORMING,
                 Cluster.IS_OPTEDOUT,
                 Cluster.UPDATE_TIMESTAMP,
                 Cluster.EXCLUDED_RULES,
                 "conformities",
                 Cluster.CONFORMITY_RULES);
            LOGGER.debug("Create SQL is: '{}'", sql);
            jdbcTemplate.execute(sql);
        } catch (AmazonClientException e) {
            // Best-effort: do not fail hard if auto-creation is not possible.
            LOGGER.warn("Error while trying to auto-create RDS table", e);
        }
    }
}
| 13,658
| 38.025714
| 133
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/conformity/rule/InstanceIsHealthyInEureka.java
|
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity.rule;
import com.google.common.collect.Lists;
import com.netflix.simianarmy.conformity.AutoScalingGroup;
import com.netflix.simianarmy.conformity.Cluster;
import com.netflix.simianarmy.conformity.Conformity;
import com.netflix.simianarmy.conformity.ConformityRule;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
/**
 * The class implements a conformity rule to check if all instances in the cluster are healthy in Discovery.
 */
public class InstanceIsHealthyInEureka implements ConformityRule {
    private static final Logger LOGGER = LoggerFactory.getLogger(InstanceIsHealthyInEureka.class);

    private static final String RULE_NAME = "InstanceIsHealthyInEureka";
    private static final String REASON = "Instances are not 'UP' in Eureka.";

    private final ConformityEurekaClient conformityEurekaClient;

    /**
     * Constructor.
     * @param conformityEurekaClient
     *      the client to access the Discovery/Eureka service for checking the status of instances.
     */
    public InstanceIsHealthyInEureka(ConformityEurekaClient conformityEurekaClient) {
        Validate.notNull(conformityEurekaClient);
        this.conformityEurekaClient = conformityEurekaClient;
    }

    /**
     * Collects every instance of the cluster's non-suspended ASGs that Eureka does not
     * consider healthy and reports them as the rule's failed components.
     */
    @Override
    public Conformity check(Cluster cluster) {
        Collection<String> unhealthyInstances = Lists.newArrayList();
        String region = cluster.getRegion();
        for (AutoScalingGroup asg : cluster.getAutoScalingGroups()) {
            if (asg.isSuspended()) {
                // Suspended ASGs are intentionally out of service; skip them.
                LOGGER.info(String.format("ASG %s is suspended, ignore.", asg.getName()));
                continue;
            }
            for (String instanceId : asg.getInstances()) {
                boolean healthy = conformityEurekaClient.isHealthy(region, instanceId);
                if (!healthy) {
                    LOGGER.info(String.format("Instance %s is not healthy in Eureka.", instanceId));
                    unhealthyInstances.add(instanceId);
                }
            }
        }
        return new Conformity(getName(), unhealthyInstances);
    }

    /** Returns the rule's name. */
    @Override
    public String getName() {
        return RULE_NAME;
    }

    /** Returns the human-readable reason reported for nonconforming clusters. */
    @Override
    public String getNonconformingReason() {
        return REASON;
    }
}
| 2,941
| 35.320988
| 108
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/conformity/rule/InstanceHasStatusUrl.java
|
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity.rule;
import com.google.common.collect.Lists;
import com.netflix.simianarmy.conformity.AutoScalingGroup;
import com.netflix.simianarmy.conformity.Cluster;
import com.netflix.simianarmy.conformity.Conformity;
import com.netflix.simianarmy.conformity.ConformityRule;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
/**
 * The class implementing a conformity rule that checks if all instances in a cluster has status url.
 */
public class InstanceHasStatusUrl implements ConformityRule {
    private static final Logger LOGGER = LoggerFactory.getLogger(InstanceHasStatusUrl.class);

    private static final String RULE_NAME = "InstanceHasStatusUrl";
    private static final String REASON = "Status url not defined";

    private final ConformityEurekaClient conformityEurekaClient;

    /**
     * Constructor.
     * @param conformityEurekaClient
     *      the client to access the Discovery/Eureka service for checking the status of instances.
     */
    public InstanceHasStatusUrl(ConformityEurekaClient conformityEurekaClient) {
        Validate.notNull(conformityEurekaClient);
        this.conformityEurekaClient = conformityEurekaClient;
    }

    /**
     * Flags every instance in the cluster's active (non-suspended) ASGs whose Eureka
     * registration lacks a status page url.
     */
    @Override
    public Conformity check(Cluster cluster) {
        Collection<String> instancesWithoutUrl = Lists.newArrayList();
        String region = cluster.getRegion();
        for (AutoScalingGroup asg : cluster.getAutoScalingGroups()) {
            // Suspended ASGs are intentionally out of service and are not checked.
            if (asg.isSuspended()) {
                continue;
            }
            for (String instanceId : asg.getInstances()) {
                if (!conformityEurekaClient.hasStatusUrl(region, instanceId)) {
                    LOGGER.info(String.format("Instance %s does not have a status page url in discovery.",
                            instanceId));
                    instancesWithoutUrl.add(instanceId);
                }
            }
        }
        return new Conformity(getName(), instancesWithoutUrl);
    }

    /** Returns the rule's name. */
    @Override
    public String getName() {
        return RULE_NAME;
    }

    /** Returns the human-readable reason reported for nonconforming clusters. */
    @Override
    public String getNonconformingReason() {
        return REASON;
    }
}
| 2,826
| 34.3375
| 106
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/conformity/rule/BasicConformityEurekaClient.java
|
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity.rule;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.DiscoveryClient;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.Set;
/**
 * The class implementing a client to access Eureka for getting instance information that is used
 * by Conformity Monkey.
 */
public class BasicConformityEurekaClient implements ConformityEurekaClient {
    private static final Logger LOGGER = LoggerFactory.getLogger(BasicConformityEurekaClient.class);

    private final DiscoveryClient discoveryClient;

    /**
     * Constructor.
     * @param discoveryClient the client to access Discovery/Eureka service.
     */
    public BasicConformityEurekaClient(DiscoveryClient discoveryClient) {
        this.discoveryClient = discoveryClient;
    }

    /**
     * Returns true when at least one Eureka registration for the instance declares
     * a non-empty set of health check urls.
     */
    @Override
    public boolean hasHealthCheckUrl(String region, String instanceId) {
        for (InstanceInfo info : discoveryClient.getInstancesById(instanceId)) {
            Set<String> healthCheckUrls = info.getHealthCheckUrls();
            if (healthCheckUrls != null && !healthCheckUrls.isEmpty()) {
                return true;
            }
        }
        return false;
    }

    /**
     * Returns true when at least one Eureka registration for the instance declares
     * a non-empty status page url.
     */
    @Override
    public boolean hasStatusUrl(String region, String instanceId) {
        for (InstanceInfo info : discoveryClient.getInstancesById(instanceId)) {
            if (!StringUtils.isEmpty(info.getStatusPageUrl())) {
                return true;
            }
        }
        return false;
    }

    /**
     * Returns true only when the instance is registered in Eureka and every one of its
     * registrations reports status UP or STARTING.
     */
    @Override
    public boolean isHealthy(String region, String instanceId) {
        List<InstanceInfo> instanceInfos = discoveryClient.getInstancesById(instanceId);
        // An unregistered instance is treated as unhealthy.
        if (instanceInfos.isEmpty()) {
            LOGGER.info(String.format("Instance %s is not registered in Eureka in region %s.", instanceId, region));
            return false;
        }
        for (InstanceInfo info : instanceInfos) {
            InstanceInfo.InstanceStatus status = info.getStatus();
            if (!(status.equals(InstanceInfo.InstanceStatus.UP)
                    || status.equals(InstanceInfo.InstanceStatus.STARTING))) {
                LOGGER.info(String.format("Instance %s is not healthy in Eureka with status %s.",
                        instanceId, status.name()));
                return false;
            }
        }
        return true;
    }
}
| 3,306
| 35.744444
| 116
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/conformity/rule/InstanceHasHealthCheckUrl.java
|
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity.rule;
import com.google.common.collect.Lists;
import com.netflix.simianarmy.conformity.AutoScalingGroup;
import com.netflix.simianarmy.conformity.Cluster;
import com.netflix.simianarmy.conformity.Conformity;
import com.netflix.simianarmy.conformity.ConformityRule;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
/**
 * The class implementing a conformity rule that checks if all instances in a cluster has health check url
 * in Discovery/Eureka.
 */
public class InstanceHasHealthCheckUrl implements ConformityRule {
    private static final Logger LOGGER = LoggerFactory.getLogger(InstanceHasHealthCheckUrl.class);

    private static final String RULE_NAME = "InstanceHasHealthCheckUrl";
    private static final String REASON = "Health check url not defined";

    private final ConformityEurekaClient conformityEurekaClient;

    /**
     * Constructor.
     * @param conformityEurekaClient
     *      the client to access the Discovery/Eureka service for checking the status of instances.
     */
    public InstanceHasHealthCheckUrl(ConformityEurekaClient conformityEurekaClient) {
        Validate.notNull(conformityEurekaClient);
        this.conformityEurekaClient = conformityEurekaClient;
    }

    /**
     * Flags every instance in the cluster's active (non-suspended) ASGs whose Eureka
     * registration lacks a health check url.
     */
    @Override
    public Conformity check(Cluster cluster) {
        Collection<String> instancesWithoutUrl = Lists.newArrayList();
        String region = cluster.getRegion();
        for (AutoScalingGroup asg : cluster.getAutoScalingGroups()) {
            // Suspended ASGs are intentionally out of service and are not checked.
            if (asg.isSuspended()) {
                continue;
            }
            for (String instanceId : asg.getInstances()) {
                if (!conformityEurekaClient.hasHealthCheckUrl(region, instanceId)) {
                    LOGGER.info(String.format("Instance %s does not have health check url in discovery.",
                            instanceId));
                    instancesWithoutUrl.add(instanceId);
                }
            }
        }
        return new Conformity(getName(), instancesWithoutUrl);
    }

    /** Returns the rule's name. */
    @Override
    public String getName() {
        return RULE_NAME;
    }

    /** Returns the human-readable reason reported for nonconforming clusters. */
    @Override
    public String getNonconformingReason() {
        return REASON;
    }
}
| 2,885
| 34.62963
| 106
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/conformity/rule/SameZonesInElbAndAsg.java
|
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity.rule;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.elasticloadbalancing.model.LoadBalancerDescription;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.conformity.AutoScalingGroup;
import com.netflix.simianarmy.conformity.Cluster;
import com.netflix.simianarmy.conformity.Conformity;
import com.netflix.simianarmy.conformity.ConformityRule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
/**
 * The class implementing a conformity rule that checks if the zones in ELB and ASG are the same.
 */
public class SameZonesInElbAndAsg implements ConformityRule {
    // Fixed: this logger previously pointed at InstanceHasStatusUrl.class (copy-paste error),
    // which misattributed every log line from this rule.
    private static final Logger LOGGER = LoggerFactory.getLogger(SameZonesInElbAndAsg.class);

    /** AWS clients are cached per region so repeated checks reuse the same client. */
    private final Map<String, AWSClient> regionToAwsClient = Maps.newHashMap();

    private AWSCredentialsProvider awsCredentialsProvider;

    private static final String RULE_NAME = "SameZonesInElbAndAsg";
    private static final String REASON = "Availability zones of ELB and ASG are different";

    /**
     * Constructs an instance with the default AWS credentials provider chain.
     * @see com.amazonaws.auth.DefaultAWSCredentialsProviderChain
     */
    public SameZonesInElbAndAsg() {
        this(new DefaultAWSCredentialsProviderChain());
    }

    /**
     * Constructs an instance with the passed AWS Credential Provider.
     * @param awsCredentialsProvider the AWS credentials provider
     */
    public SameZonesInElbAndAsg(AWSCredentialsProvider awsCredentialsProvider) {
        this.awsCredentialsProvider = awsCredentialsProvider;
    }

    /**
     * Checks every ASG of the cluster against each of its load balancers and reports
     * load balancers whose availability zones differ from the ASG's.
     */
    @Override
    public Conformity check(Cluster cluster) {
        // Fixed: removed a list of ASG names that was built here but never used.
        Collection<String> failedComponents = Lists.newArrayList();
        for (AutoScalingGroup asg : cluster.getAutoScalingGroups()) {
            List<String> asgZones = getAvailabilityZonesForAsg(cluster.getRegion(), asg.getName());
            for (String lbName : getLoadBalancerNamesForAsg(cluster.getRegion(), asg.getName())) {
                List<String> lbZones = getAvailabilityZonesForLoadBalancer(cluster.getRegion(), lbName);
                if (!haveSameZones(asgZones, lbZones)) {
                    // Fixed: the message placeholders said "ASG %s and ELB %s" but the zone
                    // lists were passed as arguments; log names and zones explicitly.
                    LOGGER.info(String.format(
                            "ASG %s and ELB %s do not have the same availability zones (%s vs %s)",
                            asg.getName(), lbName, asgZones, lbZones));
                    failedComponents.add(lbName);
                }
            }
        }
        return new Conformity(getName(), failedComponents);
    }

    /** Returns the rule's name. */
    @Override
    public String getName() {
        return RULE_NAME;
    }

    /** Returns the human-readable reason reported for nonconforming clusters. */
    @Override
    public String getNonconformingReason() {
        return REASON;
    }

    /**
     * Gets the load balancer names of an ASG. Can be overridden in subclasses.
     * @param region the region
     * @param asgName the ASG name
     * @return the list of load balancer names
     */
    protected List<String> getLoadBalancerNamesForAsg(String region, String asgName) {
        List<com.amazonaws.services.autoscaling.model.AutoScalingGroup> asgs =
                getAwsClient(region).describeAutoScalingGroups(asgName);
        if (asgs.isEmpty()) {
            LOGGER.error(String.format("Not found ASG with name %s", asgName));
            return Collections.emptyList();
        } else {
            return asgs.get(0).getLoadBalancerNames();
        }
    }

    /**
     * Gets the list of availability zones for an ASG. Can be overridden in subclasses.
     * @param region the region
     * @param asgName the ASG name.
     * @return the list of the availability zones that the ASG has, or null when the ASG is not found
     */
    protected List<String> getAvailabilityZonesForAsg(String region, String asgName) {
        List<com.amazonaws.services.autoscaling.model.AutoScalingGroup> asgs =
                getAwsClient(region).describeAutoScalingGroups(asgName);
        if (asgs.isEmpty()) {
            LOGGER.error(String.format("Not found ASG with name %s", asgName));
            return null;
        } else {
            return asgs.get(0).getAvailabilityZones();
        }
    }

    /**
     * Gets the list of availability zones for a load balancer. Can be overridden in subclasses.
     * @param region the region
     * @param lbName the load balancer name.
     * @return the list of the availability zones that the load balancer has, or null when not found
     */
    protected List<String> getAvailabilityZonesForLoadBalancer(String region, String lbName) {
        List<LoadBalancerDescription> lbs =
                getAwsClient(region).describeElasticLoadBalancers(lbName);
        if (lbs.isEmpty()) {
            LOGGER.error(String.format("Not found load balancer with name %s", lbName));
            return null;
        } else {
            return lbs.get(0).getAvailabilityZones();
        }
    }

    /** Returns the (lazily created) AWS client for the given region. */
    private AWSClient getAwsClient(String region) {
        AWSClient awsClient = regionToAwsClient.get(region);
        if (awsClient == null) {
            awsClient = new AWSClient(region, awsCredentialsProvider);
            regionToAwsClient.put(region, awsClient);
        }
        return awsClient;
    }

    /**
     * Checks whether two zone lists contain the same zones, ignoring order.
     * A null on either side means the zones could not be determined, in which case
     * the rule gives the benefit of the doubt and treats them as equal.
     */
    private boolean haveSameZones(List<String> zones1, List<String> zones2) {
        if (zones1 == null || zones2 == null) {
            return true;
        }
        // Fixed: the original compared zones1.size() with itself, so the size
        // short-circuit never fired; compare against zones2 as intended.
        if (zones1.size() != zones2.size()) {
            return false;
        }
        for (String zone : zones1) {
            if (!zones2.contains(zone)) {
                return false;
            }
        }
        for (String zone : zones2) {
            if (!zones1.contains(zone)) {
                return false;
            }
        }
        return true;
    }
}
| 6,676
| 36.301676
| 106
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/conformity/rule/CrossZoneLoadBalancing.java
|
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity.rule;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import com.netflix.simianarmy.client.MonkeyRestClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.elasticloadbalancing.model.LoadBalancerAttributes;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.conformity.AutoScalingGroup;
import com.netflix.simianarmy.conformity.Cluster;
import com.netflix.simianarmy.conformity.Conformity;
import com.netflix.simianarmy.conformity.ConformityRule;
/**
 * The class implementing a conformity rule that checks if the cross-zone load balancing is enabled
 * for all cluster ELBs.
 */
public class CrossZoneLoadBalancing implements ConformityRule {
    private static final Logger LOGGER = LoggerFactory.getLogger(CrossZoneLoadBalancing.class);

    /** AWS clients are cached per region so repeated checks reuse the same client. */
    private final Map<String, AWSClient> regionToAwsClient = Maps.newHashMap();

    private AWSCredentialsProvider awsCredentialsProvider;

    private static final String RULE_NAME = "CrossZoneLoadBalancing";
    private static final String REASON = "Cross-zone load balancing is disabled";

    /**
     * Constructs an instance with the default AWS credentials provider chain.
     * @see com.amazonaws.auth.DefaultAWSCredentialsProviderChain
     */
    public CrossZoneLoadBalancing() {
        this(new DefaultAWSCredentialsProviderChain());
    }

    /**
     * Constructs an instance with the passed AWS Credential Provider.
     * @param awsCredentialsProvider the AWS credentials provider
     */
    public CrossZoneLoadBalancing(AWSCredentialsProvider awsCredentialsProvider) {
        this.awsCredentialsProvider = awsCredentialsProvider;
    }

    /**
     * Reports every load balancer attached to the cluster's ASGs that does not have
     * cross-zone load balancing enabled. A transient read error skips the affected
     * ASG rather than failing the whole check.
     */
    @Override
    public Conformity check(Cluster cluster) {
        Collection<String> failedComponents = Lists.newArrayList();
        String region = cluster.getRegion();
        for (AutoScalingGroup asg : cluster.getAutoScalingGroups()) {
            String asgName = asg.getName();
            try {
                for (String lbName : getLoadBalancerNamesForAsg(region, asgName)) {
                    boolean enabled = isCrossZoneLoadBalancingEnabled(region, lbName);
                    if (!enabled) {
                        LOGGER.info(String.format("ELB %s in %s does not have cross-zone load balancing enabled",
                                lbName, region));
                        failedComponents.add(lbName);
                    }
                }
            } catch (MonkeyRestClient.DataReadException e) {
                // Best-effort: a flaky read should not abort the conformity run.
                LOGGER.error(String.format("Transient error reading ELB for %s in %s - skipping this check",
                        asgName, region), e);
            }
        }
        return new Conformity(getName(), failedComponents);
    }

    /**
     * Gets the cross-zone load balancing option for an ELB. Can be overridden in subclasses.
     * @param region the region
     * @param lbName the ELB name
     * @return {@code true} if cross-zone load balancing is enabled
     */
    protected boolean isCrossZoneLoadBalancingEnabled(String region, String lbName) {
        LoadBalancerAttributes attrs = getAwsClient(region).describeElasticLoadBalancerAttributes(lbName);
        return attrs.getCrossZoneLoadBalancing().isEnabled();
    }

    /** Returns the rule's name. */
    @Override
    public String getName() {
        return RULE_NAME;
    }

    /** Returns the human-readable reason reported for nonconforming clusters. */
    @Override
    public String getNonconformingReason() {
        return REASON;
    }

    /**
     * Gets the load balancer names of an ASG. Can be overridden in subclasses.
     * @param region the region
     * @param asgName the ASG name
     * @return the list of load balancer names
     */
    protected List<String> getLoadBalancerNamesForAsg(String region, String asgName) {
        List<com.amazonaws.services.autoscaling.model.AutoScalingGroup> asgs =
                getAwsClient(region).describeAutoScalingGroups(asgName);
        if (asgs.isEmpty()) {
            LOGGER.error(String.format("Not found ASG with name %s", asgName));
            return Collections.emptyList();
        }
        return asgs.get(0).getLoadBalancerNames();
    }

    /** Returns the (lazily created) AWS client for the given region. */
    private AWSClient getAwsClient(String region) {
        AWSClient cached = regionToAwsClient.get(region);
        if (cached == null) {
            cached = new AWSClient(region, awsCredentialsProvider);
            regionToAwsClient.put(region, cached);
        }
        return cached;
    }
}
| 5,297
| 36.842857
| 113
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/conformity/rule/InstanceInSecurityGroup.java
|
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity.rule;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.ec2.model.GroupIdentifier;
import com.amazonaws.services.ec2.model.Instance;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.conformity.AutoScalingGroup;
import com.netflix.simianarmy.conformity.Cluster;
import com.netflix.simianarmy.conformity.Conformity;
import com.netflix.simianarmy.conformity.ConformityRule;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.List;
import java.util.Map;
/**
 * The class implementing a conformity rule that checks whether or not all instances in a cluster are in
 * specific security groups.
 */
public class InstanceInSecurityGroup implements ConformityRule {
    // Fixed: this logger previously pointed at InstanceHasStatusUrl.class (copy-paste error),
    // which misattributed every log line from this rule.
    private static final Logger LOGGER = LoggerFactory.getLogger(InstanceInSecurityGroup.class);

    private static final String RULE_NAME = "InstanceInSecurityGroup";
    private final String reason;

    private final Collection<String> requiredSecurityGroupNames = Sets.newHashSet();
    private AWSCredentialsProvider awsCredentialsProvider;

    /**
     * Constructor.
     * @param requiredSecurityGroupNames
     *      The security group names that are required to have for every instance of a cluster.
     */
    public InstanceInSecurityGroup(String... requiredSecurityGroupNames) {
        this(new DefaultAWSCredentialsProviderChain(), requiredSecurityGroupNames);
    }

    /**
     * Constructor.
     * @param awsCredentialsProvider
     *      The AWS credentials provider
     * @param requiredSecurityGroupNames
     *      The security group names that are required to have for every instance of a cluster.
     */
    public InstanceInSecurityGroup(AWSCredentialsProvider awsCredentialsProvider,
            String... requiredSecurityGroupNames) {
        this.awsCredentialsProvider = awsCredentialsProvider;
        Validate.notNull(requiredSecurityGroupNames);
        for (String sgName : requiredSecurityGroupNames) {
            Validate.notNull(sgName);
            this.requiredSecurityGroupNames.add(sgName.trim());
        }
        this.reason = String.format("Instances are not part of security groups (%s)",
                StringUtils.join(this.requiredSecurityGroupNames, ","));
    }

    /**
     * Reports every instance of the cluster's ASGs that is missing at least one of the
     * required security groups. Instances in VPC or not running are skipped by
     * {@link #getInstanceSecurityGroups(String, String...)}.
     */
    @Override
    public Conformity check(Cluster cluster) {
        List<String> instanceIds = Lists.newArrayList();
        for (AutoScalingGroup asg : cluster.getAutoScalingGroups()) {
            instanceIds.addAll(asg.getInstances());
        }
        Collection<String> failedComponents = Lists.newArrayList();
        if (!instanceIds.isEmpty()) {
            Map<String, List<String>> instanceIdToSecurityGroup = getInstanceSecurityGroups(
                    cluster.getRegion(), instanceIds.toArray(new String[instanceIds.size()]));
            for (Map.Entry<String, List<String>> entry : instanceIdToSecurityGroup.entrySet()) {
                String instanceId = entry.getKey();
                if (!checkSecurityGroups(entry.getValue())) {
                    LOGGER.info(String.format("Instance %s does not have all required security groups", instanceId));
                    failedComponents.add(instanceId);
                }
            }
        }
        return new Conformity(getName(), failedComponents);
    }

    /** Returns the rule's name. */
    @Override
    public String getName() {
        return RULE_NAME;
    }

    /** Returns the human-readable reason reported for nonconforming clusters. */
    @Override
    public String getNonconformingReason() {
        return reason;
    }

    /**
     * Checks whether the collection of security group names are valid. The default implementation here is to check
     * whether the security groups contain the required security groups. The method can be overridden for different
     * rules.
     * @param sgNames
     *      The collection of security group names
     * @return
     *      true if the security group names are valid, false otherwise.
     */
    protected boolean checkSecurityGroups(Collection<String> sgNames) {
        for (String requiredSg : requiredSecurityGroupNames) {
            if (!sgNames.contains(requiredSg)) {
                LOGGER.info(String.format("Required security group %s is not found.", requiredSg));
                return false;
            }
        }
        return true;
    }

    /**
     * Gets the security groups for a list of instance ids of the same region. The default implementation
     * is using an AWS client. The method can be overridden in subclasses to get the security groups differently.
     * Instances in VPC and instances that are not running are excluded from the result.
     * @param region
     *      the region of the instances
     * @param instanceIds
     *      the instance ids, all instances should be in the same region.
     * @return
     *      the map from instance id to the list of security group names the instance has
     */
    protected Map<String, List<String>> getInstanceSecurityGroups(String region, String... instanceIds) {
        Map<String, List<String>> result = Maps.newHashMap();
        if (instanceIds == null || instanceIds.length == 0) {
            return result;
        }
        AWSClient awsClient = new AWSClient(region, awsCredentialsProvider);
        for (Instance instance : awsClient.describeInstances(instanceIds)) {
            // Ignore instances that are in VPC
            if (StringUtils.isNotEmpty(instance.getVpcId())) {
                LOGGER.info(String.format("Instance %s is in VPC and is ignored.", instance.getInstanceId()));
                continue;
            }
            if (!"running".equals(instance.getState().getName())) {
                LOGGER.info(String.format("Instance %s is not running, state is %s.",
                        instance.getInstanceId(), instance.getState().getName()));
                continue;
            }
            List<String> sgs = Lists.newArrayList();
            for (GroupIdentifier groupId : instance.getSecurityGroups()) {
                sgs.add(groupId.getGroupName());
            }
            result.put(instance.getInstanceId(), sgs);
        }
        return result;
    }
}
| 6,985
| 39.616279
| 119
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/conformity/rule/ConformityEurekaClient.java
|
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity.rule;
/**
 * The interface for a client to access the Eureka service to get the status of instances
 * for Conformity Monkey. Implementations answer per-instance, per-region queries.
 */
public interface ConformityEurekaClient {
    /**
     * Checks whether an instance has a health check url registered in Eureka.
     * @param region the region of the instance
     * @param instanceId the instance id
     * @return true if the instance has health check url in Eureka, false otherwise.
     */
    boolean hasHealthCheckUrl(String region, String instanceId);
    /**
     * Checks whether an instance has a status url registered in Eureka.
     * @param region the region of the instance
     * @param instanceId the instance id
     * @return true if the instance has status url in Eureka, false otherwise.
     */
    boolean hasStatusUrl(String region, String instanceId);
    /**
     * Checks whether an instance is reported healthy by Eureka.
     * @param region the region of the instance
     * @param instanceId the instance id
     * @return true if the instance is healthy in Eureka, false otherwise.
     */
    boolean isHealthy(String region, String instanceId);
}
| 1,774
| 35.979167
| 108
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/conformity/rule/InstanceInVPC.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity.rule;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.ec2.model.Instance;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.conformity.AutoScalingGroup;
import com.netflix.simianarmy.conformity.Cluster;
import com.netflix.simianarmy.conformity.Conformity;
import com.netflix.simianarmy.conformity.ConformityRule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
 * Conformity rule that checks every instance of a cluster runs inside a
 * virtual private cloud, i.e. carries a VPC id.
 */
public class InstanceInVPC implements ConformityRule {
    private static final Logger LOGGER = LoggerFactory.getLogger(InstanceInVPC.class);
    private final Map<String, AWSClient> regionToAwsClient = Maps.newHashMap();
    private AWSCredentialsProvider awsCredentialsProvider;
    private static final String RULE_NAME = "InstanceInVPC";
    private static final String REASON = "VPC_ID not defined";
    /**
     * Constructs an instance with the default AWS credentials provider chain.
     * @see com.amazonaws.auth.DefaultAWSCredentialsProviderChain
     */
    public InstanceInVPC() {
        this(new DefaultAWSCredentialsProviderChain());
    }
    /**
     * Constructs an instance with the passed AWS credentials provider.
     * @param awsCredentialsProvider
     *          The AWS credentials provider
     */
    public InstanceInVPC(AWSCredentialsProvider awsCredentialsProvider) {
        this.awsCredentialsProvider = awsCredentialsProvider;
    }
    /** {@inheritDoc} */
    @Override
    public Conformity check(Cluster cluster) {
        String region = cluster.getRegion();
        Collection<String> failedComponents = Lists.newArrayList();
        // Solo instances are checked first, then the instances of every
        // auto scaling group that is not suspended.
        failedComponents.addAll(checkInstancesInVPC(region, cluster.getSoloInstances()));
        for (AutoScalingGroup asg : cluster.getAutoScalingGroups()) {
            if (asg.isSuspended()) {
                continue;
            }
            failedComponents.addAll(checkInstancesInVPC(region, asg.getInstances()));
        }
        return new Conformity(getName(), failedComponents);
    }
    /** {@inheritDoc} */
    @Override
    public String getName() {
        return RULE_NAME;
    }
    /** {@inheritDoc} */
    @Override
    public String getNonconformingReason() {
        return REASON;
    }
    /** Returns the AWS client cached for the region, creating it on first use. */
    private AWSClient getAwsClient(String region) {
        AWSClient cachedClient = regionToAwsClient.get(region);
        if (cachedClient == null) {
            cachedClient = new AWSClient(region, awsCredentialsProvider);
            regionToAwsClient.put(region, cachedClient);
        }
        return cachedClient;
    }
    /** Collects the ids of the given instances whose AWS description has no VPC id. */
    private Set<String> checkInstancesInVPC(String region, Collection<String> instances) {
        Set<String> nonconforming = Sets.newHashSet();
        for (String instanceId : instances) {
            for (Instance awsInstance : getAWSInstances(region, instanceId)) {
                if (awsInstance.getVpcId() != null) {
                    continue;
                }
                LOGGER.info(String.format("Instance %s is not in a virtual private cloud", instanceId));
                nonconforming.add(instanceId);
            }
        }
        return nonconforming;
    }
    /**
     * Gets the list of AWS instances. Can be overridden
     * @param region the region
     * @param instanceId the instance id.
     * @return the list of the AWS instances with the given id.
     */
    protected List<Instance> getAWSInstances(String region, String instanceId) {
        return getAwsClient(region).describeInstances(instanceId);
    }
}
| 4,666
| 34.9
| 108
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/conformity/rule/InstanceTooOld.java
|
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity.rule;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.ec2.model.Instance;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.conformity.AutoScalingGroup;
import com.netflix.simianarmy.conformity.Cluster;
import com.netflix.simianarmy.conformity.Conformity;
import com.netflix.simianarmy.conformity.ConformityRule;
import org.apache.commons.lang.Validate;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.List;
import java.util.Map;
/**
 * The class implementing a conformity rule that checks if there are instances that are older than certain days.
 * Instances are not considered to be permanent in the cloud, so sometimes having too old instances could indicate
 * potential issues.
 */
public class InstanceTooOld implements ConformityRule {
    // Fixed: the logger was previously created with InstanceHasStatusUrl.class (copy/paste
    // error), which attributed this rule's log lines to the wrong class.
    private static final Logger LOGGER = LoggerFactory.getLogger(InstanceTooOld.class);
    private static final String RULE_NAME = "InstanceTooOld";
    /** Human-readable nonconformity reason, built once from the threshold. */
    private final String reason;
    /** Age in days beyond which an instance is considered too old. */
    private final int instanceAgeThreshold;
    private AWSCredentialsProvider awsCredentialsProvider;
    /**
     * Constructor.
     * @param instanceAgeThreshold
     *          The age in days that makes an instance be considered too old.
     */
    public InstanceTooOld(int instanceAgeThreshold) {
        this(new DefaultAWSCredentialsProviderChain(), instanceAgeThreshold);
    }
    /**
     * Constructor.
     * @param awsCredentialsProvider
     *          The AWS credentials provider
     * @param instanceAgeThreshold
     *          The age in days that makes an instance be considered too old; must be positive.
     */
    public InstanceTooOld(AWSCredentialsProvider awsCredentialsProvider, int instanceAgeThreshold) {
        this.awsCredentialsProvider = awsCredentialsProvider;
        Validate.isTrue(instanceAgeThreshold > 0);
        this.instanceAgeThreshold = instanceAgeThreshold;
        this.reason = String.format("Instances are older than %d days", instanceAgeThreshold);
    }
    /** {@inheritDoc} Checks the launch time of every ASG instance in the cluster. */
    @Override
    public Conformity check(Cluster cluster) {
        List<String> instanceIds = Lists.newArrayList();
        for (AutoScalingGroup asg : cluster.getAutoScalingGroups()) {
            instanceIds.addAll(asg.getInstances());
        }
        Map<String, Long> instanceIdToLaunchTime = getInstanceLaunchTimes(
                cluster.getRegion(), instanceIds.toArray(new String[instanceIds.size()]));
        Collection<String> failedComponents = Lists.newArrayList();
        // Instances launched before this timestamp are older than the threshold.
        long creationTimeThreshold = DateTime.now().minusDays(instanceAgeThreshold).getMillis();
        for (Map.Entry<String, Long> entry : instanceIdToLaunchTime.entrySet()) {
            String instanceId = entry.getKey();
            if (creationTimeThreshold > entry.getValue()) {
                LOGGER.info(String.format("Instance %s was created more than %d days ago",
                        instanceId, instanceAgeThreshold));
                failedComponents.add(instanceId);
            }
        }
        return new Conformity(getName(), failedComponents);
    }
    /** {@inheritDoc} */
    @Override
    public String getName() {
        return RULE_NAME;
    }
    /** {@inheritDoc} */
    @Override
    public String getNonconformingReason() {
        return reason;
    }
    /**
     * Gets the launch time (in milliseconds) for a list of instance ids of the same region. The default
     * implementation is using an AWS client. The method can be overridden in subclasses to get the instance
     * launch times differently.
     * @param region
     *          the region of the instances
     * @param instanceIds
     *          the instance ids, all instances should be in the same region.
     * @return
     *          the map from instance id to the launch time in milliseconds
     */
    protected Map<String, Long> getInstanceLaunchTimes(String region, String... instanceIds) {
        Map<String, Long> result = Maps.newHashMap();
        if (instanceIds == null || instanceIds.length == 0) {
            return result;
        }
        AWSClient awsClient = new AWSClient(region, awsCredentialsProvider);
        for (Instance instance : awsClient.describeInstances(instanceIds)) {
            if (instance.getLaunchTime() != null) {
                result.put(instance.getInstanceId(), instance.getLaunchTime().getTime());
            } else {
                LOGGER.warn(String.format("No launch time found for instance %s", instance.getInstanceId()));
            }
        }
        return result;
    }
}
| 5,370
| 38.785185
| 114
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/aws/conformity/crawler/AWSClusterCrawler.java
|
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity.crawler;
import com.amazonaws.services.autoscaling.model.AutoScalingGroup;
import com.amazonaws.services.autoscaling.model.TagDescription;
import com.amazonaws.services.autoscaling.model.Instance;
import com.amazonaws.services.autoscaling.model.SuspendedProcess;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import com.netflix.simianarmy.MonkeyConfiguration;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.conformity.Cluster;
import com.netflix.simianarmy.conformity.ClusterCrawler;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
 * The class implementing a crawler that gets the auto scaling groups from AWS.
 * Every ASG becomes its own {@link Cluster}; instances that belong to no ASG are
 * grouped into one synthetic "SoloInstances" cluster per region.
 */
public class AWSClusterCrawler implements ClusterCrawler {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(AWSClusterCrawler.class);
    /** Property namespace for per-cluster configuration overrides. */
    private static final String NS = "simianarmy.conformity.cluster";
    /** The map from region to the aws client in the region. */
    private final Map<String, AWSClient> regionToAwsClient = Maps.newHashMap();
    /** Monkey configuration used for owner emails, rule exclusions and opt-outs. */
    private final MonkeyConfiguration cfg;
    /**
     * Instantiates a new cluster crawler.
     *
     * @param regionToAwsClient
     *            the map from region to the corresponding aws client for the region
     * @param cfg
     *            the monkey configuration
     */
    public AWSClusterCrawler(Map<String, AWSClient> regionToAwsClient, MonkeyConfiguration cfg) {
        Validate.notNull(regionToAwsClient);
        Validate.notNull(cfg);
        for (Map.Entry<String, AWSClient> entry : regionToAwsClient.entrySet()) {
            this.regionToAwsClient.put(entry.getKey(), entry.getValue());
        }
        this.cfg = cfg;
    }
    /**
     * In this implementation, every auto scaling group is considered a cluster.
     * @param clusterNames
     *            the cluster names
     * @return the list of clusters matching the names, when names are empty, return all clusters
     */
    @Override
    public List<Cluster> clusters(String... clusterNames) {
        List<Cluster> list = Lists.newArrayList();
        for (Map.Entry<String, AWSClient> entry : regionToAwsClient.entrySet()) {
            String region = entry.getKey();
            AWSClient awsClient = entry.getValue();
            // Ids of all instances seen in any ASG; used below to find solo instances.
            Set<String> asgInstances = Sets.newHashSet();
            LOGGER.info(String.format("Crawling clusters in region %s", region));
            for (AutoScalingGroup asg : awsClient.describeAutoScalingGroups(clusterNames)) {
                List<String> instances = Lists.newArrayList();
                for (Instance instance : asg.getInstances()) {
                    instances.add(instance.getInstanceId());
                    asgInstances.add(instance.getInstanceId());
                }
                com.netflix.simianarmy.conformity.AutoScalingGroup conformityAsg =
                        new com.netflix.simianarmy.conformity.AutoScalingGroup(
                                asg.getAutoScalingGroupName(),
                                instances.toArray(new String[instances.size()]));
                // An ASG with the AddToLoadBalancer process suspended is flagged so that
                // rules can skip it.
                for (SuspendedProcess sp : asg.getSuspendedProcesses()) {
                    if ("AddToLoadBalancer".equals(sp.getProcessName())) {
                        LOGGER.info(String.format("ASG %s is suspended: %s", asg.getAutoScalingGroupName(),
                                asg.getSuspendedProcesses()));
                        conformityAsg.setSuspended(true);
                    }
                }
                Cluster cluster = new Cluster(asg.getAutoScalingGroupName(), region, conformityAsg);
                // Seed the owner email from the ASG's owner tag; updateCluster() may
                // override it with a configured value (configuration wins over the tag).
                List<TagDescription> tagDescriptions = asg.getTags();
                for (TagDescription tagDescription : tagDescriptions) {
                    if ( BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY.equalsIgnoreCase(tagDescription.getKey()) ) {
                        String value = tagDescription.getValue();
                        if (value != null) {
                            cluster.setOwnerEmail(value);
                        }
                    }
                }
                updateCluster(cluster);
                list.add(cluster);
            }
            //Cluster containing all solo instances (instances not in any ASG)
            Set<String> instances = Sets.newHashSet();
            for (com.amazonaws.services.ec2.model.Instance awsInstance : awsClient.describeInstances()) {
                if (!asgInstances.contains(awsInstance.getInstanceId())) {
                    LOGGER.info(String.format("Adding instance %s to soloInstances cluster.",
                            awsInstance.getInstanceId()));
                    instances.add(awsInstance.getInstanceId());
                }
            }
            //Only create cluster if we have solo instances.
            if (!instances.isEmpty()) {
                Cluster cluster = new Cluster("SoloInstances", region, instances);
                updateCluster(cluster);
                list.add(cluster);
            }
        }
        return list;
    }
    // Applies configuration-driven state to a freshly crawled cluster: rule
    // exclusions, owner email resolution, and the opt-out flag. Note the owner
    // email set here replaces any tag-derived value already on the cluster
    // (getOwnerEmailForCluster falls back to that value when no config exists).
    private void updateCluster(Cluster cluster) {
        updateExcludedConformityRules(cluster);
        cluster.setOwnerEmail(getOwnerEmailForCluster(cluster));
        String prop = String.format("simianarmy.conformity.cluster.%s.optedOut", cluster.getName());
        if (cfg.getBoolOrElse(prop, false)) {
            LOGGER.info(String.format("Cluster %s is opted out of Conformity Monkey.", cluster.getName()));
            cluster.setOptOutOfConformity(true);
        } else {
            cluster.setOptOutOfConformity(false);
        }
    }
    /**
     * Gets the owner email from the monkey configuration, falling back to the email
     * already set on the cluster (e.g. from the owner tag).
     * @param cluster
     *          the cluster
     * @return the owner email if it is defined in the configuration or on the cluster, null otherwise.
     */
    @Override
    public String getOwnerEmailForCluster(Cluster cluster) {
        String prop = String.format("%s.%s.ownerEmail", NS, cluster.getName());
        String ownerEmail = cfg.getStr(prop);
        if (ownerEmail == null) {
            ownerEmail = cluster.getOwnerEmail();
            if (ownerEmail == null) {
                LOGGER.info(String.format("No owner email is found for cluster %s in configuration "
                        + "%s or tag %s.", cluster.getName(), prop, BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY));
            } else {
                LOGGER.info(String.format("Found owner email %s for cluster %s in tag %s.",
                        ownerEmail, cluster.getName(), BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY));
                return ownerEmail;
            }
        } else {
            LOGGER.info(String.format("Found owner email %s for cluster %s in configuration %s.",
                    ownerEmail, cluster.getName(), prop));
        }
        return ownerEmail;
    }
    /** {@inheritDoc} Reads the comma-separated excludedRules property for the cluster. */
    @Override
    public void updateExcludedConformityRules(Cluster cluster) {
        String prop = String.format("%s.%s.excludedRules", NS, cluster.getName());
        String excludedRules = cfg.getStr(prop);
        if (StringUtils.isNotBlank(excludedRules)) {
            LOGGER.info(String.format("Excluded rules for cluster %s are : %s", cluster.getName(), excludedRules));
            cluster.excludeRules(StringUtils.split(excludedRules, ","));
        }
    }
}
| 8,218
| 43.188172
| 115
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/conformity/ConformityMonkey.java
|
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.conformity;
import com.netflix.simianarmy.Monkey;
import com.netflix.simianarmy.MonkeyConfiguration;
import com.netflix.simianarmy.MonkeyType;
import java.util.Collection;
/**
 * The abstract class for Conformity Monkey. Concrete monkeys supply the crawling,
 * rule evaluation and notification behavior through the {@link Context}.
 */
public abstract class ConformityMonkey extends Monkey {
    /**
     * The Interface Context. Provides all collaborators a Conformity Monkey needs.
     */
    public interface Context extends Monkey.Context {
        /**
         * Configuration.
         *
         * @return the monkey configuration
         */
        MonkeyConfiguration configuration();
        /**
         * Crawler that gets information of all clusters for conformity check.
         * @return all clusters for conformity check
         */
        ClusterCrawler clusterCrawler();
        /**
         * Conformity rule engine.
         * @return the Conformity rule engine
         */
        ConformityRuleEngine ruleEngine();
        /**
         * Email notifier used to send notifications by the Conformity monkey.
         * @return the email notifier
         */
        ConformityEmailNotifier emailNotifier();
        /**
         * The regions the monkey is running in.
         * @return the regions the monkey is running in.
         */
        Collection<String> regions();
        /**
         * The tracker of the clusters for conformity monkey to check.
         * @return the tracker of the clusters for conformity monkey to check.
         */
        ConformityClusterTracker clusterTracker();
        /**
         * Gets the flag to indicate whether the monkey is leashed.
         * @return true if the monkey is leashed and does not make real change or send notifications to
         * cluster owners, false otherwise.
         */
        boolean isLeashed();
    }
    /** The context. */
    private final Context ctx;
    /**
     * Instantiates a new Conformity monkey.
     *
     * @param ctx
     *            the context.
     */
    public ConformityMonkey(Context ctx) {
        super(ctx);
        this.ctx = ctx;
    }
    /**
     * The monkey Type.
     */
    public enum Type implements MonkeyType {
        /** Conformity monkey. */
        CONFORMITY
    }
    /** {@inheritDoc} */
    @Override
    public final Type type() {
        return Type.CONFORMITY;
    }
    /** {@inheritDoc} */
    @Override
    public Context context() {
        return ctx;
    }
    /** {@inheritDoc} */
    @Override
    public abstract void doMonkeyBusiness();
}
| 3,123
| 24.818182
| 103
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/conformity/ConformityEmailNotifier.java
|
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.conformity;
import com.amazonaws.services.simpleemail.AmazonSimpleEmailServiceClient;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.simianarmy.aws.AWSEmailNotifier;
import org.apache.commons.lang.Validate;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.List;
import java.util.Map;
/**
 * The email notifier implemented for Conformity Monkey. Sends nonconformity
 * notifications to cluster owners during a configured window of the day.
 */
public class ConformityEmailNotifier extends AWSEmailNotifier {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(ConformityEmailNotifier.class);
    /** Placeholder owner key used when a cluster has no valid email and no default is set. */
    private static final String UNKNOWN_EMAIL = "UNKNOWN";
    private final Collection<String> regions = Lists.newArrayList();
    private final String defaultEmail;
    private final List<String> ccEmails = Lists.newArrayList();
    private final ConformityClusterTracker clusterTracker;
    private final ConformityEmailBuilder emailBuilder;
    private final String sourceEmail;
    /** Clusters whose owner email was invalid, keyed by the bad address. */
    private final Map<String, Collection<Cluster>> invalidEmailToClusters = Maps.newHashMap();
    private final Collection<ConformityRule> rules = Lists.newArrayList();
    /** Notifications are only sent when the current hour is within [openHour, closeHour]. */
    private final int openHour;
    private final int closeHour;
    /**
     * The Interface Context.
     */
    public interface Context {
        /**
         * Gets the Amazon Simple Email Service client.
         * @return the Amazon Simple Email Service client
         */
        AmazonSimpleEmailServiceClient sesClient();
        /**
         * Gets the open hour the email notifications are sent.
         * @return
         *      the open hour the email notifications are sent
         */
        int openHour();
        /**
         * Gets the close hour the email notifications are sent.
         * @return
         *      the close hour the email notifications are sent
         */
        int closeHour();
        /**
         * Gets the source email the notifier uses to send email.
         * @return the source email
         */
        String sourceEmail();
        /**
         * Gets the default email the notifier sends to when there is no owner specified for a cluster.
         * @return the default email
         */
        String defaultEmail();
        /**
         * Gets the regions the notifier is running in.
         * @return the regions the notifier is running in.
         */
        Collection<String> regions();
        /** Gets the Conformity Monkey's cluster tracker.
         * @return the Conformity Monkey's cluster tracker
         */
        ConformityClusterTracker clusterTracker();
        /** Gets the Conformity email builder.
         * @return the Conformity email builder
         */
        ConformityEmailBuilder emailBuilder();
        /** Gets the cc email addresses.
         * @return the cc email addresses
         */
        String[] ccEmails();
        /**
         * Gets all the conformity rules.
         * @return all conformity rules.
         */
        Collection<ConformityRule> rules();
    }
    /**
     * Constructor.
     * @param ctx the context.
     */
    public ConformityEmailNotifier(Context ctx) {
        super(ctx.sesClient());
        this.openHour = ctx.openHour();
        this.closeHour = ctx.closeHour();
        for (String region : ctx.regions()) {
            this.regions.add(region);
        }
        this.defaultEmail = ctx.defaultEmail();
        this.clusterTracker = ctx.clusterTracker();
        this.emailBuilder = ctx.emailBuilder();
        String[] ctxCCs = ctx.ccEmails();
        if (ctxCCs != null) {
            for (String ccEmail : ctxCCs) {
                this.ccEmails.add(ccEmail);
            }
        }
        this.sourceEmail = ctx.sourceEmail();
        Validate.notNull(ctx.rules());
        for (ConformityRule rule : ctx.rules()) {
            rules.add(rule);
        }
    }
    /**
     * Gets all the clusters that are not conforming and sends email notifications to the owners.
     * Outside the configured [openHour, closeHour] window this method is a no-op.
     */
    public void sendNotifications() {
        int currentHour = DateTime.now().getHourOfDay();
        if (currentHour < openHour || currentHour > closeHour) {
            // Fixed: the message previously named openHour twice; the second property is closeHour.
            LOGGER.info("It is not the time for Conformity Monkey to send notifications. You can change "
                    + "simianarmy.conformity.notification.openHour and simianarmy.conformity.notification.closeHour"
                    + " to make it work at this hour.");
            return;
        }
        validateEmails();
        Map<String, Collection<Cluster>> emailToClusters = Maps.newHashMap();
        for (Cluster cluster : clusterTracker.getNonconformingClusters(regions.toArray(new String[regions.size()]))) {
            if (cluster.isOptOutOfConformity()) {
                LOGGER.info(String.format("Cluster %s is opted out of Conformity Monkey so no notification is sent.",
                        cluster.getName()));
                continue;
            }
            if (!cluster.isConforming()) {
                String email = cluster.getOwnerEmail();
                if (!isValidEmail(email)) {
                    if (defaultEmail != null) {
                        LOGGER.info(String.format("Email %s is not valid, send to the default email address %s",
                                email, defaultEmail));
                        putEmailAndCluster(emailToClusters, defaultEmail, cluster);
                    } else {
                        if (email == null) {
                            email = UNKNOWN_EMAIL;
                        }
                        LOGGER.info(String.format("Email %s is not valid and default email is not set for cluster %s",
                                email, cluster.getName()));
                        putEmailAndCluster(invalidEmailToClusters, email, cluster);
                    }
                } else {
                    putEmailAndCluster(emailToClusters, email, cluster);
                }
            } else {
                LOGGER.debug(String.format("Cluster %s is conforming so no notification needs to be sent.",
                        cluster.getName()));
            }
        }
        emailBuilder.setEmailToClusters(emailToClusters, rules);
        for (Map.Entry<String, Collection<Cluster>> entry : emailToClusters.entrySet()) {
            String email = entry.getKey();
            String emailBody = emailBuilder.buildEmailBody(email);
            String subject = buildEmailSubject(email);
            sendEmail(email, subject, emailBody);
            for (Cluster cluster : entry.getValue()) {
                LOGGER.debug(String.format("Notification is sent for cluster %s to %s", cluster.getName(), email));
            }
            LOGGER.info(String.format("Email notification has been sent to %s for %d clusters.",
                    email, entry.getValue().size()));
        }
    }
    /** {@inheritDoc} */
    @Override
    public String buildEmailSubject(String to) {
        return String.format("Conformity Monkey Notification for %s", to);
    }
    /** {@inheritDoc} */
    @Override
    public String[] getCcAddresses(String to) {
        return ccEmails.toArray(new String[ccEmails.size()]);
    }
    /** {@inheritDoc} */
    @Override
    public String getSourceAddress(String to) {
        return sourceEmail;
    }
    /** Fails fast when the configured default or cc addresses are malformed. */
    private void validateEmails() {
        if (defaultEmail != null) {
            Validate.isTrue(isValidEmail(defaultEmail), String.format("Default email %s is invalid", defaultEmail));
        }
        if (ccEmails != null) {
            for (String ccEmail : ccEmails) {
                Validate.isTrue(isValidEmail(ccEmail), String.format("CC email %s is invalid", ccEmail));
            }
        }
    }
    /** Appends the cluster to the map entry for the email, creating the entry if needed. */
    private void putEmailAndCluster(Map<String, Collection<Cluster>> map, String email, Cluster cluster) {
        Collection<Cluster> clusters = map.get(email);
        if (clusters == null) {
            clusters = Lists.newArrayList();
            map.put(email, clusters);
        }
        clusters.add(cluster);
    }
}
| 8,711
| 35.759494
| 118
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/conformity/ConformityRule.java
|
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.conformity;
/**
 * Interface for a conformity check rule. A rule is identified by {@link #getName()}
 * and evaluated per cluster via {@link #check(Cluster)}.
 */
public interface ConformityRule {
    /**
     * Performs the conformity check against the rule.
     * @param cluster
     *          the cluster to check for conformity
     * @return
     *          the conformity check result
     */
    Conformity check(Cluster cluster);
    /**
     * Gets the name/id of the rule.
     * @return
     *          the name of the rule
     */
    String getName();
    /**
     * Gets the human-readable reason to explain why the cluster is not conforming.
     * @return the human-readable reason to explain why the cluster is not conforming
     */
    String getNonconformingReason();
}
| 1,363
| 27.416667
| 85
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/conformity/Cluster.java
|
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.conformity;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* The class implementing clusters. Cluster is the basic unit of conformity check. It can be a single ASG or
* a group of ASGs that belong to the same application, for example, a cluster in the Asgard deployment system.
*/
public class Cluster {
    /** Map key for the owner email field. */
    public static final String OWNER_EMAIL = "ownerEmail";
    /** Map key for the cluster name field. */
    public static final String CLUSTER = "cluster";
    /** Map key for the region field. */
    public static final String REGION = "region";
    /** Map key for the conforming flag field. */
    public static final String IS_CONFORMING = "isConforming";
    /** Map key for the opt-out flag field. */
    public static final String IS_OPTEDOUT = "isOptedOut";
    /** Map key for the last update timestamp field. */
    public static final String UPDATE_TIMESTAMP = "updateTimestamp";
    /** Map key for the comma-separated ids of excluded conformity rules. */
    public static final String EXCLUDED_RULES = "excludedRules";
    /** Map key for the comma-separated ids of checked conformity rules. */
    public static final String CONFORMITY_RULES = "conformityRules";
    /** Formatter used to serialize and parse the update timestamp. */
    public static final DateTimeFormatter DATE_FORMATTER = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss");

    private final String name;
    private final Collection<AutoScalingGroup> autoScalingGroups = Lists.newArrayList();
    private final String region;
    private String ownerEmail;
    private Date updateTime;
    // Keyed by conformity rule id.
    private final Map<String, Conformity> conformities = Maps.newHashMap();
    private final Collection<String> excludedConformityRules = Sets.newHashSet();
    private boolean isConforming;
    private boolean isOptOutOfConformity;
    private final Set<String> soloInstances = Sets.newHashSet();

    /**
     * Constructor.
     * @param name
     *            the name of the cluster
     * @param region
     *            the region of the cluster
     * @param autoScalingGroups
     *            the auto scaling groups in the cluster
     */
    public Cluster(String name, String region, AutoScalingGroup... autoScalingGroups) {
        Validate.notNull(name);
        Validate.notNull(region);
        Validate.notNull(autoScalingGroups);
        this.name = name;
        this.region = region;
        Collections.addAll(this.autoScalingGroups, autoScalingGroups);
    }

    /**
     * Constructor.
     * @param name
     *            the name of the cluster
     * @param region
     *            the region of the cluster
     * @param soloInstances
     *            the set of instances not belonging to any auto scaling group
     */
    public Cluster(String name, String region, Set<String> soloInstances) {
        Validate.notNull(name);
        Validate.notNull(region);
        Validate.notNull(soloInstances);
        this.name = name;
        this.region = region;
        this.soloInstances.addAll(soloInstances);
    }

    /**
     * Gets the name of the cluster.
     * @return the name of the cluster
     */
    public String getName() {
        return name;
    }

    /**
     * Gets the region of the cluster.
     * @return the region of the cluster
     */
    public String getRegion() {
        return region;
    }

    /**
     * Gets the auto scaling groups of the cluster.
     * @return an unmodifiable view of the auto scaling groups in the cluster
     */
    public Collection<AutoScalingGroup> getAutoScalingGroups() {
        return Collections.unmodifiableCollection(autoScalingGroups);
    }

    /**
     * Gets the owner email of the cluster.
     * @return the owner email of the cluster, or null if not set
     */
    public String getOwnerEmail() {
        return ownerEmail;
    }

    /**
     * Sets the owner email of the cluster.
     * @param ownerEmail
     *            the owner email of the cluster
     */
    public void setOwnerEmail(String ownerEmail) {
        this.ownerEmail = ownerEmail;
    }

    /**
     * Gets the update time of the cluster. The update time must have been set
     * before this method is called.
     * @return a defensive copy of the update time of the cluster
     */
    public Date getUpdateTime() {
        return new Date(updateTime.getTime());
    }

    /**
     * Sets the update time of the cluster.
     * @param updateTime
     *            the update time of the cluster; a defensive copy is stored
     */
    public void setUpdateTime(Date updateTime) {
        this.updateTime = new Date(updateTime.getTime());
    }

    /**
     * Gets all conformity check results of the cluster.
     * @return all conformity check results of the cluster
     */
    public Collection<Conformity> getConformities() {
        return conformities.values();
    }

    /**
     * Gets all conformity check results of the cluster. Note: the method name
     * is misspelled; it is kept for backward compatibility with existing
     * callers. Prefer {@link #getConformities()}.
     * @return all conformity check results of the cluster
     */
    public Collection<Conformity> getConformties() {
        return getConformities();
    }

    /**
     * Gets the conformity information for a conformity rule.
     * @param rule
     *            the conformity rule
     * @return the conformity for the rule, or null if the rule has not been checked
     */
    public Conformity getConformity(ConformityRule rule) {
        Validate.notNull(rule);
        return conformities.get(rule.getName());
    }

    /**
     * Updates the cluster with a new conformity check result, replacing any
     * previous result for the same rule.
     * @param conformity
     *            the conformity to update
     * @return the cluster itself for chaining
     */
    public Cluster updateConformity(Conformity conformity) {
        Validate.notNull(conformity);
        conformities.put(conformity.getRuleId(), conformity);
        return this;
    }

    /**
     * Clears the conformity check results.
     */
    public void clearConformities() {
        conformities.clear();
    }

    /**
     * Gets the boolean flag to indicate whether the cluster is conforming to
     * all non-excluded conformity rules.
     * @return true if the cluster is conforming against all non-excluded rules,
     *         false otherwise
     */
    public boolean isConforming() {
        return isConforming;
    }

    /**
     * Sets the boolean flag to indicate whether the cluster is conforming to
     * all non-excluded conformity rules.
     * @param conforming
     *            true if the cluster is conforming against all non-excluded rules,
     *            false otherwise
     */
    public void setConforming(boolean conforming) {
        isConforming = conforming;
    }

    /**
     * Gets names of all excluded conformity rules for this cluster.
     * @return an unmodifiable view of the excluded conformity rule names
     */
    public Collection<String> getExcludedRules() {
        return Collections.unmodifiableCollection(excludedConformityRules);
    }

    /**
     * Excludes rules for the cluster. Rule ids are trimmed before being stored.
     * @param ruleIds
     *            the rule ids to exclude; neither the array nor any element may be null
     * @return the cluster itself for chaining
     */
    public Cluster excludeRules(String... ruleIds) {
        Validate.notNull(ruleIds);
        for (String ruleId : ruleIds) {
            Validate.notNull(ruleId);
            excludedConformityRules.add(ruleId.trim());
        }
        return this;
    }

    /**
     * Gets the flag to indicate whether the cluster is opted out of Conformity monkey.
     * @return true if the cluster is not handled by Conformity monkey, false otherwise
     */
    public boolean isOptOutOfConformity() {
        return isOptOutOfConformity;
    }

    /**
     * Sets the flag to indicate whether the cluster is opted out of Conformity monkey.
     * @param optOutOfConformity
     *            true if the cluster is not handled by Conformity monkey, false otherwise
     */
    public void setOptOutOfConformity(boolean optOutOfConformity) {
        isOptOutOfConformity = optOutOfConformity;
    }

    /**
     * Gets a map from fields of resources to corresponding values. Values are represented
     * as Strings so they can be displayed or stored in databases like SimpleDB.
     * The update time must have been set before this method is called.
     * @return a map from field name to field value
     */
    public Map<String, String> getFieldToValueMap() {
        Map<String, String> map = Maps.newHashMap();
        putToMapIfNotNull(map, CLUSTER, name);
        putToMapIfNotNull(map, REGION, region);
        putToMapIfNotNull(map, OWNER_EMAIL, ownerEmail);
        // DateTimeFormatter.print already returns a String.
        putToMapIfNotNull(map, UPDATE_TIMESTAMP, DATE_FORMATTER.print(updateTime.getTime()));
        putToMapIfNotNull(map, IS_CONFORMING, String.valueOf(isConforming));
        putToMapIfNotNull(map, IS_OPTEDOUT, String.valueOf(isOptOutOfConformity));
        putToMapIfNotNull(map, EXCLUDED_RULES, StringUtils.join(excludedConformityRules, ","));
        List<String> ruleIds = Lists.newArrayList();
        for (Conformity conformity : conformities.values()) {
            // Each checked rule gets its own entry listing the failed components.
            map.put(conformity.getRuleId(), StringUtils.join(conformity.getFailedComponents(), ","));
            ruleIds.add(conformity.getRuleId());
        }
        putToMapIfNotNull(map, CONFORMITY_RULES, StringUtils.join(ruleIds, ","));
        return map;
    }

    /**
     * Parse a map from field name to value to a cluster. Optional fields
     * (owner email, excluded rules, conformity rules, update timestamp) may be
     * absent from the map.
     * @param fieldToValue the map from field name to value
     * @return the cluster that is de-serialized from the map
     */
    public static Cluster parseFieldToValueMap(Map<String, String> fieldToValue) {
        Validate.notNull(fieldToValue);
        Cluster cluster = new Cluster(fieldToValue.get(CLUSTER),
                fieldToValue.get(REGION));
        cluster.setOwnerEmail(fieldToValue.get(OWNER_EMAIL));
        cluster.setConforming(Boolean.parseBoolean(fieldToValue.get(IS_CONFORMING)));
        cluster.setOptOutOfConformity(Boolean.parseBoolean(fieldToValue.get(IS_OPTEDOUT)));
        // StringUtils.split returns null for null input, so guard each optional
        // field to avoid NPE/IllegalArgumentException on partial records.
        String[] excludedRuleIds = StringUtils.split(fieldToValue.get(EXCLUDED_RULES), ",");
        if (excludedRuleIds != null) {
            cluster.excludeRules(excludedRuleIds);
        }
        String timestamp = fieldToValue.get(UPDATE_TIMESTAMP);
        if (timestamp != null) {
            cluster.setUpdateTime(new Date(DATE_FORMATTER.parseDateTime(timestamp).getMillis()));
        }
        String[] checkedRuleIds = StringUtils.split(fieldToValue.get(CONFORMITY_RULES), ",");
        if (checkedRuleIds != null) {
            for (String ruleId : checkedRuleIds) {
                String[] failedComponents = StringUtils.split(fieldToValue.get(ruleId), ",");
                List<String> failed = failedComponents == null
                        ? Lists.<String>newArrayList() : Lists.newArrayList(failedComponents);
                cluster.updateConformity(new Conformity(ruleId, failed));
            }
        }
        return cluster;
    }

    /** Puts a key/value pair into the map only when the value is not null. */
    private static void putToMapIfNotNull(Map<String, String> map, String key, String value) {
        Validate.notNull(map);
        Validate.notNull(key);
        if (value != null) {
            map.put(key, value);
        }
    }

    /**
     * Gets the instances of the cluster that do not belong to any auto scaling group.
     * @return an unmodifiable view of the solo instance ids
     */
    public Set<String> getSoloInstances() {
        return Collections.unmodifiableSet(soloInstances);
    }
}
| 10,990
| 33.027864
| 118
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/conformity/AutoScalingGroup.java
|
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.conformity;
import com.google.common.collect.Lists;
import org.apache.commons.lang.Validate;
import java.util.Collection;
import java.util.Collections;
/**
* The class implementing the auto scaling groups.
*/
public class AutoScalingGroup {
    private final String name;
    private final Collection<String> instances = Lists.newArrayList();
    private boolean isSuspended;

    /**
     * Constructor.
     * @param name
     *            the name of the auto scaling group; must not be null
     * @param instances
     *            the instance ids in the auto scaling group
     */
    public AutoScalingGroup(String name, String... instances) {
        // Validate name too, for consistency with Cluster's constructors.
        Validate.notNull(name);
        Validate.notNull(instances);
        this.name = name;
        for (String instance : instances) {
            this.instances.add(instance);
        }
        this.isSuspended = false;
    }

    /**
     * Gets the name of the auto scaling group.
     * @return the name of the auto scaling group
     */
    public String getName() {
        return name;
    }

    /**
     * Gets the instances of the auto scaling group.
     * @return an unmodifiable view of the instance ids in the auto scaling group
     */
    public Collection<String> getInstances() {
        return Collections.unmodifiableCollection(instances);
    }

    /**
     * Gets the flag to indicate whether the ASG is suspended.
     * @return true if the ASG is suspended, false otherwise
     */
    public boolean isSuspended() {
        return isSuspended;
    }

    /**
     * Sets the flag to indicate whether the ASG is suspended.
     * @param suspended true if the ASG is suspended, false otherwise
     */
    public void setSuspended(boolean suspended) {
        isSuspended = suspended;
    }
}
| 2,392
| 27.488095
| 79
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/conformity/ClusterCrawler.java
|
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.conformity;
import java.util.List;
/**
* The interface of the crawler for Conformity Monkey to get the cluster information.
*/
public interface ClusterCrawler {
    /**
     * Gets the up to date information for a collection of clusters. When the input argument is null
     * or empty, the method returns all clusters.
     *
     * @param clusterNames
     *          the cluster names
     * @return the list of clusters
     */
    List<Cluster> clusters(String... clusterNames);

    /**
     * Gets the owner email for a cluster to set the ownerEmail field when crawl.
     * @param cluster
     *          the cluster
     * @return the owner email of the cluster
     */
    String getOwnerEmailForCluster(Cluster cluster);

    /**
     * Updates the excluded conformity rules for the given cluster.
     * @param cluster
     *          the cluster whose excluded-rule set should be refreshed
     */
    void updateExcludedConformityRules(Cluster cluster);
}
| 1,574
| 29.882353
| 100
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/conformity/ConformityRuleEngine.java
|
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.conformity;
import com.google.common.collect.Lists;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.Collections;
/**
* The class implementing the conformity rule engine.
*/
public class ConformityRuleEngine {
    private static final Logger LOGGER = LoggerFactory.getLogger(ConformityRuleEngine.class);

    /** The registered conformity rules, checked in registration order. */
    private final Collection<ConformityRule> rules = Lists.newArrayList();

    /**
     * Checks whether a cluster is conforming or not against the rules in the engine.
     * Previous results are cleared, every non-excluded rule is evaluated, and the
     * cluster's conforming flag is updated accordingly.
     *
     * @param cluster
     *            the cluster
     * @return true if the cluster is conforming, false otherwise.
     */
    public boolean check(Cluster cluster) {
        Validate.notNull(cluster);
        cluster.clearConformities();
        for (ConformityRule rule : rules) {
            if (cluster.getExcludedRules().contains(rule.getName())) {
                LOGGER.info(String.format("Conformity rule %s is excluded on cluster %s",
                        rule.getName(), cluster.getName()));
                continue;
            }
            LOGGER.info(String.format("Running conformity rule %s on cluster %s",
                    rule.getName(), cluster.getName()));
            cluster.updateConformity(rule.check(cluster));
        }
        // The cluster conforms only when no rule reported failed components.
        boolean conforming = true;
        for (Conformity conformity : cluster.getConformties()) {
            if (!conformity.getFailedComponents().isEmpty()) {
                conforming = false;
                break;
            }
        }
        cluster.setConforming(conforming);
        return conforming;
    }

    /**
     * Add a conformity rule.
     *
     * @param rule
     *            The conformity rule to add.
     * @return The Conformity rule engine object.
     */
    public ConformityRuleEngine addRule(ConformityRule rule) {
        Validate.notNull(rule);
        rules.add(rule);
        return this;
    }

    /**
     * Gets all conformity rules in the rule engine.
     * @return an unmodifiable view of all conformity rules in the rule engine
     */
    public Collection<ConformityRule> rules() {
        return Collections.unmodifiableCollection(rules);
    }
}
| 2,950
| 32.534091
| 93
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/conformity/Conformity.java
|
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.conformity;
import com.google.common.collect.Lists;
import org.apache.commons.lang.Validate;
import java.util.Collection;
import java.util.Collections;
/**
* The class defining the result of a conformity check.
*/
public class Conformity {
    /** The id of the rule that produced this result. */
    private final String ruleId;
    /** The components that failed the check; empty means the check passed. */
    private final Collection<String> failedComponents = Lists.newArrayList();

    /**
     * Constructor.
     * @param ruleId
     *            the conformity rule id
     * @param failedComponents
     *            the components that cause the conformity check to fail; an
     *            empty collection means the conformity check passes
     */
    public Conformity(String ruleId, Collection<String> failedComponents) {
        Validate.notNull(ruleId);
        Validate.notNull(failedComponents);
        this.ruleId = ruleId;
        this.failedComponents.addAll(failedComponents);
    }

    /**
     * Gets the conformity rule id.
     * @return the conformity rule id
     */
    public String getRuleId() {
        return ruleId;
    }

    /**
     * Gets the components that cause the conformity check to fail.
     * @return an unmodifiable view of the failed components
     */
    public Collection<String> getFailedComponents() {
        return Collections.unmodifiableCollection(failedComponents);
    }
}
| 2,092
| 29.333333
| 83
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/conformity/ConformityEmailBuilder.java
|
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.conformity;
import com.netflix.simianarmy.AbstractEmailBuilder;
import java.util.Collection;
import java.util.Map;
/** The abstract class for building Conformity monkey email notifications. */
public abstract class ConformityEmailBuilder extends AbstractEmailBuilder {
    /**
     * Sets the map from an owner email to the clusters that belong to the owner
     * and need to send notifications for.
     * @param emailToClusters the map from owner email to the owned clusters
     * @param rules all conformity rules that are used to find the description of each rule to display
     */
    public abstract void setEmailToClusters(Map<String, Collection<Cluster>> emailToClusters,
            Collection<ConformityRule> rules);
}
| 1,407
| 37.054054
| 102
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/conformity/ConformityClusterTracker.java
|
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.conformity;
import java.util.List;
/**
* The interface that defines the tracker to manage clusters for Conformity monkey to use.
*/
public interface ConformityClusterTracker {
    /**
     * Adds a cluster to the tracker. If the cluster with the same name already exists,
     * the method updates the record with the cluster parameter.
     * @param cluster
     *          the cluster to add or update
     */
    void addOrUpdate(Cluster cluster);

    /**
     * Gets the list of clusters in a list of regions.
     * @param regions
     *          the regions of the clusters, when the parameter is null or empty, the method returns
     *          clusters from all regions
     * @return list of clusters in the given regions
     */
    List<Cluster> getAllClusters(String... regions);

    /**
     * Gets the list of non-conforming clusters in a list of regions.
     * @param regions the regions of the clusters, when the parameter is null or empty, the method returns
     *                clusters from all regions
     * @return list of clusters in the given regions
     */
    List<Cluster> getNonconformingClusters(String... regions);

    /**
     * Gets the cluster with a specific name from a specific region.
     * @param name the cluster name
     * @param region the region of the cluster
     * @return the cluster with the name
     */
    Cluster getCluster(String name, String region);

    /**
     * Deletes a list of clusters from the tracker.
     * @param clusters the list of clusters to delete. The parameter cannot be null. If it is empty,
     *                 no cluster is deleted.
     */
    void deleteClusters(Cluster... clusters);
}
| 2,315
| 33.058824
| 106
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/basic/BasicMonkeyServer.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import com.netflix.simianarmy.basic.conformity.BasicConformityMonkey;
import com.netflix.simianarmy.basic.conformity.BasicConformityMonkeyContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.MonkeyRunner;
import com.netflix.simianarmy.aws.janitor.VolumeTaggingMonkey;
import com.netflix.simianarmy.basic.janitor.BasicJanitorMonkey;
import com.netflix.simianarmy.basic.janitor.BasicJanitorMonkeyContext;
import com.netflix.simianarmy.basic.janitor.BasicVolumeTaggingMonkeyContext;
/**
* Will periodically run the configured monkeys.
*/
@SuppressWarnings("serial")
public class BasicMonkeyServer extends HttpServlet {

    private static final Logger LOGGER = LoggerFactory.getLogger(BasicMonkeyServer.class);

    private static final MonkeyRunner RUNNER = MonkeyRunner.getInstance();

    /**
     * Add the monkeys that will be run.
     */
    @SuppressWarnings("unchecked")
    public void addMonkeysToRun() {
        LOGGER.info("Adding Chaos Monkey.");
        RUNNER.replaceMonkey(this.chaosClass, this.chaosContextClass);
        LOGGER.info("Adding Volume Tagging Monkey.");
        RUNNER.replaceMonkey(VolumeTaggingMonkey.class, BasicVolumeTaggingMonkeyContext.class);
        LOGGER.info("Adding Janitor Monkey.");
        RUNNER.replaceMonkey(BasicJanitorMonkey.class, BasicJanitorMonkeyContext.class);
        LOGGER.info("Adding Conformity Monkey.");
        RUNNER.replaceMonkey(BasicConformityMonkey.class, BasicConformityMonkeyContext.class);
    }

    /**
     * make the class of the client object configurable.
     */
    @SuppressWarnings("rawtypes")
    private Class chaosContextClass = com.netflix.simianarmy.basic.BasicChaosMonkeyContext.class;

    /**
     * make the class of the chaos object configurable.
     */
    @SuppressWarnings("rawtypes")
    private Class chaosClass = com.netflix.simianarmy.basic.chaos.BasicChaosMonkey.class;

    @Override
    public void init() throws ServletException {
        super.init();
        configureClient();
        addMonkeysToRun();
        RUNNER.start();
    }

    /**
     * Loads the client that is configured.
     * @throws ServletException
     *             if the configured client cannot be loaded properly
     */
    @SuppressWarnings("rawtypes")
    private void configureClient() throws ServletException {
        Properties clientConfig = loadClientConfigProperties();
        // Fall back to the defaults when a class is not configured.
        Class newContextClass = loadClientClass(clientConfig, "simianarmy.client.context.class");
        this.chaosContextClass = (newContextClass == null ? this.chaosContextClass : newContextClass);
        Class newChaosClass = loadClientClass(clientConfig, "simianarmy.client.chaos.class");
        this.chaosClass = (newChaosClass == null ? this.chaosClass : newChaosClass);
    }

    /**
     * Loads the class named by the given config property, or returns null when
     * the property is unset so the caller can keep its default.
     * @param clientConfig the client configuration properties
     * @param key the property naming the class to load
     * @return the loaded class, or null if the property is not configured
     * @throws ServletException if the configured class cannot be loaded
     */
    @SuppressWarnings("rawtypes")
    private Class loadClientClass(Properties clientConfig, String key) throws ServletException {
        ClassLoader classLoader = BasicMonkeyServer.class.getClassLoader();
        try {
            String clientClassName = clientConfig.getProperty(key);
            if (clientClassName == null || clientClassName.isEmpty()) {
                LOGGER.info("using standard client for " + key);
                return null;
            }
            Class newClass = classLoader.loadClass(clientClassName);
            LOGGER.info("using " + key + " loaded " + newClass.getCanonicalName());
            return newClass;
        } catch (ClassNotFoundException e) {
            throw new ServletException("Could not load " + key, e);
        }
    }

    /**
     * Load the client config properties file.
     *
     * @return Properties The contents of the client config file
     * @throws ServletException
     *             if the file cannot be found or read
     */
    private Properties loadClientConfigProperties() throws ServletException {
        String propertyFileName = "client.properties";
        String clientConfigFileName = System.getProperty(propertyFileName, "/" + propertyFileName);
        LOGGER.info("using client properties " + clientConfigFileName);
        InputStream input = null;
        Properties p = new Properties();
        try {
            try {
                input = BasicMonkeyServer.class.getResourceAsStream(clientConfigFileName);
                // getResourceAsStream returns null when the resource is missing;
                // fail with a descriptive ServletException instead of an NPE
                // from Properties.load.
                if (input == null) {
                    throw new ServletException("Could not find " + clientConfigFileName + " on the classpath");
                }
                p.load(input);
                return p;
            } finally {
                if (input != null) {
                    input.close();
                }
            }
        } catch (IOException e) {
            throw new ServletException("Could not load " + clientConfigFileName, e);
        }
    }

    @SuppressWarnings("unchecked")
    @Override
    public void destroy() {
        RUNNER.stop();
        LOGGER.info("Stopping Chaos Monkey.");
        RUNNER.removeMonkey(this.chaosClass);
        LOGGER.info("Stopping Volume Tagging Monkey.");
        RUNNER.removeMonkey(VolumeTaggingMonkey.class);
        LOGGER.info("Stopping Janitor Monkey.");
        RUNNER.removeMonkey(BasicJanitorMonkey.class);
        LOGGER.info("Stopping Conformity Monkey.");
        RUNNER.removeMonkey(BasicConformityMonkey.class);
        super.destroy();
    }
}
| 6,007
| 36.786164
| 102
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/basic/LocalDbRecorder.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic;
import java.io.File;
import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentNavigableMap;
import org.mapdb.Atomic;
import org.mapdb.DB;
import org.mapdb.DBMaker;
import org.mapdb.Fun;
import com.netflix.simianarmy.EventType;
import com.netflix.simianarmy.MonkeyConfiguration;
import com.netflix.simianarmy.MonkeyRecorder;
import com.netflix.simianarmy.MonkeyType;
import com.netflix.simianarmy.chaos.ChaosMonkey;
/**
* Replacement for SimpleDB on non-AWS: use an embedded db.
*
* @author jgardner
*
*/
public class LocalDbRecorder implements MonkeyRecorder {
private static DB db = null;
private static Atomic.Long nextId = null;
private static ConcurrentNavigableMap<Fun.Tuple2<Long, Long>, Event> eventMap = null;
// Upper bound, so we don't fill the disk with monkey events
private static final double MAX_EVENTS = 10000;
private double maxEvents = MAX_EVENTS;
private String dbFilename = "simianarmy_events";
private String dbpassword = null;
/** Constructor.
*
*/
public LocalDbRecorder(MonkeyConfiguration configuration) {
if (configuration != null) {
dbFilename = configuration.getStrOrElse("simianarmy.recorder.localdb.file", null);
maxEvents = configuration.getNumOrElse("simianarmy.recorder.localdb.max_events", MAX_EVENTS);
dbpassword = configuration.getStrOrElse("simianarmy.recorder.localdb.password", null);
}
}
private synchronized void init() {
if (nextId != null) {
return;
}
File dbFile = null;
dbFile = (dbFilename == null) ? tempDbFile() : new File(dbFilename);
if (dbpassword != null) {
db = DBMaker.newFileDB(dbFile)
.closeOnJvmShutdown()
.encryptionEnable(dbpassword)
.make();
} else {
db = DBMaker.newFileDB(dbFile)
.closeOnJvmShutdown()
.make();
}
eventMap = db.getTreeMap("eventMap");
nextId = db.createAtomicLong("next", 1);
}
private static File tempDbFile() {
try {
final File tmpFile = File.createTempFile("mapdb", "db");
tmpFile.deleteOnExit();
return tmpFile;
} catch (IOException e) {
throw new RuntimeException("Temporary DB file could not be created", e);
}
}
/* (non-Javadoc)
* @see com.netflix.simianarmy.MonkeyRecorder#newEvent(MonkeyType, EventType, String, String)
*/
@Override
public Event newEvent(MonkeyType monkeyType, EventType eventType, String region,
String id) {
init();
return new MapDbRecorderEvent(monkeyType, eventType, region, id);
}
/* (non-Javadoc)
* @see com.netflix.simianarmy.MonkeyRecorder#recordEvent(com.netflix.simianarmy.MonkeyRecorder.Event)
*/
@Override
public void recordEvent(Event evt) {
init();
Fun.Tuple2<Long, Long> id = Fun.t2(evt.eventTime().getTime(),
nextId.incrementAndGet());
if (eventMap.size() + 1 > maxEvents) {
eventMap.remove(eventMap.firstKey());
}
eventMap.put(id, evt);
db.commit();
}
/* (non-Javadoc)
* @see com.netflix.simianarmy.MonkeyRecorder#findEvents(java.util.Map, java.util.Date)
*/
@Override
public List<Event> findEvents(Map<String, String> query, Date after) {
init();
List<Event> foundEvents = new ArrayList<Event>();
for (Event evt : eventMap.tailMap(toKey(after)).values()) {
boolean matched = true;
for (Map.Entry<String, String> pair : query.entrySet()) {
if (pair.getKey().equals("id") && !evt.id().equals(pair.getValue())) {
matched = false;
}
if (pair.getKey().equals("monkeyType") && !evt.monkeyType().toString().equals(pair.getValue())) {
matched = false;
}
if (pair.getKey().equals("eventType") && !evt.eventType().toString().equals(pair.getValue())) {
matched = false;
}
}
if (matched) {
foundEvents.add(evt);
}
}
return foundEvents;
}
/* (non-Javadoc)
* @see com.netflix.simianarmy.MonkeyRecorder#findEvents(MonkeyType, Map, Date)
*/
@Override
public List<Event> findEvents(MonkeyType monkeyType, Map<String, String> query,
Date after) {
Map<String, String> copy = new LinkedHashMap<String, String>(query);
copy.put("monkeyType", monkeyType.name());
return findEvents(copy, after);
}
/* (non-Javadoc)
* @see com.netflix.simianarmy.MonkeyRecorder#findEvents(MonkeyType, EventType, Map, Date)
*/
@Override
public List<Event> findEvents(MonkeyType monkeyType, EventType eventType,
Map<String, String> query, Date after) {
Map<String, String> copy = new LinkedHashMap<String, String>(query);
copy.put("monkeyType", monkeyType.name());
copy.put("eventType", eventType.name());
return findEvents(copy, after);
}
private Fun.Tuple2<Long, Long> toKey(Date date) {
return Fun.t2(date.getTime(), 0L);
}
/** Loggable event for LocalDbRecorder.
*
*/
public static class MapDbRecorderEvent implements MonkeyRecorder.Event, Serializable {
/** The monkey type. */
private MonkeyType monkeyType;
/** The event type. */
private EventType eventType;
/** The event id. */
private String id;
/** The event region. */
private String region;
/** The fields. */
private Map<String, String> fields = new HashMap<String, String>();
/** The event time. */
private Date date;
private static final long serialVersionUID = 1L;
/** Constructor.
* @param monkeyType
* @param eventType
* @param region
* @param id
*/
public MapDbRecorderEvent(MonkeyType monkeyType, EventType eventType,
String region, String id) {
this.monkeyType = monkeyType;
this.eventType = eventType;
this.id = id;
this.region = region;
this.date = new Date();
}
/** Constructor.
* @param monkeyType
* @param eventType
* @param region
* @param id
* @param time
*/
public MapDbRecorderEvent(MonkeyType monkeyType, EventType eventType,
String region, String id, long time) {
this.monkeyType = monkeyType;
this.eventType = eventType;
this.id = id;
this.region = region;
this.date = new Date(time);
}
/* (non-Javadoc)
* @see com.netflix.simianarmy.MonkeyRecorder.Event#id()
*/
@Override
public String id() {
return id;
}
/* (non-Javadoc)
* @see com.netflix.simianarmy.MonkeyRecorder.Event#eventTime()
*/
@Override
public Date eventTime() {
return new Date(date.getTime());
}
/* (non-Javadoc)
* @see com.netflix.simianarmy.MonkeyRecorder.Event#monkeyType()
*/
@Override
public MonkeyType monkeyType() {
return monkeyType;
}
/* (non-Javadoc)
* @see com.netflix.simianarmy.MonkeyRecorder.Event#eventType()
*/
@Override
public EventType eventType() {
return eventType;
}
/* (non-Javadoc)
* @see com.netflix.simianarmy.MonkeyRecorder.Event#region()
*/
@Override
public String region() {
return region;
}
/* (non-Javadoc)
* @see com.netflix.simianarmy.MonkeyRecorder.Event#fields()
*/
@Override
public Map<String, String> fields() {
return Collections.unmodifiableMap(fields);
}
/* (non-Javadoc)
* @see com.netflix.simianarmy.MonkeyRecorder.Event#field(java.lang.String)
*/
@Override
public String field(String name) {
return fields.get(name);
}
/* (non-Javadoc)
* @see com.netflix.simianarmy.MonkeyRecorder.Event#addField(java.lang.String, java.lang.String)
*/
@Override
public Event addField(String name, String value) {
fields.put(name, value);
return this;
}
}
/** Ad-hoc smoke test for the recorder: prints existing events, records ten
 * chaos-termination events, then prints the full contents again. Appears to
 * be used for manual testing; if so it should move to a unit test. (2/16/2014, mgeis)
 * @param args unused command-line arguments
 */
public static void main(String[] args) {
    LocalDbRecorder recorder = new LocalDbRecorder(null);
    recorder.init();
    // Dump whatever was persisted by previous runs.
    for (Event existing : recorder.findEvents(new HashMap<String, String>(), new Date(0))) {
        System.out.println("Got:" + existing + ": " + existing.eventTime().getTime());
    }
    // Record a fixed batch of new events.
    int created = 0;
    while (created < 10) {
        Event event = recorder.newEvent(ChaosMonkey.Type.CHAOS,
                ChaosMonkey.EventTypes.CHAOS_TERMINATION, "1", "1");
        recorder.recordEvent(event);
        System.out.println("Added:" + event + ": " + event.eventTime().getTime());
        created++;
    }
    // Dump again to confirm the new events are queryable.
    for (Event stored : recorder.findEvents(new HashMap<String, String>(), new Date(0))) {
        System.out.println("Got:" + stored + ": " + stored.eventTime().getTime());
    }
}
}
| 10,581
| 31.262195
| 113
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/basic/BasicRecorderEvent.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import com.netflix.simianarmy.EventType;
import com.netflix.simianarmy.MonkeyRecorder;
import com.netflix.simianarmy.MonkeyType;
/**
* The Class BasicRecorderEvent.
*/
public class BasicRecorderEvent implements MonkeyRecorder.Event {
/** The monkey type. */
private MonkeyType monkeyType;
/** The event type. */
private EventType eventType;
/** The event id. */
private String id;
/** The event region. */
private String region;
/** The fields. */
private Map<String, String> fields = new HashMap<String, String>();
/** The event time. */
private Date date;
/**
* Instantiates a new basic recorder event.
*
* @param monkeyType
* the monkey type
* @param eventType
* the event type
* @param region
* the region event occurred in
* @param id
* the event id
*/
public BasicRecorderEvent(MonkeyType monkeyType, EventType eventType, String region, String id) {
this.monkeyType = monkeyType;
this.eventType = eventType;
this.id = id;
this.region = region;
this.date = new Date();
}
/**
* Instantiates a new basic recorder event.
*
* @param monkeyType
* the monkey type
* @param eventType
* the event type
* @param region
* the region event occurred in
* @param id
* the event id
* @param time
* the event time
*/
public BasicRecorderEvent(MonkeyType monkeyType, EventType eventType, String region, String id, long time) {
this.monkeyType = monkeyType;
this.eventType = eventType;
this.id = id;
this.region = region;
this.date = new Date(time);
}
/** {@inheritDoc} */
public String id() {
return id;
}
/** {@inheritDoc} */
public String region() {
return region;
}
/** {@inheritDoc} */
public Date eventTime() {
return new Date(date.getTime());
}
/** {@inheritDoc} */
public MonkeyType monkeyType() {
return monkeyType;
}
/** {@inheritDoc} */
public EventType eventType() {
return eventType;
}
/** {@inheritDoc} */
public Map<String, String> fields() {
return Collections.unmodifiableMap(fields);
}
/** {@inheritDoc} */
public String field(String name) {
return fields.get(name);
}
/**
* Adds the fields.
*
* @param toAdd
* the fields to set
* @return <b>this</b> so you can chain many addFields calls together
*/
public MonkeyRecorder.Event addFields(Map<String, String> toAdd) {
fields.putAll(toAdd);
return this;
}
/** {@inheritDoc} */
public MonkeyRecorder.Event addField(String name, String value) {
fields.put(name, value);
return this;
}
}
| 3,749
| 24.510204
| 112
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/basic/BasicConfiguration.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic;
import com.netflix.simianarmy.MonkeyConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Properties;
/**
 * The Class BasicConfiguration.
 *
 * A {@link MonkeyConfiguration} backed by a fixed {@link Properties} object.
 * Because the backing properties are static, both reload operations are no-ops.
 */
public class BasicConfiguration implements MonkeyConfiguration {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(BasicConfiguration.class);

    /** The properties; assigned once at construction and never replaced. */
    private final Properties props;

    /**
     * Instantiates a new basic configuration.
     * @param props
     *            the properties
     */
    public BasicConfiguration(Properties props) {
        this.props = props;
    }

    /** {@inheritDoc} */
    @Override
    public boolean getBool(String property) {
        return getBoolOrElse(property, false);
    }

    /** {@inheritDoc} */
    @Override
    public boolean getBoolOrElse(String property, boolean dflt) {
        String val = props.getProperty(property);
        if (val == null) {
            return dflt;
        }
        // Trim so values like " true " parse as expected.
        val = val.trim();
        return Boolean.parseBoolean(val);
    }

    /** {@inheritDoc} Falls back to the default (with an error log) on unparseable values. */
    @Override
    public double getNumOrElse(String property, double dflt) {
        String val = props.getProperty(property);
        double result = dflt;
        if (val != null && !val.isEmpty()) {
            try {
                result = Double.parseDouble(val);
            } catch (NumberFormatException e) {
                LOGGER.error("failed to parse property: " + property + "; returning default value: " + dflt, e);
            }
        }
        return result;
    }

    /** {@inheritDoc} Returns null when the property is absent. */
    @Override
    public String getStr(String property) {
        return getStrOrElse(property, null);
    }

    /** {@inheritDoc} */
    @Override
    public String getStrOrElse(String property, String dflt) {
        String val = props.getProperty(property);
        return val == null ? dflt : val;
    }

    /** {@inheritDoc} */
    @Override
    public void reload() {
        // BasicConfiguration is based on static properties, so reload is a no-op
    }

    /** {@inheritDoc} */
    @Override
    public void reload(String groupName) {
        // BasicConfiguration is based on static properties, so reload is a no-op
    }
}
| 2,886
| 27.584158
| 112
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/basic/BasicCalendar.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic;
import java.util.Calendar;
import java.util.Date;
import java.util.Set;
import java.util.TimeZone;
import java.util.TreeSet;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.Monkey;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.MonkeyConfiguration;
// CHECKSTYLE IGNORE MagicNumberCheck
/**
 * The Class BasicCalendar.
 *
 * Decides when it is "monkey time": weekdays, between the configured open and
 * close hours (in the configured timezone), excluding a built-in set of US
 * holidays. Holidays are computed lazily per calendar year.
 */
public class BasicCalendar implements MonkeyCalendar {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(BasicCalendar.class);

    /** The open hour (0-23) at which monkeys may start running. */
    private final int openHour;

    /** The close hour (0-23) after which monkeys stop running. */
    private final int closeHour;

    /** The timezone used for all calendar arithmetic. */
    private final TimeZone tz;

    /** The holidays, stored as Calendar.DAY_OF_YEAR values (1-366). The set also
     * contains the 4-digit year they were computed for, which acts as a marker so
     * the set is recomputed on year rollover; years never collide with day-of-year
     * values (see loadHolidays). */
    protected final Set<Integer> holidays = new TreeSet<Integer>();

    /** The cfg; may be null when the explicit-hours constructors are used. */
    private MonkeyConfiguration cfg;

    /**
     * Instantiates a new basic calendar from configuration, defaulting to
     * 9am-3pm America/Los_Angeles.
     *
     * @param cfg
     *            the monkey configuration
     */
    public BasicCalendar(MonkeyConfiguration cfg) {
        this.cfg = cfg;
        openHour = (int) cfg.getNumOrElse("simianarmy.calendar.openHour", 9);
        closeHour = (int) cfg.getNumOrElse("simianarmy.calendar.closeHour", 15);
        tz = TimeZone.getTimeZone(cfg.getStrOrElse("simianarmy.calendar.timezone", "America/Los_Angeles"));
    }

    /**
     * Instantiates a new basic calendar with explicit hours and no configuration
     * (the "simianarmy.calendar.isMonkeyTime" override is unavailable).
     *
     * @param open
     *            the open hour
     * @param close
     *            the close hour
     * @param timezone
     *            the timezone
     */
    public BasicCalendar(int open, int close, TimeZone timezone) {
        openHour = open;
        closeHour = close;
        tz = timezone;
    }

    /**
     * Instantiates a new basic calendar with explicit hours and a configuration.
     *
     * @param cfg
     *            the monkey configuration
     * @param open
     *            the open hour
     * @param close
     *            the close hour
     * @param timezone
     *            the timezone
     */
    public BasicCalendar(MonkeyConfiguration cfg, int open, int close, TimeZone timezone) {
        this.cfg = cfg;
        openHour = open;
        closeHour = close;
        tz = timezone;
    }

    /** {@inheritDoc} */
    @Override
    public int openHour() {
        return openHour;
    }

    /** {@inheritDoc} */
    @Override
    public int closeHour() {
        return closeHour;
    }

    /** {@inheritDoc} Returns the current time in the configured timezone. */
    @Override
    public Calendar now() {
        return Calendar.getInstance(tz);
    }

    /** {@inheritDoc}
     * The property "simianarmy.calendar.isMonkeyTime" can force monkey time on;
     * when the property is present but false, the regular calendar check still runs. */
    @Override
    public boolean isMonkeyTime(Monkey monkey) {
        if (cfg != null && cfg.getStr("simianarmy.calendar.isMonkeyTime") != null) {
            boolean monkeyTime = cfg.getBool("simianarmy.calendar.isMonkeyTime");
            if (monkeyTime) {
                LOGGER.debug("isMonkeyTime: Found property 'simianarmy.calendar.isMonkeyTime': " + monkeyTime + ". Time for monkey.");
                return monkeyTime;
            } else {
                LOGGER.debug("isMonkeyTime: Found property 'simianarmy.calendar.isMonkeyTime': " + monkeyTime + ". Continuing regular calendar check for monkey time.");
            }
        }
        Calendar now = now();
        int dow = now.get(Calendar.DAY_OF_WEEK);
        if (dow == Calendar.SATURDAY || dow == Calendar.SUNDAY) {
            LOGGER.debug("isMonkeyTime: Happy Weekend! Not time for monkey.");
            return false;
        }
        int hour = now.get(Calendar.HOUR_OF_DAY);
        // Note: closeHour itself is still inside the window (inclusive bound).
        if (hour < openHour || hour > closeHour) {
            LOGGER.debug("isMonkeyTime: Not inside open hours. Not time for monkey.");
            return false;
        }
        if (isHoliday(now)) {
            LOGGER.debug("isMonkeyTime: Happy Holiday! Not time for monkey.");
            return false;
        }
        LOGGER.debug("isMonkeyTime: Time for monkey.");
        return true;
    }

    /**
     * Checks if is holiday. Lazily (re)computes the holiday set when the
     * current year is not yet marked in it.
     *
     * @param now
     *            the current time
     * @return true, if is holiday
     */
    protected boolean isHoliday(Calendar now) {
        if (!holidays.contains(now.get(Calendar.YEAR))) {
            loadHolidays(now.get(Calendar.YEAR));
        }
        return holidays.contains(now.get(Calendar.DAY_OF_YEAR));
    }

    /**
     * Load holidays. Replaces the entire holiday set with the day-of-year
     * values for the given year, plus the year itself as a recompute marker.
     *
     * @param year
     *            the year
     */
    protected void loadHolidays(int year) {
        holidays.clear();
        // these aren't all strictly holidays, but days when engineers will likely
        // not be in the office to respond to rampaging monkeys

        // new years, or closest work day
        holidays.add(workDayInYear(year, Calendar.JANUARY, 1));

        // 3rd monday == MLK Day
        holidays.add(dayOfYear(year, Calendar.JANUARY, Calendar.MONDAY, 3));

        // 3rd monday == Presidents Day
        holidays.add(dayOfYear(year, Calendar.FEBRUARY, Calendar.MONDAY, 3));

        // last monday == Memorial Day
        holidays.add(dayOfYear(year, Calendar.MAY, Calendar.MONDAY, -1));

        // 4th of July, or closest work day
        holidays.add(workDayInYear(year, Calendar.JULY, 4));

        // first monday == Labor Day
        holidays.add(dayOfYear(year, Calendar.SEPTEMBER, Calendar.MONDAY, 1));

        // second monday == Columbus Day
        holidays.add(dayOfYear(year, Calendar.OCTOBER, Calendar.MONDAY, 2));

        // veterans day, Nov 11th, or closest work day
        holidays.add(workDayInYear(year, Calendar.NOVEMBER, 11));

        // 4th thursday == Thanksgiving
        holidays.add(dayOfYear(year, Calendar.NOVEMBER, Calendar.THURSDAY, 4));

        // 4th friday == "black friday", monkey goes shopping!
        holidays.add(dayOfYear(year, Calendar.NOVEMBER, Calendar.FRIDAY, 4));

        // christmas eve
        holidays.add(dayOfYear(year, Calendar.DECEMBER, 24));
        // christmas day
        holidays.add(dayOfYear(year, Calendar.DECEMBER, 25));
        // day after christmas
        holidays.add(dayOfYear(year, Calendar.DECEMBER, 26));

        // new years eve
        holidays.add(dayOfYear(year, Calendar.DECEMBER, 31));

        // mark the holiday set with the year, so on Jan 1 it will automatically
        // recalculate the holidays for next year
        holidays.add(year);
    }

    /**
     * Day of year for a fixed month/day date.
     *
     * @param year
     *            the year
     * @param month
     *            the month
     * @param day
     *            the day
     * @return the day of the year
     */
    protected int dayOfYear(int year, int month, int day) {
        Calendar holiday = now();
        holiday.set(Calendar.YEAR, year);
        holiday.set(Calendar.MONTH, month);
        holiday.set(Calendar.DAY_OF_MONTH, day);
        return holiday.get(Calendar.DAY_OF_YEAR);
    }

    /**
     * Day of year for an "nth weekday of month" rule (e.g. 3rd Monday);
     * a negative weekInMonth counts from the end of the month.
     *
     * @param year
     *            the year
     * @param month
     *            the month
     * @param dayOfWeek
     *            the day of week
     * @param weekInMonth
     *            the week in month
     * @return the day of the year
     */
    protected int dayOfYear(int year, int month, int dayOfWeek, int weekInMonth) {
        Calendar holiday = now();
        holiday.set(Calendar.YEAR, year);
        holiday.set(Calendar.MONTH, month);
        holiday.set(Calendar.DAY_OF_WEEK, dayOfWeek);
        holiday.set(Calendar.DAY_OF_WEEK_IN_MONTH, weekInMonth);
        return holiday.get(Calendar.DAY_OF_YEAR);
    }

    /**
     * Work day in year. Weekend dates are shifted: Saturday observes on the
     * preceding Friday, Sunday on the following Monday.
     *
     * @param year
     *            the year
     * @param month
     *            the month
     * @param day
     *            the day
     * @return the day of the year adjusted to the closest workday
     */
    protected int workDayInYear(int year, int month, int day) {
        Calendar holiday = now();
        holiday.set(Calendar.YEAR, year);
        holiday.set(Calendar.MONTH, month);
        holiday.set(Calendar.DAY_OF_MONTH, day);
        int doy = holiday.get(Calendar.DAY_OF_YEAR);
        int dow = holiday.get(Calendar.DAY_OF_WEEK);

        if (dow == Calendar.SATURDAY) {
            return doy - 1; // FRIDAY
        }

        if (dow == Calendar.SUNDAY) {
            return doy + 1; // MONDAY
        }

        return doy;
    }

    /** {@inheritDoc}
     * Walks forward from the given date, skipping weekends and holidays, until n
     * additional business days have been consumed: n == 0 yields the given date
     * itself if it is a business day, otherwise the next business day. */
    @Override
    public Date getBusinessDay(Date date, int n) {
        Validate.isTrue(n >= 0);
        Calendar calendar = now();
        calendar.setTime(date);
        // Short-circuit matters: on a weekend/holiday the day is skipped
        // without decrementing n, so only business days count toward n.
        while (isHoliday(calendar) || isWeekend(calendar) || n-- > 0) {
            calendar.add(Calendar.DATE, 1);
        }
        return calendar.getTime();
    }

    /** Returns true when the calendar falls on a Saturday or Sunday. */
    private boolean isWeekend(Calendar calendar) {
        int dow = calendar.get(Calendar.DAY_OF_WEEK);
        return dow == Calendar.SATURDAY || dow == Calendar.SUNDAY;
    }
}
| 9,411
| 28.879365
| 162
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/basic/BasicSimianArmyContext.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.regions.Region;
import com.amazonaws.regions.Regions;
import com.netflix.simianarmy.CloudClient;
import com.netflix.simianarmy.Monkey;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.MonkeyConfiguration;
import com.netflix.simianarmy.MonkeyRecorder;
import com.netflix.simianarmy.MonkeyRecorder.Event;
import com.netflix.simianarmy.MonkeyScheduler;
import com.netflix.simianarmy.aws.RDSRecorder;
import com.netflix.simianarmy.aws.STSAssumeRoleSessionCredentialsProvider;
import com.netflix.simianarmy.aws.SimpleDBRecorder;
import com.netflix.simianarmy.client.aws.AWSClient;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.InputStream;
import java.lang.reflect.Constructor;
import java.util.LinkedList;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
/**
 * The Class BasicSimianArmyContext.
 *
 * Bootstraps the infrastructure shared by all monkeys: loads properties files,
 * configures AWS credentials/proxy, and creates the cloud client, calendar,
 * scheduler and recorder. Meant to be subclassed by monkey-specific contexts.
 */
public class BasicSimianArmyContext implements Monkey.Context {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(BasicSimianArmyContext.class);

    /** The configuration properties, merged from all supplied config files. */
    private final Properties properties = new Properties();

    /** The Constant MONKEY_THREADS. */
    private static final int MONKEY_THREADS = 1;

    /** The scheduler. */
    private MonkeyScheduler scheduler;

    /** The calendar. */
    private MonkeyCalendar calendar;

    /** The config. */
    private BasicConfiguration config;

    /** The client. */
    private AWSClient client;

    /** The recorder. */
    private MonkeyRecorder recorder;

    /** The reported events. */
    private final LinkedList<Event> eventReport;

    /** The AWS credentials provider to be used. */
    private AWSCredentialsProvider awsCredentialsProvider = new DefaultAWSCredentialsProviderChain();

    /** If configured, the ARN of Role to be assumed. */
    private final String assumeRoleArn;

    /** Human-readable account name; defaults to "Default". */
    private final String accountName;

    /** Explicit AWS access key, if configured. */
    private final String account;

    /** Explicit AWS secret key, if configured. */
    private final String secret;

    /** The AWS region the client operates in. */
    private final String region;

    protected ClientConfiguration awsClientConfig = new ClientConfiguration();

    /* If configured, the proxy to be used when making AWS API requests */
    private final String proxyHost;
    private final String proxyPort;
    private final String proxyUsername;
    private final String proxyPassword;

    /** The key name of the tag owner used to tag resources - across all Monkeys.
     * NOTE(review): mutable public static; it is reassigned every time a context
     * is constructed. */
    public static String GLOBAL_OWNER_TAGKEY;

    /** protected constructor as the Shell is meant to be subclassed. */
    protected BasicSimianArmyContext(String... configFiles) {
        eventReport = new LinkedList<Event>();
        // Load the config files into props following the provided order;
        // later files override earlier ones.
        for (String configFile : configFiles) {
            loadConfigurationFileIntoProperties(configFile);
        }
        LOGGER.info("The following are properties in the context.");
        for (Entry<Object, Object> prop : properties.entrySet()) {
            Object propertyKey = prop.getKey();
            if (isSafeToLog(propertyKey)) {
                LOGGER.info(String.format("%s = %s", propertyKey, prop.getValue()));
            } else {
                LOGGER.info(String.format("%s = (not shown here)", propertyKey));
            }
        }

        config = new BasicConfiguration(properties);

        account = config.getStr("simianarmy.client.aws.accountKey");
        secret = config.getStr("simianarmy.client.aws.secretKey");
        accountName = config.getStrOrElse("simianarmy.client.aws.accountName", "Default");
        // Prefer the region the process is actually running in, when discoverable.
        String defaultRegion = "us-east-1";
        Region currentRegion = Regions.getCurrentRegion();
        if (currentRegion != null) {
            defaultRegion = currentRegion.getName();
        }
        region = config.getStrOrElse("simianarmy.client.aws.region", defaultRegion);

        GLOBAL_OWNER_TAGKEY = config.getStrOrElse("simianarmy.tags.owner", "owner");

        // Check for and configure optional proxy configuration
        proxyHost = config.getStr("simianarmy.client.aws.proxyHost");
        proxyPort = config.getStr("simianarmy.client.aws.proxyPort");
        proxyUsername = config.getStr("simianarmy.client.aws.proxyUser");
        proxyPassword = config.getStr("simianarmy.client.aws.proxyPassword");
        if ((proxyHost != null) && (proxyPort != null)) {
            awsClientConfig.setProxyHost(proxyHost);
            awsClientConfig.setProxyPort(Integer.parseInt(proxyPort));
            if ((proxyUsername != null) && (proxyPassword != null)) {
                awsClientConfig.setProxyUsername(proxyUsername);
                awsClientConfig.setProxyPassword(proxyPassword);
            }
        }

        assumeRoleArn = config.getStr("simianarmy.client.aws.assumeRoleArn");
        if (assumeRoleArn != null) {
            this.awsCredentialsProvider = new STSAssumeRoleSessionCredentialsProvider(assumeRoleArn, awsClientConfig);
            LOGGER.info("Using STSAssumeRoleSessionCredentialsProvider with assume role " + assumeRoleArn);
        }

        // if credentials are set explicitly make them available to the AWS SDK
        if (StringUtils.isNotBlank(account) && StringUtils.isNotBlank(secret)) {
            this.exportCredentials(account, secret);
        }
        createClient();

        createCalendar();

        createScheduler();

        createRecorder();
    }

    /**
     * Checks whether it is safe to log the property based on the given
     * property key.
     * @param propertyKey The key for the property, expected to be resolvable to a String
     * @return A boolean indicating whether it is safe to log the corresponding property
     */
    protected boolean isSafeToLog(Object propertyKey) {
        String propertyKeyName = propertyKey.toString();
        return !propertyKeyName.contains("secretKey")
                && !propertyKeyName.contains("vsphere.password");
    }

    /** loads the given config on top of the config read by previous calls. */
    protected void loadConfigurationFileIntoProperties(String propertyFileName) {
        String propFile = System.getProperty(propertyFileName, "/" + propertyFileName);
        try {
            LOGGER.info("loading properties file: " + propFile);
            InputStream is = BasicSimianArmyContext.class.getResourceAsStream(propFile);
            if (is == null) {
                // A missing resource used to surface as an opaque NullPointerException
                // (which is.close() in the finally block could then mask); fail with a
                // clear cause instead. Still wrapped in RuntimeException below.
                throw new IllegalArgumentException("properties file not found on classpath: " + propFile);
            }
            try {
                properties.load(is);
            } finally {
                is.close();
            }
        } catch (Exception e) {
            String msg = "Unable to load properties file " + propFile + " set System property \"" + propertyFileName
                    + "\" to valid file";
            LOGGER.error(msg);
            throw new RuntimeException(msg, e);
        }
    }

    /** Creates the scheduler from frequency/threads configuration. */
    private void createScheduler() {
        int freq = (int) config.getNumOrElse("simianarmy.scheduler.frequency", 1);
        TimeUnit freqUnit = TimeUnit.valueOf(config.getStrOrElse("simianarmy.scheduler.frequencyUnit", "HOURS"));
        int threads = (int) config.getNumOrElse("simianarmy.scheduler.threads", MONKEY_THREADS);
        setScheduler(new BasicScheduler(freq, freqUnit, threads));
    }

    /** Creates the recorder; supports RDS, SimpleDB (default) or a custom class. */
    @SuppressWarnings("unchecked")
    private void createRecorder() {
        @SuppressWarnings("rawtypes")
        Class recorderClass = loadClientClass("simianarmy.client.recorder.class");
        if (recorderClass != null && recorderClass.equals(RDSRecorder.class)) {
            String dbDriver = configuration().getStr("simianarmy.recorder.db.driver");
            String dbUser = configuration().getStr("simianarmy.recorder.db.user");
            String dbPass = configuration().getStr("simianarmy.recorder.db.pass");
            String dbUrl = configuration().getStr("simianarmy.recorder.db.url");
            String dbTable = configuration().getStr("simianarmy.recorder.db.table");

            RDSRecorder rdsRecorder = new RDSRecorder(dbDriver, dbUser, dbPass, dbUrl, dbTable, client.region());
            rdsRecorder.init();
            setRecorder(rdsRecorder);
        } else if (recorderClass == null || recorderClass.equals(SimpleDBRecorder.class)) {
            String domain = config.getStrOrElse("simianarmy.recorder.sdb.domain", "SIMIAN_ARMY");
            if (client != null) {
                SimpleDBRecorder simpleDbRecorder = new SimpleDBRecorder(client, domain);
                simpleDbRecorder.init();
                setRecorder(simpleDbRecorder);
            }
        } else {
            setRecorder((MonkeyRecorder) factory(recorderClass));
        }
    }

    /** Creates the calendar; BasicCalendar by default or a custom class. */
    @SuppressWarnings("unchecked")
    private void createCalendar() {
        @SuppressWarnings("rawtypes")
        Class calendarClass = loadClientClass("simianarmy.calendar.class");
        if (calendarClass == null || calendarClass.equals(BasicCalendar.class)) {
            setCalendar(new BasicCalendar(config));
        } else {
            setCalendar((MonkeyCalendar) factory(calendarClass));
        }
    }

    /**
     * Create the specific client with region taken from properties.
     * Override to provide your own client.
     */
    protected void createClient() {
        createClient(region);
    }

    /**
     * Create the specific client within passed region, using the appropriate AWS credentials provider
     * and client configuration.
     * @param clientRegion the AWS region for the client
     */
    protected void createClient(String clientRegion) {
        this.client = new AWSClient(clientRegion, awsCredentialsProvider, awsClientConfig);
        setCloudClient(this.client);
    }

    /**
     * Gets the AWS client.
     * @return the AWS client
     */
    public AWSClient awsClient() {
        return client;
    }

    /**
     * Gets the region.
     * @return the region
     */
    public String region() {
        return region;
    }

    /**
     * Gets the accountName.
     * @return the accountName
     */
    public String accountName() {
        return accountName;
    }

    /** {@inheritDoc} */
    @Override
    public void reportEvent(Event evt) {
        this.eventReport.add(evt);
    }

    /** {@inheritDoc} */
    @Override
    public void resetEventReport() {
        eventReport.clear();
    }

    /** {@inheritDoc} Formats each event as "TYPE id (key:value, ...)" on its own line. */
    @Override
    public String getEventReport() {
        StringBuilder report = new StringBuilder();
        for (Event event : this.eventReport) {
            report.append(String.format("%s %s (", event.eventType(), event.id()));
            boolean isFirst = true;
            for (Entry<String, String> field : event.fields().entrySet()) {
                if (!isFirst) {
                    report.append(", ");
                } else {
                    isFirst = false;
                }
                report.append(String.format("%s:%s", field.getKey(), field.getValue()));
            }
            report.append(")\n");
        }
        return report.toString();
    }

    /**
     * Exports credentials as Java system properties
     * to be picked up by AWS SDK clients.
     * @param accountKey the AWS access key id
     * @param secretKey the AWS secret key
     */
    public void exportCredentials(String accountKey, String secretKey) {
        System.setProperty("aws.accessKeyId", accountKey);
        System.setProperty("aws.secretKey", secretKey);
    }

    /** {@inheritDoc} */
    @Override
    public MonkeyScheduler scheduler() {
        return scheduler;
    }

    /**
     * Sets the scheduler.
     *
     * @param scheduler
     *            the new scheduler
     */
    protected void setScheduler(MonkeyScheduler scheduler) {
        this.scheduler = scheduler;
    }

    /** {@inheritDoc} */
    @Override
    public MonkeyCalendar calendar() {
        return calendar;
    }

    /**
     * Sets the calendar.
     *
     * @param calendar
     *            the new calendar
     */
    protected void setCalendar(MonkeyCalendar calendar) {
        this.calendar = calendar;
    }

    /** {@inheritDoc} */
    @Override
    public MonkeyConfiguration configuration() {
        return config;
    }

    /**
     * Sets the configuration.
     *
     * @param configuration
     *            the new configuration; must be a BasicConfiguration
     */
    protected void setConfiguration(MonkeyConfiguration configuration) {
        this.config = (BasicConfiguration) configuration;
    }

    /** {@inheritDoc} */
    @Override
    public CloudClient cloudClient() {
        return client;
    }

    /**
     * Sets the cloud client.
     *
     * @param cloudClient
     *            the new cloud client; must be an AWSClient
     */
    protected void setCloudClient(CloudClient cloudClient) {
        this.client = (AWSClient) cloudClient;
    }

    /** {@inheritDoc} */
    @Override
    public MonkeyRecorder recorder() {
        return recorder;
    }

    /**
     * Sets the recorder.
     *
     * @param recorder
     *            the new recorder
     */
    protected void setRecorder(MonkeyRecorder recorder) {
        this.recorder = recorder;
    }

    /**
     * Gets the configuration properties.
     * @return the configuration properties
     */
    protected Properties getProperties() {
        return this.properties;
    }

    /**
     * Gets the AWS credentials provider.
     * @return the AWS credentials provider
     */
    public AWSCredentialsProvider getAwsCredentialsProvider() {
        return awsCredentialsProvider;
    }

    /**
     * Gets the AWS client configuration.
     * @return the AWS client configuration
     */
    public ClientConfiguration getAwsClientConfig() {
        return awsClientConfig;
    }

    /**
     * Load a class specified by the config; for drop-in replacements.
     * (Duplicates a method in MonkeyServer; refactor to util?).
     *
     * @param key the configuration key naming the class
     * @return the loaded class or null if the property is unset/empty
     */
    @SuppressWarnings("rawtypes")
    private Class loadClientClass(String key) {
        ClassLoader classLoader = getClass().getClassLoader();
        try {
            String clientClassName = config.getStrOrElse(key, null);
            if (clientClassName == null || clientClassName.isEmpty()) {
                LOGGER.info("using standard class for " + key);
                return null;
            }
            Class newClass = classLoader.loadClass(clientClassName);
            LOGGER.info("using " + key + " loaded " + newClass.getCanonicalName());
            return newClass;
        } catch (ClassNotFoundException e) {
            throw new RuntimeException("Could not load " + key, e);
        }
    }

    /**
     * Generic factory to create monkey collateral types. Prefers a single-arg
     * constructor taking a *Configuration, falling back to the no-arg constructor;
     * returns null (after logging) if instantiation fails.
     *
     * @param <T>
     *            the generic type to create
     * @param implClass
     *            the actual concrete type to instantiate.
     * @return an object of the requested type, or null on failure
     */
    private <T> T factory(Class<T> implClass) {
        try {
            // then find corresponding ctor
            for (Constructor<?> ctor : implClass.getDeclaredConstructors()) {
                Class<?>[] paramTypes = ctor.getParameterTypes();
                if (paramTypes.length != 1) {
                    continue;
                }
                if (paramTypes[0].getName().endsWith("Configuration")) {
                    @SuppressWarnings("unchecked")
                    T impl = (T) ctor.newInstance(config);
                    return impl;
                }
            }
            // Last ditch; try no-arg.
            return implClass.newInstance();
        } catch (Exception e) {
            LOGGER.error("context config error, cannot make an instance of " + implClass.getName(), e);
        }
        return null;
    }
}
| 16,485
| 32.440162
| 118
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/basic/BasicChaosMonkeyContext.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic;
import com.amazonaws.regions.Region;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.simpleemail.AmazonSimpleEmailServiceClient;
import com.netflix.simianarmy.MonkeyConfiguration;
import com.netflix.simianarmy.basic.chaos.BasicChaosEmailNotifier;
import com.netflix.simianarmy.basic.chaos.BasicChaosInstanceSelector;
import com.netflix.simianarmy.chaos.ChaosCrawler;
import com.netflix.simianarmy.chaos.ChaosEmailNotifier;
import com.netflix.simianarmy.chaos.ChaosInstanceSelector;
import com.netflix.simianarmy.chaos.ChaosMonkey;
import com.netflix.simianarmy.client.aws.chaos.ASGChaosCrawler;
import com.netflix.simianarmy.client.aws.chaos.FilteringChaosCrawler;
import com.netflix.simianarmy.client.aws.chaos.TagPredicate;
/**
 * The Class BasicContext. Supplies everything Chaos Monkey needs at runtime,
 * configured from simianarmy.properties, client.properties and chaos.properties.
 * Any properties file can be overridden with -Dsimianarmy.properties=/path/to/my.properties
 */
public class BasicChaosMonkeyContext extends BasicSimianArmyContext implements ChaosMonkey.Context {

    /** The crawler. */
    private ChaosCrawler crawler;

    /** The selector. */
    private ChaosInstanceSelector selector;

    /** The chaos email notifier. */
    private ChaosEmailNotifier chaosEmailNotifier;

    /**
     * Instantiates a new basic context.
     */
    public BasicChaosMonkeyContext() {
        super("simianarmy.properties", "client.properties", "chaos.properties");
        MonkeyConfiguration cfg = configuration();

        // Crawl all ASGs, optionally filtered down to those carrying a configured tag.
        ASGChaosCrawler asgCrawler = new ASGChaosCrawler(awsClient());
        String filterTagKey = cfg.getStrOrElse("simianarmy.chaos.ASGtag.key", "");
        String filterTagValue = cfg.getStrOrElse("simianarmy.chaos.ASGtag.value", "");
        if (filterTagKey.isEmpty()) {
            setChaosCrawler(asgCrawler);
        } else {
            setChaosCrawler(new FilteringChaosCrawler(asgCrawler, new TagPredicate(filterTagKey, filterTagValue)));
        }

        setChaosInstanceSelector(new BasicChaosInstanceSelector());

        // SES client, pinned to an explicit region when one is configured.
        AmazonSimpleEmailServiceClient emailClient = new AmazonSimpleEmailServiceClient(awsClientConfig);
        String emailRegion = cfg.getStr("simianarmy.aws.email.region");
        if (emailRegion != null) {
            emailClient.setRegion(Region.getRegion(Regions.fromName(emailRegion)));
        }
        setChaosEmailNotifier(new BasicChaosEmailNotifier(cfg, emailClient, null));
    }

    /** {@inheritDoc} */
    @Override
    public ChaosCrawler chaosCrawler() {
        return crawler;
    }

    /**
     * Sets the chaos crawler.
     *
     * @param newCrawler
     *            the new chaos crawler
     */
    protected void setChaosCrawler(ChaosCrawler newCrawler) {
        this.crawler = newCrawler;
    }

    /** {@inheritDoc} */
    @Override
    public ChaosInstanceSelector chaosInstanceSelector() {
        return selector;
    }

    /**
     * Sets the chaos instance selector.
     *
     * @param newSelector
     *            the new chaos instance selector
     */
    protected void setChaosInstanceSelector(ChaosInstanceSelector newSelector) {
        this.selector = newSelector;
    }

    /** {@inheritDoc} */
    @Override
    public ChaosEmailNotifier chaosEmailNotifier() {
        return chaosEmailNotifier;
    }

    /**
     * Sets the chaos email notifier.
     *
     * @param notifier
     *            the chaos email notifier
     */
    protected void setChaosEmailNotifier(ChaosEmailNotifier notifier) {
        this.chaosEmailNotifier = notifier;
    }
}
| 4,231
| 35.482759
| 135
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/basic/BasicScheduler.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic;
import java.util.Calendar;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.Monkey;
import com.netflix.simianarmy.MonkeyRecorder.Event;
import com.netflix.simianarmy.MonkeyScheduler;
/**
 * The Class BasicScheduler.
 *
 * <p>Runs each monkey on a fixed cycle. If the recorder shows an event for the monkey
 * within the last cycle (e.g. after a process restart), the first run is delayed so the
 * monkey resumes its original cadence instead of immediately running again.
 */
public class BasicScheduler implements MonkeyScheduler {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(BasicScheduler.class);

    /** Scheduled futures keyed by monkey type name, so a monkey can later be stopped. */
    private final HashMap<String, ScheduledFuture<?>> futures = new HashMap<String, ScheduledFuture<?>>();

    /** The scheduler. */
    private final ScheduledExecutorService scheduler;

    /** How often each monkey runs, in units of {@link #frequencyUnit}. */
    private final int frequency;

    /** The unit for {@link #frequency}. */
    private final TimeUnit frequencyUnit;

    /**
     * Instantiates a new basic scheduler that runs once an hour on a single thread.
     */
    public BasicScheduler() {
        // Delegate so the fields can be final and the defaults live in one place.
        this(1, TimeUnit.HOURS, 1);
    }

    /**
     * Instantiates a new basic scheduler.
     *
     * @param freq
     *            the frequency to run on
     * @param freqUnit
     *            the unit for the freq argument
     * @param concurrent
     *            the concurrent number of threads
     */
    public BasicScheduler(int freq, TimeUnit freqUnit, int concurrent) {
        frequency = freq;
        frequencyUnit = freqUnit;
        scheduler = Executors.newScheduledThreadPool(concurrent);
    }

    /** {@inheritDoc} */
    @Override
    public int frequency() {
        return frequency;
    }

    /** {@inheritDoc} */
    @Override
    public TimeUnit frequencyUnit() {
        return frequencyUnit;
    }

    /**
     * {@inheritDoc}
     *
     * <p>Schedules {@code command} at the configured cycle. When a previous event for this
     * monkey type exists within the last cycle, the initial delay is set to the remainder
     * of that cycle; otherwise the command starts immediately.
     */
    @Override
    public void start(Monkey monkey, Runnable command) {
        long cycle = TimeUnit.MILLISECONDS.convert(frequency(), frequencyUnit());
        // go back 1 cycle to see if we have any events
        Calendar cal = Calendar.getInstance();
        cal.add(Calendar.MILLISECOND, (int) (-1 * cycle));
        Date then = cal.getTime();
        List<Event> events = monkey.context().recorder()
                .findEvents(monkey.type(), Collections.<String, String>emptyMap(), then);
        if (events.isEmpty()) {
            // no events so just run now
            futures.put(monkey.type().name(),
                    scheduler.scheduleWithFixedDelay(command, 0, frequency(), frequencyUnit()));
        } else {
            // we have events, so set the start time to the time left in what would have been the last cycle
            // NOTE(review): uses events.get(0) — assumes the recorder returns the most recent
            // event first; confirm against the recorder implementation.
            Date eventTime = events.get(0).eventTime();
            Date now = new Date();
            long init = cycle - (now.getTime() - eventTime.getTime());
            LOGGER.info("Detected previous events within cycle, setting " + monkey.type().name() + " start to "
                    + new Date(now.getTime() + init));
            futures.put(monkey.type().name(),
                    scheduler.scheduleWithFixedDelay(command, init, cycle, TimeUnit.MILLISECONDS));
        }
    }

    /** {@inheritDoc} */
    @Override
    public void stop(Monkey monkey) {
        // Single map operation instead of containsKey + remove; cancel(true) interrupts
        // the task if it is currently running.
        ScheduledFuture<?> future = futures.remove(monkey.type().name());
        if (future != null) {
            future.cancel(true);
        }
    }
}
| 4,168
| 31.570313
| 111
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/basic/janitor/BasicJanitorMonkey.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic.janitor;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import com.netflix.simianarmy.*;
import com.netflix.simianarmy.MonkeyRecorder.Event;
import com.netflix.simianarmy.janitor.AbstractJanitor;
import com.netflix.simianarmy.janitor.JanitorEmailNotifier;
import com.netflix.simianarmy.janitor.JanitorMonkey;
import com.netflix.simianarmy.janitor.JanitorResourceTracker;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** The basic implementation of Janitor Monkey. */
public class BasicJanitorMonkey extends JanitorMonkey {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(BasicJanitorMonkey.class);

    /** The Constant NS: prefix for all janitor configuration properties. */
    private static final String NS = "simianarmy.janitor.";

    /** The monkey configuration; reloaded at the start of every run. */
    private final MonkeyConfiguration cfg;

    /** The janitors to run, one per enabled resource type. */
    private final List<AbstractJanitor> janitors;

    /** Notifier used to warn resource owners before cleanup. */
    private final JanitorEmailNotifier emailNotifier;

    /** The region this monkey operates in; default region for opt-in/opt-out lookups. */
    private final String region;

    /** The account name, used in the summary email subject. */
    private final String accountName;

    /** Tracker persisting the marked/opt-out state of resources. */
    private final JanitorResourceTracker resourceTracker;

    /** Recorder for janitor events such as opt-in/opt-out. */
    private final MonkeyRecorder recorder;

    /** Calendar used to timestamp recorded events. */
    private final MonkeyCalendar calendar;

    /** Keep track of the number of monkey runs. */
    protected final AtomicLong monkeyRuns = new AtomicLong(0);

    /** Keep track of the number of monkey errors. */
    protected final AtomicLong monkeyErrors = new AtomicLong(0);

    /** Emit a servo signal to track the running monkey: 1 while a run is in progress. */
    protected final AtomicLong monkeyRunning = new AtomicLong(0);

    /**
     * Instantiates a new basic janitor monkey.
     *
     * @param ctx
     *            the monkey context supplying configuration and collaborators
     */
    public BasicJanitorMonkey(Context ctx) {
        super(ctx);
        this.cfg = ctx.configuration();
        janitors = ctx.janitors();
        emailNotifier = ctx.emailNotifier();
        region = ctx.region();
        accountName = ctx.accountName();
        resourceTracker = ctx.resourceTracker();
        recorder = ctx.recorder();
        calendar = ctx.calendar();
        // register this janitor with servo
        Monitors.registerObject("simianarmy.janitor", this);
    }

    /**
     * {@inheritDoc}
     *
     * <p>One full run: mark resources with every janitor, send owner notifications
     * (unless leashed), clean up resources, then optionally email a run summary.
     */
    @Override
    public void doMonkeyBusiness() {
        cfg.reload();
        context().resetEventReport();
        // Guard clause instead of wrapping the whole run in an else-block.
        if (!isJanitorMonkeyEnabled()) {
            return;
        }
        LOGGER.info(String.format("Marking resources with %d janitors.", janitors.size()));
        monkeyRuns.incrementAndGet();
        monkeyRunning.set(1);
        // prepare to run, this just resets the counts so monitoring is sane
        for (AbstractJanitor janitor : janitors) {
            janitor.prepareToRun();
        }
        for (AbstractJanitor janitor : janitors) {
            LOGGER.info(String.format("Running %s janitor for region %s", janitor.getResourceType(), janitor.getRegion()));
            try {
                janitor.markResources();
            } catch (Exception e) {
                monkeyErrors.incrementAndGet();
                LOGGER.error(String.format("Got an exception while %s janitor was marking for region %s", janitor.getResourceType(), janitor.getRegion()), e);
            }
            // Consistency fix: log the resource type the same way as the other messages
            // (previously one message used getResourceType().name()).
            LOGGER.info(String.format("Marked %d resources of type %s in the last run.",
                    janitor.getMarkedResources().size(), janitor.getResourceType()));
            LOGGER.info(String.format("Unmarked %d resources of type %s in the last run.",
                    janitor.getUnmarkedResources().size(), janitor.getResourceType()));
        }
        // Notifications only go out when the monkey is unleashed.
        if (!cfg.getBoolOrElse("simianarmy.janitor.leashed", true)) {
            emailNotifier.sendNotifications();
        } else {
            LOGGER.info("Janitor Monkey is leashed, no notification is sent.");
        }
        LOGGER.info(String.format("Cleaning resources with %d janitors.", janitors.size()));
        for (AbstractJanitor janitor : janitors) {
            try {
                janitor.cleanupResources();
            } catch (Exception e) {
                monkeyErrors.incrementAndGet();
                LOGGER.error(String.format("Got an exception while %s janitor was cleaning for region %s", janitor.getResourceType(), janitor.getRegion()), e);
            }
            LOGGER.info(String.format("Cleaned %d resources of type %s in the last run.",
                    janitor.getCleanedResources().size(), janitor.getResourceType()));
            LOGGER.info(String.format("Failed to clean %d resources of type %s in the last run.",
                    janitor.getFailedToCleanResources().size(), janitor.getResourceType()));
        }
        if (cfg.getBoolOrElse(NS + "summaryEmail.enabled", true)) {
            sendJanitorSummaryEmail();
        }
        monkeyRunning.set(0);
    }

    /** {@inheritDoc} */
    @Override
    public Event optInResource(String resourceId) {
        return optInOrOutResource(resourceId, true, region);
    }

    /** {@inheritDoc} */
    @Override
    public Event optOutResource(String resourceId) {
        return optInOrOutResource(resourceId, false, region);
    }

    /** {@inheritDoc} */
    @Override
    public Event optInResource(String resourceId, String resourceRegion) {
        return optInOrOutResource(resourceId, true, resourceRegion);
    }

    /** {@inheritDoc} */
    @Override
    public Event optOutResource(String resourceId, String resourceRegion) {
        return optInOrOutResource(resourceId, false, resourceRegion);
    }

    /**
     * Opts a tracked resource in or out of janitor handling and records the event.
     *
     * @param resourceId the id of the resource
     * @param optIn true to opt the resource in, false to opt it out
     * @param resourceRegion the region of the resource; falls back to the monkey's
     *            region when null
     * @return the recorded event, or null if the resource is not tracked
     */
    private Event optInOrOutResource(String resourceId, boolean optIn, String resourceRegion) {
        // Avoid reassigning the parameter; fall back to the monkey's own region.
        String effectiveRegion = resourceRegion == null ? region : resourceRegion;
        Resource resource = resourceTracker.getResource(resourceId, effectiveRegion);
        if (resource == null) {
            // Unknown resource: nothing to record or update.
            return null;
        }
        EventTypes eventType = optIn ? EventTypes.OPT_IN_RESOURCE : EventTypes.OPT_OUT_RESOURCE;
        long timestamp = calendar.now().getTimeInMillis();
        // The same resource can have multiple events, so we add the timestamp to the id.
        Event evt = recorder.newEvent(Type.JANITOR, eventType, resource, resourceId + "@" + timestamp);
        recorder.recordEvent(evt);
        resource.setOptOutOfJanitor(!optIn);
        resourceTracker.addOrUpdate(resource);
        return evt;
    }

    /**
     * Send a summary email with about the last run of the janitor monkey.
     * Does nothing when no target address is configured; logs and bails when the
     * configured address is invalid.
     */
    protected void sendJanitorSummaryEmail() {
        String summaryEmailTarget = cfg.getStr(NS + "summaryEmail.to");
        if (!StringUtils.isEmpty(summaryEmailTarget)) {
            if (!emailNotifier.isValidEmail(summaryEmailTarget)) {
                LOGGER.error(String.format("The email target address '%s' for Janitor summary email is invalid",
                        summaryEmailTarget));
                return;
            }
            StringBuilder message = new StringBuilder();
            for (AbstractJanitor janitor : janitors) {
                ResourceType resourceType = janitor.getResourceType();
                appendSummary(message, "markings", resourceType, janitor.getMarkedResources(), janitor.getRegion());
                appendSummary(message, "unmarkings", resourceType, janitor.getUnmarkedResources(), janitor.getRegion());
                appendSummary(message, "cleanups", resourceType, janitor.getCleanedResources(), janitor.getRegion());
                appendSummary(message, "cleanup failures", resourceType, janitor.getFailedToCleanResources(),
                        janitor.getRegion());
            }
            String subject = getSummaryEmailSubject();
            emailNotifier.sendEmail(summaryEmailTarget, subject, message.toString());
        }
    }

    /** Appends one per-janitor section (count plus resource-id list) to the summary body. */
    private void appendSummary(StringBuilder message, String summaryName,
                               ResourceType resourceType, Collection<Resource> resources, String janitorRegion) {
        message.append(String.format("Total %s for %s = %d in region %s<br/>",
                summaryName, resourceType.name(), resources.size(), janitorRegion));
        message.append(String.format("List: %s<br/>", printResources(resources)));
    }

    /** Joins the ids of the given resources with commas. */
    private String printResources(Collection<Resource> resources) {
        // Separator-first join instead of a first-element flag.
        StringBuilder sb = new StringBuilder();
        String separator = "";
        for (Resource r : resources) {
            sb.append(separator).append(r.getId());
            separator = ",";
        }
        return sb.toString();
    }

    /**
     * Gets the summary email subject for the last run of janitor monkey.
     * @return the subject of the summary email
     */
    protected String getSummaryEmailSubject() {
        return String.format("Janitor monkey execution summary (%s, %s)", accountName, region);
    }

    /**
     * Handle cleanup error. This has been abstracted so subclasses can decide to continue causing chaos if desired.
     *
     * @param resource
     *            the instance
     * @param e
     *            the exception
     */
    protected void handleCleanupError(Resource resource, Throwable e) {
        String msg = String.format("Failed to clean up %s resource %s with error %s",
                resource.getResourceType(), resource.getId(), e.getMessage());
        LOGGER.error(msg);
        // Preserve the original cause for callers that log or rethrow.
        throw new RuntimeException(msg, e);
    }

    /** Returns true when the janitor monkey is enabled via configuration (default true). */
    private boolean isJanitorMonkeyEnabled() {
        String prop = NS + "enabled";
        if (cfg.getBoolOrElse(prop, true)) {
            return true;
        }
        LOGGER.info("JanitorMonkey disabled, set {}=true", prop);
        return false;
    }

    /** Servo monitor: total number of runs since startup. */
    @Monitor(name="runs", type=DataSourceType.COUNTER)
    public long getMonkeyRuns() {
        return monkeyRuns.get();
    }

    /** Servo monitor: number of errors observed. */
    @Monitor(name="errors", type=DataSourceType.GAUGE)
    public long getMonkeyErrors() {
        return monkeyErrors.get();
    }

    /** Servo monitor: 1 while a run is in progress, 0 otherwise. */
    @Monitor(name="running", type=DataSourceType.GAUGE)
    public long getMonkeyRunning() {
        return monkeyRunning.get();
    }
}
| 11,035
| 37.722807
| 160
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/basic/janitor/BasicJanitorMonkeyContext.java
|
/*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// CHECKSTYLE IGNORE MagicNumberCheck
package com.netflix.simianarmy.basic.janitor;
import com.amazonaws.regions.Region;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.simpleemail.AmazonSimpleEmailServiceClient;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.netflix.discovery.DiscoveryClient;
import com.netflix.discovery.guice.EurekaModule;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.MonkeyConfiguration;
import com.netflix.simianarmy.MonkeyRecorder;
import com.netflix.simianarmy.aws.janitor.*;
import com.netflix.simianarmy.aws.janitor.crawler.*;
import com.netflix.simianarmy.aws.janitor.crawler.edda.*;
import com.netflix.simianarmy.aws.janitor.rule.ami.UnusedImageRule;
import com.netflix.simianarmy.aws.janitor.rule.asg.*;
import com.netflix.simianarmy.aws.janitor.rule.elb.OrphanedELBRule;
import com.netflix.simianarmy.aws.janitor.rule.generic.TagValueExclusionRule;
import com.netflix.simianarmy.aws.janitor.rule.generic.UntaggedRule;
import com.netflix.simianarmy.aws.janitor.rule.instance.OrphanedInstanceRule;
import com.netflix.simianarmy.aws.janitor.rule.launchconfig.OldUnusedLaunchConfigRule;
import com.netflix.simianarmy.aws.janitor.rule.snapshot.NoGeneratedAMIRule;
import com.netflix.simianarmy.aws.janitor.rule.volume.DeleteOnTerminationRule;
import com.netflix.simianarmy.aws.janitor.rule.volume.OldDetachedVolumeRule;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import com.netflix.simianarmy.client.edda.EddaClient;
import com.netflix.simianarmy.janitor.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
* The basic implementation of the context class for Janitor monkey.
*/
public class BasicJanitorMonkeyContext extends BasicSimianArmyContext implements JanitorMonkey.Context {
/** The Constant LOGGER. */
private static final Logger LOGGER = LoggerFactory.getLogger(BasicJanitorMonkeyContext.class);
/** The email notifier. */
private final JanitorEmailNotifier emailNotifier;
private final JanitorResourceTracker janitorResourceTracker;
/** The janitors. */
private final List<AbstractJanitor> janitors;
private final String monkeyRegion;
private final MonkeyCalendar monkeyCalendar;
private final AmazonSimpleEmailServiceClient sesClient;
private final JanitorEmailBuilder janitorEmailBuilder;
private final String defaultEmail;
private final String[] ccEmails;
private final String sourceEmail;
private final String ownerEmailDomain;
private final int daysBeforeTermination;
/**
 * The constructor. Wires the janitor monkey from the property files: chooses the
 * resource tracker (SimpleDB by default, RDS when a JDBC driver is configured),
 * configures the SES client and notification settings, and instantiates one janitor
 * per resource type named in "simianarmy.janitor.enabledResources".
 */
public BasicJanitorMonkeyContext() {
    super("simianarmy.properties", "client.properties", "janitor.properties");
    monkeyRegion = region();
    monkeyCalendar = calendar();
    String resourceDomain = configuration().getStrOrElse("simianarmy.janitor.resources.sdb.domain", "SIMIAN_ARMY");
    Set<String> enabledResourceSet = getEnabledResourceSet();
    String dbDriver = configuration().getStr("simianarmy.recorder.db.driver");
    String dbUser = configuration().getStr("simianarmy.recorder.db.user");
    String dbPass = configuration().getStr("simianarmy.recorder.db.pass");
    String dbUrl = configuration().getStr("simianarmy.recorder.db.url");
    String dbTable = configuration().getStr("simianarmy.janitor.resources.db.table");
    // No JDBC driver configured -> track resources in SimpleDB; otherwise use RDS.
    if (dbDriver == null) {
        janitorResourceTracker = new SimpleDBJanitorResourceTracker(awsClient(), resourceDomain);
    } else {
        RDSJanitorResourceTracker rdsTracker = new RDSJanitorResourceTracker(dbDriver, dbUser, dbPass, dbUrl, dbTable);
        rdsTracker.init();
        janitorResourceTracker = rdsTracker;
    }
    janitorEmailBuilder = new BasicJanitorEmailBuilder();
    sesClient = new AmazonSimpleEmailServiceClient();
    // Optionally route email through a specific SES region instead of the default endpoint.
    if (configuration().getStr("simianarmy.aws.email.region") != null) {
        sesClient.setRegion(Region.getRegion(Regions.fromName(configuration().getStr("simianarmy.aws.email.region"))));
    }
    defaultEmail = configuration().getStrOrElse("simianarmy.janitor.notification.defaultEmail", "");
    ccEmails = StringUtils.split(
            configuration().getStrOrElse("simianarmy.janitor.notification.ccEmails", ""), ",");
    sourceEmail = configuration().getStrOrElse("simianarmy.janitor.notification.sourceEmail", "");
    ownerEmailDomain = configuration().getStrOrElse("simianarmy.janitor.notification.ownerEmailDomain", "");
    daysBeforeTermination =
            (int) configuration().getNumOrElse("simianarmy.janitor.notification.daysBeforeTermination", 3);
    // Notifier must be built after the notification fields above are populated.
    emailNotifier = new JanitorEmailNotifier(getJanitorEmailNotifierContext());
    // One janitor per enabled resource type.
    janitors = new ArrayList<AbstractJanitor>();
    if (enabledResourceSet.contains("ASG")) {
        janitors.add(getASGJanitor());
    }
    if (enabledResourceSet.contains("INSTANCE")) {
        janitors.add(getInstanceJanitor());
    }
    if (enabledResourceSet.contains("EBS_VOLUME")) {
        janitors.add(getEBSVolumeJanitor());
    }
    if (enabledResourceSet.contains("EBS_SNAPSHOT")) {
        janitors.add(getEBSSnapshotJanitor());
    }
    if (enabledResourceSet.contains("LAUNCH_CONFIG")) {
        janitors.add(getLaunchConfigJanitor());
    }
    if (enabledResourceSet.contains("IMAGE")) {
        janitors.add(getImageJanitor());
    }
    if (enabledResourceSet.contains("ELB")) {
        janitors.add(getELBJanitor());
    }
}
/**
 * Creates the rule engine shared by every janitor, pre-loading the global
 * tag-value exclusion rule when it is enabled and fully configured.
 *
 * @return a new rule engine, possibly carrying the tag-value exclusion rule
 */
protected JanitorRuleEngine createJanitorRuleEngine() {
    JanitorRuleEngine engine = new BasicJanitorRuleEngine();
    boolean exclusionEnabled =
            configuration().getBoolOrElse("simianarmy.janitor.rule.TagValueExclusionRule.enabled", false);
    if (exclusionEnabled) {
        String tags = configuration().getStr("simianarmy.janitor.rule.TagValueExclusionRule.tags");
        String vals = configuration().getStr("simianarmy.janitor.rule.TagValueExclusionRule.vals");
        // Both lists must be present; otherwise the exclusion rule is skipped.
        if (tags != null && vals != null) {
            engine.addExclusionRule(new TagValueExclusionRule(tags.split(","), vals.split(",")));
        }
    }
    return engine;
}
/**
 * Builds the janitor for auto scaling groups. Each rule is guarded by its own
 * "enabled" property; instance validation goes through Eureka/Discovery when
 * "simianarmy.janitor.Eureka.enabled" is true, otherwise a dummy validator is used.
 */
private ASGJanitor getASGJanitor() {
    JanitorRuleEngine ruleEngine = createJanitorRuleEngine();
    boolean discoveryEnabled = configuration().getBoolOrElse("simianarmy.janitor.Eureka.enabled", false);
    ASGInstanceValidator instanceValidator;
    if (discoveryEnabled) {
        LOGGER.info("Initializing Discovery client.");
        Injector injector = Guice.createInjector(new EurekaModule());
        DiscoveryClient discoveryClient = injector.getInstance(DiscoveryClient.class);
        instanceValidator = new DiscoveryASGInstanceValidator(discoveryClient);
    } else {
        LOGGER.info("Discovery/Eureka is not enabled, use the dummy instance validator.");
        instanceValidator = new DummyASGInstanceValidator();
    }
    // Flags old ASGs that contain no active instances.
    if (configuration().getBoolOrElse("simianarmy.janitor.rule.oldEmptyASGRule.enabled", false)) {
        ruleEngine.addRule(new OldEmptyASGRule(monkeyCalendar,
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.oldEmptyASGRule.launchConfigAgeThreshold", 50),
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.oldEmptyASGRule.retentionDays", 10),
                instanceValidator
        ));
    }
    // Flags ASGs that have been suspended for too long.
    if (configuration().getBoolOrElse("simianarmy.janitor.rule.suspendedASGRule.enabled", false)) {
        ruleEngine.addRule(new SuspendedASGRule(monkeyCalendar,
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.suspendedASGRule.suspensionAgeThreshold", 2),
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.suspendedASGRule.retentionDays", 5),
                instanceValidator
        ));
    }
    // Untagged rule applies only when ASG is in its configured resource list.
    if (configuration().getBoolOrElse("simianarmy.janitor.rule.untaggedRule.enabled", false)
            && getUntaggedRuleResourceSet().contains("ASG")) {
        ruleEngine.addRule(new UntaggedRule(monkeyCalendar, getPropertySet("simianarmy.janitor.rule.untaggedRule.requiredTags"),
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.untaggedRule.retentionDaysWithOwner", 3),
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.untaggedRule.retentionDaysWithoutOwner",
                        8)));
    }
    // Crawl through Edda when enabled; otherwise hit AWS directly.
    JanitorCrawler crawler;
    if (configuration().getBoolOrElse("simianarmy.janitor.edda.enabled", false)) {
        crawler = new EddaASGJanitorCrawler(createEddaClient(), awsClient().region());
    } else {
        crawler = new ASGJanitorCrawler(awsClient());
    }
    BasicJanitorContext asgJanitorCtx = new BasicJanitorContext(
            monkeyRegion, ruleEngine, crawler, janitorResourceTracker,
            monkeyCalendar, configuration(), recorder());
    return new ASGJanitor(awsClient(), asgJanitorCtx);
}
/**
 * Builds the janitor for EC2 instances. Registers the orphaned-instance rule and,
 * when configured for INSTANCE resources, the untagged rule; each is guarded by its
 * own "enabled" property.
 */
private InstanceJanitor getInstanceJanitor() {
    JanitorRuleEngine ruleEngine = createJanitorRuleEngine();
    if (configuration().getBoolOrElse("simianarmy.janitor.rule.orphanedInstanceRule.enabled", false)) {
        ruleEngine.addRule(new OrphanedInstanceRule(monkeyCalendar,
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.orphanedInstanceRule.instanceAgeThreshold", 2),
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.orphanedInstanceRule.retentionDaysWithOwner", 3),
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.orphanedInstanceRule.retentionDaysWithoutOwner",
                        8),
                configuration().getBoolOrElse(
                        "simianarmy.janitor.rule.orphanedInstanceRule.opsworks.parentage",
                        false)));
    }
    if (configuration().getBoolOrElse("simianarmy.janitor.rule.untaggedRule.enabled", false)
            && getUntaggedRuleResourceSet().contains("INSTANCE")) {
        ruleEngine.addRule(new UntaggedRule(monkeyCalendar, getPropertySet("simianarmy.janitor.rule.untaggedRule.requiredTags"),
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.untaggedRule.retentionDaysWithOwner", 3),
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.untaggedRule.retentionDaysWithoutOwner",
                        8)));
    }
    // Crawl through Edda when enabled; otherwise hit AWS directly.
    JanitorCrawler instanceCrawler;
    if (configuration().getBoolOrElse("simianarmy.janitor.edda.enabled", false)) {
        instanceCrawler = new EddaInstanceJanitorCrawler(createEddaClient(), awsClient().region());
    } else {
        instanceCrawler = new InstanceJanitorCrawler(awsClient());
    }
    BasicJanitorContext instanceJanitorCtx = new BasicJanitorContext(
            monkeyRegion, ruleEngine, instanceCrawler, janitorResourceTracker,
            monkeyCalendar, configuration(), recorder());
    return new InstanceJanitor(awsClient(), instanceJanitorCtx);
}
/**
 * Builds the janitor for EBS volumes. Registers the old-detached-volume rule, the
 * Edda-only delete-on-termination rule and, when configured for EBS_VOLUME resources,
 * the untagged rule; each is guarded by its own "enabled" property.
 */
private EBSVolumeJanitor getEBSVolumeJanitor() {
    JanitorRuleEngine ruleEngine = createJanitorRuleEngine();
    if (configuration().getBoolOrElse("simianarmy.janitor.rule.oldDetachedVolumeRule.enabled", false)) {
        ruleEngine.addRule(new OldDetachedVolumeRule(monkeyCalendar,
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.oldDetachedVolumeRule.detachDaysThreshold", 30),
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.oldDetachedVolumeRule.retentionDays", 7)));
    }
    // BUG FIX: this check was previously nested inside the oldDetachedVolumeRule block
    // above, so DeleteOnTerminationRule was silently skipped whenever that unrelated
    // rule was disabled, despite having its own "enabled" flag. It is now evaluated
    // independently, matching how every other rule in this class is registered.
    if (configuration().getBoolOrElse("simianarmy.janitor.edda.enabled", false)
            && configuration().getBoolOrElse("simianarmy.janitor.rule.deleteOnTerminationRule.enabled", false)) {
        ruleEngine.addRule(new DeleteOnTerminationRule(monkeyCalendar, (int) configuration().getNumOrElse(
                "simianarmy.janitor.rule.deleteOnTerminationRule.retentionDays", 3)));
    }
    if (configuration().getBoolOrElse("simianarmy.janitor.rule.untaggedRule.enabled", false)
            && getUntaggedRuleResourceSet().contains("EBS_VOLUME")) {
        ruleEngine.addRule(new UntaggedRule(monkeyCalendar, getPropertySet("simianarmy.janitor.rule.untaggedRule.requiredTags"),
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.untaggedRule.retentionDaysWithOwner", 3),
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.untaggedRule.retentionDaysWithoutOwner",
                        8)));
    }
    // Crawl through Edda when enabled; otherwise hit AWS directly.
    JanitorCrawler volumeCrawler;
    if (configuration().getBoolOrElse("simianarmy.janitor.edda.enabled", false)) {
        volumeCrawler = new EddaEBSVolumeJanitorCrawler(createEddaClient(), awsClient().region());
    } else {
        volumeCrawler = new EBSVolumeJanitorCrawler(awsClient());
    }
    BasicJanitorContext volumeJanitorCtx = new BasicJanitorContext(
            monkeyRegion, ruleEngine, volumeCrawler, janitorResourceTracker,
            monkeyCalendar, configuration(), recorder());
    return new EBSVolumeJanitor(awsClient(), volumeJanitorCtx);
}
/**
 * Builds the janitor for EBS snapshots. Registers the no-generated-AMI rule and,
 * when configured for EBS_SNAPSHOT resources, the untagged rule; each is guarded
 * by its own "enabled" property.
 */
private EBSSnapshotJanitor getEBSSnapshotJanitor() {
    JanitorRuleEngine ruleEngine = createJanitorRuleEngine();
    if (configuration().getBoolOrElse("simianarmy.janitor.rule.noGeneratedAMIRule.enabled", false)) {
        ruleEngine.addRule(new NoGeneratedAMIRule(monkeyCalendar,
                (int) configuration().getNumOrElse("simianarmy.janitor.rule.noGeneratedAMIRule.ageThreshold", 30),
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.noGeneratedAMIRule.retentionDays", 7),
                configuration().getStrOrElse(
                        "simianarmy.janitor.rule.noGeneratedAMIRule.ownerEmail", null)));
    }
    if (configuration().getBoolOrElse("simianarmy.janitor.rule.untaggedRule.enabled", false)
            && getUntaggedRuleResourceSet().contains("EBS_SNAPSHOT")) {
        ruleEngine.addRule(new UntaggedRule(monkeyCalendar, getPropertySet("simianarmy.janitor.rule.untaggedRule.requiredTags"),
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.untaggedRule.retentionDaysWithOwner", 3),
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.untaggedRule.retentionDaysWithoutOwner",
                        8)));
    }
    // Crawl through Edda when enabled (scoped to the configured owner id);
    // otherwise hit AWS directly.
    JanitorCrawler snapshotCrawler;
    if (configuration().getBoolOrElse("simianarmy.janitor.edda.enabled", false)) {
        snapshotCrawler = new EddaEBSSnapshotJanitorCrawler(
                configuration().getStr("simianarmy.janitor.snapshots.ownerId"),
                createEddaClient(), awsClient().region());
    } else {
        snapshotCrawler = new EBSSnapshotJanitorCrawler(awsClient());
    }
    BasicJanitorContext snapshotJanitorCtx = new BasicJanitorContext(
            monkeyRegion, ruleEngine, snapshotCrawler, janitorResourceTracker,
            monkeyCalendar, configuration(), recorder());
    return new EBSSnapshotJanitor(awsClient(), snapshotJanitorCtx);
}
/**
 * Builds the janitor for launch configurations. Registers the old-unused-launch-config
 * rule and, when configured for LAUNCH_CONFIG resources, the untagged rule; each is
 * guarded by its own "enabled" property.
 */
private LaunchConfigJanitor getLaunchConfigJanitor() {
    JanitorRuleEngine ruleEngine = createJanitorRuleEngine();
    if (configuration().getBoolOrElse("simianarmy.janitor.rule.oldUnusedLaunchConfigRule.enabled", false)) {
        ruleEngine.addRule(new OldUnusedLaunchConfigRule(monkeyCalendar,
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.oldUnusedLaunchConfigRule.ageThreshold", 4),
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.oldUnusedLaunchConfigRule.retentionDays", 3)));
    }
    if (configuration().getBoolOrElse("simianarmy.janitor.rule.untaggedRule.enabled", false)
            && getUntaggedRuleResourceSet().contains("LAUNCH_CONFIG")) {
        ruleEngine.addRule(new UntaggedRule(monkeyCalendar, getPropertySet("simianarmy.janitor.rule.untaggedRule.requiredTags"),
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.untaggedRule.retentionDaysWithOwner", 3),
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.untaggedRule.retentionDaysWithoutOwner",
                        8)));
    }
    // Crawl through Edda when enabled; otherwise hit AWS directly.
    JanitorCrawler crawler;
    if (configuration().getBoolOrElse("simianarmy.janitor.edda.enabled", false)) {
        crawler = new EddaLaunchConfigJanitorCrawler(
                createEddaClient(), awsClient().region());
    } else {
        crawler = new LaunchConfigJanitorCrawler(awsClient());
    }
    BasicJanitorContext janitorCtx = new BasicJanitorContext(
            monkeyRegion, ruleEngine, crawler, janitorResourceTracker,
            monkeyCalendar, configuration(), recorder());
    return new LaunchConfigJanitor(awsClient(), janitorCtx);
}
/**
 * Builds the janitor for AMIs. This janitor requires Edda: without it, construction
 * fails fast with a RuntimeException. Registers the unused-image rule and, when
 * configured for IMAGE resources, the untagged rule.
 */
private ImageJanitor getImageJanitor() {
    JanitorCrawler crawler;
    if (configuration().getBoolOrElse("simianarmy.janitor.edda.enabled", false)) {
        crawler = new EddaImageJanitorCrawler(createEddaClient(),
                configuration().getStr("simianarmy.janitor.image.ownerId"),
                (int) configuration().getNumOrElse("simianarmy.janitor.image.crawler.lookBackDays", 60),
                awsClient().region());
    } else {
        // Fail fast rather than running with an unsupported crawler.
        throw new RuntimeException("Image Janitor only works when Edda is enabled.");
    }
    JanitorRuleEngine ruleEngine = createJanitorRuleEngine();
    if (configuration().getBoolOrElse("simianarmy.janitor.rule.unusedImageRule.enabled", false)) {
        ruleEngine.addRule(new UnusedImageRule(monkeyCalendar,
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.unusedImageRule.retentionDays", 3),
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.unusedImageRule.lastReferenceDaysThreshold", 45)));
    }
    if (configuration().getBoolOrElse("simianarmy.janitor.rule.untaggedRule.enabled", false)
            && getUntaggedRuleResourceSet().contains("IMAGE")) {
        ruleEngine.addRule(new UntaggedRule(monkeyCalendar, getPropertySet("simianarmy.janitor.rule.untaggedRule.requiredTags"),
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.untaggedRule.retentionDaysWithOwner", 3),
                (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.untaggedRule.retentionDaysWithoutOwner",
                        8)));
    }
    BasicJanitorContext janitorCtx = new BasicJanitorContext(
            monkeyRegion, ruleEngine, crawler, janitorResourceTracker,
            monkeyCalendar, configuration(), recorder());
    return new ImageJanitor(awsClient(), janitorCtx);
}
/**
 * Builds the janitor for elastic load balancers. Registers the orphaned-ELB rule when
 * enabled, and crawls through Edda when it is configured, falling back to AWS otherwise.
 *
 * @return a fully wired ELB janitor
 */
private ELBJanitor getELBJanitor() {
    JanitorRuleEngine ruleEngine = createJanitorRuleEngine();
    if (configuration().getBoolOrElse("simianarmy.janitor.rule.orphanedELBRule.enabled", false)) {
        int retentionDays = (int) configuration().getNumOrElse(
                "simianarmy.janitor.rule.orphanedELBRule.retentionDays", 7);
        ruleEngine.addRule(new OrphanedELBRule(monkeyCalendar, retentionDays));
    }
    JanitorCrawler crawler;
    if (configuration().getBoolOrElse("simianarmy.janitor.edda.enabled", false)) {
        // Owner attribution can come from the Edda application registry, with an
        // optional fallback address when no owner is found.
        boolean useApplicationOwner = configuration().getBoolOrElse(
                "simianarmy.janitor.rule.orphanedELBRule.edda.useApplicationOwner", false);
        String fallbackOwnerEmail = configuration().getStr(
                "simianarmy.janitor.rule.orphanedELBRule.edda.fallbackOwnerEmail");
        crawler = new EddaELBJanitorCrawler(createEddaClient(), fallbackOwnerEmail, useApplicationOwner, awsClient().region());
    } else {
        crawler = new ELBJanitorCrawler(awsClient());
    }
    BasicJanitorContext janitorContext = new BasicJanitorContext(
            monkeyRegion, ruleEngine, crawler, janitorResourceTracker,
            monkeyCalendar, configuration(), recorder());
    return new ELBJanitor(awsClient(), janitorContext);
}
/**
 * Creates an Edda client configured from the janitor properties
 * (timeout, retry count and retry interval, all in milliseconds / counts).
 *
 * @return a new Edda client
 */
private EddaClient createEddaClient() {
    int timeout = (int) configuration().getNumOrElse("simianarmy.janitor.edda.client.timeout", 30000);
    int retries = (int) configuration().getNumOrElse("simianarmy.janitor.edda.client.retries", 3);
    int retryInterval = (int) configuration().getNumOrElse("simianarmy.janitor.edda.client.retryInterval", 1000);
    return new EddaClient(timeout, retries, retryInterval, configuration());
}
/**
 * Reads "simianarmy.janitor.enabledResources" and returns the configured resource
 * type names, trimmed and upper-cased. Empty when the property is blank or absent.
 *
 * @return the set of enabled resource type names
 */
private Set<String> getEnabledResourceSet() {
    Set<String> result = new HashSet<String>();
    String configured = configuration().getStr("simianarmy.janitor.enabledResources");
    if (StringUtils.isBlank(configured)) {
        return result;
    }
    for (String name : configured.split(",")) {
        result.add(name.trim().toUpperCase());
    }
    return result;
}
/**
 * Returns the resource type names the untagged rule applies to, trimmed and
 * upper-cased. Empty when the rule is disabled or no resources are configured.
 *
 * @return the set of resource type names covered by the untagged rule
 */
private Set<String> getUntaggedRuleResourceSet() {
    Set<String> resourceTypes = new HashSet<String>();
    if (!configuration().getBoolOrElse("simianarmy.janitor.rule.untaggedRule.enabled", false)) {
        return resourceTypes;
    }
    String configured = configuration().getStr("simianarmy.janitor.rule.untaggedRule.resources");
    if (StringUtils.isNotBlank(configured)) {
        for (String resourceType : configured.split(",")) {
            resourceTypes.add(resourceType.trim().toUpperCase());
        }
    }
    return resourceTypes;
}
/**
 * Splits a comma-separated configuration property into a set of trimmed values.
 * Empty when the property is blank or absent.
 *
 * @param property the configuration property name to read
 * @return the set of trimmed values
 */
private Set<String> getPropertySet(String property) {
    Set<String> values = new HashSet<String>();
    String raw = configuration().getStr(property);
    if (StringUtils.isBlank(raw)) {
        return values;
    }
    for (String item : raw.split(",")) {
        values.add(item.trim());
    }
    return values;
}
    /**
     * Creates the context consumed by the Janitor email notifier, exposing the SES
     * client, email addresses, region, resource tracker, email builder, and calendar
     * that were configured on this monkey context.
     * @return a fully wired {@link JanitorEmailNotifier.Context}
     */
    public JanitorEmailNotifier.Context getJanitorEmailNotifierContext() {
        return new JanitorEmailNotifier.Context() {
            @Override
            public AmazonSimpleEmailServiceClient sesClient() {
                return sesClient;
            }
            @Override
            public String defaultEmail() {
                return defaultEmail;
            }
            @Override
            public int daysBeforeTermination() {
                return daysBeforeTermination;
            }
            @Override
            public String region() {
                return monkeyRegion;
            }
            @Override
            public JanitorResourceTracker resourceTracker() {
                return janitorResourceTracker;
            }
            @Override
            public JanitorEmailBuilder emailBuilder() {
                return janitorEmailBuilder;
            }
            @Override
            public MonkeyCalendar calendar() {
                return monkeyCalendar;
            }
            @Override
            public String[] ccEmails() {
                return ccEmails;
            }
            @Override
            public String sourceEmail() {
                return sourceEmail;
            }
            @Override
            public String ownerEmailDomain() {
                // Presumably appended to owner names to form full email addresses
                // — see JanitorEmailNotifier for how it is used.
                return ownerEmailDomain;
            }
        };
    }
    /** {@inheritDoc} */
    @Override
    public List<AbstractJanitor> janitors() {
        // The janitor list is built once when this context is constructed.
        return janitors;
    }
    /** {@inheritDoc} */
    @Override
    public JanitorEmailNotifier emailNotifier() {
        // Notifier instance shared by all janitors created from this context.
        return emailNotifier;
    }
    /** {@inheritDoc} */
    @Override
    public JanitorResourceTracker resourceTracker() {
        return janitorResourceTracker;
    }
    /** The Context class for Janitor. Immutable holder of the collaborators a single
     * janitor needs: rule engine, crawler, resource tracker, calendar, configuration,
     * and event recorder, scoped to one region.
     */
    public static class BasicJanitorContext implements AbstractJanitor.Context {
        private final String region;
        private final JanitorRuleEngine ruleEngine;
        private final JanitorCrawler crawler;
        private final JanitorResourceTracker resourceTracker;
        private final MonkeyCalendar calendar;
        private final MonkeyConfiguration config;
        private final MonkeyRecorder recorder;
        /**
         * Constructor.
         * @param region the region of the janitor
         * @param ruleEngine the rule engine used by the janitor
         * @param crawler the crawler used by the janitor
         * @param resourceTracker the resource tracker used by the janitor
         * @param calendar the calendar used by the janitor
         * @param config the monkey configuration used by the janitor
         * @param recorder the event recorder used by the janitor
         */
        public BasicJanitorContext(String region, JanitorRuleEngine ruleEngine, JanitorCrawler crawler,
                JanitorResourceTracker resourceTracker, MonkeyCalendar calendar, MonkeyConfiguration config,
                MonkeyRecorder recorder) {
            this.region = region;
            this.resourceTracker = resourceTracker;
            this.ruleEngine = ruleEngine;
            this.crawler = crawler;
            this.calendar = calendar;
            this.config = config;
            this.recorder = recorder;
        }
        /** {@inheritDoc} */
        @Override
        public String region() {
            return region;
        }
        /** {@inheritDoc} */
        @Override
        public MonkeyConfiguration configuration() {
            return config;
        }
        /** {@inheritDoc} */
        @Override
        public MonkeyCalendar calendar() {
            return calendar;
        }
        /** {@inheritDoc} */
        @Override
        public JanitorRuleEngine janitorRuleEngine() {
            return ruleEngine;
        }
        /** {@inheritDoc} */
        @Override
        public JanitorCrawler janitorCrawler() {
            return crawler;
        }
        /** {@inheritDoc} */
        @Override
        public JanitorResourceTracker janitorResourceTracker() {
            return resourceTracker;
        }
        /** {@inheritDoc} */
        @Override
        public MonkeyRecorder recorder() {
            return recorder;
        }
    }
}
| 28,517
| 45.827586
| 151
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/basic/janitor/BasicVolumeTaggingMonkeyContext.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic.janitor;
import com.google.common.collect.Lists;
import com.netflix.simianarmy.aws.janitor.VolumeTaggingMonkey;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import com.netflix.simianarmy.client.aws.AWSClient;
import org.apache.commons.lang.StringUtils;
import java.util.Collection;
/** The basic context for the monkey that tags volumes with Janitor meta data.
 */
public class BasicVolumeTaggingMonkeyContext extends BasicSimianArmyContext implements VolumeTaggingMonkey.Context {
    // One AWS client per configured region, populated in the constructor.
    private final Collection<AWSClient> awsClients = Lists.newArrayList();
    /**
     * The constructor.
     */
    public BasicVolumeTaggingMonkeyContext() {
        super("simianarmy.properties", "client.properties", "volumeTagging.properties");
        // region() may be a comma-separated list; create one client per region.
        // NOTE(review): createClient(r) appears to re-point awsClient() at a client
        // for region r — confirm against BasicSimianArmyContext.
        for (String r : StringUtils.split(region(), ",")) {
            createClient(r);
            awsClients.add(awsClient());
        }
    }
    /** {@inheritDoc} */
    @Override
    public Collection<AWSClient> awsClients() {
        return awsClients;
    }
}
| 1,674
| 32.5
| 116
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/basic/janitor/BasicJanitorRuleEngine.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic.janitor;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.janitor.JanitorRuleEngine;
import com.netflix.simianarmy.janitor.Rule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
/**
 * Basic implementation of janitor rule engine that runs all containing rules to decide if a resource should be
 * a candidate of cleanup.
 */
public class BasicJanitorRuleEngine implements JanitorRuleEngine {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(BasicJanitorRuleEngine.class);
    /** The rules to decide if a resource should be a candidate for cleanup. **/
    private final List<Rule> rules;
    /** The rules to decide if a resource should be excluded for cleanup. **/
    private final List<Rule> exclusionRules;
    /**
     * The constructor of JanitorRuleEngine.
     */
    public BasicJanitorRuleEngine() {
        rules = new ArrayList<Rule>();
        exclusionRules = new ArrayList<Rule>();
    }
    /**
     * Decides whether the resource should be a candidate of cleanup based on the underlying rules. If any rule in the
     * rule set thinks the resource should be a candidate of cleanup, the method returns false which indicates that the
     * resource should be marked for cleanup. If multiple rules think the resource should be cleaned up, the rule with
     * the nearest expected termination time fills the termination reason and expected termination time.
     *
     * @param resource
     *            The resource
     * @return true if the resource is valid and should not be a candidate of cleanup based on the underlying rules,
     *         false otherwise.
     */
    @Override
    public boolean isValid(Resource resource) {
        LOGGER.debug(String.format("Checking if resource %s of type %s is a cleanup candidate against %d rules and %d exclusion rules.",
                resource.getId(), resource.getResourceType(), rules.size(), exclusionRules.size()));
        // Exclusion rules short-circuit: a single match keeps the resource out of cleanup entirely.
        for (Rule exclusionRule : exclusionRules) {
            if (exclusionRule.isValid(resource)) {
                LOGGER.info(String.format("Resource %s is not marked as a cleanup candidate because of an exclusion rule.", resource.getId()));
                return true;
            }
        }
        // We create a clone of the resource each time when we try the rule. In the first iteration of the rules
        // we identify the rule with the nearest termination date if there is any rule considers the resource
        // as a cleanup candidate. Then the rule is applied to the original resource.
        Rule nearestRule = null;
        if (rules.size() == 1) {
            // Single-rule fast path: no cloning needed, the rule is evaluated once below.
            nearestRule = rules.get(0);
        } else {
            Date nearestTerminationTime = null;
            for (Rule rule : rules) {
                // Evaluate against a clone so losing rules cannot mutate the real resource.
                Resource clone = resource.cloneResource();
                if (!rule.isValid(clone)) {
                    // A rule that flags the clone without setting an expected termination
                    // time is ignored here and cannot become the nearest rule.
                    if (clone.getExpectedTerminationTime() != null) {
                        if (nearestTerminationTime == null || nearestTerminationTime.after(clone.getExpectedTerminationTime())) {
                            nearestRule = rule;
                            nearestTerminationTime = clone.getExpectedTerminationTime();
                        }
                    }
                }
            }
        }
        // Re-apply the winning rule to the original resource so it fills in the
        // termination reason and expected termination time there.
        if (nearestRule != null && !nearestRule.isValid(resource)) {
            LOGGER.info(String.format("Resource %s is marked as a cleanup candidate.", resource.getId()));
            return false;
        } else {
            LOGGER.info(String.format("Resource %s is not marked as a cleanup candidate.", resource.getId()));
            return true;
        }
    }
    /** {@inheritDoc} */
    @Override
    public BasicJanitorRuleEngine addRule(Rule rule) {
        rules.add(rule);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public BasicJanitorRuleEngine addExclusionRule(Rule rule){
        exclusionRules.add(rule);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public List<Rule> getRules() {
        return this.rules;
    }
    /** {@inheritDoc} */
    @Override
    public List<Rule> getExclusionRules() {
        return this.exclusionRules;
    }
}
| 4,999
| 36.313433
| 143
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/basic/janitor/BasicJanitorEmailBuilder.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic.janitor;
import java.util.Collection;
import java.util.Map;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.janitor.JanitorEmailBuilder;
/** The basic implementation of the email builder for Janitor monkey. Renders an HTML
 * table of cleanup-candidate resources per owner email address. */
public class BasicJanitorEmailBuilder extends JanitorEmailBuilder {
    // Column headers for the per-resource HTML table in the notification email.
    private static final String[] TABLE_COLUMNS =
        {"Resource Type", "Resource", "Region", "Description", "Expected Termination Time",
            "Termination Reason", "View/Edit"};
    private static final String AHREF_TEMPLATE = "<a href=\"%s\">%s</a>";
    // Thread-safe joda-time formatter, e.g. "Mon, Jan 01, 2024".
    private static final DateTimeFormatter DATE_FORMATTER = DateTimeFormat.forPattern("EEE, MMM dd, yyyy");
    // Map from owner email address to the resources that should appear in that owner's email.
    private Map<String, Collection<Resource>> emailToResources;
    /** {@inheritDoc} */
    @Override
    public void setEmailToResources(Map<String, Collection<Resource>> emailToResources) {
        Validate.notNull(emailToResources);
        this.emailToResources = emailToResources;
    }
    /** {@inheritDoc} */
    @Override
    protected String getHeader() {
        StringBuilder header = new StringBuilder();
        header.append("<b><h2>Janitor Notifications</h2></b>");
        header.append(
                "The following resource(s) have been marked for cleanup by Janitor monkey "
                        + "as potential unused resources. This is a non-repeating notification.<br/>");
        return header.toString();
    }
    /** {@inheritDoc} */
    @Override
    protected String getEntryTable(String emailAddress) {
        StringBuilder table = new StringBuilder();
        table.append(getHtmlTableHeader(getTableColumns()));
        // setEmailToResources must have been called with this address before building the table.
        for (Resource resource : emailToResources.get(emailAddress)) {
            table.append(getResourceRow(resource));
        }
        table.append("</table>");
        return table.toString();
    }
    /** {@inheritDoc} */
    @Override
    protected String getFooter() {
        return "<br/>Janitor Monkey wiki: https://github.com/Netflix/SimianArmy/wiki<br/>";
    }
    /**
     * Gets the url to view the details of the resource. Returns null here; subclasses
     * may override to link each resource to a viewer.
     * @param resource the resource
     * @return the url to view/edit the resource.
     */
    protected String getResourceUrl(Resource resource) {
        return null;
    }
    /**
     * Gets the string when displaying the resource, e.g. the id.
     * @param resource the resource to display
     * @return the string to represent the resource
     */
    protected String getResourceDisplay(Resource resource) {
        return resource.getId();
    }
    /**
     * Gets the url to edit the Janitor termination of the resource. Returns null here;
     * subclasses may override to provide a view/extend link.
     * @param resource the resource
     * @return the url to edit the Janitor termination the resource.
     */
    protected String getJanitorResourceUrl(Resource resource) {
        return null;
    }
    /** Gets the table columns for the table in the email.
     *
     * @return the array of column names
     */
    protected String[] getTableColumns() {
        return TABLE_COLUMNS;
    }
    /**
     * Gets the row for a resource in the table in the email body. Cells are emitted in
     * the same order as {@link #getTableColumns()}. Assumes the resource has a non-null
     * expected termination time (set when it was marked as a cleanup candidate).
     * @param resource the resource to display
     * @return the table row in the email body
     */
    protected String getResourceRow(Resource resource) {
        StringBuilder message = new StringBuilder();
        message.append("<tr>");
        message.append(getHtmlCell(resource.getResourceType().name()));
        // Link the resource id when a viewer url is available, otherwise plain text.
        String resourceUrl = getResourceUrl(resource);
        if (!StringUtils.isEmpty(resourceUrl)) {
            message.append(getHtmlCell(String.format(AHREF_TEMPLATE, resourceUrl, getResourceDisplay(resource))));
        } else {
            message.append(getHtmlCell(getResourceDisplay(resource)));
        }
        message.append(getHtmlCell(resource.getRegion()));
        if (resource.getDescription() == null) {
            message.append(getHtmlCell(""));
        } else {
            // Turn ';' and ',' separators in the description into HTML line breaks.
            message.append(getHtmlCell(resource.getDescription().replace(";", "<br/>").replace(",", "<br/>")));
        }
        message.append(getHtmlCell(DATE_FORMATTER.print(resource.getExpectedTerminationTime().getTime())));
        message.append(getHtmlCell(resource.getTerminationReason()));
        String janitorUrl = getJanitorResourceUrl(resource);
        if (!StringUtils.isEmpty(janitorUrl)) {
            message.append(getHtmlCell(String.format(AHREF_TEMPLATE, janitorUrl, "View/Extend")));
        } else {
            message.append(getHtmlCell(""));
        }
        message.append("</tr>");
        return message.toString();
    }
}
| 5,298
| 36.316901
| 114
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/basic/conformity/BasicConformityEmailBuilder.java
|
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic.conformity;
import com.google.common.collect.Maps;
import com.netflix.simianarmy.conformity.Cluster;
import com.netflix.simianarmy.conformity.Conformity;
import com.netflix.simianarmy.conformity.ConformityEmailBuilder;
import com.netflix.simianarmy.conformity.ConformityRule;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.Map;
/** The basic implementation of the email builder for Conformity monkey. Renders an HTML
 * table of failed conformity checks per owner email address. */
public class BasicConformityEmailBuilder extends ConformityEmailBuilder {
    // Column headers for the per-cluster HTML table in the notification email.
    private static final String[] TABLE_COLUMNS = {"Cluster", "Region", "Rule", "Failed Components"};
    private static final String AHREF_TEMPLATE = "<a href=\"%s\">%s</a>";
    private static final Logger LOGGER = LoggerFactory.getLogger(BasicConformityEmailBuilder.class);
    // Map from owner email address to the clusters that should appear in that owner's email.
    private Map<String, Collection<Cluster>> emailToClusters;
    // Lookup from rule name to rule, rebuilt on every setEmailToClusters call.
    private final Map<String, ConformityRule> idToRule = Maps.newHashMap();
    /** {@inheritDoc} */
    @Override
    public void setEmailToClusters(Map<String, Collection<Cluster>> clustersByEmail, Collection<ConformityRule> rules) {
        Validate.notNull(clustersByEmail);
        Validate.notNull(rules);
        this.emailToClusters = clustersByEmail;
        idToRule.clear();
        for (ConformityRule rule : rules) {
            idToRule.put(rule.getName(), rule);
        }
    }
    /** {@inheritDoc} */
    @Override
    protected String getHeader() {
        StringBuilder header = new StringBuilder();
        header.append("<b><h2>Conformity Report</h2></b>");
        header.append("The following is a list of failed conformity rules for your cluster(s).<br/>");
        return header.toString();
    }
    /** {@inheritDoc} */
    @Override
    protected String getEntryTable(String emailAddress) {
        StringBuilder table = new StringBuilder();
        table.append(getHtmlTableHeader(getTableColumns()));
        // One row per (cluster, failed conformity) pair; fully conforming checks are skipped.
        for (Cluster cluster : emailToClusters.get(emailAddress)) {
            for (Conformity conformity : cluster.getConformties()) {
                if (!conformity.getFailedComponents().isEmpty()) {
                    table.append(getClusterRow(cluster, conformity));
                }
            }
        }
        table.append("</table>");
        return table.toString();
    }
    /** {@inheritDoc} */
    @Override
    protected String getFooter() {
        return "<br/>Conformity Monkey wiki: https://github.com/Netflix/SimianArmy/wiki<br/>";
    }
    /**
     * Gets the url to view the details of the cluster. Returns null here; subclasses
     * may override to link each cluster to a viewer.
     * @param cluster the cluster
     * @return the url to view/edit the cluster.
     */
    protected String getClusterUrl(Cluster cluster) {
        return null;
    }
    /**
     * Gets the string when displaying the cluster, e.g. the id.
     * @param cluster the cluster to display
     * @return the string to represent the cluster
     */
    protected String getClusterDisplay(Cluster cluster) {
        return cluster.getName();
    }
    /** Gets the table columns for the table in the email.
     *
     * @return the array of column names
     */
    protected String[] getTableColumns() {
        return TABLE_COLUMNS;
    }
    /**
     * Gets the row for a cluster and a failed conformity check in the table in the email body.
     * Cells are emitted in the same order as {@link #getTableColumns()}.
     * @param cluster the cluster to display
     * @param conformity the failed conformity check
     * @return the table row in the email body
     */
    protected String getClusterRow(Cluster cluster, Conformity conformity) {
        StringBuilder message = new StringBuilder();
        message.append("<tr>");
        // Link the cluster name when a viewer url is available, otherwise plain text.
        String clusterUrl = getClusterUrl(cluster);
        if (!StringUtils.isEmpty(clusterUrl)) {
            message.append(getHtmlCell(String.format(AHREF_TEMPLATE, clusterUrl, getClusterDisplay(cluster))));
        } else {
            message.append(getHtmlCell(getClusterDisplay(cluster)));
        }
        message.append(getHtmlCell(cluster.getRegion()));
        // Prefer the rule's human-readable reason; fall back to the raw rule id when
        // the rule is unknown (e.g. removed since the conformity was recorded).
        ConformityRule rule = idToRule.get(conformity.getRuleId());
        String ruleDesc;
        if (rule == null) {
            LOGGER.warn(String.format("Not found rule with name %s", conformity.getRuleId()));
            ruleDesc = conformity.getRuleId();
        } else {
            ruleDesc = rule.getNonconformingReason();
        }
        message.append(getHtmlCell(ruleDesc));
        message.append(getHtmlCell(StringUtils.join(conformity.getFailedComponents(), ",")));
        message.append("</tr>");
        return message.toString();
    }
}
| 5,195
| 36.652174
| 120
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/basic/conformity/BasicConformityMonkey.java
|
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic.conformity;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.MonkeyConfiguration;
import com.netflix.simianarmy.conformity.Cluster;
import com.netflix.simianarmy.conformity.ClusterCrawler;
import com.netflix.simianarmy.conformity.ConformityClusterTracker;
import com.netflix.simianarmy.conformity.ConformityEmailNotifier;
import com.netflix.simianarmy.conformity.ConformityMonkey;
import com.netflix.simianarmy.conformity.ConformityRuleEngine;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
/** The basic implementation of Conformity Monkey. Crawls clusters, runs the conformity
 * rule engine against each, persists the results, and sends owner notifications plus an
 * optional summary email. */
public class BasicConformityMonkey extends ConformityMonkey {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(BasicConformityMonkey.class);
    /** The Constant NS. Property namespace prefix for all conformity settings. */
    private static final String NS = "simianarmy.conformity.";
    /** The cfg. */
    private final MonkeyConfiguration cfg;
    private final ClusterCrawler crawler;
    private final ConformityEmailNotifier emailNotifier;
    private final Collection<String> regions = Lists.newArrayList();
    private final ConformityClusterTracker clusterTracker;
    private final MonkeyCalendar calendar;
    private final ConformityRuleEngine ruleEngine;
    /** Flag to indicate whether the monkey is leashed. When leashed, no data is written
     * and no notifications are sent. */
    private boolean leashed;
    /**
     * Clusters that are not conforming in the last check.
     */
    private final Map<String, Collection<Cluster>> nonconformingClusters = Maps.newHashMap();
    /**
     * Clusters that are conforming in the last check.
     */
    private final Map<String, Collection<Cluster>> conformingClusters = Maps.newHashMap();
    /**
     * Clusters that the monkey failed to check for some reason.
     */
    private final Map<String, Collection<Cluster>> failedClusters = Maps.newHashMap();
    /**
     * Clusters that do not exist in the cloud anymore.
     */
    private final Map<String, Collection<Cluster>> nonexistentClusters = Maps.newHashMap();
    /**
     * Instantiates a new basic conformity monkey.
     *
     * @param ctx
     *            the ctx
     */
    public BasicConformityMonkey(Context ctx) {
        super(ctx);
        cfg = ctx.configuration();
        crawler = ctx.clusterCrawler();
        ruleEngine = ctx.ruleEngine();
        emailNotifier = ctx.emailNotifier();
        for (String region : ctx.regions()) {
            regions.add(region);
        }
        clusterTracker = ctx.clusterTracker();
        calendar = ctx.calendar();
        leashed = ctx.isLeashed();
    }
    /** {@inheritDoc} */
    @Override
    public void doMonkeyBusiness() {
        cfg.reload();
        context().resetEventReport();
        if (isConformityMonkeyEnabled()) {
            // Reset the per-run result buckets so stale results don't leak into this run.
            nonconformingClusters.clear();
            conformingClusters.clear();
            failedClusters.clear();
            nonexistentClusters.clear();
            List<Cluster> clusters = crawler.clusters();
            // Index crawled cluster names by region to detect tracked clusters that no
            // longer exist in the cloud.
            // NOTE(review): a crawled cluster whose region is not in 'regions' would NPE
            // here — confirm the crawler only returns clusters for configured regions.
            Map<String, Set<String>> existingClusterNamesByRegion = Maps.newHashMap();
            for (String region : regions) {
                existingClusterNamesByRegion.put(region, new HashSet<String>());
            }
            for (Cluster cluster : clusters) {
                existingClusterNamesByRegion.get(cluster.getRegion()).add(cluster.getName());
            }
            List<Cluster> trackedClusters = clusterTracker.getAllClusters(regions.toArray(new String[regions.size()]));
            for (Cluster trackedCluster : trackedClusters) {
                if (!existingClusterNamesByRegion.get(trackedCluster.getRegion()).contains(trackedCluster.getName())) {
                    addCluster(nonexistentClusters, trackedCluster);
                }
            }
            // Remove tracked entries for clusters that disappeared from the cloud.
            for (String region : regions) {
                Collection<Cluster> toDelete = nonexistentClusters.get(region);
                if (toDelete != null) {
                    clusterTracker.deleteClusters(toDelete.toArray(new Cluster[toDelete.size()]));
                }
            }
            LOGGER.info(String.format("Performing conformity check for %d crawled clusters.", clusters.size()));
            Date now = calendar.now().getTime();
            for (Cluster cluster : clusters) {
                boolean conforming;
                try {
                    conforming = ruleEngine.check(cluster);
                } catch (Exception e) {
                    // A failure for one cluster must not abort the whole run; record it
                    // in the failed bucket and continue with the next cluster.
                    LOGGER.error(String.format("Failed to perform conformity check for cluster %s", cluster.getName()),
                            e);
                    addCluster(failedClusters, cluster);
                    continue;
                }
                cluster.setUpdateTime(now);
                cluster.setConforming(conforming);
                if (conforming) {
                    LOGGER.info(String.format("Cluster %s is conforming", cluster.getName()));
                    addCluster(conformingClusters, cluster);
                } else {
                    LOGGER.info(String.format("Cluster %s is not conforming", cluster.getName()));
                    addCluster(nonconformingClusters, cluster);
                }
                if (!leashed) {
                    LOGGER.info(String.format("Saving cluster %s", cluster.getName()));
                    clusterTracker.addOrUpdate(cluster);
                } else {
                    LOGGER.info(String.format(
                            "The conformity monkey is leashed, no data change is made for cluster %s.",
                            cluster.getName()));
                }
            }
            if (!leashed) {
                emailNotifier.sendNotifications();
            } else {
                LOGGER.info("Conformity monkey is leashed, no notification is sent.");
            }
            // The summary email is sent even when leashed, as long as it is enabled.
            if (cfg.getBoolOrElse(NS + "summaryEmail.enabled", true)) {
                sendConformitySummaryEmail();
            }
        }
    }
    // Adds the cluster to the given region-keyed bucket, creating the region entry lazily.
    private static void addCluster(Map<String, Collection<Cluster>> map, Cluster cluster) {
        Collection<Cluster> clusters = map.get(cluster.getRegion());
        if (clusters == null) {
            clusters = Lists.newArrayList();
            map.put(cluster.getRegion(), clusters);
        }
        clusters.add(cluster);
    }
    /**
     * Send a summary email with about the last run of the conformity monkey.
     * No-op when the "simianarmy.conformity.summaryEmail.to" property is empty
     * or not a valid email address.
     */
    protected void sendConformitySummaryEmail() {
        String summaryEmailTarget = cfg.getStr(NS + "summaryEmail.to");
        if (!StringUtils.isEmpty(summaryEmailTarget)) {
            if (!emailNotifier.isValidEmail(summaryEmailTarget)) {
                LOGGER.error(String.format("The email target address '%s' for Conformity summary email is invalid",
                        summaryEmailTarget));
                return;
            }
            StringBuilder message = new StringBuilder();
            for (String region : regions) {
                // Detailed lists for problem buckets; only a count for conforming clusters.
                appendSummary(message, "nonconforming", nonconformingClusters, region, true);
                appendSummary(message, "failed to check", failedClusters, region, true);
                appendSummary(message, "nonexistent", nonexistentClusters, region, true);
                appendSummary(message, "conforming", conformingClusters, region, false);
            }
            String subject = getSummaryEmailSubject();
            emailNotifier.sendEmail(summaryEmailTarget, subject, message.toString());
        }
    }
    // Appends one bucket's per-region count (and optionally the cluster names) to the summary body.
    private void appendSummary(StringBuilder message, String summaryName,
            Map<String, Collection<Cluster>> regionToClusters, String region, boolean showDetails) {
        Collection<Cluster> clusters = regionToClusters.get(region);
        if (clusters == null) {
            clusters = Lists.newArrayList();
        }
        message.append(String.format("Total %s clusters = %d in region %s<br/>",
                summaryName, clusters.size(), region));
        if (showDetails) {
            List<String> clusterNames = Lists.newArrayList();
            for (Cluster cluster : clusters) {
                clusterNames.add(cluster.getName());
            }
            message.append(String.format("List: %s<br/><br/>", StringUtils.join(clusterNames, ",")));
        }
    }
    /**
     * Gets the summary email subject for the last run of conformity monkey.
     * @return the subject of the summary email
     */
    protected String getSummaryEmailSubject() {
        return String.format("Conformity monkey execution summary (%s)", StringUtils.join(regions, ","));
    }
    // Returns true when the monkey is enabled via "simianarmy.conformity.enabled" (default true).
    private boolean isConformityMonkeyEnabled() {
        String prop = NS + "enabled";
        if (cfg.getBoolOrElse(prop, true)) {
            return true;
        }
        LOGGER.info("Conformity Monkey is disabled, set {}=true", prop);
        return false;
    }
}
| 9,788
| 38.471774
| 119
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/basic/conformity/BasicConformityMonkeyContext.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// CHECKSTYLE IGNORE MagicNumberCheck
package com.netflix.simianarmy.basic.conformity;
import java.util.Collection;
import java.util.Map;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.regions.Region;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.simpleemail.AmazonSimpleEmailServiceClient;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.netflix.discovery.DiscoveryClient;
import com.netflix.discovery.guice.EurekaModule;
import com.netflix.simianarmy.aws.conformity.RDSConformityClusterTracker;
import com.netflix.simianarmy.aws.conformity.SimpleDBConformityClusterTracker;
import com.netflix.simianarmy.aws.conformity.crawler.AWSClusterCrawler;
import com.netflix.simianarmy.aws.conformity.rule.BasicConformityEurekaClient;
import com.netflix.simianarmy.aws.conformity.rule.ConformityEurekaClient;
import com.netflix.simianarmy.aws.conformity.rule.CrossZoneLoadBalancing;
import com.netflix.simianarmy.aws.conformity.rule.InstanceHasHealthCheckUrl;
import com.netflix.simianarmy.aws.conformity.rule.InstanceHasStatusUrl;
import com.netflix.simianarmy.aws.conformity.rule.InstanceInSecurityGroup;
import com.netflix.simianarmy.aws.conformity.rule.InstanceInVPC;
import com.netflix.simianarmy.aws.conformity.rule.InstanceIsHealthyInEureka;
import com.netflix.simianarmy.aws.conformity.rule.InstanceTooOld;
import com.netflix.simianarmy.aws.conformity.rule.SameZonesInElbAndAsg;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.conformity.ClusterCrawler;
import com.netflix.simianarmy.conformity.ConformityClusterTracker;
import com.netflix.simianarmy.conformity.ConformityEmailBuilder;
import com.netflix.simianarmy.conformity.ConformityEmailNotifier;
import com.netflix.simianarmy.conformity.ConformityMonkey;
import com.netflix.simianarmy.conformity.ConformityRule;
import com.netflix.simianarmy.conformity.ConformityRuleEngine;
/**
* The basic implementation of the context class for Conformity monkey.
*/
public class BasicConformityMonkeyContext extends BasicSimianArmyContext implements ConformityMonkey.Context {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(BasicConformityMonkeyContext.class);
    /** The email notifier. */
    private final ConformityEmailNotifier emailNotifier;
    /** Tracker used to persist and read back cluster conformity state (SimpleDB or RDS backed). */
    private final ConformityClusterTracker clusterTracker;
    /** The regions the monkey runs in; initialized to the single configured region. */
    private final Collection<String> regions;
    /** Crawler that discovers clusters per region. */
    private final ClusterCrawler clusterCrawler;
    /** SES client used to send notification emails. */
    private final AmazonSimpleEmailServiceClient sesClient;
    /** Builder that renders the notification email content. */
    private final ConformityEmailBuilder conformityEmailBuilder;
    /** Fallback notification address (may be null if not configured). */
    private final String defaultEmail;
    /** Addresses cc'ed on notifications; empty array when not configured. */
    private final String[] ccEmails;
    /** The "from" address for notifications (may be null if not configured). */
    private final String sourceEmail;
    /** Rule engine holding all enabled conformity rules. */
    private final ConformityRuleEngine ruleEngine;
    /** Read from "simianarmy.conformity.leashed"; defaults to true (leashed). */
    private final boolean leashed;
    /** Map from region name to the AWS client for that region. */
    private final Map<String, AWSClient> regionToAwsClient = Maps.newHashMap();
    /**
     * The constructor. Loads configuration, chooses the cluster tracker backend,
     * registers the enabled conformity rules, and wires up crawling and email
     * notification components.
     */
    public BasicConformityMonkeyContext() {
        super("simianarmy.properties", "client.properties", "conformity.properties");
        regions = Lists.newArrayList(region());
        // By default, the monkey is leashed
        leashed = configuration().getBoolOrElse("simianarmy.conformity.leashed", true);
        LOGGER.info(String.format("Conformity Monkey is running in: %s", regions));
        String sdbDomain = configuration().getStrOrElse("simianarmy.conformity.sdb.domain", "SIMIAN_ARMY");
        String dbDriver = configuration().getStr("simianarmy.recorder.db.driver");
        String dbUser = configuration().getStr("simianarmy.recorder.db.user");
        String dbPass = configuration().getStr("simianarmy.recorder.db.pass");
        String dbUrl = configuration().getStr("simianarmy.recorder.db.url");
        String dbTable = configuration().getStr("simianarmy.conformity.resources.db.table");
        // Backend selection: no JDBC driver configured -> SimpleDB tracker; otherwise RDS tracker.
        if (dbDriver == null) {
            clusterTracker = new SimpleDBConformityClusterTracker(awsClient(), sdbDomain);
        } else {
            RDSConformityClusterTracker rdsClusterTracker = new RDSConformityClusterTracker(dbDriver, dbUser, dbPass, dbUrl, dbTable);
            rdsClusterTracker.init();
            clusterTracker = rdsClusterTracker;
        }
        ruleEngine = new ConformityRuleEngine();
        // The Eureka-dependent rules are only registered when Eureka integration is enabled.
        boolean eurekaEnabled = configuration().getBoolOrElse("simianarmy.conformity.Eureka.enabled", false);
        if (eurekaEnabled) {
            LOGGER.info("Initializing Discovery client.");
            Injector injector = Guice.createInjector(new EurekaModule());
            DiscoveryClient discoveryClient = injector.getInstance(DiscoveryClient.class);
            ConformityEurekaClient conformityEurekaClient = new BasicConformityEurekaClient(discoveryClient);
            if (configuration().getBoolOrElse(
                    "simianarmy.conformity.rule.InstanceIsHealthyInEureka.enabled", false)) {
                ruleEngine.addRule(new InstanceIsHealthyInEureka(conformityEurekaClient));
            }
            if (configuration().getBoolOrElse(
                    "simianarmy.conformity.rule.InstanceHasHealthCheckUrl.enabled", false)) {
                ruleEngine.addRule(new InstanceHasHealthCheckUrl(conformityEurekaClient));
            }
            if (configuration().getBoolOrElse(
                    "simianarmy.conformity.rule.InstanceHasStatusUrl.enabled", false)) {
                ruleEngine.addRule(new InstanceHasStatusUrl(conformityEurekaClient));
            }
        } else {
            LOGGER.info("Discovery/Eureka is not enabled, the conformity rules that need Eureka are not added.");
        }
        // The security-group rule additionally requires a non-blank list of required groups.
        if (configuration().getBoolOrElse(
                "simianarmy.conformity.rule.InstanceInSecurityGroup.enabled", false)) {
            String requiredSecurityGroups = configuration().getStr(
                    "simianarmy.conformity.rule.InstanceInSecurityGroup.requiredSecurityGroups");
            if (!StringUtils.isBlank(requiredSecurityGroups)) {
                ruleEngine.addRule(new InstanceInSecurityGroup(getAwsCredentialsProvider(),
                        StringUtils.split(requiredSecurityGroups, ",")));
            } else {
                LOGGER.info("No required security groups is specified, "
                        + "the conformity rule InstanceInSecurityGroup is ignored.");
            }
        }
        if (configuration().getBoolOrElse(
                "simianarmy.conformity.rule.InstanceTooOld.enabled", false)) {
            // Age threshold defaults to 180 (units defined by the InstanceTooOld rule).
            ruleEngine.addRule(new InstanceTooOld(getAwsCredentialsProvider(), (int) configuration().getNumOrElse(
                    "simianarmy.conformity.rule.InstanceTooOld.instanceAgeThreshold", 180)));
        }
        if (configuration().getBoolOrElse(
                "simianarmy.conformity.rule.SameZonesInElbAndAsg.enabled", false)) {
            // NOTE(review): uses the ruleEngine() accessor while the other branches use the
            // field directly; same object, but worth unifying for consistency.
            ruleEngine().addRule(new SameZonesInElbAndAsg(getAwsCredentialsProvider()));
        }
        if (configuration().getBoolOrElse(
                "simianarmy.conformity.rule.InstanceInVPC.enabled", false)) {
            ruleEngine.addRule(new InstanceInVPC(getAwsCredentialsProvider()));
        }
        if (configuration().getBoolOrElse(
                "simianarmy.conformity.rule.CrossZoneLoadBalancing.enabled", false)) {
            ruleEngine().addRule(new CrossZoneLoadBalancing(getAwsCredentialsProvider()));
        }
        createClient(region());
        regionToAwsClient.put(region(), awsClient());
        clusterCrawler = new AWSClusterCrawler(regionToAwsClient, configuration());
        sesClient = new AmazonSimpleEmailServiceClient();
        // Optionally pin SES to an explicit region, otherwise the client default is used.
        if (configuration().getStr("simianarmy.aws.email.region") != null) {
            sesClient.setRegion(Region.getRegion(Regions.fromName(configuration().getStr("simianarmy.aws.email.region"))));
        }
        defaultEmail = configuration().getStrOrElse("simianarmy.conformity.notification.defaultEmail", null);
        ccEmails = StringUtils.split(
                configuration().getStrOrElse("simianarmy.conformity.notification.ccEmails", ""), ",");
        sourceEmail = configuration().getStrOrElse("simianarmy.conformity.notification.sourceEmail", null);
        conformityEmailBuilder = new BasicConformityEmailBuilder();
        emailNotifier = new ConformityEmailNotifier(getConformityEmailNotifierContext());
    }
    /**
     * Builds the context consumed by {@link ConformityEmailNotifier}, exposing the
     * already-initialized collaborators and the notification configuration.
     *
     * @return a context backed by this monkey context's fields
     */
    public ConformityEmailNotifier.Context getConformityEmailNotifierContext() {
        return new ConformityEmailNotifier.Context() {
            @Override
            public AmazonSimpleEmailServiceClient sesClient() {
                return sesClient;
            }
            @Override
            public int openHour() {
                // Default 0 -> notifications allowed from midnight.
                return (int) configuration().getNumOrElse("simianarmy.conformity.notification.openHour", 0);
            }
            @Override
            public int closeHour() {
                // Default 24 -> notifications allowed until end of day.
                return (int) configuration().getNumOrElse("simianarmy.conformity.notification.closeHour", 24);
            }
            @Override
            public String defaultEmail() {
                return defaultEmail;
            }
            @Override
            public Collection<String> regions() {
                return regions;
            }
            @Override
            public ConformityClusterTracker clusterTracker() {
                return clusterTracker;
            }
            @Override
            public ConformityEmailBuilder emailBuilder() {
                return conformityEmailBuilder;
            }
            @Override
            public String[] ccEmails() {
                return ccEmails;
            }
            @Override
            public Collection<ConformityRule> rules() {
                return ruleEngine.rules();
            }
            @Override
            public String sourceEmail() {
                return sourceEmail;
            }
        };
    }
    /** @return the cluster crawler for the configured regions */
    @Override
    public ClusterCrawler clusterCrawler() {
        return clusterCrawler;
    }
    /** @return the rule engine holding the enabled conformity rules */
    @Override
    public ConformityRuleEngine ruleEngine() {
        return ruleEngine;
    }
    /** {@inheritDoc} */
    @Override
    public ConformityEmailNotifier emailNotifier() {
        return emailNotifier;
    }
    /** @return the regions this monkey operates in */
    @Override
    public Collection<String> regions() {
        return regions;
    }
    /** @return true when the monkey is leashed (the configured default) */
    @Override
    public boolean isLeashed() {
        return leashed;
    }
    /** @return the tracker persisting cluster conformity state */
    @Override
    public ConformityClusterTracker clusterTracker() {
        return clusterTracker;
    }
}
| 11,354
| 39.698925
| 131
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/basic/calendars/BavarianCalendar.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic.calendars;
import com.netflix.simianarmy.MonkeyConfiguration;
import com.netflix.simianarmy.basic.BasicCalendar;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Collection;
// CHECKSTYLE IGNORE MagicNumberCheck
/**
* The Class BavarianCalendar.
*/
public class BavarianCalendar extends BasicCalendar
{
    /**
     * Instantiates a new Bavarian calendar.
     *
     * @param cfg the monkey configuration
     */
    public BavarianCalendar(MonkeyConfiguration cfg)
    {
        super(cfg);
    }
    /** {@inheritDoc} */
    @Override
    protected void loadHolidays(int year) {
        holidays.clear();
        // these aren't all strictly holidays, but days when engineers will likely
        // not be in the office to respond to rampaging monkeys
        // first of all, we need easter sunday's day of year,
        // because some other holidays are calculated from it
        int easter = westernEasterDayOfYear(year);
        // new year
        holidays.addAll(getHolidayWithBridgeDays(year, dayOfYear(year, Calendar.JANUARY, 1)));
        // epiphany
        holidays.addAll(getHolidayWithBridgeDays(year, dayOfYear(year, Calendar.JANUARY, 6)));
        // good friday, always friday, don't need to check if it's bridge day
        holidays.add(easter - 2);
        // easter monday, always monday, don't need to check if it's bridge day
        holidays.add(easter + 1);
        // labor day
        holidays.addAll(getHolidayWithBridgeDays(year, dayOfYear(year, Calendar.MAY, 1)));
        // ascension day
        holidays.addAll(getHolidayWithBridgeDays(year, easter + 39));
        // whit monday, always monday, don't need to check if it's bridge day
        holidays.add(easter + 50);
        // corpus christi; reuse the easter day-of-year computed above instead of
        // recomputing it
        holidays.add(easter + 60);
        // assumption day
        holidays.addAll(getHolidayWithBridgeDays(year, dayOfYear(year, Calendar.AUGUST, 15)));
        // german unity day
        holidays.addAll(getHolidayWithBridgeDays(year, dayOfYear(year, Calendar.OCTOBER, 3)));
        // all saints
        holidays.addAll(getHolidayWithBridgeDays(year, dayOfYear(year, Calendar.NOVEMBER, 1)));
        // monkey goes on christmas vacations between christmas and new year!
        holidays.addAll(getHolidayWithBridgeDays(year, dayOfYear(year, Calendar.DECEMBER, 24)));
        holidays.add(dayOfYear(year, Calendar.DECEMBER, 25));
        holidays.add(dayOfYear(year, Calendar.DECEMBER, 26));
        holidays.add(dayOfYear(year, Calendar.DECEMBER, 27));
        holidays.add(dayOfYear(year, Calendar.DECEMBER, 28));
        holidays.add(dayOfYear(year, Calendar.DECEMBER, 29));
        holidays.add(dayOfYear(year, Calendar.DECEMBER, 30));
        holidays.add(dayOfYear(year, Calendar.DECEMBER, 31));
        // mark the holiday set with the year, so on Jan 1 it will automatically
        // recalculate the holidays for next year
        holidays.add(year);
    }
    /**
     * Returns collection of holidays, including Monday or Friday
     * if given holiday is Tuesday or Thursday.
     *
     * The behaviour to take Monday as day off if official holiday is Tuesday
     * and to take Friday as day off if official holiday is Thursday
     * is specific to [at least] Germany.
     * We call it, literally, "bridge day".
     *
     * @param year the year the holiday falls in
     * @param dayOfYear holiday day of year
     * @return the holiday plus its bridge day, when one applies
     */
    private Collection<Integer> getHolidayWithBridgeDays(int year, int dayOfYear) {
        Calendar holiday = now();
        holiday.set(Calendar.YEAR, year);
        holiday.set(Calendar.DAY_OF_YEAR, dayOfYear);
        int dow = holiday.get(Calendar.DAY_OF_WEEK);
        int mon = holiday.get(Calendar.MONTH);
        int dom = holiday.get(Calendar.DAY_OF_MONTH);
        // We don't want to include Monday if Tuesday is January 1.
        if (dow == Calendar.TUESDAY && dayOfYear != 1) {
            return Arrays.asList(dayOfYear, dayOfYear - 1);
        }
        // We don't want to include Friday if Thursday is December 31.
        if (dow == Calendar.THURSDAY && (mon != Calendar.DECEMBER || dom != 31)) {
            return Arrays.asList(dayOfYear, dayOfYear + 1);
        }
        return Arrays.asList(dayOfYear);
    }
    /**
     * Western easter sunday in year.
     *
     * Pure arithmetic computation from the year alone; presumably a variant of
     * the standard Gregorian ("computus") algorithm - confirm against known
     * easter dates before modifying any of the intermediate terms.
     *
     * @param year
     *            the year
     * @return the day of the year of western easter sunday
     */
    protected int westernEasterDayOfYear(int year) {
        int a = year % 19,
            b = year / 100,
            c = year % 100,
            d = b / 4,
            e = b % 4,
            g = (8 * b + 13) / 25,
            h = (19 * a + b - d - g + 15) % 30,
            j = c / 4,
            k = c % 4,
            m = (a + 11 * h) / 319,
            r = (2 * e + 2 * j - k - h + m + 32) % 7;
        int oneBasedMonth = (h - m + r + 90) / 25;
        int dayOfYear = (h - m + r + oneBasedMonth + 19) % 32;
        // dayOfYear(year, month, day) is inherited from BasicCalendar; month is 0-based.
        return dayOfYear(year, oneBasedMonth - 1, dayOfYear);
    }
}
| 5,660
| 34.829114
| 96
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/basic/chaos/BasicChaosInstanceSelector.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic.chaos;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import com.google.common.collect.Lists;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.chaos.ChaosCrawler.InstanceGroup;
import com.netflix.simianarmy.chaos.ChaosInstanceSelector;
/**
* The Class BasicChaosInstanceSelector.
*/
public class BasicChaosInstanceSelector implements ChaosInstanceSelector {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(BasicChaosInstanceSelector.class);
    /** The Constant RANDOM. */
    private static final Random RANDOM = new Random();
    /**
     * Logger, this is abstracted so subclasses (for testing) can reset logger to make it less verbose.
     * @return the logger
     */
    protected Logger logger() {
        return LOGGER;
    }
    /**
     * Selects instances from the group. The integer part of the probability
     * selects that many instances unconditionally; the fractional remainder
     * selects at most one more instance with that probability.
     *
     * @param group the instance group to select from
     * @param probability the (possibly > 1) selection probability
     * @return the selected instance ids
     */
    /** {@inheritDoc} */
    @Override
    public Collection<String> select(InstanceGroup group, double probability) {
        int n = ((int) probability);
        String selected = selectOneInstance(group, probability - n);
        Collection<String> result = selectNInstances(group.instances(), n, selected);
        if (selected != null) {
            result.add(selected);
        }
        return result;
    }
    /**
     * Randomly picks n instances, excluding the already-selected one.
     *
     * @param instances the candidate instance ids
     * @param n how many instances to pick
     * @param selected an id to exclude (may be null)
     * @return up to n instance ids
     */
    private Collection<String> selectNInstances(Collection<String> instances, int n, String selected) {
        logger().info("Randomly selecting {} from {} instances, excluding {}",
                new Object[] {n, instances.size(), selected});
        List<String> copy = Lists.newArrayList();
        for (String instance : instances) {
            if (!instance.equals(selected)) {
                copy.add(instance);
            }
        }
        if (n >= copy.size()) {
            return copy;
        }
        Collections.shuffle(copy);
        return copy.subList(0, n);
    }
    /**
     * Picks a single instance with the given probability, or null when the
     * probability is disabled (<= 0), the coin flip misses, or the group is empty.
     *
     * @param group the instance group
     * @param probability a value in [0, 1)
     * @return an instance id or null
     */
    private String selectOneInstance(InstanceGroup group, double probability) {
        Validate.isTrue(probability < 1);
        if (probability <= 0) {
            logger().info("Group {} [type {}] has disabled probability: {}",
                    new Object[] {group.name(), group.type(), probability});
            return null;
        }
        // Use the class-level RANDOM for consistency instead of Math.random(),
        // which would lazily create a second hidden Random instance.
        double rand = RANDOM.nextDouble();
        if (rand > probability || group.instances().isEmpty()) {
            logger().info("Group {} [type {}] got lucky: {} > {}",
                    new Object[] {group.name(), group.type(), rand, probability});
            return null;
        }
        return group.instances().get(RANDOM.nextInt(group.instances().size()));
    }
}
| 3,335
| 33.75
| 103
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/basic/chaos/BasicInstanceGroup.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic.chaos;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import com.amazonaws.services.autoscaling.model.TagDescription;
import com.netflix.simianarmy.GroupType;
import com.netflix.simianarmy.chaos.ChaosCrawler.InstanceGroup;
/**
* The Class BasicInstanceGroup.
*/
public class BasicInstanceGroup implements InstanceGroup {
    /** The name. */
    private final String name;
    /** The type. */
    private final GroupType type;
    /** The region. */
    private final String region;
    /** list of the tags of the ASG */
    private final List<TagDescription> tags;
    // Instance ids added via addInstance; kept with the other fields and final
    // so the reference cannot be reassigned.
    private final List<String> list = new LinkedList<String>();
    /**
     * Instantiates a new basic instance group.
     *
     * @param name
     *            the name
     * @param type
     *            the type
     * @param region
     *            the region the group belongs to
     * @param tags
     *            the ASG tags
     */
    public BasicInstanceGroup(String name, GroupType type, String region, List<TagDescription> tags) {
        this.name = name;
        this.type = type;
        this.region = region;
        this.tags = tags;
    }
    /** {@inheritDoc} */
    @Override
    public GroupType type() {
        return type;
    }
    /** {@inheritDoc} */
    @Override
    public String name() {
        return name;
    }
    /** {@inheritDoc} */
    @Override
    public String region() {
        return region;
    }
    /** {@inheritDoc} */
    @Override
    public List<TagDescription> tags() {
        return tags;
    }
    /** {@inheritDoc} */
    @Override
    public List<String> instances() {
        // Read-only view; mutation goes through addInstance.
        return Collections.unmodifiableList(list);
    }
    /** {@inheritDoc} */
    @Override
    public void addInstance(String instance) {
        list.add(instance);
    }
    /** {@inheritDoc} */
    @Override
    public BasicInstanceGroup copyAs(String newName) {
        BasicInstanceGroup newGroup = new BasicInstanceGroup(newName, this.type(), this.region(), this.tags());
        for (String instance: this.instances()) {
            newGroup.addInstance(instance);
        }
        return newGroup;
    }
}
| 2,743
| 24.174312
| 111
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/basic/chaos/CloudFormationChaosMonkey.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic.chaos;
import com.netflix.simianarmy.MonkeyRecorder.Event;
import com.netflix.simianarmy.chaos.ChaosCrawler.InstanceGroup;
import com.netflix.simianarmy.chaos.ChaosType;
/**
* The Class CloudFormationChaosMonkey. Strips out the random string generated by the CloudFormation api in
* the instance group name of the ASG we want to kill instances on
*/
public class CloudFormationChaosMonkey extends BasicChaosMonkey {
    /**
     * Instantiates a new cloud formation chaos monkey.
     * @param ctx
     *            the ctx
     */
    public CloudFormationChaosMonkey(Context ctx) {
        super(ctx);
    }
    /**
     * Checks whether the group is enabled, using the group name with the
     * CloudFormation-generated suffix stripped off.
     */
    @Override
    protected boolean isGroupEnabled(InstanceGroup group) {
        return super.isGroupEnabled(noSuffixInstanceGroup(group));
    }
    /**
     * Terminates the instance, recording the event against the suffix-free
     * group name.
     */
    @Override
    protected Event terminateInstance(InstanceGroup group, String inst, ChaosType chaosType) {
        return super.terminateInstance(noSuffixInstanceGroup(group), inst, chaosType);
    }
    /**
     * Checks the max-termination limit against the suffix-free group name.
     */
    @Override
    protected boolean isMaxTerminationCountExceeded(InstanceGroup group) {
        return super.isMaxTerminationCountExceeded(noSuffixInstanceGroup(group));
    }
    /**
     * Computes the effective probability for the suffix-free group; zero when
     * that group is disabled.
     */
    @Override
    protected double getEffectiveProbability(InstanceGroup group) {
        InstanceGroup stripped = noSuffixInstanceGroup(group);
        return super.isGroupEnabled(stripped) ? getEffectiveProbabilityFromCfg(stripped) : 0;
    }
    /**
     * Returns the lastOptInTimeInMilliseconds parameter for a group omitting the
     * randomly generated suffix.
     */
    @Override
    protected long getLastOptInMilliseconds(InstanceGroup group) {
        return super.getLastOptInMilliseconds(noSuffixInstanceGroup(group));
    }
    /**
     * Return a copy of the instance group removing the randomly generated suffix from
     * its name.
     */
    public InstanceGroup noSuffixInstanceGroup(InstanceGroup group) {
        // Drop the final "-<chars>" segment that CloudFormation appends to names.
        return group.copyAs(group.name().replaceAll("(-)([^-]*$)", ""));
    }
}
| 3,141
| 31.061224
| 107
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/basic/chaos/BasicChaosEmailNotifier.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic.chaos;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.services.simpleemail.AmazonSimpleEmailServiceClient;
import com.netflix.simianarmy.MonkeyConfiguration;
import com.netflix.simianarmy.chaos.ChaosCrawler.InstanceGroup;
import com.netflix.simianarmy.chaos.ChaosEmailNotifier;
import com.netflix.simianarmy.chaos.ChaosType;
/** The basic implementation of the email notifier for Chaos monkey.
*
*/
public class BasicChaosEmailNotifier extends ChaosEmailNotifier {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(BasicChaosEmailNotifier.class);
    /** Monkey configuration backing all notification properties. */
    private final MonkeyConfiguration cfg;
    /** Fallback owner email when a group has none configured. */
    private final String defaultEmail;
    /** Addresses cc'ed on every notification. */
    private final List<String> ccAddresses;
    /** Constructor.
     *
     * @param cfg the monkey configuration
     * @param sesClient the Amazon SES client
     * @param defaultEmail the default email address to notify when the group does not have a
     * owner email specified
     * @param ccAddresses the cc email addresses for notifications
     */
    public BasicChaosEmailNotifier(MonkeyConfiguration cfg, AmazonSimpleEmailServiceClient sesClient,
            String defaultEmail, String... ccAddresses) {
        super(sesClient);
        this.cfg = cfg;
        this.defaultEmail = defaultEmail;
        this.ccAddresses = Arrays.asList(ccAddresses);
    }
    /**
     * Sends an email notification for a termination of instance to a global
     * email address.
     * @param group the instance group
     * @param instanceId the instance id
     * @param chaosType the chosen chaos strategy
     */
    @Override
    public void sendTerminationGlobalNotification(InstanceGroup group, String instanceId, ChaosType chaosType) {
        String to = cfg.getStr("simianarmy.chaos.notification.global.receiverEmail");
        if (StringUtils.isBlank(to)) {
            LOGGER.warn("Global email address was not set, but global email notification was enabled!");
            return;
        }
        LOGGER.info("sending termination notification to global email address {}", to);
        buildAndSendEmail(to, group, instanceId, chaosType);
    }
    /**
     * Sends an email notification for a termination of instance to the group
     * owner's email address.
     * @param group the instance group
     * @param instanceId the instance id
     * @param chaosType the chosen chaos strategy
     */
    @Override
    public void sendTerminationNotification(InstanceGroup group, String instanceId, ChaosType chaosType) {
        String to = getOwnerEmail(group);
        LOGGER.info("sending termination notification to group owner email address {}", to);
        buildAndSendEmail(to, group, instanceId, chaosType);
    }
    /**
     * Gets the owner's email for a instance group.
     * @param group the instance group
     * @return the owner email of the instance group
     */
    protected String getOwnerEmail(InstanceGroup group) {
        String prop = String.format("simianarmy.chaos.%s.%s.ownerEmail", group.type(), group.name());
        String ownerEmail = cfg.getStr(prop);
        if (ownerEmail == null) {
            LOGGER.info(String.format("Property %s is not set, use the default email address %s as"
                    + " the owner email of group %s of type %s",
                    prop, defaultEmail, group.name(), group.type()));
            return defaultEmail;
        } else {
            return ownerEmail;
        }
    }
    /**
     * Builds the body and subject for the email, sends the email.
     * @param group
     *            the instance group
     * @param instanceId
     *            the instance id
     * @param to
     *            the email address to be sent to
     * @param chaosType the chosen chaos strategy
     */
    public void buildAndSendEmail(String to, InstanceGroup group, String instanceId, ChaosType chaosType) {
        String body = buildEmailBody(group, instanceId, chaosType);
        String subject;
        // Optionally reuse the body as the subject line.
        boolean emailSubjectIsBody = cfg.getBoolOrElse(
                "simianarmy.chaos.notification.subject.isBody", false);
        if (emailSubjectIsBody) {
            subject = body;
        } else {
            subject = buildEmailSubject(to);
        }
        sendEmail(to, subject, body);
    }
    /**
     * Builds the subject line, wrapped in the configured prefix/suffix.
     * @param to the recipient address embedded in the subject
     * @return the subject line
     */
    @Override
    public String buildEmailSubject(String to) {
        String emailSubjectPrefix = cfg.getStrOrElse("simianarmy.chaos.notification.subject.prefix", "");
        String emailSubjectSuffix = cfg.getStrOrElse("simianarmy.chaos.notification.subject.suffix", "");
        return String.format("%sChaos Monkey Termination Notification for %s%s",
                emailSubjectPrefix, to, emailSubjectSuffix);
    }
    /**
     * Builds the body for the email.
     * @param group
     *            the instance group
     * @param instanceId
     *            the instance id
     * @param chaosType the chosen chaos strategy
     * @return the created string
     */
    public String buildEmailBody(InstanceGroup group, String instanceId, ChaosType chaosType) {
        String emailBodyPrefix = cfg.getStrOrElse("simianarmy.chaos.notification.body.prefix", "");
        String emailBodySuffix = cfg.getStrOrElse("simianarmy.chaos.notification.body.suffix", "");
        // Accumulate with StringBuilder instead of repeated String concatenation.
        StringBuilder body = new StringBuilder(emailBodyPrefix);
        body.append(String.format("Instance %s of %s %s is being terminated by Chaos monkey.",
                instanceId, group.type(), group.name()));
        if (chaosType != null) {
            body.append("\n");
            body.append(String.format("Chaos type: %s.", chaosType.getKey()));
        }
        body.append(emailBodySuffix);
        return body.toString();
    }
    /** {@inheritDoc} */
    @Override
    public String[] getCcAddresses(String to) {
        // new String[0] is the idiomatic (and on modern JVMs fastest) toArray form.
        return ccAddresses.toArray(new String[0]);
    }
    /**
     * Returns the configured source email, failing fast when it is missing or invalid.
     * @param to the recipient (unused here)
     * @return the validated source address
     * @throws RuntimeException when the source email property is unset or invalid
     */
    @Override
    public String getSourceAddress(String to) {
        String prop = "simianarmy.chaos.notification.sourceEmail";
        String sourceEmail = cfg.getStr(prop);
        if (sourceEmail == null || !isValidEmail(sourceEmail)) {
            String msg = String.format("Property %s is not set or its value is not a valid email.", prop);
            LOGGER.error(msg);
            throw new RuntimeException(msg);
        }
        return sourceEmail;
    }
}
| 7,079
| 37.688525
| 112
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/basic/chaos/BasicChaosMonkey.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic.chaos;
import com.google.common.collect.Lists;
import com.netflix.simianarmy.*;
import com.netflix.simianarmy.MonkeyRecorder.Event;
import com.netflix.simianarmy.chaos.*;
import com.netflix.simianarmy.chaos.ChaosCrawler.InstanceGroup;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.concurrent.TimeUnit;
/**
* The Class BasicChaosMonkey.
*/
public class BasicChaosMonkey extends ChaosMonkey {
/** The Constant LOGGER. */
private static final Logger LOGGER = LoggerFactory.getLogger(BasicChaosMonkey.class);
/** The Constant NS. */
private static final String NS = "simianarmy.chaos.";
/** The cfg. */
private final MonkeyConfiguration cfg;
/** The runs per day. */
private final long runsPerDay;
/** The minimum value of the maxTerminationCountPerday property to be considered non-zero. **/
private static final double MIN_MAX_TERMINATION_COUNT_PER_DAY = 0.001;
private final MonkeyCalendar monkeyCalendar;
// When a mandatory termination is triggered due to the minimum termination limit is breached,
// the value below is used as the termination probability.
private static final double DEFAULT_MANDATORY_TERMINATION_PROBABILITY = 0.5;
private final List<ChaosType> allChaosTypes;
/**
* Instantiates a new basic chaos monkey.
* @param ctx
* the ctx
*/
    public BasicChaosMonkey(ChaosMonkey.Context ctx) {
        super(ctx);
        this.cfg = ctx.configuration();
        this.monkeyCalendar = ctx.calendar();
        // open/close are only used below to derive the length of the working day
        // when the scheduler frequency unit is finer than DAYS.
        Calendar open = monkeyCalendar.now();
        Calendar close = monkeyCalendar.now();
        // NOTE(review): Calendar.HOUR is the 12-hour field; openHour()/closeHour()
        // presumably return 0-23 values, so Calendar.HOUR_OF_DAY looks intended.
        // The close-open difference still comes out right under lenient
        // normalization, but confirm before relying on the absolute times.
        open.set(Calendar.HOUR, monkeyCalendar.openHour());
        close.set(Calendar.HOUR, monkeyCalendar.closeHour());
        // Register every known chaos strategy; each one decides for itself
        // whether it is enabled/applicable at selection time.
        allChaosTypes = Lists.newArrayList();
        allChaosTypes.add(new ShutdownInstanceChaosType(cfg));
        allChaosTypes.add(new BlockAllNetworkTrafficChaosType(cfg));
        allChaosTypes.add(new DetachVolumesChaosType(cfg));
        allChaosTypes.add(new BurnCpuChaosType(cfg));
        allChaosTypes.add(new BurnIoChaosType(cfg));
        allChaosTypes.add(new KillProcessesChaosType(cfg));
        allChaosTypes.add(new NullRouteChaosType(cfg));
        allChaosTypes.add(new FailEc2ChaosType(cfg));
        allChaosTypes.add(new FailDnsChaosType(cfg));
        allChaosTypes.add(new FailDynamoDbChaosType(cfg));
        allChaosTypes.add(new FailS3ChaosType(cfg));
        allChaosTypes.add(new FillDiskChaosType(cfg));
        allChaosTypes.add(new NetworkCorruptionChaosType(cfg));
        allChaosTypes.add(new NetworkLatencyChaosType(cfg));
        allChaosTypes.add(new NetworkLossChaosType(cfg));
        // runsPerDay scales the per-run termination probability so the
        // configured probability applies per day regardless of run frequency.
        TimeUnit freqUnit = ctx.scheduler().frequencyUnit();
        if (TimeUnit.DAYS == freqUnit) {
            runsPerDay = ctx.scheduler().frequency();
        } else {
            // Number of scheduler periods that fit in the open..close window.
            long units = freqUnit.convert(close.getTimeInMillis() - open.getTimeInMillis(), TimeUnit.MILLISECONDS);
            runsPerDay = units / ctx.scheduler().frequency();
        }
    }
/** {@inheritDoc} */
@Override
    public void doMonkeyBusiness() {
        // Start each run with a clean event report and fresh configuration.
        context().resetEventReport();
        cfg.reload();
        if (!isChaosMonkeyEnabled()) {
            return;
        }
        for (InstanceGroup group : context().chaosCrawler().groups()) {
            if (isGroupEnabled(group)) {
                // Skip the whole group when it already hit its termination cap.
                if (isMaxTerminationCountExceeded(group)) {
                    continue;
                }
                // Per-run probability: the configured daily probability divided
                // by the number of runs per day (see constructor).
                double prob = getEffectiveProbability(group);
                Collection<String> instances = context().chaosInstanceSelector().select(group, prob / runsPerDay);
                for (String inst : instances) {
                    // Re-check the cap: terminations inside this loop count too.
                    if (isMaxTerminationCountExceeded(group)) {
                        break;
                    }
                    ChaosType chaosType = pickChaosType(context().cloudClient(), inst);
                    if (chaosType == null) {
                        // This is surprising ... normally we can always just terminate it
                        LOGGER.warn("No chaos type was applicable to the instance: {}", inst);
                        continue;
                    }
                    terminateInstance(group, inst, chaosType);
                }
            }
        }
    }
private ChaosType pickChaosType(CloudClient cloudClient, String instanceId) {
Random random = new Random();
SshConfig sshConfig = new SshConfig(cfg);
ChaosInstance instance = new ChaosInstance(cloudClient, instanceId, sshConfig);
List<ChaosType> applicable = Lists.newArrayList();
for (ChaosType chaosType : allChaosTypes) {
if (chaosType.isEnabled() && chaosType.canApply(instance)) {
applicable.add(chaosType);
}
}
if (applicable.isEmpty()) {
return null;
}
int index = random.nextInt(applicable.size());
return applicable.get(index);
}
    @Override
    public Event terminateNow(String type, String name, ChaosType chaosType)
            throws FeatureNotEnabledException, InstanceGroupNotFoundException {
        Validate.notNull(type);
        Validate.notNull(name);
        // Reload configuration scoped to this group name before checking flags.
        cfg.reload(name);
        if (!isChaosMonkeyEnabled()) {
            String msg = String.format("Chaos monkey is not enabled for group %s [type %s]",
                    name, type);
            LOGGER.info(msg);
            throw new FeatureNotEnabledException(msg);
        }
        // On-demand termination must be explicitly enabled via configuration.
        String prop = NS + "terminateOndemand.enabled";
        if (cfg.getBool(prop)) {
            InstanceGroup group = findInstanceGroup(type, name);
            if (group == null) {
                throw new InstanceGroupNotFoundException(type, name);
            }
            // Probability 1.0 selects exactly one instance from a non-empty group.
            Collection<String> instances = context().chaosInstanceSelector().select(group, 1.0);
            Validate.isTrue(instances.size() <= 1);
            if (instances.size() == 1) {
                return terminateInstance(group, instances.iterator().next(), chaosType);
            } else {
                // Empty group: nothing to terminate.
                throw new NotFoundException(String.format("No instance is found in group %s [type %s]",
                        name, type));
            }
        } else {
            String msg = String.format("Group %s [type %s] does not allow on-demand termination, set %s=true",
                    name, type, prop);
            LOGGER.info(msg);
            throw new FeatureNotEnabledException(msg);
        }
    }
    /**
     * Adds an event of the given type for the instance to the run's event report.
     */
    private void reportEventForSummary(EventTypes eventType, InstanceGroup group, String instanceId) {
        context().reportEvent(createEvent(eventType, group, instanceId));
    }
/**
* Handle termination error. This has been abstracted so subclasses can decide to continue causing chaos if desired.
*
* @param instance
* the instance
* @param e
* the exception
*/
protected void handleTerminationError(String instance, Throwable e) {
LOGGER.error("failed to terminate instance " + instance, e);
throw new RuntimeException("failed to terminate instance " + instance, e);
}
/** {@inheritDoc} */
@Override
public Event recordTermination(InstanceGroup group, String instance, ChaosType chaosType) {
Event evt = context().recorder().newEvent(Type.CHAOS, EventTypes.CHAOS_TERMINATION, group.region(), instance);
evt.addField("groupType", group.type().name());
evt.addField("groupName", group.name());
evt.addField("chaosType", chaosType.getKey());
context().recorder().recordEvent(evt);
return evt;
}
/** {@inheritDoc} */
@Override
public int getPreviousTerminationCount(InstanceGroup group, Date after) {
Map<String, String> query = new HashMap<String, String>();
query.put("groupType", group.type().name());
query.put("groupName", group.name());
List<Event> evts = context().recorder().findEvents(Type.CHAOS, EventTypes.CHAOS_TERMINATION, query, after);
return evts.size();
}
private Event createEvent(EventTypes chaosTermination, InstanceGroup group, String instance) {
Event evt = context().recorder().newEvent(Type.CHAOS, chaosTermination, group.region(), instance);
evt.addField("groupType", group.type().name());
evt.addField("groupName", group.name());
return evt;
}
/**
* Gets the effective probability value, returns 0 if the group is not enabled. Otherwise calls
* getEffectiveProbability.
* @param group
* @return the effective probability value for the instance group
*/
protected double getEffectiveProbability(InstanceGroup group) {
if (!isGroupEnabled(group)) {
return 0;
}
return getEffectiveProbabilityFromCfg(group);
}
/**
* Gets the effective probability value when the monkey processes an instance group, it uses the following
* logic in the order as listed below.
*
* 1) When minimum mandatory termination is enabled, a default non-zero probability is used for opted-in
* groups, if a) the application has been opted in for the last mandatory termination window
* and b) there was no terminations in the last mandatory termination window
* 2) Use the probability configured for the group type and name
* 3) Use the probability configured for the group
* 4) Use 1.0
* @param group
* @return double
*/
protected double getEffectiveProbabilityFromCfg(InstanceGroup group) {
String propName;
if (cfg.getBool(NS + "mandatoryTermination.enabled")) {
String mtwProp = NS + "mandatoryTermination.windowInDays";
int mandatoryTerminationWindowInDays = (int) cfg.getNumOrElse(mtwProp, 0);
if (mandatoryTerminationWindowInDays > 0
&& noTerminationInLastWindow(group, mandatoryTerminationWindowInDays)) {
double mandatoryProb = cfg.getNumOrElse(NS + "mandatoryTermination.defaultProbability",
DEFAULT_MANDATORY_TERMINATION_PROBABILITY);
LOGGER.info("There has been no terminations for group {} [type {}] in the last {} days,"
+ "setting the probability to {} for mandatory termination.",
new Object[]{group.name(), group.type(), mandatoryTerminationWindowInDays, mandatoryProb});
return mandatoryProb;
}
}
propName = "probability";
double prob = getNumFromCfgOrDefault(group, propName, 1.0);
LOGGER.info("Group {} [type {}] enabled [prob {}]", new Object[]{group.name(), group.type(), prob});
return prob;
}
protected double getNumFromCfgOrDefault(InstanceGroup group, String propName, double defaultValue) {
String defaultProp = String.format("%s%s.%s", NS, group.type(), propName);
String prop = String.format("%s%s.%s.%s", NS, group.type(), group.name(), propName);
return cfg.getNumOrElse(prop, cfg.getNumOrElse(defaultProp, defaultValue));
}
protected boolean getBoolFromCfgOrDefault(InstanceGroup group, String propName, boolean defaultValue) {
String defaultProp = String.format("%s%s.%s", NS, group.type(), propName);
String prop = String.format("%s%s.%s.%s", NS, group.type(), group.name(), propName);
return cfg.getBoolOrElse(prop, cfg.getBoolOrElse(defaultProp, defaultValue));
}
/**
* Returns lastOptInTimeInMilliseconds from the .properties file.
*
* @param group
* @return long
*/
protected long getLastOptInMilliseconds(InstanceGroup group) {
String prop = NS + group.type() + "." + group.name() + ".lastOptInTimeInMilliseconds";
long lastOptInTimeInMilliseconds = (long) cfg.getNumOrElse(prop, -1);
return lastOptInTimeInMilliseconds;
}
private boolean noTerminationInLastWindow(InstanceGroup group, int mandatoryTerminationWindowInDays) {
long lastOptInTimeInMilliseconds = getLastOptInMilliseconds(group);
if (lastOptInTimeInMilliseconds < 0) {
return false;
}
Calendar windowStart = monkeyCalendar.now();
windowStart.add(Calendar.DATE, -1 * mandatoryTerminationWindowInDays);
// return true if the window start is after the last opt-in time and
// there has been no termination since the window start
if (windowStart.getTimeInMillis() > lastOptInTimeInMilliseconds
&& getPreviousTerminationCount(group, windowStart.getTime()) <= 0) {
return true;
}
return false;
}
/**
* Checks to see if the given instance group is enabled.
* @param group
* @return boolean
*/
protected boolean isGroupEnabled(InstanceGroup group) {
boolean enabled = getBoolFromCfgOrDefault(group, "enabled", false);
if (enabled) {
return true;
} else {
String prop = NS + group.type() + "." + group.name() + ".enabled";
String defaultProp = NS + group.type() + ".enabled";
LOGGER.info("Group {} [type {}] disabled, set {}=true or {}=true",
new Object[]{group.name(), group.type(), prop, defaultProp});
return false;
}
}
private boolean isChaosMonkeyEnabled() {
String prop = NS + "enabled";
if (cfg.getBoolOrElse(prop, true)) {
return true;
}
LOGGER.info("ChaosMonkey disabled, set {}=true", prop);
return false;
}
private InstanceGroup findInstanceGroup(String type, String name) {
// Calling context().chaosCrawler().groups(name) causes a new crawl to get
// the up to date information for the group name.
for (InstanceGroup group : context().chaosCrawler().groups(name)) {
if (group.type().toString().equals(type) && group.name().equals(name)) {
return group;
}
}
LOGGER.warn("Failed to find instance group for type {} and name {}", type, name);
return null;
}
protected Event terminateInstance(InstanceGroup group, String inst, ChaosType chaosType) {
Validate.notNull(group);
Validate.notEmpty(inst);
String prop = NS + "leashed";
if (cfg.getBoolOrElse(prop, true)) {
LOGGER.info("leashed ChaosMonkey prevented from killing {} from group {} [{}], set {}=false",
new Object[]{inst, group.name(), group.type(), prop});
reportEventForSummary(EventTypes.CHAOS_TERMINATION_SKIPPED, group, inst);
return null;
} else {
try {
Event evt = recordTermination(group, inst, chaosType);
sendTerminationNotification(group, inst, chaosType);
SshConfig sshConfig = new SshConfig(cfg);
ChaosInstance chaosInstance = new ChaosInstance(context().cloudClient(), inst, sshConfig);
chaosType.apply(chaosInstance);
LOGGER.info("Terminated {} from group {} [{}] with {}",
new Object[]{inst, group.name(), group.type(), chaosType.getKey() });
reportEventForSummary(EventTypes.CHAOS_TERMINATION, group, inst);
return evt;
} catch (NotFoundException e) {
LOGGER.warn("Failed to terminate " + inst + ", it does not exist. Perhaps it was already terminated");
reportEventForSummary(EventTypes.CHAOS_TERMINATION_SKIPPED, group, inst);
return null;
} catch (Exception e) {
handleTerminationError(inst, e);
reportEventForSummary(EventTypes.CHAOS_TERMINATION_SKIPPED, group, inst);
return null;
}
}
}
/**
* Checks to see if the maximum termination window has been exceeded.
*
* @param group
* @return boolean
*/
protected boolean isMaxTerminationCountExceeded(InstanceGroup group) {
Validate.notNull(group);
String propName = "maxTerminationsPerDay";
double maxTerminationsPerDay = getNumFromCfgOrDefault(group, propName, 1.0);
if (maxTerminationsPerDay <= MIN_MAX_TERMINATION_COUNT_PER_DAY) {
String prop = String.format("%s%s.%s.%s", NS, group.type(), group.name(), propName);
LOGGER.info("ChaosMonkey is configured to not allow any killing from group {} [{}] "
+ "with max daily count set as {}", new Object[]{group.name(), group.type(), prop});
return true;
} else {
int daysBack = 1;
int maxCount = (int) maxTerminationsPerDay;
if (maxTerminationsPerDay < 1.0) {
daysBack = (int) Math.ceil(1 / maxTerminationsPerDay);
maxCount = 1;
}
Calendar after = monkeyCalendar.now();
after.add(Calendar.DATE, -1 * daysBack);
// Check if the group has exceeded the maximum terminations for the last period
int terminationCount = getPreviousTerminationCount(group, after.getTime());
if (terminationCount >= maxCount) {
LOGGER.info("The count of terminations for group {} [{}] in the last {} days is {},"
+ " equal or greater than the max count threshold {}",
new Object[]{group.name(), group.type(), daysBack, terminationCount, maxCount});
return true;
}
}
return false;
}
@Override
public void sendTerminationNotification(InstanceGroup group, String instance, ChaosType chaosType) {
String propEmailGlobalEnabled = "simianarmy.chaos.notification.global.enabled";
String propEmailGroupEnabled = String.format("%s%s.%s.notification.enabled", NS, group.type(), group.name());
ChaosEmailNotifier notifier = context().chaosEmailNotifier();
if (notifier == null) {
String msg = "Chaos email notifier is not set.";
LOGGER.error(msg);
throw new RuntimeException(msg);
}
if (cfg.getBoolOrElse(propEmailGroupEnabled, false)) {
notifier.sendTerminationNotification(group, instance, chaosType);
}
if (cfg.getBoolOrElse(propEmailGlobalEnabled, false)) {
notifier.sendTerminationGlobalNotification(group, instance, chaosType);
}
}
/**
* {@inheritDoc}
*/
@Override
public List<ChaosType> getChaosTypes() {
return Lists.newArrayList(allChaosTypes);
}
}
| 19,531
| 41.833333
| 120
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/chaos/FailS3ChaosType.java
|
/*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import com.netflix.simianarmy.MonkeyConfiguration;
/**
 * Adds entries to /etc/hosts so that S3 API endpoints are unreachable.
 *
 * Runs as a {@link ScriptChaosType}: the key "FailS3" resolves to the
 * bundled /scripts/fails3.sh script, executed on the instance over SSH.
 */
public class FailS3ChaosType extends ScriptChaosType {
    /**
     * Constructor.
     *
     * @param config
     *            Configuration to use
     */
    public FailS3ChaosType(MonkeyConfiguration config) {
        super(config, "FailS3");
    }
}
| 1,075
| 28.888889
| 79
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/chaos/FillDiskChaosType.java
|
/*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.MonkeyConfiguration;
/**
 * Creates a huge file on the root device so that the disk fills up.
 */
public class FillDiskChaosType extends ScriptChaosType {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(FillDiskChaosType.class);

    /**
     * Enhancement: As with BurnIoChaosType, it would be nice to randomize the volume.
     *
     * coryb suggested this, and proposed this script:
     *
     * nohup dd if=/dev/urandom of=/burn bs=1M count=$(df -ml /burn | awk '/\//{print $2}') iflag=fullblock &
     */

    /**
     * Constructor.
     *
     * @param config
     *            Configuration to use
     */
    public FillDiskChaosType(MonkeyConfiguration config) {
        super(config, "FillDisk");
    }

    /**
     * Applicable only when the superclass allows it and filling the root volume
     * would not incur EBS charges — unless burning money is explicitly enabled.
     */
    @Override
    public boolean canApply(ChaosInstance instance) {
        if (!super.canApply(instance)) {
            return false;
        }
        boolean wouldCostMoney = isRootVolumeEbs(instance) && !isBurnMoneyEnabled();
        if (wouldCostMoney) {
            LOGGER.debug("Root volume is EBS so FillDisk would cost money; skipping");
            return false;
        }
        return true;
    }
}
| 1,908
| 28.828125
| 110
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/chaos/ChaosEmailNotifier.java
|
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import com.amazonaws.services.simpleemail.AmazonSimpleEmailServiceClient;
import com.netflix.simianarmy.aws.AWSEmailNotifier;
import com.netflix.simianarmy.chaos.ChaosCrawler.InstanceGroup;
/**
 * The email notifier for Chaos monkey. Concrete subclasses decide the recipients
 * and message content for per-group and global termination notifications.
 */
public abstract class ChaosEmailNotifier extends AWSEmailNotifier {
    /** Constructor. Currently the notifier is fixed to use Amazon Simple Email
     * Service as the email client. We can relax this restriction when we want
     * to support different email clients.
     *
     * @param sesClient the AWS simple email service client.
     */
    public ChaosEmailNotifier(AmazonSimpleEmailServiceClient sesClient) {
        super(sesClient);
    }
    /**
     * Sends an email notification for a termination of instance to group
     * owner's email address.
     * @param group the instance group
     * @param instance the instance id
     * @param chaosType the chosen chaos strategy
     */
    public abstract void sendTerminationNotification(InstanceGroup group, String instance, ChaosType chaosType);
    /**
     * Sends an email notification for a termination of instance to a global
     * email address.
     * @param group the instance group
     * @param instance the instance id
     * @param chaosType the chosen chaos strategy
     */
    public abstract void sendTerminationGlobalNotification(InstanceGroup group, String instance, ChaosType chaosType);
}
| 2,094
| 35.12069
| 118
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/chaos/DetachVolumesChaosType.java
|
/*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.CloudClient;
import com.netflix.simianarmy.MonkeyConfiguration;
import com.netflix.simianarmy.basic.chaos.BasicChaosMonkey;
/**
 * We force-detach all the EBS volumes.
 *
 * This is supposed to simulate a catastrophic failure of EBS, however the instance will (possibly) still keep running;
 * e.g. it should continue to respond to pings.
 */
public class DetachVolumesChaosType extends ChaosType {
    /** The Constant LOGGER. */
    // BUGFIX: was LoggerFactory.getLogger(BasicChaosMonkey.class), which misattributed
    // these log lines to the monkey instead of this chaos type.
    private static final Logger LOGGER = LoggerFactory.getLogger(DetachVolumesChaosType.class);

    /**
     * Constructor.
     *
     * @param config
     *            Configuration to use
     */
    public DetachVolumesChaosType(MonkeyConfiguration config) {
        super(config, "DetachVolumes");
    }

    /**
     * Strategy can be applied iff there are any non-root EBS volumes attached.
     */
    @Override
    public boolean canApply(ChaosInstance instance) {
        CloudClient cloudClient = instance.getCloudClient();
        String instanceId = instance.getInstanceId();
        // includeRoot=false: the root volume is never considered here.
        List<String> volumes = cloudClient.listAttachedVolumes(instanceId, false);
        if (volumes.isEmpty()) {
            LOGGER.debug("Can't apply strategy: no non-root EBS volumes");
            return false;
        }
        return super.canApply(instance);
    }

    /**
     * Force-detaches all attached non-root EBS volumes from the instance.
     */
    @Override
    public void apply(ChaosInstance instance) {
        CloudClient cloudClient = instance.getCloudClient();
        String instanceId = instance.getInstanceId();
        // IDEA: We could have a strategy where we detach some of the volumes...
        boolean force = true;
        for (String volumeId : cloudClient.listAttachedVolumes(instanceId, false)) {
            cloudClient.detachVolume(instanceId, volumeId, force);
        }
    }
}
| 2,625
| 31.02439
| 119
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/chaos/ScriptChaosType.java
|
/*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import java.io.IOException;
import java.net.URL;
import org.jclouds.compute.domain.ExecResponse;
import org.jclouds.ssh.SshClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Charsets;
import com.google.common.io.Resources;
import com.netflix.simianarmy.MonkeyConfiguration;
/**
 * Base class for chaos types that run a script over JClouds/SSH on the node.
 */
public abstract class ScriptChaosType extends ChaosType {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(ScriptChaosType.class);

    /**
     * Constructor.
     *
     * @param config
     *            Configuration to use
     * @param key
     *            Key for the chaos monkey; also selects the bundled script
     *            /scripts/{key-lowercased}.sh
     */
    public ScriptChaosType(MonkeyConfiguration config, String key) {
        super(config, key);
    }

    /**
     * We can apply the strategy iff we can SSH to the instance.
     */
    @Override
    public boolean canApply(ChaosInstance instance) {
        if (!instance.getSshConfig().isEnabled()) {
            LOGGER.info("Strategy disabled because SSH credentials not set");
            return false;
        }
        if (!instance.canConnectSsh(instance)) {
            LOGGER.warn("Strategy disabled because SSH credentials failed");
            return false;
        }
        return super.canApply(instance);
    }

    /**
     * Uploads and runs the strategy's script on the instance over SSH.
     * A non-zero exit status is logged but not treated as an error.
     */
    @Override
    public void apply(ChaosInstance instance) {
        LOGGER.info("Running script for {} on instance {}", getKey(), instance.getInstanceId());
        // NOTE(review): default-locale toLowerCase; keys are ASCII so this is safe today.
        String filename = getKey().toLowerCase() + ".sh";
        URL url = Resources.getResource(ScriptChaosType.class, "/scripts/" + filename);
        String script;
        try {
            script = Resources.toString(url, Charsets.UTF_8);
        } catch (IOException e) {
            throw new IllegalStateException("Error reading script resource", e);
        }
        // Connect only after the script resource loaded, so a bad resource
        // doesn't cost an SSH session.
        SshClient ssh = instance.connectSsh();
        try {
            ssh.put("/tmp/" + filename, script);
            ExecResponse response = ssh.exec("/bin/bash /tmp/" + filename);
            if (response.getExitStatus() != 0) {
                LOGGER.warn("Got non-zero output from running script: {}", response);
            }
        } finally {
            // BUGFIX: disconnect even when put/exec throws, so sessions are not leaked.
            ssh.disconnect();
        }
    }
}
| 2,974
| 30.648936
| 96
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/chaos/ChaosInstance.java
|
/*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import org.jclouds.domain.LoginCredentials;
import org.jclouds.ssh.SshClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.CloudClient;
/**
 * Wrapper around an instance on which we are going to cause chaos.
 */
public class ChaosInstance {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(ChaosInstance.class);

    private final CloudClient cloudClient;
    private final String instanceId;
    private final SshConfig sshConfig;

    /**
     * Constructor.
     *
     * @param cloudClient
     *            client for cloud access
     * @param instanceId
     *            id of instance on cloud
     * @param sshConfig
     *            SSH configuration to access instance
     */
    public ChaosInstance(CloudClient cloudClient, String instanceId, SshConfig sshConfig) {
        this.cloudClient = cloudClient;
        this.instanceId = instanceId;
        this.sshConfig = sshConfig;
    }

    /**
     * Gets the {@link SshConfig} used to SSH to the instance.
     *
     * @return the {@link SshConfig}
     */
    public SshConfig getSshConfig() {
        return sshConfig;
    }

    /**
     * Gets the {@link CloudClient} used to access the cloud.
     *
     * @return the {@link CloudClient}
     */
    public CloudClient getCloudClient() {
        return cloudClient;
    }

    /**
     * Returns the instance id to identify the instance to the cloud client.
     *
     * @return instance id
     */
    public String getInstanceId() {
        return instanceId;
    }

    /** Cached result of the SSH probe; null until the first probe runs. */
    private Boolean canConnectSsh = null;

    /**
     * Check if the SSH credentials are working.
     *
     * The result is cached for the duration of this object.
     *
     * @param instance unused; the probe always runs against this object — TODO confirm callers
     * @return true iff ssh is configured and able to log on to instance.
     */
    public boolean canConnectSsh(ChaosInstance instance) {
        if (!sshConfig.isEnabled()) {
            return false;
        }
        if (canConnectSsh == null) {
            canConnectSsh = probeSshConnection();
        }
        return canConnectSsh;
    }

    /** Opens and immediately closes an SSH connection; false when the attempt fails. */
    private boolean probeSshConnection() {
        try {
            // It would be nicer to keep this connection open, but then we'd have to be closed.
            SshClient client = connectSsh();
            client.disconnect();
            return true;
        } catch (Exception e) {
            LOGGER.warn("Error making SSH connection to instance", e);
            return false;
        }
    }

    /**
     * Connect to the instance over SSH.
     *
     * @return {@link SshClient} for connection
     */
    public SshClient connectSsh() {
        if (!sshConfig.isEnabled()) {
            throw new IllegalStateException();
        }
        LoginCredentials credentials = sshConfig.getCredentials();
        return cloudClient.connectSsh(instanceId, credentials);
    }
}
| 3,617
| 27.265625
| 99
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/chaos/FailEc2ChaosType.java
|
/*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import com.netflix.simianarmy.MonkeyConfiguration;
/**
 * Adds entries to /etc/hosts so that EC2 API endpoints are unreachable.
 *
 * Runs as a {@link ScriptChaosType}: the key "FailEc2" resolves to the
 * bundled /scripts/failec2.sh script, executed on the instance over SSH.
 */
public class FailEc2ChaosType extends ScriptChaosType {
    /**
     * Constructor.
     *
     * @param config
     *            Configuration to use
     */
    public FailEc2ChaosType(MonkeyConfiguration config) {
        super(config, "FailEc2");
    }
}
| 1,079
| 29
| 79
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/chaos/NetworkLossChaosType.java
|
/*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import com.netflix.simianarmy.MonkeyConfiguration;
/**
 * Introduces network packet loss using traffic-shaping.
 *
 * Runs as a {@link ScriptChaosType}: the key "NetworkLoss" resolves to the
 * bundled /scripts/networkloss.sh script, executed on the instance over SSH.
 */
public class NetworkLossChaosType extends ScriptChaosType {
    /**
     * Constructor.
     *
     * @param config
     *            Configuration to use
     */
    public NetworkLossChaosType(MonkeyConfiguration config) {
        super(config, "NetworkLoss");
    }
}
| 1,075
| 28.888889
| 79
|
java
|
SimianArmy
|
SimianArmy-master/src/main/java/com/netflix/simianarmy/chaos/NetworkLatencyChaosType.java
|
/*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import com.netflix.simianarmy.MonkeyConfiguration;
/**
 * Introduces network latency using traffic-shaping.
 *
 * Runs as a {@link ScriptChaosType}: the key "NetworkLatency" resolves to the
 * bundled /scripts/networklatency.sh script, executed on the instance over SSH.
 */
public class NetworkLatencyChaosType extends ScriptChaosType {
    /**
     * Constructor.
     *
     * @param config
     *            Configuration to use
     */
    public NetworkLatencyChaosType(MonkeyConfiguration config) {
        super(config, "NetworkLatency");
    }
}
| 1,080
| 29.027778
| 79
|
java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.