code
stringlengths
3
1.18M
language
stringclasses
1 value
package org.dodgybits.shuffle.server.service;

import org.dodgybits.shuffle.server.model.AppUser;

/**
 * DAO for {@link AppUser} entities.
 *
 * All CRUD and query behavior is inherited unchanged from the generic
 * {@link ObjectifyDao}; this subclass exists only to bind the type parameter.
 */
public class AppUserDao extends ObjectifyDao<AppUser> {
    // Inherit all methods from generic Dao
}
Java
package org.dodgybits.shuffle.server.service;

import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.lang.reflect.ParameterizedType;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import javax.persistence.Embedded;
import javax.persistence.Transient;

import org.dodgybits.shuffle.server.model.Task;
import org.dodgybits.shuffle.server.model.AppUser;

import com.google.appengine.api.datastore.EntityNotFoundException;
import com.google.web.bindery.requestfactory.server.RequestFactoryServlet;
import com.googlecode.objectify.Key;
import com.googlecode.objectify.ObjectifyService;
import com.googlecode.objectify.Query;
import com.googlecode.objectify.util.DAOBase;

/**
 * Generic DAO for use with Objectify.
 *
 * Subclasses bind the entity type via the generic parameter, e.g.
 * {@code class TaskDao extends ObjectifyDao<Task>}; the concrete class is
 * recovered reflectively in the constructor.
 */
public class ObjectifyDao<T> extends DAOBase {

    /** Field modifiers that exclude a field from query-by-example filtering. */
    static final int BAD_MODIFIERS = Modifier.FINAL | Modifier.STATIC | Modifier.TRANSIENT;

    static {
        // Objectify requires every entity class to be registered before use.
        ObjectifyService.register(Task.class);
        ObjectifyService.register(AppUser.class);
    }

    protected Class<T> clazz;

    /**
     * Infers the concrete entity class from the subclass's generic
     * superclass declaration (the {@code T} in {@code ObjectifyDao<T>}).
     */
    @SuppressWarnings("unchecked")
    public ObjectifyDao() {
        clazz = (Class<T>) ((ParameterizedType) getClass()
                .getGenericSuperclass()).getActualTypeArguments()[0];
    }

    public Key<T> put(T entity) {
        return ofy().put(entity);
    }

    public Map<Key<T>, T> putAll(Iterable<T> entities) {
        return ofy().put(entities);
    }

    public void delete(T entity) {
        ofy().delete(entity);
    }

    public void deleteKey(Key<T> entityKey) {
        ofy().delete(entityKey);
    }

    public void deleteAll(Iterable<T> entities) {
        ofy().delete(entities);
    }

    public void deleteKeys(Iterable<Key<T>> keys) {
        ofy().delete(keys);
    }

    public T get(Long id) throws EntityNotFoundException {
        return ofy().get(this.clazz, id);
    }

    public T get(Key<T> key) throws EntityNotFoundException {
        return ofy().get(key);
    }

    public Map<Key<T>, T> get(Iterable<Key<T>> keys) {
        return ofy().get(keys);
    }

    public List<T> listAll() {
        Query<T> q = ofy().query(clazz);
        return q.list();
    }

    /**
     * Convenience method to get at most one object matching a single property.
     *
     * @param propName  the property to filter on
     * @param propValue the required value
     * @return the matching object, or null if none match
     * @throws RuntimeException if more than one object matches
     */
    public T getByProperty(String propName, Object propValue) {
        Query<T> q = ofy().query(clazz);
        q.filter(propName, propValue);
        return fetchSingleResult(q);
    }

    public List<T> listByProperty(String propName, Object propValue) {
        Query<T> q = ofy().query(clazz);
        q.filter(propName, propValue);
        return q.list();
    }

    public List<Key<T>> listKeysByProperty(String propName, Object propValue) {
        Query<T> q = ofy().query(clazz);
        q.filter(propName, propValue);
        return q.listKeys();
    }

    /**
     * Finds at most one entity whose non-null fields match the example object.
     *
     * @return the matching object, or null if none match
     * @throws RuntimeException if more than one object matches
     */
    public T getByExample(T exampleObj) {
        Query<T> q = buildQueryByExample(exampleObj);
        return fetchSingleResult(q);
    }

    public List<T> listByExample(T exampleObj) {
        Query<T> queryByExample = buildQueryByExample(exampleObj);
        return queryByExample.list();
    }

    public Key<T> getKey(Long id) {
        return new Key<T>(this.clazz, id);
    }

    public Key<T> key(T obj) {
        return ObjectifyService.factory().getKey(obj);
    }

    public List<T> listChildren(Object parent) {
        return ofy().query(clazz).ancestor(parent).list();
    }

    public List<Key<T>> listChildKeys(Object parent) {
        return ofy().query(clazz).ancestor(parent).listKeys();
    }

    /**
     * Fetches the single result of a query, or null when nothing matches.
     * Limits the fetch to two rows so a non-unique query fails fast without
     * pulling the whole result set.
     */
    private T fetchSingleResult(Query<T> q) {
        Iterator<T> fetch = q.limit(2).list().iterator();
        if (!fetch.hasNext()) {
            return null;
        }
        T obj = fetch.next();
        if (fetch.hasNext()) {
            throw new RuntimeException(q.toString() + " returned too many results");
        }
        return obj;
    }

    /**
     * Builds a query whose filters are the non-null, filterable fields of
     * the example object.
     */
    protected Query<T> buildQueryByExample(T exampleObj) {
        Query<T> q = ofy().query(clazz);
        // Add all non-null properties to query filter
        for (Field field : clazz.getDeclaredFields()) {
            // Ignore transient, embedded, array, and collection properties,
            // plus anything static/final/transient by modifier.
            // (The original code tested isArray() twice; the duplicate is removed.)
            if (field.isAnnotationPresent(Transient.class)
                    || field.isAnnotationPresent(Embedded.class)
                    || field.getType().isArray()
                    || Collection.class.isAssignableFrom(field.getType())
                    || (field.getModifiers() & BAD_MODIFIERS) != 0) {
                continue;
            }
            field.setAccessible(true);
            Object value;
            try {
                value = field.get(exampleObj);
            } catch (IllegalArgumentException e) {
                throw new RuntimeException(e);
            } catch (IllegalAccessException e) {
                throw new RuntimeException(e);
            }
            if (value != null) {
                q.filter(field.getName(), value);
            }
        }
        return q;
    }

    /*
     * Application-specific methods to retrieve items owned by a specific user
     */

    /** Lists all entities whose "owner" property is the current user's key. */
    public List<T> listAllForUser() {
        Key<AppUser> userKey = new Key<AppUser>(AppUser.class, getCurrentUser().getId());
        return listByProperty("owner", userKey);
    }

    /** Reads the AppUser that AuthFilter/LoginService stored on the request. */
    private AppUser getCurrentUser() {
        return (AppUser) RequestFactoryServlet.getThreadLocalRequest()
                .getAttribute(LoginService.AUTH_USER);
    }
}
Java
package org.dodgybits.shuffle.server.service; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.dodgybits.shuffle.server.model.AppUser; import org.dodgybits.shuffle.server.servlet.AuthFilter; import com.google.appengine.api.users.UserService; import com.google.appengine.api.users.UserServiceFactory; /** * Server-side class that provides all login-related * operations. Called only from server code. */ public class LoginService { public static final String AUTH_USER = "loggedInUser"; public static AppUser login(HttpServletRequest req, HttpServletResponse res) { UserService userService = UserServiceFactory.getUserService(); // User is logged into GAE // Find or add user in our app Datastore String userEmail = userService.getCurrentUser().getEmail(); AppUser loggedInUser = findUser(userEmail); if (loggedInUser == null) { // Auto-add user loggedInUser = addUser(userEmail); } req.setAttribute(AUTH_USER, loggedInUser); return loggedInUser; } public static AppUser getLoggedInUser() { HttpServletRequest req = AuthFilter.getThreadLocalRequest(); return (AppUser)req.getAttribute(AUTH_USER); } private static AppUser findUser(String userEmail) { AppUserDao userDao = new AppUserDao(); // Query for user by email return userDao.getByProperty("email", userEmail); } private static AppUser addUser(String email) { AppUserDao userDao = new AppUserDao(); AppUser newUser = new AppUser(email); userDao.put(newUser); return newUser; } }
Java
package org.dodgybits.shuffle.server.service;

import java.util.List;

import com.google.appengine.api.datastore.EntityNotFoundException;

import org.dodgybits.shuffle.server.model.AppUser;
import org.dodgybits.shuffle.server.model.Task;

/**
 * DAO for Task entities. Listing and lookup are restricted to tasks owned
 * by the currently logged-in user.
 */
public class TaskDao extends ObjectifyDao<Task> {

    /** Lists only the current user's tasks rather than every task. */
    @Override
    public List<Task> listAll() {
        return listAllForUser();
    }

    /**
     * Wraps put() so as not to return a Key, which RequestFactory can't handle.
     * Stamps the logged-in user as owner before persisting.
     */
    public void save(Task task) {
        task.setOwner(LoginService.getLoggedInUser());
        put(task);
    }

    /** Like {@link #save(Task)}, but echoes the task back to the caller. */
    public Task saveAndReturn(Task task) {
        task.setOwner(LoginService.getLoggedInUser());
        put(task);
        return task;
    }

    public void deleteTask(Task task) {
        delete(task);
    }

    /**
     * Looks up a task by id. Returns null when the task does not exist or
     * belongs to a different user.
     */
    public Task findById(Long id) {
        try {
            Task task = super.get(id);
            AppUser current = LoginService.getLoggedInUser();
            // NOTE(review): this relies on AppUser.equals(); if AppUser uses
            // identity equality, the freshly-loaded owner would never match
            // the request-scoped instance - confirm against AppUser.
            if (task.getOwner().equals(current)) {
                return task;
            }
            // wrong user - bail
            return null;
        } catch (EntityNotFoundException e) {
            // couldn't find task
            return null;
        }
    }
}
Java
/*
 * Copyright 2010 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package org.dodgybits.shuffle.server.servlet;

import java.io.IOException;

import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.dodgybits.shuffle.server.service.LoginService;

import com.google.appengine.api.users.UserService;
import com.google.appengine.api.users.UserServiceFactory;

/**
 * A servlet filter that handles basic GAE user authentication.
 * Based on http://code.google.com/p/google-web-toolkit/source/browse/trunk/samples/expenses/src/main/java/com/google/gwt/sample/gaerequest/server/GaeAuthFilter.java
 */
public class AuthFilter implements Filter {

    private static final ThreadLocal<HttpServletRequest> perThreadRequest =
            new ThreadLocal<HttpServletRequest>();

    /**
     * Returns the thread-local {@link HttpServletRequest}.
     *
     * @return an {@link HttpServletRequest} instance, or null when called
     *         outside a filtered request
     */
    public static HttpServletRequest getThreadLocalRequest() {
        return perThreadRequest.get();
    }

    @Override
    public void init(FilterConfig config) {
    }

    @Override
    public void destroy() {
    }

    @Override
    public void doFilter(ServletRequest servletRequest,
            ServletResponse servletResponse, FilterChain filterChain)
            throws IOException, ServletException {
        UserService userService = UserServiceFactory.getUserService();
        HttpServletRequest request = (HttpServletRequest) servletRequest;
        HttpServletResponse response = (HttpServletResponse) servletResponse;

        perThreadRequest.set(request);
        try {
            if (!userService.isUserLoggedIn()) {
                // User is not logged in to App Engine so redirect to login page
                response.setHeader("login", userService.createLoginURL(request.getRequestURI()));
                response.sendError(HttpServletResponse.SC_UNAUTHORIZED);
                return;
            }
            LoginService.login(request, response);
            filterChain.doFilter(request, response);
        } finally {
            // remove() rather than set(null): set(null) leaves the entry in the
            // thread's ThreadLocalMap, pinning the map slot on pooled threads.
            perThreadRequest.remove();
        }
    }
}
Java
package org.dodgybits.shuffle.server.servlet;

import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.commons.fileupload.FileItemIterator;
import org.apache.commons.fileupload.FileItemStream;
import org.apache.commons.fileupload.FileUploadException;
import org.apache.commons.fileupload.servlet.ServletFileUpload;
import org.apache.commons.fileupload.util.Streams;
import org.dodgybits.shuffle.dto.ShuffleProtos;
import org.dodgybits.shuffle.dto.ShuffleProtos.Catalogue;
import org.dodgybits.shuffle.server.model.Task;
import org.dodgybits.shuffle.server.service.TaskDao;

/**
 * Accepts a multipart upload of a protobuf {@link Catalogue} backup and
 * restores every task in it for the current user.
 */
@SuppressWarnings("serial")
public class RestoreBackupServlet extends HttpServlet {

    private static final Logger logger =
            Logger.getLogger(RestoreBackupServlet.class.getName());

    @Override
    protected void doPost(HttpServletRequest request, HttpServletResponse response)
            throws ServletException, IOException {
        // Check that we have a file upload request
        boolean isMultipart = ServletFileUpload.isMultipartContent(request);
        logger.log(Level.FINE, "Request multipart " + isMultipart);

        // Create a new file upload handler
        ServletFileUpload upload = new ServletFileUpload();

        // Parse the request
        try {
            FileItemIterator iter = upload.getItemIterator(request);
            while (iter.hasNext()) {
                FileItemStream item = iter.next();
                String name = item.getFieldName();
                InputStream stream = item.openStream();
                if (item.isFormField()) {
                    // Use the servlet's logger, not System.out, so diagnostics
                    // land in the app log.
                    logger.fine("Form field " + name + " with value "
                            + Streams.asString(stream) + " detected.");
                } else {
                    logger.fine("File field " + name + " with file name "
                            + item.getName() + " detected.");
                    // Process the input stream
                    Catalogue catalogue = Catalogue.parseFrom(stream);
                    int tasksSaved = saveAll(catalogue);
                    response.getWriter().println("Saved " + tasksSaved + " actions.");
                    response.flushBuffer();
                }
            }
        } catch (FileUploadException e) {
            throw new ServletException(e);
        }
    }

    /**
     * Persists every task in the catalogue and returns how many were saved.
     * (Counts directly rather than accumulating a throwaway list.)
     */
    private int saveAll(Catalogue catalogue) {
        List<ShuffleProtos.Task> protoTasks = catalogue.getTaskList();
        TaskDao dao = new TaskDao();
        int saved = 0;
        for (ShuffleProtos.Task protoTask : protoTasks) {
            logger.info("Saving task: " + protoTask.toString());
            dao.save(toModelTask(protoTask));
            saved++;
        }
        return saved;
    }

    /** Copies the protobuf task's fields onto a new model Task. */
    private Task toModelTask(ShuffleProtos.Task protoTask) {
        Task task = new Task();
        task.setDescription(protoTask.getDescription());
        task.setDetails(protoTask.getDetails());
        task.setActive(protoTask.getActive());
        task.setComplete(protoTask.getComplete());
        task.setCreatedDate(new Date(protoTask.getCreated().getMillis()));
        task.setDeleted(protoTask.getDeleted());
        task.setModifiedDate(new Date(protoTask.getModified().getMillis()));
        task.setOrder(protoTask.getOrder());
        return task;
    }
}
Java
/*
 * Copyright 2010 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.dodgybits.shuffle.server;

import com.google.android.c2dm.server.PMF;
import com.google.appengine.api.datastore.Key;

import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Locale;
import java.util.logging.Logger;

import javax.jdo.PersistenceManager;
import javax.jdo.Query;
import javax.jdo.annotations.IdentityType;
import javax.jdo.annotations.PersistenceCapable;
import javax.jdo.annotations.Persistent;
import javax.jdo.annotations.PrimaryKey;

/**
 * Registration info.
 *
 * An account may be associated with multiple phones,
 * and a phone may be associated with multiple accounts.
 *
 * registrations lists different phones registered to that account.
 */
@PersistenceCapable(identityType = IdentityType.APPLICATION)
public class DeviceInfo {

    private static final Logger log = Logger.getLogger(DeviceInfo.class.getName());

    /**
     * User-email # device-id
     *
     * Device-id can be specified by device, default is hash of abs(registration
     * id).
     *
     * user@example.com#1234
     */
    @PrimaryKey
    @Persistent
    private Key key;

    /**
     * The ID used for sending messages to.
     */
    @Persistent
    private String deviceRegistrationID;

    /**
     * Current supported types:
     * (default) - ac2dm, regular froyo+ devices using C2DM protocol
     *
     * New types may be defined - for example for sending to chrome.
     */
    @Persistent
    private String type;

    /**
     * For statistics - and to provide hints to the user.
     */
    @Persistent
    private Date registrationTimestamp;

    @Persistent
    private Boolean debug;

    public DeviceInfo(Key key, String deviceRegistrationID) {
        log.info("new DeviceInfo: key=" + key
                + ", deviceRegistrationId=" + deviceRegistrationID);
        this.key = key;
        this.deviceRegistrationID = deviceRegistrationID;
        this.setRegistrationTimestamp(new Date()); // now
    }

    public DeviceInfo(Key key) {
        log.info("new DeviceInfo: key=" + key);
        this.key = key;
    }

    public Key getKey() {
        log.info("DeviceInfo: return key=" + key);
        return key;
    }

    public void setKey(Key key) {
        log.info("DeviceInfo: set key=" + key);
        this.key = key;
    }

    // Accessor methods for properties added later (hence can be null)

    public String getDeviceRegistrationID() {
        log.info("DeviceInfo: return deviceRegistrationID=" + deviceRegistrationID);
        return deviceRegistrationID;
    }

    public void setDeviceRegistrationID(String deviceRegistrationID) {
        log.info("DeviceInfo: set deviceRegistrationID=" + deviceRegistrationID);
        this.deviceRegistrationID = deviceRegistrationID;
    }

    /** Returns false when the nullable debug flag has never been set. */
    public boolean getDebug() {
        return (debug != null ? debug.booleanValue() : false);
    }

    public void setDebug(boolean debug) {
        // Boolean.valueOf avoids allocating a fresh Boolean (the Boolean(boolean)
        // constructor is wasteful and was later deprecated).
        this.debug = Boolean.valueOf(debug);
    }

    public void setType(String type) {
        this.type = type;
    }

    public String getType() {
        return type != null ? type : "";
    }

    public void setRegistrationTimestamp(Date registrationTimestamp) {
        this.registrationTimestamp = registrationTimestamp;
    }

    public Date getRegistrationTimestamp() {
        return registrationTimestamp;
    }

    /**
     * Helper function - will query all registrations for a user.
     * Results are copied out before the query is closed.
     */
    @SuppressWarnings("unchecked")
    public static List<DeviceInfo> getDeviceInfoForUser(String user) {
        PersistenceManager pm = PMF.get().getPersistenceManager();
        try {
            // Canonicalize user name
            user = user.toLowerCase(Locale.ENGLISH);
            Query query = pm.newQuery(DeviceInfo.class);
            // NOTE(review): the filter embeds the email directly in the JDOQL
            // string; consider declareParameters() if the value can ever
            // contain quotes - confirm against callers.
            query.setFilter("key >= '" + user + "' && key < '" + user + "$'");
            List<DeviceInfo> qresult = (List<DeviceInfo>) query.execute();
            // Copy to array - we need to close the query
            List<DeviceInfo> result = new ArrayList<DeviceInfo>();
            for (DeviceInfo di : qresult) {
                result.add(di);
            }
            query.closeAll();
            log.info("Return " + result.size() + " devices for user " + user);
            return result;
        } finally {
            pm.close();
        }
    }

    @Override
    public String toString() {
        return "DeviceInfo[key=" + key
                + ", deviceRegistrationID=" + deviceRegistrationID
                + ", type=" + type
                + ", registrationTimestamp=" + registrationTimestamp
                + ", debug=" + debug + "]";
    }
}
Java
/*
 * Copyright 2011 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package org.dodgybits.shuffle.server;

import com.google.appengine.api.users.User;
import com.google.appengine.api.users.UserService;
import com.google.appengine.api.users.UserServiceFactory;

import java.util.Date;
import java.util.logging.Logger;

/**
 * Builds a greeting for the currently logged-in GAE user (or an anonymous
 * message when nobody is logged in).
 */
public class HelloWorldService {

    private static final Logger log = Logger.getLogger(HelloWorldService.class.getName());

    public HelloWorldService() {
    }

    /** Returns the greeting text, logging it before returning. */
    public static String getMessage() {
        UserService userService = UserServiceFactory.getUserService();
        User currentUser = userService.getCurrentUser();

        String message;
        if (currentUser != null) {
            message = "Hello, " + currentUser.getEmail()
                    + "!\nSent from App Engine at " + new Date();
        } else {
            message = "No one is logged in!\nSent from App Engine at " + new Date();
        }
        log.info("Returning message \"" + message + "\"");
        return message;
    }
}
Java
/*
 * Copyright 2011 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package org.dodgybits.shuffle.server;

import com.google.android.c2dm.server.PMF;
import com.google.appengine.api.datastore.Key;
import com.google.appengine.api.datastore.KeyFactory;
import com.google.appengine.api.users.User;
import com.google.appengine.api.users.UserService;
import com.google.appengine.api.users.UserServiceFactory;

import java.util.Date;
import java.util.List;
import java.util.logging.Logger;

import javax.jdo.JDOObjectNotFoundException;
import javax.jdo.PersistenceManager;

/**
 * RequestFactory-facing bean that registers/unregisters a device for C2DM
 * push on behalf of the current GAE user.
 */
public class RegistrationInfo {

    private static final Logger log = Logger.getLogger(RegistrationInfo.class.getName());

    /** Cap on registrations kept per account; oldest is evicted beyond this. */
    private static final int MAX_DEVICES = 5;

    String deviceId;
    String deviceRegistrationId;

    public RegistrationInfo() {
    }

    public String getAccountName() {
        UserService userService = UserServiceFactory.getUserService();
        User user = userService.getCurrentUser();
        return user.getEmail();
    }

    public String getDeviceId() {
        return deviceId;
    }

    public String getDeviceRegistrationId() {
        return deviceRegistrationId;
    }

    /** Registers this device, logging (not propagating) any failure. */
    public void register() {
        log.info("register " + this);
        try {
            doRegister(getDeviceRegistrationId(), "ac2dm", getDeviceId(), getAccountName());
        } catch (Exception e) {
            log.info("Got exception in registration: " + e + " - " + e.getMessage());
            for (StackTraceElement ste : e.getStackTrace()) {
                log.info(ste.toString());
            }
        }
        log.info("Successfully registered");
    }

    public void setDeviceId(String deviceId) {
        this.deviceId = deviceId;
    }

    public void setDeviceRegistrationId(String deviceRegistrationId) {
        this.deviceRegistrationId = deviceRegistrationId;
    }

    @Override
    public String toString() {
        return "RegistrationInfo [deviceId=" + deviceId
                + ", deviceRegistrationId=" + deviceRegistrationId + "]";
    }

    /** Unregisters this device, logging (not propagating) any failure. */
    public void unregister() {
        log.info("unregister " + this);
        try {
            doUnregister(getDeviceRegistrationId(), getAccountName());
        } catch (Exception e) {
            log.info("Got exception in unregistration: " + e + " - " + e.getMessage());
            for (StackTraceElement ste : e.getStackTrace()) {
                log.info(ste.toString());
            }
        }
        log.info("Successfully unregistered");
    }

    private void doRegister(String deviceRegistrationId, String deviceType,
            String deviceId, String accountName) throws Exception {
        log.info("in register: accountName = " + accountName);
        PersistenceManager pm = PMF.get().getPersistenceManager();
        try {
            List<DeviceInfo> registrations = DeviceInfo.getDeviceInfoForUser(accountName);
            log.info("got registrations");
            if (registrations.size() > MAX_DEVICES) {
                log.info("got registrations > MAX_DEVICES");
                // we could return an error - but user can't handle it yet.
                // we can't let it grow out of bounds.
                // TODO: we should also define a 'ping' message and expire/remove
                // unused registrations
                DeviceInfo oldest = registrations.get(0);
                if (oldest.getRegistrationTimestamp() == null) {
                    pm.deletePersistent(oldest);
                } else {
                    long oldestTime = oldest.getRegistrationTimestamp().getTime();
                    for (int i = 1; i < registrations.size(); i++) {
                        if (registrations.get(i).getRegistrationTimestamp().getTime() < oldestTime) {
                            oldest = registrations.get(i);
                            oldestTime = oldest.getRegistrationTimestamp().getTime();
                        }
                    }
                    pm.deletePersistent(oldest);
                }
            }

            // Get device if it already exists, else create.
            // Widen the hash to long BEFORE Math.abs: Math.abs(int) returns a
            // negative number for Integer.MIN_VALUE, which would have produced
            // a malformed suffix. For every other hash value the suffix is
            // unchanged, so existing keys still resolve.
            String suffix = (deviceId != null
                    ? "#" + Long.toHexString(Math.abs((long) deviceId.hashCode()))
                    : "");
            log.info("suffix = " + suffix);
            Key key = KeyFactory.createKey(DeviceInfo.class.getSimpleName(),
                    accountName + suffix);
            log.info("key = " + key);

            DeviceInfo device = null;
            try {
                device = pm.getObjectById(DeviceInfo.class, key);
            } catch (JDOObjectNotFoundException e) {
                log.info("Caught JDOObjectNotFoundException");
            }
            if (device == null) {
                device = new DeviceInfo(key, deviceRegistrationId);
                device.setType(deviceType);
            } else {
                // update registration id
                device.setDeviceRegistrationID(deviceRegistrationId);
                device.setRegistrationTimestamp(new Date());
            }
            pm.makePersistent(device);
            return;
        } catch (Exception e) {
            log.info("Caught exception: " + e);
            throw e;
        } finally {
            pm.close();
        }
    }

    private void doUnregister(String deviceRegistrationID, String accountName) {
        log.info("in unregister: accountName = " + accountName);
        PersistenceManager pm = PMF.get().getPersistenceManager();
        try {
            List<DeviceInfo> registrations = DeviceInfo.getDeviceInfoForUser(accountName);
            for (int i = 0; i < registrations.size(); i++) {
                DeviceInfo deviceInfo = registrations.get(i);
                if (deviceInfo.getDeviceRegistrationID().equals(deviceRegistrationID)) {
                    pm.deletePersistent(deviceInfo);
                    // Keep looping in case of duplicates
                }
            }
        } catch (JDOObjectNotFoundException e) {
            log.warning("User " + accountName + " unknown");
        } catch (Exception e) {
            log.warning("Error unregistering device: " + e.getMessage());
        } finally {
            pm.close();
        }
    }
}
Java
/* * Copyright 2011 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package org.dodgybits.shuffle.server; import com.google.web.bindery.requestfactory.server.RequestFactoryServlet; import com.google.web.bindery.requestfactory.shared.Locator; public class MessageLocator extends Locator<Message, Void> { @Override public Message create(Class<? extends Message> clazz) { return new Message(RequestFactoryServlet.getThreadLocalRequest().getSession().getServletContext()); } @Override public Message find(Class<? extends Message> clazz, Void id) { throw new UnsupportedOperationException(); } @Override public Class<Message> getDomainType() { throw new UnsupportedOperationException(); } @Override public Void getId(Message domainObject) { throw new UnsupportedOperationException(); } @Override public Class<Void> getIdType() { throw new UnsupportedOperationException(); } @Override public Object getVersion(Message domainObject) { throw new UnsupportedOperationException(); } }
Java
package org.dodgybits.shuffle.server.locator;

/**
 * RequestFactory locator for Task entities.
 *
 * All behavior comes from the generic {@link ObjectifyLocator}; this class
 * exists only so it can be named in a proxy's {@code @ProxyFor} binding.
 */
public class TaskLocator extends ObjectifyLocator {
}
Java
package org.dodgybits.shuffle.server.locator; import com.google.web.bindery.requestfactory.shared.Locator; import com.googlecode.objectify.util.DAOBase; import org.dodgybits.shuffle.server.model.DatastoreObject; /** * Generic @Locator for objects that extend DatastoreObject */ public class ObjectifyLocator extends Locator<DatastoreObject, Long> { @Override public DatastoreObject create(Class<? extends DatastoreObject> clazz) { try { return clazz.newInstance(); } catch (InstantiationException e) { throw new RuntimeException(e); } catch (IllegalAccessException e) { throw new RuntimeException(e); } } @Override public DatastoreObject find(Class<? extends DatastoreObject> clazz, Long id) { DAOBase daoBase = new DAOBase(); return daoBase.ofy().find(clazz, id); } @Override public Class<DatastoreObject> getDomainType() { // Never called return null; } @Override public Long getId(DatastoreObject domainObject) { return domainObject.getId(); } @Override public Class<Long> getIdType() { return Long.class; } @Override public Object getVersion(DatastoreObject domainObject) { return domainObject.getVersion(); } }
Java
package org.dodgybits.shuffle.server.locator;

import java.lang.reflect.InvocationTargetException;

import com.google.web.bindery.requestfactory.shared.ServiceLocator;

/**
 * Generic locator service that can be referenced in the @Service annotation
 * for any RequestFactory service stub.
 *
 * Service classes must expose a public no-arg constructor.
 */
public class DaoServiceLocator implements ServiceLocator {

    @Override
    public Object getInstance(Class<?> clazz) {
        try {
            // Constructor.newInstance() instead of Class.newInstance():
            // the latter propagates any checked exception the constructor
            // throws without declaring it, defeating compile-time checking.
            return clazz.getDeclaredConstructor().newInstance();
        } catch (NoSuchMethodException e) {
            throw new RuntimeException(e);
        } catch (InvocationTargetException e) {
            throw new RuntimeException(e);
        } catch (InstantiationException e) {
            throw new RuntimeException(e);
        } catch (IllegalAccessException e) {
            throw new RuntimeException(e);
        }
    }
}
Java
/*
 * Copyright 2011 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package org.dodgybits.shuffle.server;

import com.google.android.c2dm.server.C2DMessaging;
import com.google.android.c2dm.server.PMF;
import com.google.appengine.api.datastore.Key;
import com.google.appengine.api.users.User;
import com.google.appengine.api.users.UserService;
import com.google.appengine.api.users.UserServiceFactory;

import java.util.List;
import java.util.logging.Logger;

import javax.jdo.PersistenceManager;
import javax.servlet.ServletContext;

/**
 * Send a message using C2DM.
 */
public class SendMessage {

    private static final Logger log = Logger.getLogger(SendMessage.class.getName());

    /**
     * Pushes {@code message} to every C2DM-registered device of
     * {@code recipient}, returning a human-readable "Success: ..." or
     * "Failure: ..." status string. All exceptions are converted into a
     * failure string rather than thrown.
     *
     * @param context   servlet context used to obtain the C2DMessaging instance
     * @param recipient account email whose devices receive the message
     * @param message   payload text (trimmed to 1000 chars before sending)
     */
    public static String sendMessage(ServletContext context, String recipient,
            String message) {
        PersistenceManager pm = PMF.get().getPersistenceManager();
        try {
            // Sender defaults to "nobody" when no GAE user is logged in.
            UserService userService = UserServiceFactory.getUserService();
            User user = userService.getCurrentUser();
            String sender = "nobody";
            if (user != null) {
                sender = user.getEmail();
            }
            log.info("sendMessage: sender = " + sender);
            log.info("sendMessage: recipient = " + recipient);
            log.info("sendMessage: message = " + message);

            // ok = we sent to at least one device.
            boolean ok = false;

            // Send push message to phone
            C2DMessaging push = C2DMessaging.get(context);
            boolean res = false;

            // Same collapse key for identical payloads so C2DM can coalesce them.
            String collapseKey = "" + message.hashCode();

            // delete will fail if the pm is different than the one used to
            // load the object - we must close the object when we're done
            List<DeviceInfo> registrations = null;
            registrations = DeviceInfo.getDeviceInfoForUser(recipient);
            log.info("sendMessage: got " + registrations.size() + " registrations");

            // Deal with upgrades and multi-device:
            // If user has one device with an old version and few new ones -
            // the old registration will be deleted.
            if (registrations.size() > 1) {
                // Make sure there is no 'bare' registration
                // Keys are sorted - check the first
                DeviceInfo first = registrations.get(0);
                Key oldKey = first.getKey();
                if (oldKey.toString().indexOf("#") < 0) {
                    // multiple devices, first is old-style.
                    registrations.remove(0); // don't send to it
                    pm.deletePersistent(first);
                }
            }

            // Attempt delivery to each remaining ac2dm registration; one
            // successful send is enough to report success.
            int numSendAttempts = 0;
            for (DeviceInfo deviceInfo : registrations) {
                if (!"ac2dm".equals(deviceInfo.getType())) {
                    continue; // user-specified device type
                }
                res = doSendViaC2dm(message, sender, push, collapseKey, deviceInfo);
                numSendAttempts++;
                if (res) {
                    ok = true;
                }
            }

            if (ok) {
                return "Success: Message sent";
            } else if (numSendAttempts == 0) {
                return "Failure: User " + recipient + " not registered";
            } else {
                return "Failure: Unable to send message";
            }
        } catch (Exception e) {
            return "Failure: Got exception " + e;
        } finally {
            pm.close();
        }
    }

    /**
     * Sends a single fire-and-forget C2DM push (no retry); the payload is
     * truncated to 1000 characters plus a "[...]" marker.
     */
    private static boolean doSendViaC2dm(String message, String sender,
            C2DMessaging push, String collapseKey, DeviceInfo deviceInfo) {
        // Trim message if needed.
        if (message.length() > 1000) {
            message = message.substring(0, 1000) + "[...]";
        }
        return push.sendNoRetry(deviceInfo.getDeviceRegistrationID(), collapseKey,
                "sender", sender, "message", message);
    }
}
Java
/*
 * Copyright 2011 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package org.dodgybits.shuffle.server;

import java.util.logging.Logger;

import javax.servlet.ServletContext;

/**
 * A request-scoped push message: a recipient plus text, with send()
 * delegating to {@link SendMessage} and reporting a status string.
 */
public class Message {

    private static final Logger log = Logger.getLogger(Message.class.getName());

    private final ServletContext context;

    String recipient;
    String message;

    public Message(ServletContext context) {
        this.context = context;
    }

    public String getRecipient() {
        return recipient;
    }

    public String getMessage() {
        return message;
    }

    /**
     * Sends this message and returns the status string; any exception is
     * folded into a "Failure: ..." result rather than thrown.
     */
    public String send() {
        log.info("send " + this);
        try {
            return SendMessage.sendMessage(context, recipient, message);
        } catch (Exception e) {
            return "Failure: Got exception in send: " + e.getMessage();
        }
    }

    public void setRecipient(String recipient) {
        this.recipient = recipient;
    }

    public void setMessage(String message) {
        this.message = message;
    }

    @Override
    public String toString() {
        return "Message [recipient=" + recipient + ", message=" + message + "]";
    }
}
Java
package org.dodgybits.shuffle.server.model;

import javax.persistence.Id;
import javax.persistence.PrePersist;

/**
 * Common base class for persisted entities: a datastore-assigned id plus a
 * version counter that is bumped automatically every time the entity is saved.
 */
public class DatastoreObject {

    @Id
    private Long id;

    // Starts at 0 and is incremented by onPersist() on every save.
    private Integer version = 0;

    /**
     * Auto-increment version # whenever persisted
     */
    @PrePersist
    void onPersist() {
        this.version++;
    }

    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    public Integer getVersion() {
        return version;
    }

    public void setVersion(Integer version) {
        this.version = version;
    }
}
Java
package org.dodgybits.shuffle.server.model;

import java.util.Date;

import org.dodgybits.shuffle.server.service.AppUserDao;

import com.google.appengine.api.datastore.EntityNotFoundException;
import com.googlecode.objectify.Key;
import com.googlecode.objectify.annotation.Entity;

/**
 * Persisted task entity. Field names double as datastore property names under
 * Objectify, so they must not be renamed without a data migration.
 */
@Entity
public class Task extends DatastoreObject {
    String description;
    String details;
    Date createdDate;
    Date modifiedDate;
    // Active tasks are shown by default; new tasks start active.
    boolean active = true;
    // Soft-delete flag; the row is kept in the datastore.
    boolean deleted;

    // 0-indexed order within a project.
    int order;
    boolean complete;

    // Stored as a key reference; resolved to an AppUser on demand in getOwner().
    private Key<AppUser> owner;

    public final String getDescription() {
        return description;
    }

    public final void setDescription(String description) {
        this.description = description;
    }

    public final String getDetails() {
        return details;
    }

    public final void setDetails(String details) {
        this.details = details;
    }

    public final Date getCreatedDate() {
        return createdDate;
    }

    public final void setCreatedDate(Date createdDate) {
        this.createdDate = createdDate;
    }

    public final Date getModifiedDate() {
        return modifiedDate;
    }

    public final void setModifiedDate(Date modifiedDate) {
        this.modifiedDate = modifiedDate;
    }

    public final boolean isActive() {
        return active;
    }

    public final void setActive(boolean active) {
        this.active = active;
    }

    public final boolean isDeleted() {
        return deleted;
    }

    public final void setDeleted(boolean deleted) {
        this.deleted = deleted;
    }

    public final int getOrder() {
        return order;
    }

    public final void setOrder(int order) {
        this.order = order;
    }

    public final boolean isComplete() {
        return complete;
    }

    public final void setComplete(boolean complete) {
        this.complete = complete;
    }

    /**
     * Resolves the owning user from its stored key.
     *
     * Fetches via a fresh AppUserDao on every call — there is no caching here.
     *
     * @throws RuntimeException wrapping EntityNotFoundException when the
     *         referenced user no longer exists
     */
    public AppUser getOwner() {
        try {
            return new AppUserDao().get(owner);
        } catch (EntityNotFoundException e) {
            throw new RuntimeException(e);
        }
    }

    /** Stores only the key of {@code owner}, not the entity itself. */
    public void setOwner(AppUser owner) {
        this.owner = new AppUserDao().key(owner);
    }
}
Java
package org.dodgybits.shuffle.server.model; import com.googlecode.objectify.annotation.Entity; /** * An application user, named with a prefix to avoid confusion with GAE User type */ @Entity public class AppUser extends DatastoreObject { private String email; public AppUser() { // No-arg constructor required by Objectify } public AppUser(String userEmail) { this.email = userEmail; } public String getEmail() { return email; } public void setEmail(String email) { this.email = email; } }
Java
/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package edu.chl.cid.jsfproducts;

import java.io.Serializable;

import javax.persistence.Basic;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.NamedQueries;
import javax.persistence.NamedQuery;
import javax.persistence.Table;
import javax.validation.constraints.NotNull;
import javax.validation.constraints.Size;

/**
 * JPA entity mapped to the PRODUCT table. Identity (equals/hashCode) is based
 * solely on the database id, so two unsaved instances (id == null) never
 * compare equal to anything but themselves.
 *
 * @author Daniel
 */
@Entity
@Table(name = "PRODUCT")
@NamedQueries({
    @NamedQuery(name = "Product.findAll", query = "SELECT p FROM Product p")})
public class Product implements Serializable {

    private static final long serialVersionUID = 1L;

    @Id
    @Basic(optional = false)
    @NotNull
    @Column(name = "ID")
    @GeneratedValue(strategy = GenerationType.AUTO)
    private Long id;

    @Size(max = 255)
    @Column(name = "CAT")
    private String cat;

    // @Max(value=?) @Min(value=?)//if you know range of your decimal fields consider using these annotations to enforce field validation
    @Column(name = "PRICE")
    private Double price;

    @Size(max = 255)
    @Column(name = "NAME")
    private String name;

    public Product() {
    }

    public Product(long id, String name, String cat, double price) {
        this.id = id;
        this.name = name;
        this.cat = cat;
        this.price = price;
    }

    public Product(Long id) {
        this.id = id;
    }

    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    public String getCat() {
        return cat;
    }

    public void setCat(String cat) {
        this.cat = cat;
    }

    public Double getPrice() {
        return price;
    }

    public void setPrice(Double price) {
        this.price = price;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    @Override
    public int hashCode() {
        int hash = 0;
        hash += (id != null ? id.hashCode() : 0);
        return hash;
    }

    @Override
    public boolean equals(Object object) {
        // TODO: Warning - this method won't work in the case the id fields are not set
        if (!(object instanceof Product)) {
            return false;
        }
        Product other = (Product) object;
        if ((this.id == null && other.id != null)
                || (this.id != null && !this.id.equals(other.id))) {
            return false;
        }
        return true;
    }

    @Override
    public String toString() {
        // Fixed: the generated toString reported a stale package name
        // ("edu.chl.tdaniel.jpa5") copied from another project.
        return "edu.chl.cid.jsfproducts.Product[ id=" + id + " ]";
    }
}
Java
/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package edu.chl.cid.jsfproducts.controllerbeans;

import edu.chl.cid.jsfproducts.Product;
import edu.chl.cid.jsfproducts.ProductJpaCtrl;

import javax.ejb.EJB;
import javax.faces.bean.ManagedBean;

/**
 * Backing bean for the "add product" form. Holds the Product being edited and
 * persists it on submit via the injected EJB controller.
 *
 * @author Mikey
 */
@ManagedBean
public class AddProductControllerBean {

    @EJB
    private ProductJpaCtrl productJpaCtrl;

    private Product product = new Product();

    /** Creates a new instance of AddProductControllerBean */
    public AddProductControllerBean() {
    }

    /**
     * Persists the form's product and navigates onward.
     *
     * @return the "onAddProduct" navigation outcome
     */
    public String addProduct() {
        System.out.println("Addproduct anrooped! " + product.getName() + ", "
                + product.getCat() + ", " + product.getPrice());
        product = productJpaCtrl.create(product);
        return "onAddProduct";
    }

    public Product getProduct() {
        return product;
    }

    public void setProduct(Product product) {
        this.product = product;
    }
}
Java
/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package edu.chl.cid.jsfproducts.controllerbeans;

import edu.chl.cid.jsfproducts.NonexistentEntityException;
import edu.chl.cid.jsfproducts.Product;
import edu.chl.cid.jsfproducts.ProductJpaCtrl;
import edu.chl.cid.jsfproducts.modelbeans.ShoppingCartModelBean;

import java.util.List;

import javax.ejb.EJB;
import javax.faces.bean.ManagedBean;
import javax.faces.bean.ManagedProperty;
import javax.faces.bean.ViewScoped;
import javax.faces.context.FacesContext;
import javax.servlet.http.HttpSession;

/**
 * View-scoped controller behind the product listing page: lists products,
 * moves them into the session shopping cart, and deletes them.
 *
 * @author Mikey
 */
@ManagedBean
@ViewScoped
public class ViewProductsControllerBean {

    @EJB
    private ProductJpaCtrl productJpaCtrl;

    /** Session-scoped cart injected by JSF. */
    @ManagedProperty("#{shoppingCartModelBean}")
    private ShoppingCartModelBean cart;

    /** Creates a new instance of ViewProductsControllerBean */
    public ViewProductsControllerBean() {
    }

    /** @return all products currently in the store */
    public List<Product> getProducts() {
        return productJpaCtrl.findEntities();
    }

    public ShoppingCartModelBean getCart() {
        return cart;
    }

    public void setCart(ShoppingCartModelBean cart) {
        this.cart = cart;
    }

    public void addToCart(Product p) {
        cart.addProduct(p);
    }

    /**
     * Removes every occurrence of the product from the cart, then deletes it
     * from the store.
     *
     * @throws NonexistentEntityException if the product was already deleted
     */
    public void removeProduct(Product product) throws NonexistentEntityException {
        cart.removeProducts(product);
        productJpaCtrl.destroy(product.getId());
    }

    /**
     * Empties the cart by invalidating the whole HTTP session.
     *
     * @return the "onViewProducts" navigation outcome
     */
    public String emptyCart() {
        // Fixed: getSession(false) returns null when no session exists; the
        // original cast-and-invalidated unconditionally and could NPE.
        HttpSession session = (HttpSession) FacesContext.getCurrentInstance()
                .getExternalContext().getSession(false);
        if (session != null) {
            session.invalidate();
        }
        return "onViewProducts";
    }
}
Java
/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package edu.chl.cid.jsfproducts.controllerbeans;

import edu.chl.cid.jsfproducts.Product;
import edu.chl.cid.jsfproducts.modelbeans.ShoppingCartModelBean;

import java.util.List;

import javax.faces.bean.ManagedBean;
import javax.faces.bean.ManagedProperty;
import javax.faces.bean.ViewScoped;
import javax.faces.context.FacesContext;
import javax.servlet.http.HttpServletRequest;

/**
 * View-scoped controller for the shopping-cart page: exposes the session
 * cart's contents and empties it by invalidating the HTTP session.
 *
 * @author Mikey
 */
@ManagedBean
@ViewScoped
public class ViewCartControllerBean {

    /** Session-scoped cart injected by JSF. */
    @ManagedProperty("#{shoppingCartModelBean}")
    private ShoppingCartModelBean cart;

    /** Creates a new instance of ViewCartControllerBean */
    public ViewCartControllerBean() {
    }

    public List<Product> getProductsInCart() {
        return cart.getProducts();
    }

    public ShoppingCartModelBean getCart() {
        return cart;
    }

    public void setCart(ShoppingCartModelBean cart) {
        this.cart = cart;
    }

    /**
     * Drops the whole session (and with it the session-scoped cart).
     *
     * @return the "onEmptyCart" navigation outcome
     */
    public String emptyCart() {
        HttpServletRequest request = (HttpServletRequest) FacesContext
                .getCurrentInstance().getExternalContext().getRequest();
        request.getSession().invalidate();
        return "onEmptyCart";
    }
}
Java
/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package edu.chl.cid.jsfproducts;

import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;

import javax.ejb.Stateless;
import javax.persistence.EntityManager;
import javax.persistence.EntityNotFoundException;
import javax.persistence.PersistenceContext;
import javax.persistence.Query;
import javax.persistence.criteria.CriteriaQuery;

/**
 * Stateless session bean wrapping JPA CRUD operations for {@link Product}.
 *
 * The EntityManager is container-managed (injected via @PersistenceContext)
 * and runs inside container-managed JTA transactions, so this class must
 * never call em.getTransaction() or em.close().
 *
 * @author hajo
 */
@Stateless
public class ProductJpaCtrl {

    @PersistenceContext(unitName = "webshop_pu")
    private EntityManager em;

    /**
     * Persists a new product.
     *
     * @return the managed instance (id populated on transaction commit)
     */
    public Product create(Product t) {
        System.out.println("Persisting : " + t.toString());
        em.persist(t);
        return t;
    }

    /**
     * Deletes the product with the given id.
     *
     * @throws NonexistentEntityException never thrown directly here; kept for
     *         interface compatibility (getReference + getId() raises
     *         javax.persistence.EntityNotFoundException if the row is gone)
     */
    public void destroy(Long id) throws NonexistentEntityException {
        Product p = em.getReference(Product.class, id);
        p.getId(); // force the lazy reference to resolve
        em.remove(p);
    }

    /**
     * Merges changes to an existing product.
     *
     * Fixed: the original began/committed em.getTransaction() and closed the
     * EntityManager in a finally block — both are illegal on a
     * container-managed, JTA EntityManager inside a @Stateless bean and would
     * throw IllegalStateException at runtime.
     *
     * @throws NonexistentEntityException if the product no longer exists
     */
    public void edit(Product t) throws NonexistentEntityException, Exception {
        Long id = t.getId();
        if (findEntity(id) == null) {
            throw new NonexistentEntityException("The product with id " + id + " no longer exists.");
        }
        em.merge(t);
    }

    /**
     * @return the product with the given id, or null if absent
     */
    public Product findEntity(Long id) {
        // Fixed: the original closed the injected EntityManager in a finally
        // block, which is forbidden for container-managed EMs and would break
        // every subsequent call on this bean.
        return em.find(Product.class, id);
    }

    /** @return all products */
    public List<Product> findEntities() {
        // Typed criteria query instead of the raw CriteriaQuery the template
        // generated.
        CriteriaQuery<Product> cq = em.getCriteriaBuilder().createQuery(Product.class);
        cq.select(cq.from(Product.class));
        return em.createQuery(cq).getResultList();
    }

    /**
     * @param maxResults  page size
     * @param firstResult 0-based offset of the first row
     * @return one page of products
     */
    public List<Product> findEntities(int maxResults, int firstResult) {
        // Fixed: page in the database via setFirstResult/setMaxResults
        // instead of loading every row and sub-listing in memory.
        return em.createQuery("select p from Product p", Product.class)
                .setFirstResult(Math.max(0, firstResult))
                .setMaxResults(Math.max(0, maxResults))
                .getResultList();
    }

    /** @return total number of products */
    public int getEntityCount() {
        // Fixed: JPQL count(...) yields a Long; the original asked for a
        // TypedQuery<Integer>, which fails at runtime.
        return em.createQuery("select count(p) from Product p", Long.class)
                .getSingleResult()
                .intValue();
    }
}
Java
/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package edu.chl.cid.jsfproducts;

import java.util.List;
import javax.persistence.EntityManager;

/**
 * Generic CRUD contract for JPA-backed entity controllers.
 *
 * NOTE(review): ProductJpaCtrl offers the same operations but does not
 * implement this interface, and its create(...) returns the entity while this
 * one returns void — confirm which contract is intended before relying on it.
 *
 * @param <T> entity type managed by the implementation
 * @author hajo
 */
public interface IJpaCtrl<T> {

    /** Persists {@code t}. */
    void create(T t);

    /**
     * Deletes the entity with the given id.
     *
     * @throws NonexistentEntityException if no entity has that id
     */
    void destroy(Long id) throws NonexistentEntityException;

    /**
     * Merges changes to an existing entity.
     *
     * @throws NonexistentEntityException if the entity no longer exists
     */
    void edit(T t) throws NonexistentEntityException, Exception;

    /** @return the entity with the given id, or null if absent */
    T findEntity(Long id);

    /** @return all entities */
    List<T> findEntities();

    /** @return one page of entities, {@code maxResults} rows starting at {@code firstResult} */
    List<T> findEntities(int maxResults, int firstResult);

    /** @return the underlying entity manager */
    EntityManager getEntityManager();

    /** @return total number of entities */
    int getEntityCount();
}
Java
package edu.chl.cid.jsfproducts; public class NonexistentEntityException extends Exception { public NonexistentEntityException(String message, Throwable cause) { super(message, cause); } public NonexistentEntityException(String message) { super(message); } }
Java
/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package edu.chl.cid.jsfproducts.modelbeans;

import edu.chl.cid.jsfproducts.Product;

import java.util.ArrayList;
import java.util.Arrays;

import javax.faces.bean.ManagedBean;
import javax.faces.bean.SessionScoped;

/**
 * Session-scoped shopping cart: an in-memory list of the products the current
 * user has picked. Duplicates are allowed (the same product can be added more
 * than once).
 *
 * @author Mikey
 */
@ManagedBean
@SessionScoped
public class ShoppingCartModelBean {

    private ArrayList<Product> products;

    /** Creates a new instance of ShoppingCartModelBean */
    public ShoppingCartModelBean() {
        products = new ArrayList<Product>();
    }

    public ArrayList<Product> getProducts() {
        return products;
    }

    public void addProduct(Product product) {
        products.add(product);
    }

    /** Removes the first occurrence of {@code p} from the cart. */
    public void removeProduct(Product p) {
        products.remove(p);
    }

    /** Removes every occurrence of {@code p} from the cart. */
    public void removeProducts(Product p) {
        // Fixed: dropped the duplicate "import Product" the original carried,
        // and replaced the explicit single-element array with varargs asList —
        // same removeAll semantics.
        products.removeAll(Arrays.asList(p));
    }

    public int getNumberOfProducts() {
        return products.size();
    }
}
Java
/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package edu.chl.cid.jsfproducts.supportbeans;

import java.text.SimpleDateFormat;
import java.util.Calendar;

import javax.faces.bean.ApplicationScoped;
import javax.faces.bean.ManagedBean;

/**
 * Application-scoped helper exposing the current wall-clock time, formatted
 * for display in pages.
 *
 * @author Mikey
 */
@ManagedBean
@ApplicationScoped
public class DateTimeSupportBean {

    /** Creates a new instance of DateTimeSupportBean */
    public DateTimeSupportBean() {
    }

    /**
     * @return "now" in the server's default time zone, formatted as
     *         "yyyy-MM-dd HH:mm:ss"
     */
    public String getCurrentDateTime() {
        // A fresh SimpleDateFormat per call: the class is not thread-safe, so
        // it must not be cached in this shared, application-scoped bean.
        final SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        return format.format(Calendar.getInstance().getTime());
    }
}
Java
package pl.polidea.treeview; import android.content.Context; import android.content.res.TypedArray; import android.graphics.drawable.Drawable; import android.util.AttributeSet; import android.view.Gravity; import android.view.View; import android.widget.AdapterView; import android.widget.ListAdapter; import android.widget.ListView; /** * Tree view, expandable multi-level. * * <pre> * attr ref pl.polidea.treeview.R.styleable#TreeViewList_collapsible * attr ref pl.polidea.treeview.R.styleable#TreeViewList_src_expanded * attr ref pl.polidea.treeview.R.styleable#TreeViewList_src_collapsed * attr ref pl.polidea.treeview.R.styleable#TreeViewList_indent_width * attr ref pl.polidea.treeview.R.styleable#TreeViewList_handle_trackball_press * attr ref pl.polidea.treeview.R.styleable#TreeViewList_indicator_gravity * attr ref pl.polidea.treeview.R.styleable#TreeViewList_indicator_background * attr ref pl.polidea.treeview.R.styleable#TreeViewList_row_background * </pre> */ public class TreeViewList extends ListView { private static final int DEFAULT_COLLAPSED_RESOURCE = R.drawable.collapsed; private static final int DEFAULT_EXPANDED_RESOURCE = R.drawable.expanded; private static final int DEFAULT_INDENT = 0; private static final int DEFAULT_GRAVITY = Gravity.LEFT | Gravity.CENTER_VERTICAL; private Drawable expandedDrawable; private Drawable collapsedDrawable; private Drawable rowBackgroundDrawable; private Drawable indicatorBackgroundDrawable; private int indentWidth = 0; private int indicatorGravity = 0; private AbstractTreeViewAdapter< ? 
> treeAdapter; private boolean collapsible; private boolean handleTrackballPress; public TreeViewList(final Context context, final AttributeSet attrs) { this(context, attrs, R.style.treeViewListStyle); } public TreeViewList(final Context context) { this(context, null); } public TreeViewList(final Context context, final AttributeSet attrs, final int defStyle) { super(context, attrs, defStyle); parseAttributes(context, attrs); } private void parseAttributes(final Context context, final AttributeSet attrs) { final TypedArray a = context.obtainStyledAttributes(attrs, R.styleable.TreeViewList); expandedDrawable = a.getDrawable(R.styleable.TreeViewList_src_expanded); if (expandedDrawable == null) { expandedDrawable = context.getResources().getDrawable( DEFAULT_EXPANDED_RESOURCE); } collapsedDrawable = a .getDrawable(R.styleable.TreeViewList_src_collapsed); if (collapsedDrawable == null) { collapsedDrawable = context.getResources().getDrawable( DEFAULT_COLLAPSED_RESOURCE); } indentWidth = a.getDimensionPixelSize( R.styleable.TreeViewList_indent_width, DEFAULT_INDENT); indicatorGravity = a.getInteger( R.styleable.TreeViewList_indicator_gravity, DEFAULT_GRAVITY); indicatorBackgroundDrawable = a .getDrawable(R.styleable.TreeViewList_indicator_background); rowBackgroundDrawable = a .getDrawable(R.styleable.TreeViewList_row_background); collapsible = a.getBoolean(R.styleable.TreeViewList_collapsible, true); handleTrackballPress = a.getBoolean( R.styleable.TreeViewList_handle_trackball_press, true); } @Override public void setAdapter(final ListAdapter adapter) { if (!(adapter instanceof AbstractTreeViewAdapter)) { throw new TreeConfigurationException( "The adapter is not of TreeViewAdapter type"); } treeAdapter = (AbstractTreeViewAdapter< ? 
>) adapter; syncAdapter(); super.setAdapter(treeAdapter); } private void syncAdapter() { treeAdapter.setCollapsedDrawable(collapsedDrawable); treeAdapter.setExpandedDrawable(expandedDrawable); treeAdapter.setIndicatorGravity(indicatorGravity); treeAdapter.setIndentWidth(indentWidth); treeAdapter.setIndicatorBackgroundDrawable(indicatorBackgroundDrawable); treeAdapter.setRowBackgroundDrawable(rowBackgroundDrawable); treeAdapter.setCollapsible(collapsible); if (handleTrackballPress) { setOnItemClickListener(new OnItemClickListener() { @Override public void onItemClick(final AdapterView< ? > parent, final View view, final int position, final long id) { treeAdapter.handleItemClick(view, view.getTag()); } }); } else { setOnClickListener(null); } } public void setExpandedDrawable(final Drawable expandedDrawable) { this.expandedDrawable = expandedDrawable; syncAdapter(); treeAdapter.refresh(); } public void setCollapsedDrawable(final Drawable collapsedDrawable) { this.collapsedDrawable = collapsedDrawable; syncAdapter(); treeAdapter.refresh(); } public void setRowBackgroundDrawable(final Drawable rowBackgroundDrawable) { this.rowBackgroundDrawable = rowBackgroundDrawable; syncAdapter(); treeAdapter.refresh(); } public void setIndicatorBackgroundDrawable( final Drawable indicatorBackgroundDrawable) { this.indicatorBackgroundDrawable = indicatorBackgroundDrawable; syncAdapter(); treeAdapter.refresh(); } public void setIndentWidth(final int indentWidth) { this.indentWidth = indentWidth; syncAdapter(); treeAdapter.refresh(); } public void setIndicatorGravity(final int indicatorGravity) { this.indicatorGravity = indicatorGravity; syncAdapter(); treeAdapter.refresh(); } public void setCollapsible(final boolean collapsible) { this.collapsible = collapsible; syncAdapter(); treeAdapter.refresh(); } public void setHandleTrackballPress(final boolean handleTrackballPress) { this.handleTrackballPress = handleTrackballPress; syncAdapter(); treeAdapter.refresh(); } public Drawable 
getExpandedDrawable() { return expandedDrawable; } public Drawable getCollapsedDrawable() { return collapsedDrawable; } public Drawable getRowBackgroundDrawable() { return rowBackgroundDrawable; } public Drawable getIndicatorBackgroundDrawable() { return indicatorBackgroundDrawable; } public int getIndentWidth() { return indentWidth; } public int getIndicatorGravity() { return indicatorGravity; } public boolean isCollapsible() { return collapsible; } public boolean isHandleTrackballPress() { return handleTrackballPress; } }
Java
package pl.polidea.treeview;

import android.util.Log;

/**
 * Allows to build tree easily in sequential mode (you have to know levels of
 * all the tree elements upfront). You should rather use this class rather than
 * manager if you build initial tree from some external data source.
 *
 * @param <T> type of the node identifier
 */
public class TreeBuilder<T> {
    private static final String TAG = TreeBuilder.class.getSimpleName();

    private final TreeStateManager<T> manager;

    // Id and level of the most recently added node; sequentiallyAddNextNode()
    // uses them to decide where the next node attaches.
    private T lastAddedId = null;
    private int lastLevel = -1;

    public TreeBuilder(final TreeStateManager<T> manager) {
        this.manager = manager;
    }

    /** Removes every node from the underlying manager. */
    public void clear() {
        manager.clear();
    }

    /**
     * Adds new relation to existing tree. Child is set as the last child of the
     * parent node. Parent has to already exist in the tree, child cannot yet
     * exist. This method is mostly useful in case you add entries layer by
     * layer - i.e. first top level entries, then children for all parents, then
     * grand-children and so on.
     *
     * @param parent
     *            parent id
     * @param child
     *            child id
     */
    public synchronized void addRelation(final T parent, final T child) {
        Log.d(TAG, "Adding relation parent:" + parent + " -> child: " + child);
        manager.addAfterChild(parent, child, null);
        // Keep the sequential-add bookkeeping in sync so the two add styles
        // can be mixed.
        lastAddedId = child;
        lastLevel = manager.getLevel(child);
    }

    /**
     * Adds sequentially new node. Using this method is the simplest way of
     * building tree - if you have all the elements in the sequence as they
     * should be displayed in fully-expanded tree. You can combine it with add
     * relation - for example you can add information about few levels using
     * {@link #addRelation(Object, Object)} and then after the right level is
     * added as parent, you can continue adding them using sequential operation.
     *
     * @param id
     *            id of the node
     * @param level
     *            its level
     */
    public synchronized void sequentiallyAddNextNode(final T id, final int level) {
        Log.d(TAG, "Adding sequentiall node " + id + " at level " + level);
        if (lastAddedId == null) {
            // First node ever: must be a root (level 0, checked downstream).
            addNodeToParentOneLevelDown(null, id, level);
        } else {
            if (level <= lastLevel) {
                // Same level as (or shallower than) the previous node: walk up
                // from the last node to find the ancestor one level above.
                final T parent = findParentAtLevel(lastAddedId, level - 1);
                addNodeToParentOneLevelDown(parent, id, level);
            } else {
                // Deeper: the previous node becomes the parent.
                addNodeToParentOneLevelDown(lastAddedId, id, level);
            }
        }
    }

    /**
     * Find parent of the node at the level specified.
     *
     * @param node
     *            node from which we start
     * @param levelToFind
     *            level which we are looking for
     * @return the node found (null if it is topmost node).
     */
    private T findParentAtLevel(final T node, final int levelToFind) {
        T parent = manager.getParent(node);
        while (parent != null) {
            if (manager.getLevel(parent) == levelToFind) {
                break;
            }
            parent = manager.getParent(parent);
        }
        return parent;
    }

    /**
     * Adds node to parent at the level specified. But it verifies that the
     * level is one level down than the parent!
     *
     * @param parent
     *            parent parent
     * @param id
     *            new node id
     * @param level
     *            should always be parent's level + 1
     */
    private void addNodeToParentOneLevelDown(final T parent, final T id,
            final int level) {
        if (parent == null && level != 0) {
            throw new TreeConfigurationException("Trying to add new id " + id
                    + " to top level with level != 0 (" + level + ")");
        }
        if (parent != null && manager.getLevel(parent) != level - 1) {
            throw new TreeConfigurationException("Trying to add new id " + id
                    + " <" + level + "> to " + parent + " <"
                    + manager.getLevel(parent)
                    + ">. The difference in levels up is bigger than 1.");
        }
        manager.addAfterChild(parent, id, null);
        setLastAdded(id, level);
    }

    private void setLastAdded(final T id, final int level) {
        lastAddedId = id;
        lastLevel = level;
    }
}
Java
package pl.polidea.treeview;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import android.database.DataSetObserver;
import android.util.Log;

/**
 * In-memory manager of tree state.
 *
 * Nodes are kept in a flat id-to-node map plus a sentinel root; visibility
 * (expanded/collapsed) is stored per node, and the list of currently visible
 * ids is cached lazily and invalidated on every structural change.
 *
 * @param <T>
 *            type of identifier
 */
public class InMemoryTreeStateManager<T> implements TreeStateManager<T> {
    private static final String TAG = InMemoryTreeStateManager.class
            .getSimpleName();
    private static final long serialVersionUID = 1L;
    private final Map<T, InMemoryTreeNode<T>> allNodes = new HashMap<T, InMemoryTreeNode<T>>();
    // Invisible pseudo-root holding all top-level nodes (level -1, always visible).
    private final InMemoryTreeNode<T> topSentinel = new InMemoryTreeNode<T>(
            null, null, -1, true);

    // Lazily initialised caches; null means "rebuild on next access".
    private transient List<T> visibleListCache = null;
    private transient List<T> unmodifiableVisibleList = null;

    private boolean visibleByDefault = true;

    private final transient Set<DataSetObserver> observers = new HashSet<DataSetObserver>();

    /** Drops the caches and notifies every registered observer. */
    private synchronized void internalDataSetChanged() {
        visibleListCache = null;
        unmodifiableVisibleList = null;
        for (final DataSetObserver observer : observers) {
            observer.onChanged();
        }
    }

    /**
     * If true new nodes are visible by default.
     *
     * @param visibleByDefault
     *            if true, then newly added nodes are expanded by default
     */
    public void setVisibleByDefault(final boolean visibleByDefault) {
        this.visibleByDefault = visibleByDefault;
    }

    private InMemoryTreeNode<T> getNodeFromTreeOrThrow(final T id) {
        if (id == null) {
            throw new NodeNotInTreeException("(null)");
        }
        final InMemoryTreeNode<T> node = allNodes.get(id);
        if (node == null) {
            throw new NodeNotInTreeException(id.toString());
        }
        return node;
    }

    // Like getNodeFromTreeOrThrow, but a null id means the sentinel root.
    private InMemoryTreeNode<T> getNodeFromTreeOrThrowAllowRoot(final T id) {
        if (id == null) {
            return topSentinel;
        }
        return getNodeFromTreeOrThrow(id);
    }

    private void expectNodeNotInTreeYet(final T id) {
        final InMemoryTreeNode<T> node = allNodes.get(id);
        if (node != null) {
            throw new NodeAlreadyInTreeException(id.toString(), node.toString());
        }
    }

    @Override
    public synchronized TreeNodeInfo<T> getNodeInfo(final T id) {
        final InMemoryTreeNode<T> node = getNodeFromTreeOrThrow(id);
        final List<InMemoryTreeNode<T>> children = node.getChildren();
        // A node counts as expanded when its first child is visible.
        boolean expanded = false;
        if (!children.isEmpty() && children.get(0).isVisible()) {
            expanded = true;
        }
        return new TreeNodeInfo<T>(id, node.getLevel(), !children.isEmpty(),
                node.isVisible(), expanded);
    }

    @Override
    public synchronized List<T> getChildren(final T id) {
        final InMemoryTreeNode<T> node = getNodeFromTreeOrThrowAllowRoot(id);
        return node.getChildIdList();
    }

    @Override
    public synchronized T getParent(final T id) {
        final InMemoryTreeNode<T> node = getNodeFromTreeOrThrowAllowRoot(id);
        return node.getParent();
    }

    // Visibility a new child of this node should get: match the existing
    // children, or fall back to visibleByDefault when there are none.
    private boolean getChildrenVisibility(final InMemoryTreeNode<T> node) {
        boolean visibility;
        final List<InMemoryTreeNode<T>> children = node.getChildren();
        if (children.isEmpty()) {
            visibility = visibleByDefault;
        } else {
            visibility = children.get(0).isVisible();
        }
        return visibility;
    }

    @Override
    public synchronized void addBeforeChild(final T parent, final T newChild,
            final T beforeChild) {
        expectNodeNotInTreeYet(newChild);
        final InMemoryTreeNode<T> node = getNodeFromTreeOrThrowAllowRoot(parent);
        final boolean visibility = getChildrenVisibility(node);
        // top nodes are always expanded.
        if (beforeChild == null) {
            final InMemoryTreeNode<T> added = node.add(0, newChild, visibility);
            allNodes.put(newChild, added);
        } else {
            // NOTE(review): when beforeChild is not found (index == -1) the
            // new node is inserted at position 0 — confirm this fallback.
            final int index = node.indexOf(beforeChild);
            final InMemoryTreeNode<T> added = node.add(index == -1 ? 0 : index,
                    newChild, visibility);
            allNodes.put(newChild, added);
        }
        if (visibility) {
            internalDataSetChanged();
        }
    }

    @Override
    public synchronized void addAfterChild(final T parent, final T newChild,
            final T afterChild) {
        expectNodeNotInTreeYet(newChild);
        final InMemoryTreeNode<T> node = getNodeFromTreeOrThrowAllowRoot(parent);
        final boolean visibility = getChildrenVisibility(node);
        if (afterChild == null) {
            final InMemoryTreeNode<T> added = node.add(
                    node.getChildrenListSize(), newChild, visibility);
            allNodes.put(newChild, added);
        } else {
            final int index = node.indexOf(afterChild);
            final InMemoryTreeNode<T> added = node.add(
                    index == -1 ? node.getChildrenListSize() : index + 1,
                    newChild, visibility);
            allNodes.put(newChild, added);
        }
        if (visibility) {
            internalDataSetChanged();
        }
    }

    @Override
    public synchronized void removeNodeRecursively(final T id) {
        final InMemoryTreeNode<T> node = getNodeFromTreeOrThrowAllowRoot(id);
        final boolean visibleNodeChanged = removeNodeRecursively(node);
        final T parent = node.getParent();
        final InMemoryTreeNode<T> parentNode = getNodeFromTreeOrThrowAllowRoot(parent);
        parentNode.removeChild(id);
        if (visibleNodeChanged) {
            internalDataSetChanged();
        }
    }

    // Removes the subtree rooted at node from allNodes; returns true when any
    // removed node was visible (i.e. observers need to be notified).
    private boolean removeNodeRecursively(final InMemoryTreeNode<T> node) {
        boolean visibleNodeChanged = false;
        for (final InMemoryTreeNode<T> child : node.getChildren()) {
            if (removeNodeRecursively(child)) {
                visibleNodeChanged = true;
            }
        }
        node.clearChildren();
        if (node.getId() != null) {
            allNodes.remove(node.getId());
            if (node.isVisible()) {
                visibleNodeChanged = true;
            }
        }
        return visibleNodeChanged;
    }

    private void setChildrenVisibility(final InMemoryTreeNode<T> node,
            final boolean visible, final boolean recursive) {
        for (final InMemoryTreeNode<T> child : node.getChildren()) {
            child.setVisible(visible);
            if (recursive) {
                setChildrenVisibility(child, visible, true);
            }
        }
    }

    @Override
    public synchronized void expandDirectChildren(final T id) {
        Log.d(TAG, "Expanding direct children of " + id);
        final InMemoryTreeNode<T> node = getNodeFromTreeOrThrowAllowRoot(id);
        setChildrenVisibility(node, true, false);
        internalDataSetChanged();
    }

    @Override
    public synchronized void expandEverythingBelow(final T id) {
        Log.d(TAG, "Expanding all children below " + id);
        final InMemoryTreeNode<T> node = getNodeFromTreeOrThrowAllowRoot(id);
        setChildrenVisibility(node, true, true);
        internalDataSetChanged();
    }

    @Override
    public synchronized void collapseChildren(final T id) {
        final InMemoryTreeNode<T> node = getNodeFromTreeOrThrowAllowRoot(id);
        if (node == topSentinel) {
            // Collapsing the (null) root: collapse below each top-level node,
            // keeping the top-level nodes themselves visible.
            for (final InMemoryTreeNode<T> n : topSentinel.getChildren()) {
                setChildrenVisibility(n, false, true);
            }
        } else {
            setChildrenVisibility(node, false, true);
        }
        internalDataSetChanged();
    }

    @Override
    public synchronized T getNextSibling(final T id) {
        final T parent = getParent(id);
        final InMemoryTreeNode<T> parentNode = getNodeFromTreeOrThrowAllowRoot(parent);
        boolean returnNext = false;
        for (final InMemoryTreeNode<T> child : parentNode.getChildren()) {
            if (returnNext) {
                return child.getId();
            }
            if (child.getId().equals(id)) {
                returnNext = true;
            }
        }
        return null;
    }

    @Override
    public synchronized T getPreviousSibling(final T id) {
        final T parent = getParent(id);
        final InMemoryTreeNode<T> parentNode = getNodeFromTreeOrThrowAllowRoot(parent);
        // Fixed: the original declared `final T previousSibling = null` and
        // never updated it, so this method always returned null. Track the
        // previously seen child while scanning for id.
        T previousSibling = null;
        for (final InMemoryTreeNode<T> child : parentNode.getChildren()) {
            if (child.getId().equals(id)) {
                return previousSibling;
            }
            previousSibling = child.getId();
        }
        return null;
    }

    @Override
    public synchronized boolean isInTree(final T id) {
        return allNodes.containsKey(id);
    }

    @Override
    public synchronized int getVisibleCount() {
        return getVisibleList().size();
    }

    @Override
    public synchronized List<T> getVisibleList() {
        T currentId = null;
        if (visibleListCache == null) {
            // Rebuild by walking getNextVisible from the root until exhausted.
            visibleListCache = new ArrayList<T>(allNodes.size());
            do {
                currentId = getNextVisible(currentId);
                if (currentId == null) {
                    break;
                } else {
                    visibleListCache.add(currentId);
                }
            } while (true);
        }
        if (unmodifiableVisibleList == null) {
            unmodifiableVisibleList = Collections
                    .unmodifiableList(visibleListCache);
        }
        return unmodifiableVisibleList;
    }

    /**
     * Returns the next visible node after {@code id} in depth-first order:
     * first visible child, else next sibling, else the nearest ancestor's
     * next sibling. Null id starts from the root.
     */
    public synchronized T getNextVisible(final T id) {
        final InMemoryTreeNode<T> node = getNodeFromTreeOrThrowAllowRoot(id);
        if (!node.isVisible()) {
            return null;
        }
        final List<InMemoryTreeNode<T>> children = node.getChildren();
        if (!children.isEmpty()) {
            final InMemoryTreeNode<T> firstChild = children.get(0);
            if (firstChild.isVisible()) {
                return firstChild.getId();
            }
        }
        final T sibl = getNextSibling(id);
        if (sibl != null) {
            return sibl;
        }
        T parent = node.getParent();
        do {
            if (parent == null) {
                return null;
            }
            final T parentSibling = getNextSibling(parent);
            if (parentSibling != null) {
                return parentSibling;
            }
            parent = getNodeFromTreeOrThrow(parent).getParent();
        } while (true);
    }

    @Override
    public synchronized void registerDataSetObserver(
            final DataSetObserver observer) {
        observers.add(observer);
    }

    @Override
    public synchronized void unregisterDataSetObserver(
            final DataSetObserver observer) {
        observers.remove(observer);
    }

    @Override
    public int getLevel(final T id) {
        return getNodeFromTreeOrThrow(id).getLevel();
    }

    @Override
    public Integer[] getHierarchyDescription(final T id) {
        // hierarchy[k] = index of the level-k ancestor within its parent.
        final int level = getLevel(id);
        final Integer[] hierarchy = new Integer[level + 1];
        int currentLevel = level;
        T currentId = id;
        T parent = getParent(currentId);
        while (currentLevel >= 0) {
            hierarchy[currentLevel--] = getChildren(parent).indexOf(currentId);
            currentId = parent;
            parent = getParent(parent);
        }
        return hierarchy;
    }

    // Appends a pretty-printed (indented) dump of the subtree rooted at id.
    private void appendToSb(final StringBuilder sb, final T id) {
        if (id != null) {
            final TreeNodeInfo<T> node = getNodeInfo(id);
            final int indent = node.getLevel() * 4;
            final char[] indentString = new char[indent];
            Arrays.fill(indentString, ' ');
            sb.append(indentString);
            sb.append(node.toString());
            sb.append(Arrays.asList(getHierarchyDescription(id)).toString());
            sb.append("\n");
        }
        final List<T> children = getChildren(id);
        for (final T child : children) {
            appendToSb(sb, child);
        }
    }

    @Override
    public synchronized String toString() {
        final StringBuilder sb = new StringBuilder();
        appendToSb(sb, null);
        return sb.toString();
    }

    @Override
    public synchronized void clear() {
        allNodes.clear();
        topSentinel.clearChildren();
        internalDataSetChanged();
    }

    @Override
    public void refresh() {
        internalDataSetChanged();
    }
}
Java
package pl.polidea.treeview;

/**
 * Thrown when an attempt is made to insert a node whose id is already present
 * in the tree.
 */
public class NodeAlreadyInTreeException extends RuntimeException {
    private static final long serialVersionUID = 1L;

    public NodeAlreadyInTreeException(final String id, final String oldNode) {
        super(buildMessage(id, oldNode));
    }

    // Builds the diagnostic message describing both the duplicate id and the
    // node already stored under it.
    private static String buildMessage(final String id, final String oldNode) {
        return "The node has already been added to the tree: " + id
                + ". Old node is:" + oldNode;
    }
}
Java
/** * Provides expandable Tree View implementation. */ package pl.polidea.treeview;
Java
package pl.polidea.treeview;

import android.app.Activity;
import android.content.Context;
import android.database.DataSetObserver;
import android.graphics.drawable.Drawable;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.View.OnClickListener;
import android.view.ViewGroup;
import android.widget.BaseAdapter;
import android.widget.FrameLayout;
import android.widget.FrameLayout.LayoutParams;
import android.widget.ImageView;
import android.widget.ImageView.ScaleType;
import android.widget.LinearLayout;
import android.widget.ListAdapter;

/**
 * Adapter used to feed the table view.
 *
 * Delegates all tree-state bookkeeping to a {@link TreeStateManager}; this
 * class is only responsible for building row views (indicator + indented
 * content frame) and forwarding expand/collapse gestures.
 *
 * @param <T>
 *            class for ID of the tree
 */
public abstract class AbstractTreeViewAdapter<T> extends BaseAdapter implements
        ListAdapter {
    private static final String TAG = AbstractTreeViewAdapter.class
            .getSimpleName();
    private final TreeStateManager<T> treeStateManager;
    private final int numberOfLevels;
    private final LayoutInflater layoutInflater;
    // Width (px) reserved per indentation level; recomputed from the
    // indicator drawables in calculateIndentWidth().
    private int indentWidth = 0;
    private int indicatorGravity = 0;
    private Drawable collapsedDrawable;
    private Drawable expandedDrawable;
    private Drawable indicatorBackgroundDrawable;
    private Drawable rowBackgroundDrawable;

    // Toggles expand/collapse when the indicator icon is tapped; the node id
    // is carried in the view's tag.
    private final OnClickListener indicatorClickListener = new OnClickListener() {
        @Override
        public void onClick(final View v) {
            @SuppressWarnings("unchecked")
            final T id = (T) v.getTag();
            expandCollapse(id);
        }
    };

    private boolean collapsible;
    private final Activity activity;

    public Activity getActivity() {
        return activity;
    }

    protected TreeStateManager<T> getManager() {
        return treeStateManager;
    }

    // Expands a collapsed node or collapses an expanded one; leaf nodes are
    // ignored.
    protected void expandCollapse(final T id) {
        final TreeNodeInfo<T> info = treeStateManager.getNodeInfo(id);
        if (!info.isWithChildren()) {
            // ignore - no default action
            return;
        }
        if (info.isExpanded()) {
            treeStateManager.collapseChildren(id);
        } else {
            treeStateManager.expandDirectChildren(id);
        }
    }

    // Ensures the per-level indent is at least as wide as the widest
    // indicator drawable currently set.
    private void calculateIndentWidth() {
        if (expandedDrawable != null) {
            indentWidth = Math.max(getIndentWidth(),
                    expandedDrawable.getIntrinsicWidth());
        }
        if (collapsedDrawable != null) {
            indentWidth = Math.max(getIndentWidth(),
                    collapsedDrawable.getIntrinsicWidth());
        }
    }

    public AbstractTreeViewAdapter(final Activity activity,
            final TreeStateManager<T> treeStateManager, final int numberOfLevels) {
        this.activity = activity;
        this.treeStateManager = treeStateManager;
        this.layoutInflater = (LayoutInflater) activity
                .getSystemService(Context.LAYOUT_INFLATER_SERVICE);
        this.numberOfLevels = numberOfLevels;
        this.collapsedDrawable = null;
        this.expandedDrawable = null;
        this.rowBackgroundDrawable = null;
        this.indicatorBackgroundDrawable = null;
    }

    // Observer registration is forwarded to the manager, which fires the
    // observers whenever the tree structure or visibility changes.
    @Override
    public void registerDataSetObserver(final DataSetObserver observer) {
        treeStateManager.registerDataSetObserver(observer);
    }

    @Override
    public void unregisterDataSetObserver(final DataSetObserver observer) {
        treeStateManager.unregisterDataSetObserver(observer);
    }

    @Override
    public int getCount() {
        return treeStateManager.getVisibleCount();
    }

    @Override
    public Object getItem(final int position) {
        return getItemId(position);
    }

    /** Returns the tree id at the given visible-list position. */
    public T getTreeId(final int position) {
        return treeStateManager.getVisibleList().get(position);
    }

    public TreeNodeInfo<T> getTreeNodeInfo(final int position) {
        return treeStateManager.getNodeInfo(getTreeId(position));
    }

    @Override
    public boolean hasStableIds() { // NOPMD
        return true;
    }

    // One view type per tree level so ListView recycles rows of the same
    // indentation together.
    @Override
    public int getItemViewType(final int position) {
        return getTreeNodeInfo(position).getLevel();
    }

    @Override
    public int getViewTypeCount() {
        return numberOfLevels;
    }

    @Override
    public boolean isEmpty() {
        return getCount() == 0;
    }

    @Override
    public boolean areAllItemsEnabled() { // NOPMD
        return true;
    }

    @Override
    public boolean isEnabled(final int position) { // NOPMD
        return true;
    }

    protected int getTreeListItemWrapperId() {
        return R.layout.tree_list_item_wrapper;
    }

    @Override
    public final View getView(final int position, final View convertView,
            final ViewGroup parent) {
        Log.d(TAG, "Creating a view based on " + convertView
                + " with position " + position);
        final TreeNodeInfo<T> nodeInfo = getTreeNodeInfo(position);
        if (convertView == null) {
            // No recyclable row: inflate the wrapper and ask the subclass for
            // fresh content.
            Log.d(TAG, "Creating the view a new");
            final LinearLayout layout = (LinearLayout) layoutInflater.inflate(
                    getTreeListItemWrapperId(), null);
            return populateTreeItem(layout, getNewChildView(nodeInfo),
                    nodeInfo, true);
        } else {
            // Recycled row: rebind the existing child view to the new node.
            Log.d(TAG, "Reusing the view");
            final LinearLayout linear = (LinearLayout) convertView;
            final FrameLayout frameLayout = (FrameLayout) linear
                    .findViewById(R.id.treeview_list_item_frame);
            final View childView = frameLayout.getChildAt(0);
            updateView(childView, nodeInfo);
            return populateTreeItem(linear, childView, nodeInfo, false);
        }
    }

    /**
     * Called when new view is to be created.
     *
     * @param treeNodeInfo
     *            node info
     * @return view that should be displayed as tree content
     */
    public abstract View getNewChildView(TreeNodeInfo<T> treeNodeInfo);

    /**
     * Called when new view is going to be reused. You should update the view
     * and fill it in with the data required to display the new information. You
     * can also create a new view, which will mean that the old view will not be
     * reused.
     *
     * @param view
     *            view that should be updated with the new values
     * @param treeNodeInfo
     *            node info used to populate the view
     * @return view to used as row indented content
     */
    public abstract View updateView(View view, TreeNodeInfo<T> treeNodeInfo);

    /**
     * Retrieves background drawable for the node.
     *
     * @param treeNodeInfo
     *            node info
     * @return drawable returned as background for the whole row. Might be null,
     *         then default background is used
     */
    public Drawable getBackgroundDrawable(final TreeNodeInfo<T> treeNodeInfo) { // NOPMD
        return null;
    }

    // Falls back to the platform list-selector background when no drawable is
    // configured; mutate() prevents shared-state bleed between rows.
    private Drawable getDrawableOrDefaultBackground(final Drawable r) {
        if (r == null) {
            return activity.getResources()
                    .getDrawable(R.drawable.list_selector_background).mutate();
        } else {
            return r;
        }
    }

    /**
     * Assembles a full row: background, indentation/indicator area and the
     * content frame holding the subclass-provided child view.
     */
    public final LinearLayout populateTreeItem(final LinearLayout layout,
            final View childView, final TreeNodeInfo<T> nodeInfo,
            final boolean newChildView) {
        final Drawable individualRowDrawable = getBackgroundDrawable(nodeInfo);
        layout.setBackgroundDrawable(individualRowDrawable == null ? getDrawableOrDefaultBackground(rowBackgroundDrawable)
                : individualRowDrawable);
        final LinearLayout.LayoutParams indicatorLayoutParams = new LinearLayout.LayoutParams(
                calculateIndentation(nodeInfo), LayoutParams.FILL_PARENT);
        final LinearLayout indicatorLayout = (LinearLayout) layout
                .findViewById(R.id.treeview_list_item_image_layout);
        indicatorLayout.setGravity(indicatorGravity);
        indicatorLayout.setLayoutParams(indicatorLayoutParams);
        final ImageView image = (ImageView) layout
                .findViewById(R.id.treeview_list_item_image);
        image.setImageDrawable(getDrawable(nodeInfo));
        image.setBackgroundDrawable(getDrawableOrDefaultBackground(indicatorBackgroundDrawable));
        image.setScaleType(ScaleType.CENTER);
        image.setTag(nodeInfo.getId());
        // Only nodes with children react to indicator taps (and only when the
        // tree is collapsible).
        if (nodeInfo.isWithChildren() && collapsible) {
            image.setOnClickListener(indicatorClickListener);
        } else {
            image.setOnClickListener(null);
        }
        layout.setTag(nodeInfo.getId());
        final FrameLayout frameLayout = (FrameLayout) layout
                .findViewById(R.id.treeview_list_item_frame);
        final FrameLayout.LayoutParams childParams = new FrameLayout.LayoutParams(
                LayoutParams.FILL_PARENT, LayoutParams.FILL_PARENT);
        if (newChildView) {
            frameLayout.addView(childView, childParams);
        }
        frameLayout.setTag(nodeInfo.getId());
        return layout;
    }

    // Indent grows with level; one extra slot is reserved for the indicator
    // when the tree is collapsible.
    protected int calculateIndentation(final TreeNodeInfo<T> nodeInfo) {
        return getIndentWidth() * (nodeInfo.getLevel() + (collapsible ? 1 : 0));
    }

    protected Drawable getDrawable(final TreeNodeInfo<T> nodeInfo) {
        if (!nodeInfo.isWithChildren() || !collapsible) {
            return getDrawableOrDefaultBackground(indicatorBackgroundDrawable);
        }
        if (nodeInfo.isExpanded()) {
            return expandedDrawable;
        } else {
            return collapsedDrawable;
        }
    }

    public void setIndicatorGravity(final int indicatorGravity) {
        this.indicatorGravity = indicatorGravity;
    }

    public void setCollapsedDrawable(final Drawable collapsedDrawable) {
        this.collapsedDrawable = collapsedDrawable;
        calculateIndentWidth();
    }

    public void setExpandedDrawable(final Drawable expandedDrawable) {
        this.expandedDrawable = expandedDrawable;
        calculateIndentWidth();
    }

    public void setIndentWidth(final int indentWidth) {
        this.indentWidth = indentWidth;
        calculateIndentWidth();
    }

    public void setRowBackgroundDrawable(final Drawable rowBackgroundDrawable) {
        this.rowBackgroundDrawable = rowBackgroundDrawable;
    }

    public void setIndicatorBackgroundDrawable(
            final Drawable indicatorBackgroundDrawable) {
        this.indicatorBackgroundDrawable = indicatorBackgroundDrawable;
    }

    public void setCollapsible(final boolean collapsible) {
        this.collapsible = collapsible;
    }

    public void refresh() {
        treeStateManager.refresh();
    }

    private int getIndentWidth() {
        return indentWidth;
    }

    /** Default row-click action: toggle expand/collapse for the node. */
    @SuppressWarnings("unchecked")
    public void handleItemClick(final View view, final Object id) {
        expandCollapse((T) id);
    }
}
Java
package pl.polidea.treeview;

/**
 * Signals that the tree view has been configured incorrectly.
 */
public class TreeConfigurationException extends RuntimeException {

    private static final long serialVersionUID = 1L;

    /**
     * @param detailMessage
     *            human-readable description of the configuration problem
     */
    public TreeConfigurationException(final String detailMessage) {
        super(detailMessage);
    }
}
Java
package pl.polidea.treeview;

import java.io.Serializable;
import java.util.LinkedList;
import java.util.List;

/**
 * Node. It is package protected so that it cannot be used outside.
 *
 * Stores the node's id, its parent id, its depth, its visibility flag and the
 * ordered list of child nodes. A derived list of child ids is cached and
 * invalidated on every structural change.
 *
 * @param <T>
 *            type of the identifier used by the tree
 */
class InMemoryTreeNode<T> implements Serializable {
    private static final long serialVersionUID = 1L;
    private final T id;
    private final T parent;
    private final int level;
    private boolean visible = true;
    private final List<InMemoryTreeNode<T>> children = new LinkedList<InMemoryTreeNode<T>>();
    // Lazily built list of child ids; null means "needs rebuilding".
    private List<T> childIdListCache = null;

    public InMemoryTreeNode(final T id, final T parent, final int level,
            final boolean visible) {
        super();
        this.id = id;
        this.parent = parent;
        this.level = level;
        this.visible = visible;
    }

    /**
     * Returns the index of the given child id among this node's children, or
     * -1 if it is not a child of this node.
     */
    public int indexOf(final T id) {
        return getChildIdList().indexOf(id);
    }

    /**
     * Cache is built lazily only if needed. The cache is cleaned on any
     * structure change for that node!).
     *
     * @return list of ids of children
     */
    public synchronized List<T> getChildIdList() {
        if (childIdListCache == null) {
            childIdListCache = new LinkedList<T>();
            for (final InMemoryTreeNode<T> n : children) {
                childIdListCache.add(n.getId());
            }
        }
        return childIdListCache;
    }

    public boolean isVisible() {
        return visible;
    }

    public void setVisible(final boolean visible) {
        this.visible = visible;
    }

    public int getChildrenListSize() {
        return children.size();
    }

    /**
     * Inserts a new child node at the given index and returns it. Invalidates
     * the child-id cache.
     */
    public synchronized InMemoryTreeNode<T> add(final int index, final T child,
            final boolean visible) {
        childIdListCache = null;
        // Note! top level children are always visible (!)
        // (a null id marks the sentinel root, so its direct children are
        // forced visible regardless of the flag passed in)
        final InMemoryTreeNode<T> newNode = new InMemoryTreeNode<T>(child,
                getId(), getLevel() + 1, getId() == null || visible);
        children.add(index, newNode);
        return newNode;
    }

    /**
     * Note. This method should technically return unmodifiable collection, but
     * for performance reason on small devices we do not do it.
     *
     * @return children list
     */
    public List<InMemoryTreeNode<T>> getChildren() {
        return children;
    }

    /** Removes all children and invalidates the child-id cache. */
    public synchronized void clearChildren() {
        children.clear();
        childIdListCache = null;
    }

    /** Removes the child with the given id, if present. */
    public synchronized void removeChild(final T child) {
        final int childIndex = indexOf(child);
        if (childIndex != -1) {
            children.remove(childIndex);
            childIdListCache = null;
        }
    }

    @Override
    public String toString() {
        return "InMemoryTreeNode [id=" + getId() + ", parent=" + getParent()
                + ", level=" + getLevel() + ", visible=" + visible
                + ", children=" + children + ", childIdListCache="
                + childIdListCache + "]";
    }

    T getId() {
        return id;
    }

    T getParent() {
        return parent;
    }

    int getLevel() {
        return level;
    }
}
Java
package pl.polidea.treeview;

import java.io.Serializable;
import java.util.List;

import android.database.DataSetObserver;

/**
 * Manages information about state of the tree. It only keeps information about
 * tree elements, not the elements themselves. Implementations are expected to
 * notify registered {@link DataSetObserver}s on every structural or visibility
 * change.
 *
 * @param <T>
 *            type of the identifier for nodes in the tree
 */
public interface TreeStateManager<T> extends Serializable {

    /**
     * Returns array of integers showing the location of the node in hierarchy.
     * It corresponds to heading numbering. {0,0,0} in 3 level node is the first
     * node {0,0,1} is second leaf (assuming that there are two leaves in first
     * subnode of the first node).
     *
     * @param id
     *            id of the node
     * @return textual description of the hierarchy in tree for the node.
     */
    Integer[] getHierarchyDescription(T id);

    /**
     * Returns level of the node.
     *
     * @param id
     *            id of the node
     * @return level in the tree (top-level nodes are level 0)
     */
    int getLevel(T id);

    /**
     * Returns information about the node.
     *
     * @param id
     *            node id
     * @return node info
     */
    TreeNodeInfo<T> getNodeInfo(T id);

    /**
     * Returns children of the node.
     *
     * @param id
     *            id of the node or null if asking for top nodes
     * @return children of the node
     */
    List<T> getChildren(T id);

    /**
     * Returns parent of the node.
     *
     * @param id
     *            id of the node
     * @return parent id or null if no parent
     */
    T getParent(T id);

    /**
     * Adds the node before child or at the beginning.
     *
     * @param parent
     *            id of the parent node. If null - adds at the top level
     * @param newChild
     *            new child to add if null - adds at the beginning.
     * @param beforeChild
     *            child before which to add the new child
     */
    void addBeforeChild(T parent, T newChild, T beforeChild);

    /**
     * Adds the node after child or at the end.
     *
     * @param parent
     *            id of the parent node. If null - adds at the top level.
     * @param newChild
     *            new child to add. If null - adds at the end.
     * @param afterChild
     *            child after which to add the new child
     */
    void addAfterChild(T parent, T newChild, T afterChild);

    /**
     * Removes the node and all children from the tree.
     *
     * @param id
     *            id of the node to remove or null if all nodes are to be
     *            removed.
     */
    void removeNodeRecursively(T id);

    /**
     * Expands all children of the node.
     *
     * @param id
     *            node which children should be expanded. cannot be null (top
     *            nodes are always expanded!).
     */
    void expandDirectChildren(T id);

    /**
     * Expands everything below the node specified. Might be null - then expands
     * all.
     *
     * @param id
     *            node which children should be expanded or null if all nodes
     *            are to be expanded.
     */
    void expandEverythingBelow(T id);

    /**
     * Collapse children.
     *
     * @param id
     *            id collapses everything below node specified. If null,
     *            collapses everything but top-level nodes.
     */
    void collapseChildren(T id);

    /**
     * Returns next sibling of the node (or null if no further sibling).
     *
     * @param id
     *            node id
     * @return the sibling (or null if no next)
     */
    T getNextSibling(T id);

    /**
     * Returns previous sibling of the node (or null if no previous sibling).
     *
     * @param id
     *            node id
     * @return the sibling (or null if no previous)
     */
    T getPreviousSibling(T id);

    /**
     * Checks if given node is already in tree.
     *
     * @param id
     *            id of the node
     * @return true if node is already in tree.
     */
    boolean isInTree(T id);

    /**
     * Count visible elements.
     *
     * @return number of currently visible elements.
     */
    int getVisibleCount();

    /**
     * Returns visible node list.
     *
     * @return return the list of all visible nodes in the right sequence
     */
    List<T> getVisibleList();

    /**
     * Registers observers with the manager.
     *
     * @param observer
     *            observer
     */
    void registerDataSetObserver(final DataSetObserver observer);

    /**
     * Unregisters observers with the manager.
     *
     * @param observer
     *            observer
     */
    void unregisterDataSetObserver(final DataSetObserver observer);

    /**
     * Cleans tree stored in manager. After this operation the tree is empty.
     */
    void clear();

    /**
     * Refreshes views connected to the manager.
     */
    void refresh();
}
Java
package pl.polidea.treeview;

/**
 * Thrown when a node requested from the tree is not present in it.
 */
public class NodeNotInTreeException extends RuntimeException {
    private static final long serialVersionUID = 1L;

    public NodeNotInTreeException(final String id) {
        super(buildMessage(id));
    }

    // Builds the diagnostic message naming the missing node.
    private static String buildMessage(final String id) {
        return "The tree does not contain the node specified: " + id;
    }
}
Java
package pl.polidea.treeview;

/**
 * Immutable snapshot of a single tree node's display state: its id, depth,
 * whether it has children, and its current visibility/expansion flags.
 *
 * @param <T>
 *            type of the id for the tree
 */
public class TreeNodeInfo<T> {
    private final T id;
    private final int level;
    private final boolean withChildren;
    private final boolean visible;
    private final boolean expanded;

    /**
     * Creates the node information.
     *
     * @param id
     *            id of the node
     * @param level
     *            level of the node
     * @param withChildren
     *            whether the node has children.
     * @param visible
     *            whether the tree node is visible.
     * @param expanded
     *            whether the tree node is expanded
     */
    public TreeNodeInfo(final T id, final int level,
            final boolean withChildren, final boolean visible,
            final boolean expanded) {
        this.id = id;
        this.level = level;
        this.withChildren = withChildren;
        this.visible = visible;
        this.expanded = expanded;
    }

    public T getId() {
        return id;
    }

    public int getLevel() {
        return level;
    }

    public boolean isWithChildren() {
        return withChildren;
    }

    public boolean isVisible() {
        return visible;
    }

    public boolean isExpanded() {
        return expanded;
    }

    @Override
    public String toString() {
        final StringBuilder sb = new StringBuilder("TreeNodeInfo [id=");
        sb.append(id);
        sb.append(", level=").append(level);
        sb.append(", withChildren=").append(withChildren);
        sb.append(", visible=").append(visible);
        sb.append(", expanded=").append(expanded);
        sb.append("]");
        return sb.toString();
    }
}
Java
/** * Provides just demo of the TreeView widget. */ package pl.polidea.treeview.demo;
Java
package pl.polidea.treeview.demo;

import java.util.Arrays;
import java.util.Set;

import pl.polidea.treeview.AbstractTreeViewAdapter;
import pl.polidea.treeview.R;
import pl.polidea.treeview.TreeNodeInfo;
import pl.polidea.treeview.TreeStateManager;
import android.view.View;
import android.view.ViewGroup;
import android.widget.CheckBox;
import android.widget.CompoundButton;
import android.widget.CompoundButton.OnCheckedChangeListener;
import android.widget.LinearLayout;
import android.widget.TextView;

/**
 * This is a very simple adapter that provides very basic tree view with a
 * checkboxes and simple item description.
 */
class SimpleStandardAdapter extends AbstractTreeViewAdapter<Long> {

    // Shared set of checked node ids; owned by the hosting activity so it
    // survives adapter swaps.
    private final Set<Long> selected;

    private final OnCheckedChangeListener onCheckedChange = new OnCheckedChangeListener() {
        @Override
        public void onCheckedChanged(final CompoundButton buttonView,
                final boolean isChecked) {
            // The node id travels in the button's tag (set in updateView).
            final Long id = (Long) buttonView.getTag();
            changeSelected(isChecked, id);
        }
    };

    private void changeSelected(final boolean isChecked, final Long id) {
        if (isChecked) {
            selected.add(id);
        } else {
            selected.remove(id);
        }
    }

    public SimpleStandardAdapter(final TreeViewListDemo treeViewListDemo,
            final Set<Long> selected,
            final TreeStateManager<Long> treeStateManager,
            final int numberOfLevels) {
        super(treeViewListDemo, treeStateManager, numberOfLevels);
        this.selected = selected;
    }

    // Builds the row label, e.g. "Node 3[0, 1]".
    private String getDescription(final long id) {
        final Integer[] hierarchy = getManager().getHierarchyDescription(id);
        return "Node " + id + Arrays.asList(hierarchy);
    }

    @Override
    public View getNewChildView(final TreeNodeInfo<Long> treeNodeInfo) {
        final LinearLayout viewLayout = (LinearLayout) getActivity()
                .getLayoutInflater().inflate(R.layout.demo_list_item, null);
        return updateView(viewLayout, treeNodeInfo);
    }

    /**
     * Binds a (possibly recycled) row to the given node: description, level
     * label and the selection checkbox.
     */
    @Override
    public LinearLayout updateView(final View view,
            final TreeNodeInfo<Long> treeNodeInfo) {
        final LinearLayout viewLayout = (LinearLayout) view;
        final TextView descriptionView = (TextView) viewLayout
                .findViewById(R.id.demo_list_item_description);
        final TextView levelView = (TextView) viewLayout
                .findViewById(R.id.demo_list_item_level);
        descriptionView.setText(getDescription(treeNodeInfo.getId()));
        levelView.setText(Integer.toString(treeNodeInfo.getLevel()));
        final CheckBox box = (CheckBox) viewLayout
                .findViewById(R.id.demo_list_checkbox);
        // FIX: on a recycled row the previous binding's listener is still
        // attached, so setChecked() below would fire a spurious
        // onCheckedChanged callback. Detach first, rebind state, then attach.
        box.setOnCheckedChangeListener(null);
        box.setTag(treeNodeInfo.getId());
        if (treeNodeInfo.isWithChildren()) {
            box.setVisibility(View.GONE);
        } else {
            box.setVisibility(View.VISIBLE);
            box.setChecked(selected.contains(treeNodeInfo.getId()));
        }
        box.setOnCheckedChangeListener(onCheckedChange);
        return viewLayout;
    }

    /**
     * Rows with children expand/collapse on click (default behaviour); leaf
     * rows toggle their checkbox instead.
     */
    @Override
    public void handleItemClick(final View view, final Object id) {
        final Long longId = (Long) id;
        final TreeNodeInfo<Long> info = getManager().getNodeInfo(longId);
        if (info.isWithChildren()) {
            super.handleItemClick(view, id);
        } else {
            final ViewGroup vg = (ViewGroup) view;
            final CheckBox cb = (CheckBox) vg
                    .findViewById(R.id.demo_list_checkbox);
            cb.performClick();
        }
    }

    @Override
    public long getItemId(final int position) {
        return getTreeId(position);
    }
}
Java
package pl.polidea.treeview.demo;

import java.util.Set;

import pl.polidea.treeview.R;
import pl.polidea.treeview.TreeNodeInfo;
import pl.polidea.treeview.TreeStateManager;
import android.graphics.Color;
import android.graphics.drawable.ColorDrawable;
import android.graphics.drawable.Drawable;
import android.view.View;
import android.widget.LinearLayout;
import android.widget.TextView;

/**
 * Variant of {@link SimpleStandardAdapter} that shrinks the text as the tree
 * gets deeper and colours the first three levels differently.
 */
final class FancyColouredVariousSizesAdapter extends SimpleStandardAdapter {

    public FancyColouredVariousSizesAdapter(final TreeViewListDemo activity,
            final Set<Long> selected,
            final TreeStateManager<Long> treeStateManager,
            final int numberOfLevels) {
        super(activity, selected, treeStateManager, numberOfLevels);
    }

    @Override
    public LinearLayout updateView(final View view,
            final TreeNodeInfo<Long> treeNodeInfo) {
        // Let the base adapter do the standard binding, then restyle.
        final LinearLayout row = super.updateView(view, treeNodeInfo);
        final TextView description = (TextView) row
                .findViewById(R.id.demo_list_item_description);
        final TextView level = (TextView) row
                .findViewById(R.id.demo_list_item_level);
        // Deeper levels get smaller text: 20sp at the root, -2sp per level.
        final int textSize = 20 - 2 * treeNodeInfo.getLevel();
        description.setTextSize(textSize);
        level.setTextSize(textSize);
        return row;
    }

    @Override
    public Drawable getBackgroundDrawable(final TreeNodeInfo<Long> treeNodeInfo) {
        // Level-dependent row colour; null falls back to the default
        // background for levels deeper than 2.
        final int level = treeNodeInfo.getLevel();
        if (level == 0) {
            return new ColorDrawable(Color.WHITE);
        }
        if (level == 1) {
            return new ColorDrawable(Color.GRAY);
        }
        if (level == 2) {
            return new ColorDrawable(Color.YELLOW);
        }
        return null;
    }
}
Java
package pl.polidea.treeview.demo;

import java.io.Serializable;
import java.util.HashSet;
import java.util.Set;

import pl.polidea.treeview.InMemoryTreeStateManager;
import pl.polidea.treeview.R;
import pl.polidea.treeview.TreeBuilder;
import pl.polidea.treeview.TreeNodeInfo;
import pl.polidea.treeview.TreeStateManager;
import pl.polidea.treeview.TreeViewList;
import android.app.Activity;
import android.os.Bundle;
import android.util.Log;
import android.view.ContextMenu;
import android.view.ContextMenu.ContextMenuInfo;
import android.view.Menu;
import android.view.MenuInflater;
import android.view.MenuItem;
import android.view.View;
import android.widget.AdapterView.AdapterContextMenuInfo;

/**
 * Demo activity showing how the tree view can be used.
 *
 * Builds a fixed demo tree, lets the user switch between the simple and fancy
 * adapters, toggle collapsibility, and manipulate nodes via options/context
 * menus. Tree state survives configuration changes through
 * onSaveInstanceState.
 */
public class TreeViewListDemo extends Activity {

    // Serializable so the choice can round-trip through the saved Bundle.
    private enum TreeType implements Serializable {
        SIMPLE, FANCY
    }

    private final Set<Long> selected = new HashSet<Long>();
    private static final String TAG = TreeViewListDemo.class.getSimpleName();
    private TreeViewList treeView;

    // Each entry is the level of the next sequentially-added node; node ids
    // are simply their index in this array.
    private static final int[] DEMO_NODES = new int[] { 0, 0, 1, 1, 1, 2, 2, 1,
            1, 2, 1, 0, 0, 0, 1, 2, 3, 2, 0, 0, 1, 2, 0, 1, 2, 0, 1 };
    private static final int LEVEL_NUMBER = 4;
    private TreeStateManager<Long> manager = null;
    private FancyColouredVariousSizesAdapter fancyAdapter;
    private SimpleStandardAdapter simpleAdapter;
    private TreeType treeType;
    private boolean collapsible;

    @SuppressWarnings("unchecked")
    @Override
    public void onCreate(final Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        TreeType newTreeType = null;
        boolean newCollapsible;
        if (savedInstanceState == null) {
            // Fresh start: build the demo tree from DEMO_NODES.
            manager = new InMemoryTreeStateManager<Long>();
            final TreeBuilder<Long> treeBuilder = new TreeBuilder<Long>(manager);
            for (int i = 0; i < DEMO_NODES.length; i++) {
                treeBuilder.sequentiallyAddNextNode((long) i, DEMO_NODES[i]);
            }
            Log.d(TAG, manager.toString());
            newTreeType = TreeType.SIMPLE;
            newCollapsible = true;
        } else {
            // Recreation after a config change: restore the serialized state.
            manager = (TreeStateManager<Long>) savedInstanceState
                    .getSerializable("treeManager");
            newTreeType = (TreeType) savedInstanceState
                    .getSerializable("treeType");
            newCollapsible = savedInstanceState.getBoolean("collapsible");
        }
        setContentView(R.layout.main_demo);
        treeView = (TreeViewList) findViewById(R.id.mainTreeView);
        fancyAdapter = new FancyColouredVariousSizesAdapter(this, selected,
                manager, LEVEL_NUMBER);
        simpleAdapter = new SimpleStandardAdapter(this, selected, manager,
                LEVEL_NUMBER);
        setTreeAdapter(newTreeType);
        setCollapsible(newCollapsible);
        registerForContextMenu(treeView);
    }

    @Override
    protected void onSaveInstanceState(final Bundle outState) {
        // Persist tree structure, adapter choice and collapsibility so
        // onCreate can restore them.
        outState.putSerializable("treeManager", manager);
        outState.putSerializable("treeType", treeType);
        outState.putBoolean("collapsible", this.collapsible);
        super.onSaveInstanceState(outState);
    }

    protected final void setTreeAdapter(final TreeType newTreeType) {
        this.treeType = newTreeType;
        switch (newTreeType) {
        case SIMPLE:
            treeView.setAdapter(simpleAdapter);
            break;
        case FANCY:
            treeView.setAdapter(fancyAdapter);
            break;
        default:
            treeView.setAdapter(simpleAdapter);
        }
    }

    protected final void setCollapsible(final boolean newCollapsible) {
        this.collapsible = newCollapsible;
        treeView.setCollapsible(this.collapsible);
    }

    @Override
    public boolean onCreateOptionsMenu(final Menu menu) {
        final MenuInflater inflater = getMenuInflater();
        inflater.inflate(R.menu.main_menu, menu);
        return true;
    }

    @Override
    public boolean onPrepareOptionsMenu(final Menu menu) {
        // The collapsible item's label reflects the action it will perform
        // (enable vs. disable).
        final MenuItem collapsibleMenu = menu
                .findItem(R.id.collapsible_menu_item);
        if (collapsible) {
            collapsibleMenu.setTitle(R.string.collapsible_menu_disable);
            collapsibleMenu.setTitleCondensed(getResources().getString(
                    R.string.collapsible_condensed_disable));
        } else {
            collapsibleMenu.setTitle(R.string.collapsible_menu_enable);
            collapsibleMenu.setTitleCondensed(getResources().getString(
                    R.string.collapsible_condensed_enable));
        }
        return super.onPrepareOptionsMenu(menu);
    }

    @Override
    public boolean onOptionsItemSelected(final MenuItem item) {
        if (item.getItemId() == R.id.simple_menu_item) {
            setTreeAdapter(TreeType.SIMPLE);
        } else if (item.getItemId() == R.id.fancy_menu_item) {
            setTreeAdapter(TreeType.FANCY);
        } else if (item.getItemId() == R.id.collapsible_menu_item) {
            setCollapsible(!this.collapsible);
        } else if (item.getItemId() == R.id.expand_all_menu_item) {
            manager.expandEverythingBelow(null);
        } else if (item.getItemId() == R.id.collapse_all_menu_item) {
            manager.collapseChildren(null);
        } else {
            return false;
        }
        return true;
    }

    @Override
    public void onCreateContextMenu(final ContextMenu menu, final View v,
            final ContextMenuInfo menuInfo) {
        // Hide the expand/collapse entries that do not apply to the long-
        // pressed node's current state.
        final AdapterContextMenuInfo adapterInfo = (AdapterContextMenuInfo) menuInfo;
        final long id = adapterInfo.id;
        final TreeNodeInfo<Long> info = manager.getNodeInfo(id);
        final MenuInflater menuInflater = getMenuInflater();
        menuInflater.inflate(R.menu.context_menu, menu);
        if (info.isWithChildren()) {
            if (info.isExpanded()) {
                menu.findItem(R.id.context_menu_expand_item).setVisible(false);
                menu.findItem(R.id.context_menu_expand_all).setVisible(false);
            } else {
                menu.findItem(R.id.context_menu_collapse).setVisible(false);
            }
        } else {
            menu.findItem(R.id.context_menu_expand_item).setVisible(false);
            menu.findItem(R.id.context_menu_expand_all).setVisible(false);
            menu.findItem(R.id.context_menu_collapse).setVisible(false);
        }
        super.onCreateContextMenu(menu, v, menuInfo);
    }

    @Override
    public boolean onContextItemSelected(final MenuItem item) {
        final AdapterContextMenuInfo info = (AdapterContextMenuInfo) item
                .getMenuInfo();
        final long id = info.id;
        if (item.getItemId() == R.id.context_menu_collapse) {
            manager.collapseChildren(id);
            return true;
        } else if (item.getItemId() == R.id.context_menu_expand_all) {
            manager.expandEverythingBelow(id);
            return true;
        } else if (item.getItemId() == R.id.context_menu_expand_item) {
            manager.expandDirectChildren(id);
            return true;
        } else if (item.getItemId() == R.id.context_menu_delete) {
            manager.removeNodeRecursively(id);
            return true;
        } else {
            return super.onContextItemSelected(item);
        }
    }
}
Java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package edu.uci.ics.crawler4j.examples.multiple;

import java.util.List;
import java.util.regex.Pattern;

import edu.uci.ics.crawler4j.crawler.Page;
import edu.uci.ics.crawler4j.crawler.WebCrawler;
import edu.uci.ics.crawler4j.parser.HtmlParseData;
import edu.uci.ics.crawler4j.url.WebURL;

/**
 * Example crawler restricted to a set of domains handed over through the
 * controller's custom data, skipping common binary/media resources.
 *
 * @author Yasser Ganjisaffar <lastname at gmail dot com>
 */
public class BasicCrawler extends WebCrawler {

    // Extensions we never fetch (images, media, archives, documents).
    private static final Pattern FILTERS = Pattern
            .compile(".*(\\.(css|js|bmp|gif|jpe?g|png|tiff?|mid|mp2|mp3|mp4"
                    + "|wav|avi|mov|mpeg|ram|m4v|pdf"
                    + "|rm|smil|wmv|swf|wma|zip|rar|gz))$");

    private String[] myCrawlDomains;

    @Override
    public void onStart() {
        // The controller supplies this crawler's domain whitelist.
        myCrawlDomains = (String[]) myController.getCustomData();
    }

    @Override
    public boolean shouldVisit(WebURL url) {
        final String href = url.getURL().toLowerCase();
        if (FILTERS.matcher(href).matches()) {
            return false;
        }
        for (final String domain : myCrawlDomains) {
            if (href.startsWith(domain)) {
                return true;
            }
        }
        return false;
    }

    @Override
    public void visit(Page page) {
        final WebURL webUrl = page.getWebURL();
        System.out.println("Docid: " + webUrl.getDocid());
        System.out.println("URL: " + webUrl.getURL());
        System.out.println("Docid of parent page: " + webUrl.getParentDocid());
        if (page.getParseData() instanceof HtmlParseData) {
            final HtmlParseData htmlParseData = (HtmlParseData) page.getParseData();
            final String text = htmlParseData.getText();
            final String html = htmlParseData.getHtml();
            final List<WebURL> links = htmlParseData.getOutgoingUrls();
            System.out.println("Text length: " + text.length());
            System.out.println("Html length: " + html.length());
            System.out.println("Number of outgoing links: " + links.size());
        }
        System.out.println("=============");
    }
}
Java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package edu.uci.ics.crawler4j.examples.multiple;

import edu.uci.ics.crawler4j.crawler.CrawlConfig;
import edu.uci.ics.crawler4j.crawler.CrawlController;
import edu.uci.ics.crawler4j.fetcher.PageFetcher;
import edu.uci.ics.crawler4j.robotstxt.RobotstxtConfig;
import edu.uci.ics.crawler4j.robotstxt.RobotstxtServer;

/**
 * Example showing how to run two independent crawlers concurrently in the
 * same process: separate configs, storage folders and page fetchers, with a
 * shared robots.txt server.
 *
 * @author Yasser Ganjisaffar <lastname at gmail dot com>
 */
public class MultipleCrawlerController {

    public static void main(String[] args) throws Exception {
        if (args.length != 1) {
            System.out.println("Needed parameter: ");
            System.out.println("\t rootFolder (it will contain intermediate crawl data)");
            return;
        }

        /*
         * crawlStorageFolder is a folder where intermediate crawl data is
         * stored.
         */
        String crawlStorageFolder = args[0];

        CrawlConfig config1 = new CrawlConfig();
        CrawlConfig config2 = new CrawlConfig();

        /*
         * The two crawlers should have different storage folders for their
         * intermediate data.
         */
        config1.setCrawlStorageFolder(crawlStorageFolder + "/crawler1");
        config2.setCrawlStorageFolder(crawlStorageFolder + "/crawler2");

        config1.setPolitenessDelay(1000);
        config2.setPolitenessDelay(2000);

        config1.setMaxPagesToFetch(50);
        config2.setMaxPagesToFetch(100);

        /*
         * We will use different PageFetchers for the two crawlers.
         */
        PageFetcher pageFetcher1 = new PageFetcher(config1);
        PageFetcher pageFetcher2 = new PageFetcher(config2);

        /*
         * We will use the same RobotstxtServer for both of the crawlers.
         */
        RobotstxtConfig robotstxtConfig = new RobotstxtConfig();
        RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig,
                pageFetcher1);

        CrawlController controller1 = new CrawlController(config1,
                pageFetcher1, robotstxtServer);
        CrawlController controller2 = new CrawlController(config2,
                pageFetcher2, robotstxtServer);

        // Each crawler gets its domain whitelist via the controller's custom
        // data (read back in BasicCrawler.onStart()).
        String[] crawler1Domains = new String[] { "http://www.ics.uci.edu/",
                "http://www.cnn.com/" };
        String[] crawler2Domains = new String[] { "http://en.wikipedia.org/" };

        controller1.setCustomData(crawler1Domains);
        controller2.setCustomData(crawler2Domains);

        controller1.addSeed("http://www.ics.uci.edu/");
        controller1.addSeed("http://www.cnn.com/");
        controller1.addSeed("http://www.ics.uci.edu/~lopes/");
        controller1.addSeed("http://www.cnn.com/POLITICS/");

        controller2.addSeed("http://en.wikipedia.org/wiki/Main_Page");
        controller2.addSeed("http://en.wikipedia.org/wiki/Obama");
        controller2.addSeed("http://en.wikipedia.org/wiki/Bing");

        /*
         * The first crawler will have 5 concurrent threads and the second
         * crawler will have 7 threads.
         */
        controller1.startNonBlocking(BasicCrawler.class, 5);
        controller2.startNonBlocking(BasicCrawler.class, 7);

        controller1.waitUntilFinish();
        System.out.println("Crawler 1 is finished.");

        controller2.waitUntilFinish();
        System.out.println("Crawler 2 is finished.");
    }
}
Java
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.uci.ics.crawler4j.examples.statushandler; import java.util.regex.Pattern; import org.apache.http.HttpStatus; import edu.uci.ics.crawler4j.crawler.Page; import edu.uci.ics.crawler4j.crawler.WebCrawler; import edu.uci.ics.crawler4j.url.WebURL; /** * @author Yasser Ganjisaffar <lastname at gmail dot com> */ public class StatusHandlerCrawler extends WebCrawler { private final static Pattern FILTERS = Pattern.compile(".*(\\.(css|js|bmp|gif|jpe?g" + "|png|tiff?|mid|mp2|mp3|mp4" + "|wav|avi|mov|mpeg|ram|m4v|pdf" + "|rm|smil|wmv|swf|wma|zip|rar|gz))$"); /** * You should implement this function to specify whether * the given url should be crawled or not (based on your * crawling logic). */ @Override public boolean shouldVisit(WebURL url) { String href = url.getURL().toLowerCase(); return !FILTERS.matcher(href).matches() && href.startsWith("http://www.ics.uci.edu/"); } /** * This function is called when a page is fetched and ready * to be processed by your program. 
*/ @Override public void visit(Page page) { // Do nothing } @Override protected void handlePageStatusCode(WebURL webUrl, int statusCode, String statusDescription) { if (statusCode != HttpStatus.SC_OK) { if (statusCode == HttpStatus.SC_NOT_FOUND) { System.out.println("Broken link: " + webUrl.getURL() + ", this link was found in page: " + webUrl.getParentUrl()); } else { System.out.println("Non success status for link: " + webUrl.getURL() + ", status code: " + statusCode + ", description: " + statusDescription); } } } }
Java
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.uci.ics.crawler4j.examples.statushandler; import edu.uci.ics.crawler4j.crawler.CrawlConfig; import edu.uci.ics.crawler4j.crawler.CrawlController; import edu.uci.ics.crawler4j.fetcher.PageFetcher; import edu.uci.ics.crawler4j.robotstxt.RobotstxtConfig; import edu.uci.ics.crawler4j.robotstxt.RobotstxtServer; /** * @author Yasser Ganjisaffar <lastname at gmail dot com> */ public class StatusHandlerCrawlController { public static void main(String[] args) throws Exception { if (args.length != 2) { System.out.println("Needed parameters: "); System.out.println("\t rootFolder (it will contain intermediate crawl data)"); System.out.println("\t numberOfCralwers (number of concurrent threads)"); return; } /* * crawlStorageFolder is a folder where intermediate crawl data is * stored. */ String crawlStorageFolder = args[0]; /* * numberOfCrawlers shows the number of concurrent threads that should * be initiated for crawling. */ int numberOfCrawlers = Integer.parseInt(args[1]); CrawlConfig config = new CrawlConfig(); config.setCrawlStorageFolder(crawlStorageFolder); /* * Be polite: Make sure that we don't send more than 1 request per * second (1000 milliseconds between requests). 
*/ config.setPolitenessDelay(1000); /* * You can set the maximum crawl depth here. The default value is -1 for * unlimited depth */ config.setMaxDepthOfCrawling(2); /* * You can set the maximum number of pages to crawl. The default value * is -1 for unlimited number of pages */ config.setMaxPagesToFetch(1000); /* * Do you need to set a proxy? If so, you can use: * config.setProxyHost("proxyserver.example.com"); * config.setProxyPort(8080); * * If your proxy also needs authentication: * config.setProxyUsername(username); config.getProxyPassword(password); */ /* * This config parameter can be used to set your crawl to be resumable * (meaning that you can resume the crawl from a previously * interrupted/crashed crawl). Note: if you enable resuming feature and * want to start a fresh crawl, you need to delete the contents of * rootFolder manually. */ config.setResumableCrawling(false); /* * Instantiate the controller for this crawl. */ PageFetcher pageFetcher = new PageFetcher(config); RobotstxtConfig robotstxtConfig = new RobotstxtConfig(); RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher); CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer); /* * For each crawl, you need to add some seed urls. These are the first * URLs that are fetched and then the crawler starts following links * which are found in these pages */ controller.addSeed("http://www.ics.uci.edu/~welling/"); controller.addSeed("http://www.ics.uci.edu/~lopes/"); controller.addSeed("http://www.ics.uci.edu/"); /* * Start the crawl. This is a blocking operation, meaning that your code * will reach the line after this only when crawling is finished. */ controller.start(StatusHandlerCrawler.class, numberOfCrawlers); } }
Java
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.uci.ics.crawler4j.examples.shutdown; import edu.uci.ics.crawler4j.crawler.CrawlConfig; import edu.uci.ics.crawler4j.crawler.CrawlController; import edu.uci.ics.crawler4j.fetcher.PageFetcher; import edu.uci.ics.crawler4j.robotstxt.RobotstxtConfig; import edu.uci.ics.crawler4j.robotstxt.RobotstxtServer; /** * @author Yasser Ganjisaffar <lastname at gmail dot com> */ public class ControllerWithShutdown { public static void main(String[] args) throws Exception { if (args.length != 2) { System.out.println("Needed parameters: "); System.out.println("\t rootFolder (it will contain intermediate crawl data)"); System.out.println("\t numberOfCralwers (number of concurrent threads)"); return; } /* * crawlStorageFolder is a folder where intermediate crawl data is * stored. */ String crawlStorageFolder = args[0]; /* * numberOfCrawlers shows the number of concurrent threads that should * be initiated for crawling. */ int numberOfCrawlers = Integer.parseInt(args[1]); CrawlConfig config = new CrawlConfig(); config.setCrawlStorageFolder(crawlStorageFolder); config.setPolitenessDelay(1000); // Unlimited number of pages can be crawled. 
config.setMaxPagesToFetch(-1); /* * Instantiate the controller for this crawl. */ PageFetcher pageFetcher = new PageFetcher(config); RobotstxtConfig robotstxtConfig = new RobotstxtConfig(); RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher); CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer); /* * For each crawl, you need to add some seed urls. These are the first * URLs that are fetched and then the crawler starts following links * which are found in these pages */ controller.addSeed("http://www.ics.uci.edu/~welling/"); controller.addSeed("http://www.ics.uci.edu/~lopes/"); controller.addSeed("http://www.ics.uci.edu/"); /* * Start the crawl. This is a blocking operation, meaning that your code * will reach the line after this only when crawling is finished. */ controller.startNonBlocking(BasicCrawler.class, numberOfCrawlers); // Wait for 30 seconds Thread.sleep(30 * 1000); // Send the shutdown request and then wait for finishing controller.shutdown(); controller.waitUntilFinish(); } }
Java
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.uci.ics.crawler4j.examples.shutdown; import edu.uci.ics.crawler4j.crawler.Page; import edu.uci.ics.crawler4j.crawler.WebCrawler; import edu.uci.ics.crawler4j.parser.HtmlParseData; import edu.uci.ics.crawler4j.url.WebURL; import java.util.List; import java.util.regex.Pattern; /** * @author Yasser Ganjisaffar <lastname at gmail dot com> */ public class BasicCrawler extends WebCrawler { private final static Pattern FILTERS = Pattern.compile(".*(\\.(css|js|bmp|gif|jpe?g" + "|png|tiff?|mid|mp2|mp3|mp4" + "|wav|avi|mov|mpeg|ram|m4v|pdf" + "|rm|smil|wmv|swf|wma|zip|rar|gz))$"); private final static String DOMAIN = "http://www.ics.uci.edu/"; @Override public boolean shouldVisit(WebURL url) { String href = url.getURL().toLowerCase(); return !FILTERS.matcher(href).matches() && href.startsWith(DOMAIN); } @Override public void visit(Page page) { int docid = page.getWebURL().getDocid(); String url = page.getWebURL().getURL(); int parentDocid = page.getWebURL().getParentDocid(); System.out.println("Docid: " + docid); System.out.println("URL: " + url); System.out.println("Docid of parent page: " + parentDocid); if (page.getParseData() instanceof HtmlParseData) { HtmlParseData htmlParseData = 
(HtmlParseData) page.getParseData(); String text = htmlParseData.getText(); String html = htmlParseData.getHtml(); List<WebURL> links = htmlParseData.getOutgoingUrls(); System.out.println("Text length: " + text.length()); System.out.println("Html length: " + html.length()); System.out.println("Number of outgoing links: " + links.size()); } System.out.println("============="); } }
Java
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.uci.ics.crawler4j.examples.imagecrawler; import java.security.MessageDigest; /** * @author Yasser Ganjisaffar <lastname at gmail dot com> */ public class Cryptography { private static final char[] hexChars = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' }; public static String MD5(String str) { try { MessageDigest md = MessageDigest.getInstance("MD5"); md.update(str.getBytes()); return hexStringFromBytes(md.digest()); } catch (Exception e) { e.printStackTrace(); return ""; } } private static String hexStringFromBytes(byte[] b) { String hex = ""; int msb; int lsb; int i; // MSB maps to idx 0 for (i = 0; i < b.length; i++) { msb = (b[i] & 0x000000FF) / 16; lsb = (b[i] & 0x000000FF) % 16; hex = hex + hexChars[msb] + hexChars[lsb]; } return (hex); } }
Java
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.uci.ics.crawler4j.examples.imagecrawler; import edu.uci.ics.crawler4j.crawler.CrawlConfig; import edu.uci.ics.crawler4j.crawler.CrawlController; import edu.uci.ics.crawler4j.fetcher.PageFetcher; import edu.uci.ics.crawler4j.robotstxt.RobotstxtConfig; import edu.uci.ics.crawler4j.robotstxt.RobotstxtServer; /** * @author Yasser Ganjisaffar <lastname at gmail dot com> */ /* * IMPORTANT: Make sure that you update crawler4j.properties file and set * crawler.include_images to true */ public class ImageCrawlController { public static void main(String[] args) throws Exception { if (args.length < 3) { System.out.println("Needed parameters: "); System.out.println("\t rootFolder (it will contain intermediate crawl data)"); System.out.println("\t numberOfCralwers (number of concurrent threads)"); System.out.println("\t storageFolder (a folder for storing downloaded images)"); return; } String rootFolder = args[0]; int numberOfCrawlers = Integer.parseInt(args[1]); String storageFolder = args[2]; CrawlConfig config = new CrawlConfig(); config.setCrawlStorageFolder(rootFolder); /* * Since images are binary content, we need to set this parameter to * true to make sure they are included in the crawl. 
*/ config.setIncludeBinaryContentInCrawling(true); String[] crawlDomains = new String[] { "http://uci.edu/" }; PageFetcher pageFetcher = new PageFetcher(config); RobotstxtConfig robotstxtConfig = new RobotstxtConfig(); RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher); CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer); for (String domain : crawlDomains) { controller.addSeed(domain); } ImageCrawler.configure(crawlDomains, storageFolder); controller.start(ImageCrawler.class, numberOfCrawlers); } }
Java
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.uci.ics.crawler4j.examples.imagecrawler; import java.io.File; import java.util.regex.Pattern; import edu.uci.ics.crawler4j.crawler.Page; import edu.uci.ics.crawler4j.crawler.WebCrawler; import edu.uci.ics.crawler4j.parser.BinaryParseData; import edu.uci.ics.crawler4j.url.WebURL; import edu.uci.ics.crawler4j.util.IO; /** * @author Yasser Ganjisaffar <lastname at gmail dot com> */ /* * This class shows how you can crawl images on the web and store them in a * folder. This is just for demonstration purposes and doesn't scale for large * number of images. 
For crawling millions of images you would need to store * downloaded images in a hierarchy of folders */ public class ImageCrawler extends WebCrawler { private static final Pattern filters = Pattern.compile(".*(\\.(css|js|mid|mp2|mp3|mp4|wav|avi|mov|mpeg|ram|m4v|pdf" + "|rm|smil|wmv|swf|wma|zip|rar|gz))$"); private static final Pattern imgPatterns = Pattern.compile(".*(\\.(bmp|gif|jpe?g|png|tiff?))$"); private static File storageFolder; private static String[] crawlDomains; public static void configure(String[] domain, String storageFolderName) { ImageCrawler.crawlDomains = domain; storageFolder = new File(storageFolderName); if (!storageFolder.exists()) { storageFolder.mkdirs(); } } @Override public boolean shouldVisit(WebURL url) { String href = url.getURL().toLowerCase(); if (filters.matcher(href).matches()) { return false; } if (imgPatterns.matcher(href).matches()) { return true; } for (String domain : crawlDomains) { if (href.startsWith(domain)) { return true; } } return false; } @Override public void visit(Page page) { String url = page.getWebURL().getURL(); // We are only interested in processing images if (!(page.getParseData() instanceof BinaryParseData)) { return; } if (!imgPatterns.matcher(url).matches()) { return; } // Not interested in very small images if (page.getContentData().length < 10 * 1024) { return; } // get a unique name for storing this image String extension = url.substring(url.lastIndexOf(".")); String hashedName = Cryptography.MD5(url) + extension; // store image IO.writeBytesToFile(page.getContentData(), storageFolder.getAbsolutePath() + "/" + hashedName); System.out.println("Stored: " + url); } }
Java
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.uci.ics.crawler4j.examples.basic; import edu.uci.ics.crawler4j.crawler.CrawlConfig; import edu.uci.ics.crawler4j.crawler.CrawlController; import edu.uci.ics.crawler4j.fetcher.PageFetcher; import edu.uci.ics.crawler4j.robotstxt.RobotstxtConfig; import edu.uci.ics.crawler4j.robotstxt.RobotstxtServer; /** * @author Yasser Ganjisaffar <lastname at gmail dot com> */ public class BasicCrawlController { public static void main(String[] args) throws Exception { if (args.length != 2) { System.out.println("Needed parameters: "); System.out.println("\t rootFolder (it will contain intermediate crawl data)"); System.out.println("\t numberOfCralwers (number of concurrent threads)"); return; } /* * crawlStorageFolder is a folder where intermediate crawl data is * stored. */ String crawlStorageFolder = args[0]; /* * numberOfCrawlers shows the number of concurrent threads that should * be initiated for crawling. */ int numberOfCrawlers = Integer.parseInt(args[1]); CrawlConfig config = new CrawlConfig(); config.setCrawlStorageFolder(crawlStorageFolder); /* * Be polite: Make sure that we don't send more than 1 request per * second (1000 milliseconds between requests). 
*/ config.setPolitenessDelay(1000); /* * You can set the maximum crawl depth here. The default value is -1 for * unlimited depth */ config.setMaxDepthOfCrawling(2); /* * You can set the maximum number of pages to crawl. The default value * is -1 for unlimited number of pages */ config.setMaxPagesToFetch(1000); /* * Do you need to set a proxy? If so, you can use: * config.setProxyHost("proxyserver.example.com"); * config.setProxyPort(8080); * * If your proxy also needs authentication: * config.setProxyUsername(username); config.getProxyPassword(password); */ /* * This config parameter can be used to set your crawl to be resumable * (meaning that you can resume the crawl from a previously * interrupted/crashed crawl). Note: if you enable resuming feature and * want to start a fresh crawl, you need to delete the contents of * rootFolder manually. */ config.setResumableCrawling(false); /* * Instantiate the controller for this crawl. */ PageFetcher pageFetcher = new PageFetcher(config); RobotstxtConfig robotstxtConfig = new RobotstxtConfig(); RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher); CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer); /* * For each crawl, you need to add some seed urls. These are the first * URLs that are fetched and then the crawler starts following links * which are found in these pages */ controller.addSeed("http://www.ics.uci.edu/"); controller.addSeed("http://www.ics.uci.edu/~lopes/"); controller.addSeed("http://www.ics.uci.edu/~welling/"); /* * Start the crawl. This is a blocking operation, meaning that your code * will reach the line after this only when crawling is finished. */ controller.start(BasicCrawler.class, numberOfCrawlers); } }
Java
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.uci.ics.crawler4j.examples.basic; import edu.uci.ics.crawler4j.crawler.Page; import edu.uci.ics.crawler4j.crawler.WebCrawler; import edu.uci.ics.crawler4j.parser.HtmlParseData; import edu.uci.ics.crawler4j.url.WebURL; import java.util.List; import java.util.regex.Pattern; import org.apache.http.Header; /** * @author Yasser Ganjisaffar <lastname at gmail dot com> */ public class BasicCrawler extends WebCrawler { private final static Pattern FILTERS = Pattern.compile(".*(\\.(css|js|bmp|gif|jpe?g" + "|png|tiff?|mid|mp2|mp3|mp4" + "|wav|avi|mov|mpeg|ram|m4v|pdf" + "|rm|smil|wmv|swf|wma|zip|rar|gz))$"); /** * You should implement this function to specify whether the given url * should be crawled or not (based on your crawling logic). */ @Override public boolean shouldVisit(WebURL url) { String href = url.getURL().toLowerCase(); return !FILTERS.matcher(href).matches() && href.startsWith("http://www.ics.uci.edu/"); } /** * This function is called when a page is fetched and ready to be processed * by your program. 
*/ @Override public void visit(Page page) { int docid = page.getWebURL().getDocid(); String url = page.getWebURL().getURL(); String domain = page.getWebURL().getDomain(); String path = page.getWebURL().getPath(); String subDomain = page.getWebURL().getSubDomain(); String parentUrl = page.getWebURL().getParentUrl(); String anchor = page.getWebURL().getAnchor(); System.out.println("Docid: " + docid); System.out.println("URL: " + url); System.out.println("Domain: '" + domain + "'"); System.out.println("Sub-domain: '" + subDomain + "'"); System.out.println("Path: '" + path + "'"); System.out.println("Parent page: " + parentUrl); System.out.println("Anchor text: " + anchor); if (page.getParseData() instanceof HtmlParseData) { HtmlParseData htmlParseData = (HtmlParseData) page.getParseData(); String text = htmlParseData.getText(); String html = htmlParseData.getHtml(); List<WebURL> links = htmlParseData.getOutgoingUrls(); System.out.println("Text length: " + text.length()); System.out.println("Html length: " + html.length()); System.out.println("Number of outgoing links: " + links.size()); } Header[] responseHeaders = page.getFetchResponseHeaders(); if (responseHeaders != null) { System.out.println("Response headers:"); for (Header header : responseHeaders) { System.out.println("\t" + header.getName() + ": " + header.getValue()); } } System.out.println("============="); } }
Java
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.uci.ics.crawler4j.examples.localdata; import org.apache.http.HttpStatus; import edu.uci.ics.crawler4j.crawler.CrawlConfig; import edu.uci.ics.crawler4j.crawler.Page; import edu.uci.ics.crawler4j.fetcher.PageFetchResult; import edu.uci.ics.crawler4j.fetcher.PageFetcher; import edu.uci.ics.crawler4j.parser.HtmlParseData; import edu.uci.ics.crawler4j.parser.ParseData; import edu.uci.ics.crawler4j.parser.Parser; import edu.uci.ics.crawler4j.url.WebURL; /** * This class is a demonstration of how crawler4j can be used to download a * single page and extract its title and text. 
*/ public class Downloader { private Parser parser; private PageFetcher pageFetcher; public Downloader() { CrawlConfig config = new CrawlConfig(); parser = new Parser(config); pageFetcher = new PageFetcher(config); } private Page download(String url) { WebURL curURL = new WebURL(); curURL.setURL(url); PageFetchResult fetchResult = null; try { fetchResult = pageFetcher.fetchHeader(curURL); if (fetchResult.getStatusCode() == HttpStatus.SC_OK) { try { Page page = new Page(curURL); fetchResult.fetchContent(page); if (parser.parse(page, curURL.getURL())) { return page; } } catch (Exception e) { e.printStackTrace(); } } } finally { if (fetchResult != null) { fetchResult.discardContentIfNotConsumed(); } } return null; } public void processUrl(String url) { System.out.println("Processing: " + url); Page page = download(url); if (page != null) { ParseData parseData = page.getParseData(); if (parseData != null) { if (parseData instanceof HtmlParseData) { HtmlParseData htmlParseData = (HtmlParseData) parseData; System.out.println("Title: " + htmlParseData.getTitle()); System.out.println("Text length: " + htmlParseData.getText().length()); System.out.println("Html length: " + htmlParseData.getHtml().length()); } } else { System.out.println("Couldn't parse the content of the page."); } } else { System.out.println("Couldn't fetch the content of the page."); } System.out.println("=============="); } public static void main(String[] args) { Downloader downloader = new Downloader(); downloader.processUrl("http://en.wikipedia.org/wiki/Main_Page/"); downloader.processUrl("http://www.yahoo.com/"); } }
Java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package edu.uci.ics.crawler4j.examples.localdata;

import java.util.List;

import edu.uci.ics.crawler4j.crawler.CrawlConfig;
import edu.uci.ics.crawler4j.crawler.CrawlController;
import edu.uci.ics.crawler4j.fetcher.PageFetcher;
import edu.uci.ics.crawler4j.robotstxt.RobotstxtConfig;
import edu.uci.ics.crawler4j.robotstxt.RobotstxtServer;

/**
 * Example entry point that runs a small crawl (at most 10 pages) with
 * {@link LocalDataCollectorCrawler} threads, then aggregates the per-crawler
 * {@link CrawlStat} data collected by the controller and prints the totals.
 */
public class LocalDataCollectorController {

    /**
     * @param args {@code args[0]} = crawl storage folder,
     *             {@code args[1]} = number of concurrent crawler threads
     */
    public static void main(String[] args) throws Exception {
        if (args.length != 2) {
            System.out.println("Needed parameters: ");
            System.out.println("\t rootFolder (it will contain intermediate crawl data)");
            // Fixed typo in the usage message: was "numberOfCralwers".
            System.out.println("\t numberOfCrawlers (number of concurrent threads)");
            return;
        }
        String rootFolder = args[0];
        int numberOfCrawlers = Integer.parseInt(args[1]);

        CrawlConfig config = new CrawlConfig();
        config.setCrawlStorageFolder(rootFolder);
        // Keep the example short and polite: few pages, 1s delay per host.
        config.setMaxPagesToFetch(10);
        config.setPolitenessDelay(1000);

        PageFetcher pageFetcher = new PageFetcher(config);
        RobotstxtConfig robotstxtConfig = new RobotstxtConfig();
        RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher);
        CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer);

        controller.addSeed("http://www.ics.uci.edu/");
        // Blocks until the crawl finishes.
        controller.start(LocalDataCollectorCrawler.class, numberOfCrawlers);

        // Each crawler thread contributed one CrawlStat; sum them up.
        List<Object> crawlersLocalData = controller.getCrawlersLocalData();
        long totalLinks = 0;
        long totalTextSize = 0;
        int totalProcessedPages = 0;
        for (Object localData : crawlersLocalData) {
            CrawlStat stat = (CrawlStat) localData;
            totalLinks += stat.getTotalLinks();
            totalTextSize += stat.getTotalTextSize();
            totalProcessedPages += stat.getTotalProcessedPages();
        }

        System.out.println("Aggregated Statistics:");
        System.out.println("   Processed Pages: " + totalProcessedPages);
        System.out.println("   Total Links found: " + totalLinks);
        System.out.println("   Total Text Size: " + totalTextSize);
    }
}
Java
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.uci.ics.crawler4j.examples.localdata; public class CrawlStat { private int totalProcessedPages; private long totalLinks; private long totalTextSize; public int getTotalProcessedPages() { return totalProcessedPages; } public void setTotalProcessedPages(int totalProcessedPages) { this.totalProcessedPages = totalProcessedPages; } public void incProcessedPages() { this.totalProcessedPages++; } public long getTotalLinks() { return totalLinks; } public void setTotalLinks(long totalLinks) { this.totalLinks = totalLinks; } public long getTotalTextSize() { return totalTextSize; } public void setTotalTextSize(long totalTextSize) { this.totalTextSize = totalTextSize; } public void incTotalLinks(int count) { this.totalLinks += count; } public void incTotalTextSize(int count) { this.totalTextSize += count; } }
Java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package edu.uci.ics.crawler4j.examples.localdata;

import edu.uci.ics.crawler4j.crawler.Page;
import edu.uci.ics.crawler4j.crawler.WebCrawler;
import edu.uci.ics.crawler4j.parser.HtmlParseData;
import edu.uci.ics.crawler4j.url.WebURL;

import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.regex.Pattern;

/**
 * Example crawler that accumulates per-thread statistics (processed pages,
 * outgoing links, extracted text size) into a {@link CrawlStat} which the
 * controller collects via {@link #getMyLocalData()} when the crawl ends.
 */
public class LocalDataCollectorCrawler extends WebCrawler {

    // Compiled once for the whole class: the filter is identical for every
    // crawler instance, so there is no need to recompile it per instance.
    static final Pattern filters = Pattern.compile(".*(\\.(css|js|bmp|gif|jpe?g"
            + "|png|tiff?|mid|mp2|mp3|mp4"
            + "|wav|avi|mov|mpeg|ram|m4v|pdf"
            + "|rm|smil|wmv|swf|wma|zip|rar|gz))$");

    /** Statistics accumulated by this crawler instance only. */
    CrawlStat myCrawlStat;

    public LocalDataCollectorCrawler() {
        myCrawlStat = new CrawlStat();
    }

    /**
     * Visit only non-binary URLs under http://www.ics.uci.edu/.
     */
    @Override
    public boolean shouldVisit(WebURL url) {
        String href = url.getURL().toLowerCase();
        return !filters.matcher(href).matches() && href.startsWith("http://www.ics.uci.edu/");
    }

    @Override
    public void visit(Page page) {
        System.out.println("Visited: " + page.getWebURL().getURL());
        myCrawlStat.incProcessedPages();
        if (page.getParseData() instanceof HtmlParseData) {
            HtmlParseData parseData = (HtmlParseData) page.getParseData();
            List<WebURL> links = parseData.getOutgoingUrls();
            myCrawlStat.incTotalLinks(links.size());
            // StandardCharsets.UTF_8 is guaranteed to exist on every JVM, so
            // this avoids the checked UnsupportedEncodingException that the
            // String-based getBytes("UTF-8") overload forced us to swallow.
            myCrawlStat.incTotalTextSize(parseData.getText().getBytes(StandardCharsets.UTF_8).length);
        }
        // We dump this crawler's statistics after processing every 50 pages.
        if (myCrawlStat.getTotalProcessedPages() % 50 == 0) {
            dumpMyData();
        }
    }

    // This function is called by controller to get the local data of this
    // crawler when the job is finished.
    @Override
    public Object getMyLocalData() {
        return myCrawlStat;
    }

    // This function is called by controller before finishing the job.
    @Override
    public void onBeforeExit() {
        dumpMyData();
    }

    /**
     * Prints this crawler's running statistics to stdout. A real application
     * would more likely write them to a file or a metrics sink.
     */
    public void dumpMyData() {
        int id = getMyId();
        System.out.println("Crawler " + id + "> Processed Pages: " + myCrawlStat.getTotalProcessedPages());
        System.out.println("Crawler " + id + "> Total Links Found: " + myCrawlStat.getTotalLinks());
        System.out.println("Crawler " + id + "> Total Text Size: " + myCrawlStat.getTotalTextSize());
    }
}
Java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package edu.uci.ics.crawler4j.crawler;

import java.nio.charset.Charset;

import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.util.EntityUtils;

import edu.uci.ics.crawler4j.parser.ParseData;
import edu.uci.ics.crawler4j.url.WebURL;

/**
 * Holds the data of one fetched (and possibly parsed) page: the URL, the raw
 * response body, content metadata from the response headers, and the parse
 * result populated later by the parsers.
 *
 * @author Yasser Ganjisaffar &lt;lastname at gmail dot com&gt;
 */
public class Page {

    /** The URL this page was fetched from. */
    protected WebURL url;

    /** Raw response body bytes. */
    protected byte[] contentData;

    /** Content-Type header value, e.g. "text/html; charset=UTF-8". */
    protected String contentType;

    /** Content-Encoding header value, e.g. "gzip". */
    protected String contentEncoding;

    /** Charset detected from the Content-Type, e.g. "UTF-8". */
    protected String contentCharset;

    /** All headers present on the fetch response. */
    protected Header[] fetchResponseHeaders;

    /** Parse result populated by the parsers; null until parsing happens. */
    protected ParseData parseData;

    public Page(WebURL url) {
        this.url = url;
    }

    public WebURL getWebURL() {
        return url;
    }

    public void setWebURL(WebURL url) {
        this.url = url;
    }

    /**
     * Populates this page from a fetched HttpEntity: records content type,
     * encoding and charset (where present) and consumes the body bytes.
     */
    public void load(HttpEntity entity) throws Exception {
        Header typeHeader = entity.getContentType();
        contentType = (typeHeader == null) ? null : typeHeader.getValue();

        Header encodingHeader = entity.getContentEncoding();
        contentEncoding = (encodingHeader == null) ? null : encodingHeader.getValue();

        Charset detected = ContentType.getOrDefault(entity).getCharset();
        if (detected != null) {
            contentCharset = detected.displayName();
        }

        // Fully consumes the entity's body.
        contentData = EntityUtils.toByteArray(entity);
    }

    /** Headers which were present in the response of the fetch request. */
    public Header[] getFetchResponseHeaders() {
        return fetchResponseHeaders;
    }

    public void setFetchResponseHeaders(Header[] headers) {
        fetchResponseHeaders = headers;
    }

    /** The parsed data generated for this page by parsers, if any. */
    public ParseData getParseData() {
        return parseData;
    }

    public void setParseData(ParseData parseData) {
        this.parseData = parseData;
    }

    /** The content of this page in binary format. */
    public byte[] getContentData() {
        return contentData;
    }

    public void setContentData(byte[] contentData) {
        this.contentData = contentData;
    }

    /** The ContentType of this page, e.g. "text/html; charset=UTF-8". */
    public String getContentType() {
        return contentType;
    }

    public void setContentType(String contentType) {
        this.contentType = contentType;
    }

    /** The encoding of the content, e.g. "gzip". */
    public String getContentEncoding() {
        return contentEncoding;
    }

    public void setContentEncoding(String contentEncoding) {
        this.contentEncoding = contentEncoding;
    }

    /** The charset of the content, e.g. "UTF-8". */
    public String getContentCharset() {
        return contentCharset;
    }

    public void setContentCharset(String contentCharset) {
        this.contentCharset = contentCharset;
    }
}
Java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package edu.uci.ics.crawler4j.crawler;

import com.sleepycat.je.Environment;
import com.sleepycat.je.EnvironmentConfig;
import edu.uci.ics.crawler4j.fetcher.PageFetcher;
import edu.uci.ics.crawler4j.frontier.DocIDServer;
import edu.uci.ics.crawler4j.frontier.Frontier;
import edu.uci.ics.crawler4j.robotstxt.RobotstxtServer;
import edu.uci.ics.crawler4j.url.URLCanonicalizer;
import edu.uci.ics.crawler4j.url.WebURL;
import edu.uci.ics.crawler4j.util.IO;
import org.apache.log4j.Logger;

import java.io.File;
import java.util.ArrayList;
import java.util.List;

/**
 * The controller that manages a crawling session. This class creates the
 * crawler threads, monitors their progress via a dedicated monitor thread,
 * restarts dead crawler threads, and detects when the whole crawl is done.
 *
 * @author Yasser Ganjisaffar <lastname at gmail dot com>
 */
public class CrawlController extends Configurable {

    static final Logger logger = Logger.getLogger(CrawlController.class.getName());

    /**
     * The 'customData' object can be used for passing custom crawl-related
     * configurations to different components of the crawler.
     */
    protected Object customData;

    /**
     * Once the crawling session finishes the controller collects the local data
     * of the crawler threads and stores them in this List.
     */
    protected List<Object> crawlersLocalData = new ArrayList<>();

    /**
     * Is the crawling of this session finished?
     * NOTE(review): written by the monitor thread and read by other threads
     * without synchronization or volatile — visibility across threads is not
     * guaranteed by the JMM; confirm whether this is intentional.
     */
    protected boolean finished;

    /**
     * Is the crawling session set to 'shutdown'. Crawler threads monitor this
     * flag and when it is set they will no longer process new pages.
     * NOTE(review): same cross-thread visibility caveat as 'finished'.
     */
    protected boolean shuttingDown;

    protected PageFetcher pageFetcher;
    protected RobotstxtServer robotstxtServer;
    protected Frontier frontier;
    protected DocIDServer docIdServer;

    // Monitor thread notifies on this lock when the crawl finishes;
    // waitUntilFinish() waits on it.
    protected final Object waitingLock = new Object();

    /**
     * Creates the controller: validates the config, prepares the crawl storage
     * folder and a BerkeleyDB environment under "<storage>/frontier", and wires
     * up the doc-id server and frontier.
     *
     * @throws Exception if the config is invalid or storage folders cannot be created
     */
    public CrawlController(CrawlConfig config, PageFetcher pageFetcher, RobotstxtServer robotstxtServer)
            throws Exception {
        super(config);

        config.validate();
        File folder = new File(config.getCrawlStorageFolder());
        if (!folder.exists()) {
            if (!folder.mkdirs()) {
                throw new Exception("Couldn't create this folder: " + folder.getAbsolutePath());
            }
        }

        // Resumable crawling requires a transactional, locking BDB environment;
        // a fresh (non-resumable) crawl instead wipes any previous frontier data.
        boolean resumable = config.isResumableCrawling();

        EnvironmentConfig envConfig = new EnvironmentConfig();
        envConfig.setAllowCreate(true);
        envConfig.setTransactional(resumable);
        envConfig.setLocking(resumable);

        File envHome = new File(config.getCrawlStorageFolder() + "/frontier");
        if (!envHome.exists()) {
            if (!envHome.mkdir()) {
                throw new Exception("Couldn't create this folder: " + envHome.getAbsolutePath());
            }
        }
        if (!resumable) {
            IO.deleteFolderContents(envHome);
        }

        Environment env = new Environment(envHome, envConfig);
        docIdServer = new DocIDServer(env, config);
        frontier = new Frontier(env, config, docIdServer);

        this.pageFetcher = pageFetcher;
        this.robotstxtServer = robotstxtServer;

        finished = false;
        shuttingDown = false;
    }

    /**
     * Start the crawling session and wait for it to finish.
     *
     * @param _c
     *            the class that implements the logic for crawler threads
     * @param numberOfCrawlers
     *            the number of concurrent threads that will be contributing in
     *            this crawling session.
     */
    public <T extends WebCrawler> void start(final Class<T> _c, final int numberOfCrawlers) {
        this.start(_c, numberOfCrawlers, true);
    }

    /**
     * Start the crawling session and return immediately.
     *
     * @param _c
     *            the class that implements the logic for crawler threads
     * @param numberOfCrawlers
     *            the number of concurrent threads that will be contributing in
     *            this crawling session.
     */
    public <T extends WebCrawler> void startNonBlocking(final Class<T> _c, final int numberOfCrawlers) {
        this.start(_c, numberOfCrawlers, false);
    }

    /**
     * Spawns the crawler threads plus a monitor thread. The monitor polls every
     * 10 seconds: it recreates crawler threads that died (unless shutting down),
     * and when no crawler is working and the frontier queue stays empty over two
     * confirmation passes, it finishes the crawl, collects each crawler's local
     * data, closes the frontier/doc-id server/fetcher, and notifies waiters.
     */
    protected <T extends WebCrawler> void start(final Class<T> _c, final int numberOfCrawlers, boolean isBlocking) {
        try {
            finished = false;
            crawlersLocalData.clear();
            final List<Thread> threads = new ArrayList<>();
            final List<T> crawlers = new ArrayList<>();

            for (int i = 1; i <= numberOfCrawlers; i++) {
                // Requires T to have a public no-arg constructor.
                T crawler = _c.newInstance();
                Thread thread = new Thread(crawler, "Crawler " + i);
                crawler.setThread(thread);
                crawler.init(i, this);
                thread.start();
                crawlers.add(crawler);
                threads.add(thread);
                logger.info("Crawler " + i + " started.");
            }

            final CrawlController controller = this;

            Thread monitorThread = new Thread(new Runnable() {

                @Override
                public void run() {
                    try {
                        // Hold the lock for the whole monitor loop so that
                        // notifyAll() at the end wakes waitUntilFinish().
                        synchronized (waitingLock) {

                            while (true) {
                                sleep(10);
                                boolean someoneIsWorking = false;
                                for (int i = 0; i < threads.size(); i++) {
                                    Thread thread = threads.get(i);
                                    if (!thread.isAlive()) {
                                        if (!shuttingDown) {
                                            // Replace a dead crawler thread in place
                                            // (same slot i, same crawler id i+1).
                                            logger.info("Thread " + i + " was dead, I'll recreate it.");
                                            T crawler = _c.newInstance();
                                            thread = new Thread(crawler, "Crawler " + (i + 1));
                                            threads.remove(i);
                                            threads.add(i, thread);
                                            crawler.setThread(thread);
                                            crawler.init(i + 1, controller);
                                            thread.start();
                                            crawlers.remove(i);
                                            crawlers.add(i, crawler);
                                        }
                                    } else if (crawlers.get(i).isNotWaitingForNewURLs()) {
                                        someoneIsWorking = true;
                                    }
                                }
                                if (!someoneIsWorking) {
                                    // Make sure again that none of the threads are alive.
                                    logger.info("It looks like no thread is working, waiting for 10 seconds to make sure...");
                                    sleep(10);

                                    someoneIsWorking = false;
                                    for (int i = 0; i < threads.size(); i++) {
                                        Thread thread = threads.get(i);
                                        if (thread.isAlive() && crawlers.get(i).isNotWaitingForNewURLs()) {
                                            someoneIsWorking = true;
                                        }
                                    }
                                    if (!someoneIsWorking) {
                                        if (!shuttingDown) {
                                            // Double-check the frontier queue before
                                            // declaring the crawl finished.
                                            long queueLength = frontier.getQueueLength();
                                            if (queueLength > 0) {
                                                continue;
                                            }
                                            logger.info("No thread is working and no more URLs are in queue waiting for another 10 seconds to make sure...");
                                            sleep(10);
                                            queueLength = frontier.getQueueLength();
                                            if (queueLength > 0) {
                                                continue;
                                            }
                                        }

                                        logger.info("All of the crawlers are stopped. Finishing the process...");
                                        // At this step, frontier notifies the threads
                                        // that were waiting for new URLs and they
                                        // should stop.
                                        frontier.finish();
                                        for (T crawler : crawlers) {
                                            crawler.onBeforeExit();
                                            crawlersLocalData.add(crawler.getMyLocalData());
                                        }

                                        logger.info("Waiting for 10 seconds before final clean up...");
                                        sleep(10);

                                        frontier.close();
                                        docIdServer.close();
                                        pageFetcher.shutDown();

                                        finished = true;
                                        waitingLock.notifyAll();

                                        return;
                                    }
                                }
                            }
                        }
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            });

            monitorThread.start();

            if (isBlocking) {
                waitUntilFinish();
            }

        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Wait until this crawling session finishes. Returns immediately if the
     * session is already finished.
     */
    public void waitUntilFinish() {
        while (!finished) {
            synchronized (waitingLock) {
                // Re-check under the lock to avoid missing the notification.
                if (finished) {
                    return;
                }
                try {
                    waitingLock.wait();
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        }
    }

    /**
     * Once the crawling session finishes the controller collects the local data
     * of the crawler threads and stores them in a List. This function returns
     * the reference to this list.
     */
    public List<Object> getCrawlersLocalData() {
        return crawlersLocalData;
    }

    /** Sleeps for the given number of seconds, swallowing interruption. */
    protected static void sleep(int seconds) {
        try {
            Thread.sleep(seconds * 1000);
        } catch (Exception ignored) {
            // Do nothing
        }
    }

    /**
     * Adds a new seed URL. A seed URL is a URL that is fetched by the crawler
     * to extract new URLs in it and follow them for crawling.
     *
     * @param pageUrl
     *            the URL of the seed
     */
    public void addSeed(String pageUrl) {
        addSeed(pageUrl, -1);
    }

    /**
     * Adds a new seed URL. A seed URL is a URL that is fetched by the crawler
     * to extract new URLs in it and follow them for crawling. You can also
     * specify a specific document id to be assigned to this seed URL. This
     * document id needs to be unique. Also, note that if you add three seeds
     * with document ids 1,2, and 7. Then the next URL that is found during the
     * crawl will get a doc id of 8. Also you need to ensure to add seeds in
     * increasing order of document ids.
     *
     * Specifying doc ids is mainly useful when you have had a previous crawl
     * and have stored the results and want to start a new crawl with seeds
     * which get the same document ids as the previous crawl.
     *
     * @param pageUrl
     *            the URL of the seed
     * @param docId
     *            the document id that you want to be assigned to this seed URL.
     *
     */
    public void addSeed(String pageUrl, int docId) {
        String canonicalUrl = URLCanonicalizer.getCanonicalURL(pageUrl);
        if (canonicalUrl == null) {
            logger.error("Invalid seed URL: " + pageUrl);
            return;
        }
        if (docId < 0) {
            // No doc id requested: reuse an existing id if the URL was seen
            // before (and skip re-adding it), otherwise allocate a fresh one.
            docId = docIdServer.getDocId(canonicalUrl);
            if (docId > 0) {
                // This URL is already seen.
                return;
            }
            docId = docIdServer.getNewDocID(canonicalUrl);
        } else {
            try {
                docIdServer.addUrlAndDocId(canonicalUrl, docId);
            } catch (Exception e) {
                logger.error("Could not add seed: " + e.getMessage());
            }
        }

        WebURL webUrl = new WebURL();
        webUrl.setURL(canonicalUrl);
        webUrl.setDocid(docId);
        webUrl.setDepth((short) 0);
        if (!robotstxtServer.allows(webUrl)) {
            logger.info("Robots.txt does not allow this seed: " + pageUrl);
        } else {
            frontier.schedule(webUrl);
        }
    }

    /**
     * This function can called to assign a specific document id to a url. This
     * feature is useful when you have had a previous crawl and have stored the
     * Urls and their associated document ids and want to have a new crawl which
     * is aware of the previously seen Urls and won't re-crawl them.
     *
     * Note that if you add three seen Urls with document ids 1,2, and 7. Then
     * the next URL that is found during the crawl will get a doc id of 8. Also
     * you need to ensure to add seen Urls in increasing order of document ids.
     *
     * @param url
     *            the URL of the page
     * @param docId
     *            the document id that you want to be assigned to this URL.
     *
     */
    public void addSeenUrl(String url, int docId) {
        String canonicalUrl = URLCanonicalizer.getCanonicalURL(url);
        if (canonicalUrl == null) {
            logger.error("Invalid Url: " + url);
            return;
        }
        try {
            docIdServer.addUrlAndDocId(canonicalUrl, docId);
        } catch (Exception e) {
            logger.error("Could not add seen url: " + e.getMessage());
        }
    }

    public PageFetcher getPageFetcher() {
        return pageFetcher;
    }

    public void setPageFetcher(PageFetcher pageFetcher) {
        this.pageFetcher = pageFetcher;
    }

    public RobotstxtServer getRobotstxtServer() {
        return robotstxtServer;
    }

    public void setRobotstxtServer(RobotstxtServer robotstxtServer) {
        this.robotstxtServer = robotstxtServer;
    }

    public Frontier getFrontier() {
        return frontier;
    }

    public void setFrontier(Frontier frontier) {
        this.frontier = frontier;
    }

    public DocIDServer getDocIdServer() {
        return docIdServer;
    }

    public void setDocIdServer(DocIDServer docIdServer) {
        this.docIdServer = docIdServer;
    }

    public Object getCustomData() {
        return customData;
    }

    public void setCustomData(Object customData) {
        this.customData = customData;
    }

    public boolean isFinished() {
        return this.finished;
    }

    public boolean isShuttingDown() {
        return shuttingDown;
    }

    /**
     * Set the current crawling session set to 'shutdown'. Crawler threads
     * monitor the shutdown flag and when it is set to true, they will no longer
     * process new pages.
     */
    public void shutdown() {
        logger.info("Shutting down...");
        this.shuttingDown = true;
        frontier.finish();
    }
}
Java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package edu.uci.ics.crawler4j.crawler;

/**
 * Base class for the core crawler components that carry a shared
 * {@link CrawlConfig}. Subclasses receive the config at construction time
 * and read it through the protected field or {@link #getConfig()}.
 *
 * @author Yasser Ganjisaffar <lastname at gmail dot com>
 */
public abstract class Configurable {

    /** The crawl configuration shared by this component. */
    protected CrawlConfig config;

    protected Configurable(CrawlConfig config) {
        this.config = config;
    }

    /** @return the crawl configuration this component was created with */
    public CrawlConfig getConfig() {
        return config;
    }
}
Java
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.uci.ics.crawler4j.crawler; public class CrawlConfig { /** * The folder which will be used by crawler for storing the intermediate * crawl data. The content of this folder should not be modified manually. */ private String crawlStorageFolder; /** * If this feature is enabled, you would be able to resume a previously * stopped/crashed crawl. However, it makes crawling slightly slower */ private boolean resumableCrawling = false; /** * Maximum depth of crawling For unlimited depth this parameter should be * set to -1 */ private int maxDepthOfCrawling = -1; /** * Maximum number of pages to fetch For unlimited number of pages, this * parameter should be set to -1 */ private int maxPagesToFetch = -1; /** * user-agent string that is used for representing your crawler to web * servers. See http://en.wikipedia.org/wiki/User_agent for more details */ private String userAgentString = "crawler4j (http://code.google.com/p/crawler4j/)"; /** * Politeness delay in milliseconds (delay between sending two requests to * the same host). */ private int politenessDelay = 200; /** * Should we also crawl https pages? 
*/ private boolean includeHttpsPages = false; /** * Should we fetch binary content such as images, audio, ...? */ private boolean includeBinaryContentInCrawling = false; /** * Maximum Connections per host */ private int maxConnectionsPerHost = 100; /** * Maximum total connections */ private int maxTotalConnections = 100; /** * Socket timeout in milliseconds */ private int socketTimeout = 20000; /** * Connection timeout in milliseconds */ private int connectionTimeout = 30000; /** * Max number of outgoing links which are processed from a page */ private int maxOutgoingLinksToFollow = 5000; /** * Max allowed size of a page. Pages larger than this size will not be * fetched. */ private int maxDownloadSize = 1048576; /** * Should we follow redirects? */ private boolean followRedirects = true; /** * If crawler should run behind a proxy, this parameter can be used for * specifying the proxy host. */ private String proxyHost = null; /** * If crawler should run behind a proxy, this parameter can be used for * specifying the proxy port. */ private int proxyPort = 80; /** * If crawler should run behind a proxy and user/pass is needed for * authentication in proxy, this parameter can be used for specifying the * username. */ private String proxyUsername = null; /** * If crawler should run behind a proxy and user/pass is needed for * authentication in proxy, this parameter can be used for specifying the * password. */ private String proxyPassword = null; public CrawlConfig() { } /** * Validates the configs specified by this instance. 
* * @throws Exception */ public void validate() throws Exception { if (crawlStorageFolder == null) { throw new Exception("Crawl storage folder is not set in the CrawlConfig."); } if (politenessDelay < 0) { throw new Exception("Invalid value for politeness delay: " + politenessDelay); } if (maxDepthOfCrawling < -1) { throw new Exception("Maximum crawl depth should be either a positive number or -1 for unlimited depth."); } if (maxDepthOfCrawling > Short.MAX_VALUE) { throw new Exception("Maximum value for crawl depth is " + Short.MAX_VALUE); } } public String getCrawlStorageFolder() { return crawlStorageFolder; } /** * The folder which will be used by crawler for storing the intermediate * crawl data. The content of this folder should not be modified manually. */ public void setCrawlStorageFolder(String crawlStorageFolder) { this.crawlStorageFolder = crawlStorageFolder; } public boolean isResumableCrawling() { return resumableCrawling; } /** * If this feature is enabled, you would be able to resume a previously * stopped/crashed crawl. However, it makes crawling slightly slower */ public void setResumableCrawling(boolean resumableCrawling) { this.resumableCrawling = resumableCrawling; } public int getMaxDepthOfCrawling() { return maxDepthOfCrawling; } /** * Maximum depth of crawling For unlimited depth this parameter should be * set to -1 */ public void setMaxDepthOfCrawling(int maxDepthOfCrawling) { this.maxDepthOfCrawling = maxDepthOfCrawling; } public int getMaxPagesToFetch() { return maxPagesToFetch; } /** * Maximum number of pages to fetch For unlimited number of pages, this * parameter should be set to -1 */ public void setMaxPagesToFetch(int maxPagesToFetch) { this.maxPagesToFetch = maxPagesToFetch; } public String getUserAgentString() { return userAgentString; } /** * user-agent string that is used for representing your crawler to web * servers. 
See http://en.wikipedia.org/wiki/User_agent for more details */ public void setUserAgentString(String userAgentString) { this.userAgentString = userAgentString; } public int getPolitenessDelay() { return politenessDelay; } /** * Politeness delay in milliseconds (delay between sending two requests to * the same host). * * @param politenessDelay * the delay in milliseconds. */ public void setPolitenessDelay(int politenessDelay) { this.politenessDelay = politenessDelay; } public boolean isIncludeHttpsPages() { return includeHttpsPages; } /** * Should we also crawl https pages? */ public void setIncludeHttpsPages(boolean includeHttpsPages) { this.includeHttpsPages = includeHttpsPages; } public boolean isIncludeBinaryContentInCrawling() { return includeBinaryContentInCrawling; } /** * Should we fetch binary content such as images, audio, ...? */ public void setIncludeBinaryContentInCrawling(boolean includeBinaryContentInCrawling) { this.includeBinaryContentInCrawling = includeBinaryContentInCrawling; } public int getMaxConnectionsPerHost() { return maxConnectionsPerHost; } /** * Maximum Connections per host */ public void setMaxConnectionsPerHost(int maxConnectionsPerHost) { this.maxConnectionsPerHost = maxConnectionsPerHost; } public int getMaxTotalConnections() { return maxTotalConnections; } /** * Maximum total connections */ public void setMaxTotalConnections(int maxTotalConnections) { this.maxTotalConnections = maxTotalConnections; } public int getSocketTimeout() { return socketTimeout; } /** * Socket timeout in milliseconds */ public void setSocketTimeout(int socketTimeout) { this.socketTimeout = socketTimeout; } public int getConnectionTimeout() { return connectionTimeout; } /** * Connection timeout in milliseconds */ public void setConnectionTimeout(int connectionTimeout) { this.connectionTimeout = connectionTimeout; } public int getMaxOutgoingLinksToFollow() { return maxOutgoingLinksToFollow; } /** * Max number of outgoing links which are processed from a page 
*/ public void setMaxOutgoingLinksToFollow(int maxOutgoingLinksToFollow) { this.maxOutgoingLinksToFollow = maxOutgoingLinksToFollow; } public int getMaxDownloadSize() { return maxDownloadSize; } /** * Max allowed size of a page. Pages larger than this size will not be * fetched. */ public void setMaxDownloadSize(int maxDownloadSize) { this.maxDownloadSize = maxDownloadSize; } public boolean isFollowRedirects() { return followRedirects; } /** * Should we follow redirects? */ public void setFollowRedirects(boolean followRedirects) { this.followRedirects = followRedirects; } public String getProxyHost() { return proxyHost; } /** * If crawler should run behind a proxy, this parameter can be used for * specifying the proxy host. */ public void setProxyHost(String proxyHost) { this.proxyHost = proxyHost; } public int getProxyPort() { return proxyPort; } /** * If crawler should run behind a proxy, this parameter can be used for * specifying the proxy port. */ public void setProxyPort(int proxyPort) { this.proxyPort = proxyPort; } public String getProxyUsername() { return proxyUsername; } /** * If crawler should run behind a proxy and user/pass is needed for * authentication in proxy, this parameter can be used for specifying the * username. */ public void setProxyUsername(String proxyUsername) { this.proxyUsername = proxyUsername; } public String getProxyPassword() { return proxyPassword; } /** * If crawler should run behind a proxy and user/pass is needed for * authentication in proxy, this parameter can be used for specifying the * password. 
*/ public void setProxyPassword(String proxyPassword) { this.proxyPassword = proxyPassword; } @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("Crawl storage folder: " + getCrawlStorageFolder() + "\n"); sb.append("Resumable crawling: " + isResumableCrawling() + "\n"); sb.append("Max depth of crawl: " + getMaxDepthOfCrawling() + "\n"); sb.append("Max pages to fetch: " + getMaxPagesToFetch() + "\n"); sb.append("User agent string: " + getUserAgentString() + "\n"); sb.append("Include https pages: " + isIncludeHttpsPages() + "\n"); sb.append("Include binary content: " + isIncludeBinaryContentInCrawling() + "\n"); sb.append("Max connections per host: " + getMaxConnectionsPerHost() + "\n"); sb.append("Max total connections: " + getMaxTotalConnections() + "\n"); sb.append("Socket timeout: " + getSocketTimeout() + "\n"); sb.append("Max total connections: " + getMaxTotalConnections() + "\n"); sb.append("Max outgoing links to follow: " + getMaxOutgoingLinksToFollow() + "\n"); sb.append("Max download size: " + getMaxDownloadSize() + "\n"); sb.append("Should follow redirects?: " + isFollowRedirects() + "\n"); sb.append("Proxy host: " + getProxyHost() + "\n"); sb.append("Proxy port: " + getProxyPort() + "\n"); sb.append("Proxy username: " + getProxyUsername() + "\n"); sb.append("Proxy password: " + getProxyPassword() + "\n"); return sb.toString(); } }
Java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package edu.uci.ics.crawler4j.crawler;

import edu.uci.ics.crawler4j.fetcher.PageFetchResult;
import edu.uci.ics.crawler4j.fetcher.CustomFetchStatus;
import edu.uci.ics.crawler4j.fetcher.PageFetcher;
import edu.uci.ics.crawler4j.frontier.DocIDServer;
import edu.uci.ics.crawler4j.frontier.Frontier;
import edu.uci.ics.crawler4j.parser.HtmlParseData;
import edu.uci.ics.crawler4j.parser.ParseData;
import edu.uci.ics.crawler4j.parser.Parser;
import edu.uci.ics.crawler4j.robotstxt.RobotstxtServer;
import edu.uci.ics.crawler4j.url.WebURL;
import org.apache.http.HttpStatus;
import org.apache.log4j.Logger;

import java.util.ArrayList;
import java.util.List;

/**
 * WebCrawler class in the Runnable class that is executed by each crawler
 * thread.
 *
 * @author Yasser Ganjisaffar <lastname at gmail dot com>
 */
public class WebCrawler implements Runnable {

    protected static final Logger logger = Logger.getLogger(WebCrawler.class.getName());

    /**
     * The id associated to the crawler thread running this instance
     */
    protected int myId;

    /**
     * The controller instance that has created this crawler thread. This
     * reference to the controller can be used for getting configurations of the
     * current crawl or adding new seeds during runtime.
     */
    protected CrawlController myController;

    /**
     * The thread within which this crawler instance is running.
     */
    private Thread myThread;

    /**
     * The parser that is used by this crawler instance to parse the content of
     * the fetched pages.
     */
    private Parser parser;

    /**
     * The fetcher that is used by this crawler instance to fetch the content of
     * pages from the web.
     */
    private PageFetcher pageFetcher;

    /**
     * The RobotstxtServer instance that is used by this crawler instance to
     * determine whether the crawler is allowed to crawl the content of each
     * page.
     */
    private RobotstxtServer robotstxtServer;

    /**
     * The DocIDServer that is used by this crawler instance to map each URL to
     * a unique docid.
     */
    private DocIDServer docIdServer;

    /**
     * The Frontier object that manages the crawl queue.
     */
    private Frontier frontier;

    /**
     * Is the current crawler instance waiting for new URLs? This field is
     * mainly used by the controller to detect whether all of the crawler
     * instances are waiting for new URLs and therefore there is no more work
     * and crawling can be stopped.
     */
    private boolean isWaitingForNewURLs;

    /**
     * Initializes the current instance of the crawler
     *
     * @param id
     *            the id of this crawler instance
     * @param crawlController
     *            the controller that manages this crawling session
     */
    public void init(int id, CrawlController crawlController) {
        this.myId = id;
        this.pageFetcher = crawlController.getPageFetcher();
        this.robotstxtServer = crawlController.getRobotstxtServer();
        this.docIdServer = crawlController.getDocIdServer();
        this.frontier = crawlController.getFrontier();
        this.parser = new Parser(crawlController.getConfig());
        this.myController = crawlController;
        this.isWaitingForNewURLs = false;
    }

    /**
     * Get the id of the current crawler instance
     *
     * @return the id of the current crawler instance
     */
    public int getMyId() {
        return myId;
    }

    public CrawlController getMyController() {
        return myController;
    }

    /**
     * This function is called just before starting the crawl by this crawler
     * instance. It can be used for setting up the data structures or
     * initializations needed by this crawler instance.
     */
    public void onStart() {
        // Do nothing by default
        // Sub-classed can override this to add their custom functionality
    }

    /**
     * This function is called just before the termination of the current
     * crawler instance. It can be used for persisting in-memory data or other
     * finalization tasks.
     */
    public void onBeforeExit() {
        // Do nothing by default
        // Sub-classed can override this to add their custom functionality
    }

    /**
     * This function is called once the header of a page is fetched. It can be
     * overwritten by sub-classes to perform custom logic for different status
     * codes. For example, 404 pages can be logged, etc.
     *
     * @param webUrl
     * @param statusCode
     * @param statusDescription
     */
    protected void handlePageStatusCode(WebURL webUrl, int statusCode, String statusDescription) {
        // Do nothing by default
        // Sub-classed can override this to add their custom functionality
    }

    /**
     * This function is called if the content of a url could not be fetched.
     *
     * @param webUrl
     */
    protected void onContentFetchError(WebURL webUrl) {
        // Do nothing by default
        // Sub-classed can override this to add their custom functionality
    }

    /**
     * This function is called if there has been an error in parsing the
     * content.
     *
     * @param webUrl
     */
    protected void onParseError(WebURL webUrl) {
        // Do nothing by default
        // Sub-classed can override this to add their custom functionality
    }

    /**
     * The CrawlController instance that has created this crawler instance will
     * call this function just before terminating this crawler thread. Classes
     * that extend WebCrawler can override this function to pass their local
     * data to their controller. The controller then puts these local data in a
     * List that can then be used for processing the local data of crawlers (if
     * needed).
     */
    public Object getMyLocalData() {
        return null;
    }

    /**
     * Main crawl loop: repeatedly pulls batches of up to 50 URLs from the
     * frontier and processes them until the frontier reports it is finished,
     * the controller shuts down, or the thread is interrupted.
     */
    public void run() {
        onStart();
        while (true) {
            List<WebURL> assignedURLs = new ArrayList<>(50);
            isWaitingForNewURLs = true;
            frontier.getNextURLs(50, assignedURLs);
            isWaitingForNewURLs = false;
            if (assignedURLs.isEmpty()) {
                if (frontier.isFinished()) {
                    return;
                }
                try {
                    Thread.sleep(3000);
                } catch (InterruptedException e) {
                    // FIX: previously the interrupt was swallowed (printStackTrace only).
                    // Restore the interrupt flag and exit so the thread terminates cleanly.
                    Thread.currentThread().interrupt();
                    logger.info("Crawler thread " + myId + " was interrupted; exiting.");
                    return;
                }
            } else {
                for (WebURL curURL : assignedURLs) {
                    if (curURL != null) {
                        processPage(curURL);
                        frontier.setProcessed(curURL);
                    }
                    if (myController.isShuttingDown()) {
                        logger.info("Exiting because of controller shutdown.");
                        return;
                    }
                }
            }
        }
    }

    /**
     * Classes that extends WebCrawler can overwrite this function to tell the
     * crawler whether the given url should be crawled or not. The following
     * implementation indicates that all urls should be included in the crawl.
     *
     * @param url
     *            the url which we are interested to know whether it should be
     *            included in the crawl or not.
     * @return if the url should be included in the crawl it returns true,
     *         otherwise false is returned.
     */
    public boolean shouldVisit(WebURL url) {
        return true;
    }

    /**
     * Classes that extends WebCrawler can overwrite this function to process
     * the content of the fetched and parsed page.
     *
     * @param page
     *            the page object that is just fetched and parsed.
     */
    public void visit(Page page) {
        // Do nothing by default
        // Sub-classed can override this to add their custom functionality
    }

    /**
     * Fetches, parses and visits a single URL: handles redirects, skips
     * already-seen pages, schedules outgoing links (subject to depth limit,
     * shouldVisit() and robots.txt) and finally invokes visit().
     */
    private void processPage(WebURL curURL) {
        if (curURL == null) {
            return;
        }
        PageFetchResult fetchResult = null;
        try {
            fetchResult = pageFetcher.fetchHeader(curURL);
            int statusCode = fetchResult.getStatusCode();
            handlePageStatusCode(curURL, statusCode, CustomFetchStatus.getStatusDescription(statusCode));
            if (statusCode != HttpStatus.SC_OK) {
                if (statusCode == HttpStatus.SC_MOVED_PERMANENTLY || statusCode == HttpStatus.SC_MOVED_TEMPORARILY) {
                    if (myController.getConfig().isFollowRedirects()) {
                        String movedToUrl = fetchResult.getMovedToUrl();
                        if (movedToUrl == null) {
                            return;
                        }
                        int newDocId = docIdServer.getDocId(movedToUrl);
                        if (newDocId > 0) {
                            // Redirect page is already seen
                            return;
                        }
                        // Schedule the redirect target at the SAME depth as the original URL.
                        WebURL webURL = new WebURL();
                        webURL.setURL(movedToUrl);
                        webURL.setParentDocid(curURL.getParentDocid());
                        webURL.setParentUrl(curURL.getParentUrl());
                        webURL.setDepth(curURL.getDepth());
                        webURL.setDocid(-1);
                        webURL.setAnchor(curURL.getAnchor());
                        if (shouldVisit(webURL) && robotstxtServer.allows(webURL)) {
                            webURL.setDocid(docIdServer.getNewDocID(movedToUrl));
                            frontier.schedule(webURL);
                        }
                    }
                } else if (fetchResult.getStatusCode() == CustomFetchStatus.PageTooBig) {
                    logger.info("Skipping a page which was bigger than max allowed size: " + curURL.getURL());
                }
                return;
            }

            // The fetcher may have transparently followed a redirect; if so,
            // re-key this page under the final URL unless it was already seen.
            if (!curURL.getURL().equals(fetchResult.getFetchedUrl())) {
                if (docIdServer.isSeenBefore(fetchResult.getFetchedUrl())) {
                    // Redirect page is already seen
                    return;
                }
                curURL.setURL(fetchResult.getFetchedUrl());
                curURL.setDocid(docIdServer.getNewDocID(fetchResult.getFetchedUrl()));
            }

            Page page = new Page(curURL);
            int docid = curURL.getDocid();
            if (!fetchResult.fetchContent(page)) {
                onContentFetchError(curURL);
                return;
            }
            if (!parser.parse(page, curURL.getURL())) {
                onParseError(curURL);
                return;
            }
            ParseData parseData = page.getParseData();
            if (parseData instanceof HtmlParseData) {
                HtmlParseData htmlParseData = (HtmlParseData) parseData;
                List<WebURL> toSchedule = new ArrayList<>();
                int maxCrawlDepth = myController.getConfig().getMaxDepthOfCrawling();
                for (WebURL webURL : htmlParseData.getOutgoingUrls()) {
                    webURL.setParentDocid(docid);
                    webURL.setParentUrl(curURL.getURL());
                    int newdocid = docIdServer.getDocId(webURL.getURL());
                    if (newdocid > 0) {
                        // This is not the first time that this Url is
                        // visited. So, we set the depth to a negative
                        // number.
                        webURL.setDepth((short) -1);
                        webURL.setDocid(newdocid);
                    } else {
                        webURL.setDocid(-1);
                        webURL.setDepth((short) (curURL.getDepth() + 1));
                        // maxCrawlDepth == -1 means "unlimited depth".
                        if (maxCrawlDepth == -1 || curURL.getDepth() < maxCrawlDepth) {
                            if (shouldVisit(webURL) && robotstxtServer.allows(webURL)) {
                                webURL.setDocid(docIdServer.getNewDocID(webURL.getURL()));
                                toSchedule.add(webURL);
                            }
                        }
                    }
                }
                frontier.scheduleAll(toSchedule);
            }
            try {
                visit(page);
            } catch (Exception e) {
                // FIX: pass the throwable to the logger instead of formatting
                // e.getStackTrace()[0] manually (which lost the trace and could
                // throw ArrayIndexOutOfBoundsException on an empty stack trace).
                logger.error("Exception while running the visit method for " + curURL.getURL(), e);
            }
        } catch (Exception e) {
            // FIX: include the throwable so the full stack trace is logged.
            logger.error("Exception while processing: " + curURL.getURL(), e);
        } finally {
            if (fetchResult != null) {
                fetchResult.discardContentIfNotConsumed();
            }
        }
    }

    public Thread getThread() {
        return myThread;
    }

    public void setThread(Thread myThread) {
        this.myThread = myThread;
    }

    public boolean isNotWaitingForNewURLs() {
        return !isWaitingForNewURLs;
    }
}
Java
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.uci.ics.crawler4j.util; /** * @author Yasser Ganjisaffar <lastname at gmail dot com> */ public class Util { public static byte[] long2ByteArray(long l) { byte[] array = new byte[8]; int i, shift; for(i = 0, shift = 56; i < 8; i++, shift -= 8) { array[i] = (byte)(0xFF & (l >> shift)); } return array; } public static byte[] int2ByteArray(int value) { byte[] b = new byte[4]; for (int i = 0; i < 4; i++) { int offset = (3 - i) * 8; b[i] = (byte) ((value >>> offset) & 0xFF); } return b; } public static void putIntInByteArray(int value, byte[] buf, int offset) { for (int i = 0; i < 4; i++) { int valueOffset = (3 - i) * 8; buf[offset + i] = (byte) ((value >>> valueOffset) & 0xFF); } } public static int byteArray2Int(byte[] b) { int value = 0; for (int i = 0; i < 4; i++) { int shift = (4 - 1 - i) * 8; value += (b[i] & 0x000000FF) << shift; } return value; } public static long byteArray2Long(byte[] b) { int value = 0; for (int i = 0; i < 8; i++) { int shift = (8 - 1 - i) * 8; value += (b[i] & 0x000000FF) << shift; } return value; } public static boolean hasBinaryContent(String contentType) { if (contentType != null) { String typeStr = contentType.toLowerCase(); if (typeStr.contains("image") || 
typeStr.contains("audio") || typeStr.contains("video") || typeStr.contains("application")) { return true; } } return false; } public static boolean hasPlainTextContent(String contentType) { if (contentType != null) { String typeStr = contentType.toLowerCase(); if (typeStr.contains("text/plain")) { return true; } } return false; } }
Java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package edu.uci.ics.crawler4j.util;

import java.io.*;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

/**
 * Small file-system helpers used by the crawler.
 *
 * @author Yasser Ganjisaffar <lastname at gmail dot com>
 */
public class IO {

    /**
     * Recursively deletes a folder and everything in it.
     *
     * @return true if the folder and all its contents were deleted.
     */
    public static boolean deleteFolder(File folder) {
        return deleteFolderContents(folder) && folder.delete();
    }

    /**
     * Recursively deletes the contents of a folder (but not the folder itself).
     *
     * @return true if every entry was deleted; false on the first failure, or
     *         if the path is not a readable directory.
     */
    public static boolean deleteFolderContents(File folder) {
        System.out.println("Deleting content of: " + folder.getAbsolutePath());
        File[] files = folder.listFiles();
        if (files == null) {
            // FIX: listFiles() returns null when the path is not a directory or
            // an I/O error occurs; previously this caused a NullPointerException.
            return false;
        }
        for (File file : files) {
            if (file.isFile()) {
                if (!file.delete()) {
                    return false;
                }
            } else {
                if (!deleteFolder(file)) {
                    return false;
                }
            }
        }
        return true;
    }

    /**
     * Writes the given bytes to the destination path, replacing any existing
     * content. Exceptions are printed and swallowed (best-effort, matching the
     * original contract).
     */
    public static void writeBytesToFile(byte[] bytes, String destination) {
        // FIX: use try-with-resources so the channel (and underlying stream)
        // is closed even when write() throws; previously it leaked on error.
        try (FileChannel fc = new FileOutputStream(destination).getChannel()) {
            fc.write(ByteBuffer.wrap(bytes));
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
Java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package edu.uci.ics.crawler4j.robotstxt;

import java.net.MalformedURLException;
import java.net.URL;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;

import org.apache.http.HttpStatus;

import edu.uci.ics.crawler4j.crawler.Page;
import edu.uci.ics.crawler4j.fetcher.PageFetchResult;
import edu.uci.ics.crawler4j.fetcher.PageFetcher;
import edu.uci.ics.crawler4j.url.WebURL;
import edu.uci.ics.crawler4j.util.Util;

/**
 * Fetches, parses and caches per-host robots.txt directives, and answers
 * whether a given URL may be crawled. The cache is bounded by
 * {@link RobotstxtConfig#getCacheSize()}; when full, the least-recently
 * accessed host entry is evicted.
 *
 * NOTE(review): reads from host2directivesCache in allows() are not inside the
 * synchronized block, so concurrent callers may race with eviction — confirm
 * whether callers rely on single-threaded access here.
 *
 * @author Yasser Ganjisaffar <lastname at gmail dot com>
 */
public class RobotstxtServer {

    // Configuration (enabled flag, user-agent name, cache size).
    protected RobotstxtConfig config;

    // host name (lower-cased) -> parsed directives for that host.
    protected final Map<String, HostDirectives> host2directivesCache = new HashMap<>();

    // Fetcher used to download robots.txt files over HTTP.
    protected PageFetcher pageFetcher;

    public RobotstxtServer(RobotstxtConfig config, PageFetcher pageFetcher) {
        this.config = config;
        this.pageFetcher = pageFetcher;
    }

    // Hosts are cached case-insensitively.
    private static String getHost(URL url) {
        return url.getHost().toLowerCase();
    }

    /**
     * Returns true when the given URL may be crawled according to the host's
     * robots.txt. Permissive on failure: if robots.txt checking is disabled or
     * the URL is malformed, the URL is allowed.
     */
    public boolean allows(WebURL webURL) {
        if (!config.isEnabled()) {
            return true;
        }
        try {
            URL url = new URL(webURL.getURL());
            String host = getHost(url);
            String path = url.getPath();

            HostDirectives directives = host2directivesCache.get(host);

            // Stale entries (older than HostDirectives' expiration delay) are
            // dropped and re-fetched below.
            if (directives != null && directives.needsRefetch()) {
                synchronized (host2directivesCache) {
                    host2directivesCache.remove(host);
                    directives = null;
                }
            }

            if (directives == null) {
                directives = fetchDirectives(url);
            }
            return directives.allows(path);
        } catch (MalformedURLException e) {
            e.printStackTrace();
        }
        // Malformed URL: fall through to "allowed" (best-effort policy).
        return true;
    }

    /**
     * Downloads and parses http://host[:port]/robots.txt for the url's host,
     * stores the result in the cache (evicting the least-recently-accessed
     * entry when the cache is full) and returns it. Always returns a non-null
     * HostDirectives — an empty one when the fetch or parse fails, so the
     * fetch time is still tracked and re-fetching is rate-limited.
     */
    private HostDirectives fetchDirectives(URL url) {
        WebURL robotsTxtUrl = new WebURL();
        String host = getHost(url);
        // Only include an explicit port when it differs from the default.
        String port = (url.getPort() == url.getDefaultPort() || url.getPort() == -1) ? "" : ":" + url.getPort();
        robotsTxtUrl.setURL("http://" + host + port + "/robots.txt");
        HostDirectives directives = null;
        PageFetchResult fetchResult = null;
        try {
            fetchResult = pageFetcher.fetchHeader(robotsTxtUrl);
            if (fetchResult.getStatusCode() == HttpStatus.SC_OK) {
                Page page = new Page(robotsTxtUrl);
                fetchResult.fetchContent(page);
                // Only text/plain responses are treated as robots.txt content.
                if (Util.hasPlainTextContent(page.getContentType())) {
                    try {
                        String content;
                        if (page.getContentCharset() == null) {
                            // No charset declared: fall back to platform default.
                            content = new String(page.getContentData());
                        } else {
                            content = new String(page.getContentData(), page.getContentCharset());
                        }
                        directives = RobotstxtParser.parse(content, config.getUserAgentName());
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        } finally {
            if (fetchResult != null) {
                fetchResult.discardContentIfNotConsumed();
            }
        }
        if (directives == null) {
            // We still need to have this object to keep track of the time we
            // fetched it
            directives = new HostDirectives();
        }
        synchronized (host2directivesCache) {
            if (host2directivesCache.size() == config.getCacheSize()) {
                // Cache full: evict the entry with the oldest last-access time.
                String minHost = null;
                long minAccessTime = Long.MAX_VALUE;
                for (Entry<String, HostDirectives> entry : host2directivesCache.entrySet()) {
                    if (entry.getValue().getLastAccessTime() < minAccessTime) {
                        minAccessTime = entry.getValue().getLastAccessTime();
                        minHost = entry.getKey();
                    }
                }
                host2directivesCache.remove(minHost);
            }
            host2directivesCache.put(host, directives);
        }
        return directives;
    }
}
Java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package edu.uci.ics.crawler4j.robotstxt;

import java.util.StringTokenizer;

/**
 * Minimal robots.txt parser: extracts the Allow/Disallow rules of the record
 * matching the given user agent (or the wildcard "*" record).
 *
 * @author Yasser Ganjisaffar <lastname at gmail dot com>
 */
public class RobotstxtParser {

    // Case-insensitive full-line matchers for the three directives we handle.
    private static final String PATTERNS_USERAGENT = "(?i)^User-agent:.*";
    private static final String PATTERNS_DISALLOW = "(?i)Disallow:.*";
    private static final String PATTERNS_ALLOW = "(?i)Allow:.*";

    // Lengths of "User-agent:", "Disallow:" and "Allow:" — used to strip the
    // directive prefix before reading its value.
    private static final int PATTERNS_USERAGENT_LENGTH = 11;
    private static final int PATTERNS_DISALLOW_LENGTH = 9;
    private static final int PATTERNS_ALLOW_LENGTH = 6;

    /**
     * Parses robots.txt content and returns the directives that apply to
     * {@code myUserAgent}, or null when no applicable rule was found.
     *
     * @param content      the raw robots.txt text
     * @param myUserAgent  the crawler's user-agent name (matched
     *                     case-insensitively against User-agent records)
     * @return directives for the matching record(s), or null if none matched
     */
    public static HostDirectives parse(String content, String myUserAgent) {
        HostDirectives directives = null;
        boolean inMatchingUserAgent = false;

        // FIX: the User-agent value is lower-cased before comparison but the
        // configured agent name was not, so mixed-case agent names (e.g.
        // "MyBot") never matched their own record. Lower-case it once here.
        String myUserAgentLower = myUserAgent == null ? "" : myUserAgent.toLowerCase();

        StringTokenizer st = new StringTokenizer(content, "\n");
        while (st.hasMoreTokens()) {
            String line = st.nextToken();

            // Strip trailing comments.
            int commentIndex = line.indexOf("#");
            if (commentIndex > -1) {
                line = line.substring(0, commentIndex);
            }

            // remove any html markup
            line = line.replaceAll("<[^>]+>", "");

            line = line.trim();
            if (line.length() == 0) {
                continue;
            }

            if (line.matches(PATTERNS_USERAGENT)) {
                String ua = line.substring(PATTERNS_USERAGENT_LENGTH).trim().toLowerCase();
                // "*" applies to everyone; otherwise a substring match on our name.
                inMatchingUserAgent = ua.equals("*") || ua.contains(myUserAgentLower);
            } else if (line.matches(PATTERNS_DISALLOW)) {
                if (!inMatchingUserAgent) {
                    continue;
                }
                String path = line.substring(PATTERNS_DISALLOW_LENGTH).trim();
                // Trailing "*" is redundant for prefix matching; drop it.
                if (path.endsWith("*")) {
                    path = path.substring(0, path.length() - 1);
                }
                path = path.trim();
                // An empty Disallow value means "allow everything" — ignore it.
                if (path.length() > 0) {
                    if (directives == null) {
                        directives = new HostDirectives();
                    }
                    directives.addDisallow(path);
                }
            } else if (line.matches(PATTERNS_ALLOW)) {
                if (!inMatchingUserAgent) {
                    continue;
                }
                String path = line.substring(PATTERNS_ALLOW_LENGTH).trim();
                if (path.endsWith("*")) {
                    path = path.substring(0, path.length() - 1);
                }
                path = path.trim();
                if (directives == null) {
                    directives = new HostDirectives();
                }
                directives.addAllow(path);
            }
        }
        return directives;
    }
}
Java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package edu.uci.ics.crawler4j.robotstxt;

import java.util.SortedSet;
import java.util.TreeSet;

/**
 * A sorted set of path prefixes that stays prefix-minimal: whenever a string
 * is inserted, entries that are redundant (because a shorter prefix of them is
 * already in the set) are never kept. This makes prefix lookups a single
 * headSet() probe.
 */
public class RuleSet extends TreeSet<String> {

    private static final long serialVersionUID = 1L;

    /**
     * Inserts {@code str} unless a prefix of it is already present. After a
     * successful insert, every existing entry that has {@code str} as a prefix
     * is dropped, keeping the set prefix-minimal.
     *
     * @return false when a prefix of {@code str} was already stored (nothing
     *         changes), otherwise the result of the underlying add.
     */
    @Override
    public boolean add(String str) {
        // Everything strictly below str in sort order; if its largest element
        // is a prefix of str, str is redundant.
        SortedSet<String> below = headSet(str);
        if (!below.isEmpty() && str.startsWith(below.last())) {
            return false;
        }

        boolean inserted = super.add(str);

        // "str + \0" is the smallest string sorting just after str itself, so
        // this view holds every entry strictly greater than str. Those that
        // start with str are now redundant and get removed.
        SortedSet<String> above = tailSet(str + "\0");
        while (!above.isEmpty() && above.first().startsWith(str)) {
            above.remove(above.first());
        }
        return inserted;
    }

    /**
     * @return true when some stored entry is a prefix of {@code s} (including
     *         {@code s} itself).
     */
    public boolean containsPrefixOf(String s) {
        // The set is prefix-minimal, so checking the largest element below s
        // suffices for any strict prefix...
        SortedSet<String> below = headSet(s);
        if (!below.isEmpty() && s.startsWith(below.last())) {
            return true;
        }
        // ...but headSet() excludes s itself, so test exact membership too.
        return contains(s);
    }
}
Java
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.uci.ics.crawler4j.robotstxt; public class RobotstxtConfig { /** * Should the crawler obey Robots.txt protocol? More info on Robots.txt is * available at http://www.robotstxt.org/ */ private boolean enabled = true; /** * user-agent name that will be used to determine whether some servers have * specific rules for this agent name. */ private String userAgentName = "crawler4j"; /** * The maximum number of hosts for which their robots.txt is cached. */ private int cacheSize = 500; public boolean isEnabled() { return enabled; } public void setEnabled(boolean enabled) { this.enabled = enabled; } public String getUserAgentName() { return userAgentName; } public void setUserAgentName(String userAgentName) { this.userAgentName = userAgentName; } public int getCacheSize() { return cacheSize; } public void setCacheSize(int cacheSize) { this.cacheSize = cacheSize; } }
Java
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.uci.ics.crawler4j.robotstxt; /** * @author Yasser Ganjisaffar <lastname at gmail dot com> */ public class HostDirectives { // If we fetched the directives for this host more than // 24 hours, we have to re-fetch it. private static final long EXPIRATION_DELAY = 24 * 60 * 1000L; private RuleSet disallows = new RuleSet(); private RuleSet allows = new RuleSet(); private long timeFetched; private long timeLastAccessed; public HostDirectives() { timeFetched = System.currentTimeMillis(); } public boolean needsRefetch() { return (System.currentTimeMillis() - timeFetched > EXPIRATION_DELAY); } public boolean allows(String path) { timeLastAccessed = System.currentTimeMillis(); return !disallows.containsPrefixOf(path) || allows.containsPrefixOf(path); } public void addDisallow(String path) { disallows.add(path); } public void addAllow(String path) { allows.add(path); } public long getLastAccessTime() { return timeLastAccessed; } }
Java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package edu.uci.ics.crawler4j.frontier;

import com.sleepycat.je.*;
import edu.uci.ics.crawler4j.url.WebURL;
import edu.uci.ics.crawler4j.util.Util;

import java.util.ArrayList;
import java.util.List;

/**
 * A priority queue of WebURLs backed by a Berkeley DB JE database. The
 * database key (see getDatabaseEntryKey) orders the entries so that a plain
 * cursor scan returns URLs in crawl-priority order. When {@code resumable} is
 * true the database is transactional (survives restarts); otherwise it uses
 * deferred writes for speed.
 *
 * @author Yasser Ganjisaffar <lastname at gmail dot com>
 */
public class WorkQueues {

    // The underlying Berkeley DB holding the serialized WebURLs.
    protected Database urlsDB = null;
    protected Environment env;

    // True => transactional DB for crash-safe resuming; false => deferred-write.
    protected boolean resumable;

    // Serializer between WebURL objects and DatabaseEntry values.
    protected WebURLTupleBinding webURLBinding;

    // Guards all cursor operations on urlsDB.
    protected final Object mutex = new Object();

    /**
     * Opens (creating if needed) the named database in the given environment.
     * Transactional when resumable, deferred-write otherwise — the two modes
     * are mutually exclusive in Berkeley DB JE.
     */
    public WorkQueues(Environment env, String dbName, boolean resumable) throws DatabaseException {
        this.env = env;
        this.resumable = resumable;
        DatabaseConfig dbConfig = new DatabaseConfig();
        dbConfig.setAllowCreate(true);
        dbConfig.setTransactional(resumable);
        dbConfig.setDeferredWrite(!resumable);
        urlsDB = env.openDatabase(null, dbName, dbConfig);
        webURLBinding = new WebURLTupleBinding();
    }

    /**
     * Returns up to {@code max} URLs from the head of the queue (lowest keys
     * first) WITHOUT removing them; callers pair this with delete(int).
     * Entries with empty values are skipped but still advance the cursor.
     */
    public List<WebURL> get(int max) throws DatabaseException {
        synchronized (mutex) {
            int matches = 0;
            List<WebURL> results = new ArrayList<>(max);

            Cursor cursor = null;
            OperationStatus result;
            DatabaseEntry key = new DatabaseEntry();
            DatabaseEntry value = new DatabaseEntry();
            Transaction txn;
            // Cursors on a transactional DB need an explicit transaction.
            if (resumable) {
                txn = env.beginTransaction(null, null);
            } else {
                txn = null;
            }
            try {
                cursor = urlsDB.openCursor(txn, null);
                result = cursor.getFirst(key, value, null);

                while (matches < max && result == OperationStatus.SUCCESS) {
                    if (value.getData().length > 0) {
                        results.add(webURLBinding.entryToObject(value));
                        matches++;
                    }
                    result = cursor.getNext(key, value, null);
                }
            } catch (DatabaseException e) {
                if (txn != null) {
                    txn.abort();
                    // Null out so the finally block does not also commit.
                    txn = null;
                }
                throw e;
            } finally {
                if (cursor != null) {
                    cursor.close();
                }
                if (txn != null) {
                    txn.commit();
                }
            }
            return results;
        }
    }

    /**
     * Removes the first {@code count} entries from the head of the queue.
     * Mirrors the cursor/transaction pattern of get(int).
     */
    public void delete(int count) throws DatabaseException {
        synchronized (mutex) {
            int matches = 0;

            Cursor cursor = null;
            OperationStatus result;
            DatabaseEntry key = new DatabaseEntry();
            DatabaseEntry value = new DatabaseEntry();
            Transaction txn;
            if (resumable) {
                txn = env.beginTransaction(null, null);
            } else {
                txn = null;
            }
            try {
                cursor = urlsDB.openCursor(txn, null);
                result = cursor.getFirst(key, value, null);

                while (matches < count && result == OperationStatus.SUCCESS) {
                    cursor.delete();
                    matches++;
                    result = cursor.getNext(key, value, null);
                }
            } catch (DatabaseException e) {
                if (txn != null) {
                    txn.abort();
                    txn = null;
                }
                throw e;
            } finally {
                if (cursor != null) {
                    cursor.close();
                }
                if (txn != null) {
                    txn.commit();
                }
            }
        }
    }

    /*
     * The key that is used for storing URLs determines the order
     * they are crawled. Lower key values results in earlier crawling.
     * Here our keys are 6 bytes. The first byte comes from the URL priority.
     * The second byte comes from depth of crawl at which this URL is first found.
     * The rest of the 4 bytes come from the docid of the URL. As a result,
     * URLs with lower priority numbers will be crawled earlier. If priority
     * numbers are the same, those found at lower depths will be crawled earlier.
     * If depth is also equal, those found earlier (therefore, smaller docid) will
     * be crawled earlier.
     */
    protected DatabaseEntry getDatabaseEntryKey(WebURL url) {
        byte[] keyData = new byte[6];
        keyData[0] = url.getPriority();
        // Depths beyond a byte are clamped to Byte.MAX_VALUE.
        keyData[1] = (url.getDepth() > Byte.MAX_VALUE ? Byte.MAX_VALUE : (byte) url.getDepth());
        Util.putIntInByteArray(url.getDocid(), keyData, 2);
        return new DatabaseEntry(keyData);
    }

    /**
     * Adds (or overwrites, for an equal key) a URL in the queue.
     */
    public void put(WebURL url) throws DatabaseException {
        DatabaseEntry value = new DatabaseEntry();
        webURLBinding.objectToEntry(url, value);
        Transaction txn;
        if (resumable) {
            txn = env.beginTransaction(null, null);
        } else {
            txn = null;
        }
        urlsDB.put(txn, getDatabaseEntryKey(url), value);
        if (resumable) {
            if (txn != null) {
                txn.commit();
            }
        }
    }

    /**
     * @return the number of queued entries, or -1 if counting fails.
     */
    public long getLength() {
        try {
            return urlsDB.count();
        } catch (Exception e) {
            e.printStackTrace();
        }
        return -1;
    }

    /**
     * Flushes deferred writes to disk. A resumable (transactional) database
     * has nothing to sync — the constructor only enables deferred-write mode
     * when not resumable — so this is a no-op in that case.
     */
    public void sync() {
        if (resumable) {
            return;
        }
        if (urlsDB == null) {
            return;
        }
        try {
            urlsDB.sync();
        } catch (DatabaseException e) {
            e.printStackTrace();
        }
    }

    public void close() {
        try {
            urlsDB.close();
        } catch (DatabaseException e) {
            e.printStackTrace();
        }
    }
}
Java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package edu.uci.ics.crawler4j.frontier;

import org.apache.log4j.Logger;

import com.sleepycat.je.Cursor;
import com.sleepycat.je.DatabaseEntry;
import com.sleepycat.je.DatabaseException;
import com.sleepycat.je.Environment;
import com.sleepycat.je.OperationStatus;
import com.sleepycat.je.Transaction;

import edu.uci.ics.crawler4j.url.WebURL;

/**
 * This class maintains the list of pages which are
 * assigned to crawlers but are not yet processed.
 * It is used for resuming a previous crawl.
 *
 * @author Yasser Ganjisaffar <lastname at gmail dot com>
 */
public class InProcessPagesDB extends WorkQueues {

    private static final Logger logger = Logger.getLogger(InProcessPagesDB.class.getName());

    /**
     * Opens the "InProcessPagesDB" database; always resumable (transactional)
     * since its whole purpose is surviving restarts. Logs any URLs left over
     * from a previous (interrupted) crawl.
     */
    public InProcessPagesDB(Environment env) throws DatabaseException {
        super(env, "InProcessPagesDB", true);
        long docCount = getLength();
        if (docCount > 0) {
            logger.info("Loaded " + docCount + " URLs that have been in process in the previous crawl.");
        }
    }

    /**
     * Removes the entry for the given URL (looked up by its priority/depth/
     * docid key — see WorkQueues.getDatabaseEntryKey).
     *
     * @return true if the entry existed and was deleted; false otherwise, or
     *         if any exception occurred (printed and swallowed).
     */
    public boolean removeURL(WebURL webUrl) {
        synchronized (mutex) {
            try {
                DatabaseEntry key = getDatabaseEntryKey(webUrl);
                Cursor cursor = null;
                OperationStatus result;
                DatabaseEntry value = new DatabaseEntry();
                // This DB is always transactional (see constructor), so a
                // transaction is always started here.
                Transaction txn = env.beginTransaction(null, null);
                try {
                    cursor = urlsDB.openCursor(txn, null);
                    result = cursor.getSearchKey(key, value, null);

                    if (result == OperationStatus.SUCCESS) {
                        result = cursor.delete();
                        if (result == OperationStatus.SUCCESS) {
                            // NOTE: the finally block below still runs and
                            // commits the transaction before returning.
                            return true;
                        }
                    }
                } catch (DatabaseException e) {
                    if (txn != null) {
                        txn.abort();
                        // Null out so the finally block does not also commit.
                        txn = null;
                    }
                    throw e;
                } finally {
                    if (cursor != null) {
                        cursor.close();
                    }
                    if (txn != null) {
                        txn.commit();
                    }
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
        return false;
    }
}
Java
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.uci.ics.crawler4j.frontier; import com.sleepycat.bind.tuple.TupleBinding; import com.sleepycat.bind.tuple.TupleInput; import com.sleepycat.bind.tuple.TupleOutput; import edu.uci.ics.crawler4j.url.WebURL; /** * @author Yasser Ganjisaffar <lastname at gmail dot com> */ public class WebURLTupleBinding extends TupleBinding<WebURL> { @Override public WebURL entryToObject(TupleInput input) { WebURL webURL = new WebURL(); webURL.setURL(input.readString()); webURL.setDocid(input.readInt()); webURL.setParentDocid(input.readInt()); webURL.setParentUrl(input.readString()); webURL.setDepth(input.readShort()); webURL.setPriority(input.readByte()); webURL.setAnchor(input.readString()); return webURL; } @Override public void objectToEntry(WebURL url, TupleOutput output) { output.writeString(url.getURL()); output.writeInt(url.getDocid()); output.writeInt(url.getParentDocid()); output.writeString(url.getParentUrl()); output.writeShort(url.getDepth()); output.writeByte(url.getPriority()); output.writeString(url.getAnchor()); } }
Java
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.uci.ics.crawler4j.frontier; import org.apache.log4j.Logger; import com.sleepycat.je.*; import edu.uci.ics.crawler4j.crawler.Configurable; import edu.uci.ics.crawler4j.crawler.CrawlConfig; import edu.uci.ics.crawler4j.util.Util; /** * @author Yasser Ganjisaffar <lastname at gmail dot com> */ public class DocIDServer extends Configurable { protected static final Logger logger = Logger.getLogger(DocIDServer.class.getName()); protected Database docIDsDB = null; protected final Object mutex = new Object(); protected int lastDocID; public DocIDServer(Environment env, CrawlConfig config) throws DatabaseException { super(config); DatabaseConfig dbConfig = new DatabaseConfig(); dbConfig.setAllowCreate(true); dbConfig.setTransactional(config.isResumableCrawling()); dbConfig.setDeferredWrite(!config.isResumableCrawling()); docIDsDB = env.openDatabase(null, "DocIDs", dbConfig); if (config.isResumableCrawling()) { int docCount = getDocCount(); if (docCount > 0) { logger.info("Loaded " + docCount + " URLs that had been detected in previous crawl."); lastDocID = docCount; } } else { lastDocID = 0; } } /** * Returns the docid of an already seen url. 
* * @param url the URL for which the docid is returned. * @return the docid of the url if it is seen before. Otherwise -1 is returned. */ public int getDocId(String url) { synchronized (mutex) { if (docIDsDB == null) { return -1; } OperationStatus result; DatabaseEntry value = new DatabaseEntry(); try { DatabaseEntry key = new DatabaseEntry(url.getBytes()); result = docIDsDB.get(null, key, value, null); if (result == OperationStatus.SUCCESS && value.getData().length > 0) { return Util.byteArray2Int(value.getData()); } } catch (Exception e) { e.printStackTrace(); } return -1; } } public int getNewDocID(String url) { synchronized (mutex) { try { // Make sure that we have not already assigned a docid for this URL int docid = getDocId(url); if (docid > 0) { return docid; } lastDocID++; docIDsDB.put(null, new DatabaseEntry(url.getBytes()), new DatabaseEntry(Util.int2ByteArray(lastDocID))); return lastDocID; } catch (Exception e) { e.printStackTrace(); } return -1; } } public void addUrlAndDocId(String url, int docId) throws Exception { synchronized (mutex) { if (docId <= lastDocID) { throw new Exception("Requested doc id: " + docId + " is not larger than: " + lastDocID); } // Make sure that we have not already assigned a docid for this URL int prevDocid = getDocId(url); if (prevDocid > 0) { if (prevDocid == docId) { return; } throw new Exception("Doc id: " + prevDocid + " is already assigned to URL: " + url); } docIDsDB.put(null, new DatabaseEntry(url.getBytes()), new DatabaseEntry(Util.int2ByteArray(docId))); lastDocID = docId; } } public boolean isSeenBefore(String url) { return getDocId(url) != -1; } public int getDocCount() { try { return (int) docIDsDB.count(); } catch (DatabaseException e) { e.printStackTrace(); } return -1; } public void sync() { if (config.isResumableCrawling()) { return; } if (docIDsDB == null) { return; } try { docIDsDB.sync(); } catch (DatabaseException e) { e.printStackTrace(); } } public void close() { try { docIDsDB.close(); } catch 
(DatabaseException e) { e.printStackTrace(); } } }
Java
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.uci.ics.crawler4j.frontier; import com.sleepycat.je.*; import edu.uci.ics.crawler4j.crawler.Configurable; import edu.uci.ics.crawler4j.crawler.CrawlConfig; import edu.uci.ics.crawler4j.util.Util; import java.util.HashMap; import java.util.Map; /** * @author Yasser Ganjisaffar <lastname at gmail dot com> */ public class Counters extends Configurable { public class ReservedCounterNames { public final static String SCHEDULED_PAGES = "Scheduled-Pages"; public final static String PROCESSED_PAGES = "Processed-Pages"; } protected Database statisticsDB = null; protected Environment env; protected final Object mutex = new Object(); protected Map<String, Long> counterValues; public Counters(Environment env, CrawlConfig config) throws DatabaseException { super(config); this.env = env; this.counterValues = new HashMap<>(); /* * When crawling is set to be resumable, we have to keep the statistics * in a transactional database to make sure they are not lost if crawler * is crashed or terminated unexpectedly. 
*/ if (config.isResumableCrawling()) { DatabaseConfig dbConfig = new DatabaseConfig(); dbConfig.setAllowCreate(true); dbConfig.setTransactional(true); dbConfig.setDeferredWrite(false); statisticsDB = env.openDatabase(null, "Statistics", dbConfig); OperationStatus result; DatabaseEntry key = new DatabaseEntry(); DatabaseEntry value = new DatabaseEntry(); Transaction tnx = env.beginTransaction(null, null); Cursor cursor = statisticsDB.openCursor(tnx, null); result = cursor.getFirst(key, value, null); while (result == OperationStatus.SUCCESS) { if (value.getData().length > 0) { String name = new String(key.getData()); long counterValue = Util.byteArray2Long(value.getData()); counterValues.put(name, new Long(counterValue)); } result = cursor.getNext(key, value, null); } cursor.close(); tnx.commit(); } } public long getValue(String name) { synchronized (mutex) { Long value = counterValues.get(name); if (value == null) { return 0; } return value.longValue(); } } public void setValue(String name, long value) { synchronized (mutex) { try { counterValues.put(name, new Long(value)); if (statisticsDB != null) { Transaction txn = env.beginTransaction(null, null); statisticsDB.put(txn, new DatabaseEntry(name.getBytes()), new DatabaseEntry(Util.long2ByteArray(value))); txn.commit(); } } catch (Exception e) { e.printStackTrace(); } } } public void increment(String name) { increment(name, 1); } public void increment(String name, long addition) { synchronized (mutex) { long prevValue = getValue(name); setValue(name, prevValue + addition); } } public void sync() { if (config.isResumableCrawling()) { return; } if (statisticsDB == null) { return; } try { statisticsDB.sync(); } catch (DatabaseException e) { e.printStackTrace(); } } public void close() { try { if (statisticsDB != null) { statisticsDB.close(); } } catch (DatabaseException e) { e.printStackTrace(); } } }
Java
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.uci.ics.crawler4j.frontier; import com.sleepycat.je.DatabaseException; import com.sleepycat.je.Environment; import edu.uci.ics.crawler4j.crawler.Configurable; import edu.uci.ics.crawler4j.crawler.CrawlConfig; import edu.uci.ics.crawler4j.frontier.Counters.ReservedCounterNames; import edu.uci.ics.crawler4j.url.WebURL; import org.apache.log4j.Logger; import java.util.List; /** * @author Yasser Ganjisaffar <lastname at gmail dot com> */ public class Frontier extends Configurable { protected static final Logger logger = Logger.getLogger(Frontier.class.getName()); protected WorkQueues workQueues; protected InProcessPagesDB inProcessPages; protected final Object mutex = new Object(); protected final Object waitingList = new Object(); protected boolean isFinished = false; protected long scheduledPages; protected DocIDServer docIdServer; protected Counters counters; public Frontier(Environment env, CrawlConfig config, DocIDServer docIdServer) { super(config); this.counters = new Counters(env, config); this.docIdServer = docIdServer; try { workQueues = new WorkQueues(env, "PendingURLsDB", config.isResumableCrawling()); if (config.isResumableCrawling()) { scheduledPages = 
counters.getValue(ReservedCounterNames.SCHEDULED_PAGES); inProcessPages = new InProcessPagesDB(env); long numPreviouslyInProcessPages = inProcessPages.getLength(); if (numPreviouslyInProcessPages > 0) { logger.info("Rescheduling " + numPreviouslyInProcessPages + " URLs from previous crawl."); scheduledPages -= numPreviouslyInProcessPages; while (true) { List<WebURL> urls = inProcessPages.get(100); if (urls.size() == 0) { break; } scheduleAll(urls); inProcessPages.delete(urls.size()); } } } else { inProcessPages = null; scheduledPages = 0; } } catch (DatabaseException e) { logger.error("Error while initializing the Frontier: " + e.getMessage()); workQueues = null; } } public void scheduleAll(List<WebURL> urls) { int maxPagesToFetch = config.getMaxPagesToFetch(); synchronized (mutex) { int newScheduledPage = 0; for (WebURL url : urls) { if (maxPagesToFetch > 0 && (scheduledPages + newScheduledPage) >= maxPagesToFetch) { break; } try { workQueues.put(url); newScheduledPage++; } catch (DatabaseException e) { logger.error("Error while puting the url in the work queue."); } } if (newScheduledPage > 0) { scheduledPages += newScheduledPage; counters.increment(Counters.ReservedCounterNames.SCHEDULED_PAGES, newScheduledPage); } synchronized (waitingList) { waitingList.notifyAll(); } } } public void schedule(WebURL url) { int maxPagesToFetch = config.getMaxPagesToFetch(); synchronized (mutex) { try { if (maxPagesToFetch < 0 || scheduledPages < maxPagesToFetch) { workQueues.put(url); scheduledPages++; counters.increment(Counters.ReservedCounterNames.SCHEDULED_PAGES); } } catch (DatabaseException e) { logger.error("Error while puting the url in the work queue."); } } } public void getNextURLs(int max, List<WebURL> result) { while (true) { synchronized (mutex) { if (isFinished) { return; } try { List<WebURL> curResults = workQueues.get(max); workQueues.delete(curResults.size()); if (inProcessPages != null) { for (WebURL curPage : curResults) { inProcessPages.put(curPage); } } 
result.addAll(curResults); } catch (DatabaseException e) { logger.error("Error while getting next urls: " + e.getMessage()); e.printStackTrace(); } if (result.size() > 0) { return; } } try { synchronized (waitingList) { waitingList.wait(); } } catch (InterruptedException ignored) { // Do nothing } if (isFinished) { return; } } } public void setProcessed(WebURL webURL) { counters.increment(ReservedCounterNames.PROCESSED_PAGES); if (inProcessPages != null) { if (!inProcessPages.removeURL(webURL)) { logger.warn("Could not remove: " + webURL.getURL() + " from list of processed pages."); } } } public long getQueueLength() { return workQueues.getLength(); } public long getNumberOfAssignedPages() { return inProcessPages.getLength(); } public long getNumberOfProcessedPages() { return counters.getValue(ReservedCounterNames.PROCESSED_PAGES); } public void sync() { workQueues.sync(); docIdServer.sync(); counters.sync(); } public boolean isFinished() { return isFinished; } public void close() { sync(); workQueues.close(); counters.close(); } public void finish() { isFinished = true; synchronized (waitingList) { waitingList.notifyAll(); } } }
Java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package edu.uci.ics.crawler4j.parser;

/**
 * Placeholder {@link ParseData} used for pages with binary content, which
 * cannot be rendered as text. Stateless, so a single shared instance is used.
 */
public class BinaryParseData implements ParseData {

    // Fix: field is now final so the singleton cannot be reassigned.
    private static final BinaryParseData instance = new BinaryParseData();

    /** @return the shared singleton instance. */
    public static BinaryParseData getInstance() {
        return instance;
    }

    @Override
    public String toString() {
        return "[Binary parse data can not be dumped as string]";
    }
}
Java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package edu.uci.ics.crawler4j.parser;

import edu.uci.ics.crawler4j.url.WebURL;

import java.util.List;

/**
 * {@link ParseData} produced for HTML pages: the raw HTML, the extracted
 * plain text, the page title, and the list of outgoing links.
 */
public class HtmlParseData implements ParseData {

    private String html;        // raw HTML source of the page
    private String text;        // extracted body text
    private String title;       // page title
    private List<WebURL> outgoingUrls;  // links discovered in the page

    public String getHtml() {
        return html;
    }

    public void setHtml(String html) {
        this.html = html;
    }

    public String getText() {
        return text;
    }

    public void setText(String text) {
        this.text = text;
    }

    public String getTitle() {
        return title;
    }

    public void setTitle(String title) {
        this.title = title;
    }

    public List<WebURL> getOutgoingUrls() {
        return outgoingUrls;
    }

    public void setOutgoingUrls(List<WebURL> outgoingUrls) {
        this.outgoingUrls = outgoingUrls;
    }

    /** The string form of HTML parse data is its extracted text. */
    @Override
    public String toString() {
        return text;
    }
}
Java
package edu.uci.ics.crawler4j.parser; public class ExtractedUrlAnchorPair { private String href; private String anchor; public String getHref() { return href; } public void setHref(String href) { this.href = href; } public String getAnchor() { return anchor; } public void setAnchor(String anchor) { this.anchor = anchor; } }
Java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package edu.uci.ics.crawler4j.parser;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.List;

import org.apache.log4j.Logger;
import org.apache.tika.metadata.DublinCore;
import org.apache.tika.metadata.Metadata;
import org.apache.tika.parser.ParseContext;
import org.apache.tika.parser.html.HtmlParser;

import edu.uci.ics.crawler4j.crawler.Configurable;
import edu.uci.ics.crawler4j.crawler.CrawlConfig;
import edu.uci.ics.crawler4j.crawler.Page;
import edu.uci.ics.crawler4j.url.URLCanonicalizer;
import edu.uci.ics.crawler4j.url.WebURL;
import edu.uci.ics.crawler4j.util.Util;

/**
 * Converts fetched page content into {@link ParseData}: binary pages get a
 * placeholder, plain-text pages get their text, and HTML pages are run
 * through Tika's HtmlParser to extract text, title and outgoing links.
 *
 * @author Yasser Ganjisaffar <lastname at gmail dot com>
 */
public class Parser extends Configurable {

    protected static final Logger logger = Logger.getLogger(Parser.class.getName());

    private HtmlParser htmlParser;
    private ParseContext parseContext;

    public Parser(CrawlConfig config) {
        super(config);
        htmlParser = new HtmlParser();
        parseContext = new ParseContext();
    }

    // Parses the page in place (setParseData on the page) and returns whether
    // parsing succeeded. contextURL is the base used to resolve relative links;
    // an HTML <base href> overrides it.
    public boolean parse(Page page, String contextURL) {
        // Binary content: no parsing, just a placeholder (or reject entirely).
        if (Util.hasBinaryContent(page.getContentType())) {
            if (!config.isIncludeBinaryContentInCrawling()) {
                return false;
            }
            page.setParseData(BinaryParseData.getInstance());
            return true;
        } else if (Util.hasPlainTextContent(page.getContentType())) {
            // Plain text: decode with the declared charset if one is known,
            // otherwise the platform default.
            try {
                TextParseData parseData = new TextParseData();
                if (page.getContentCharset() == null) {
                    parseData.setTextContent(new String(page.getContentData()));
                } else {
                    parseData.setTextContent(new String(page.getContentData(), page.getContentCharset()));
                }
                page.setParseData(parseData);
                return true;
            } catch (Exception e) {
                logger.error(e.getMessage() + ", while parsing: " + page.getWebURL().getURL());
            }
            return false;
        }
        // HTML: run through Tika. Parsing is best-effort — on failure we still
        // build parse data from whatever the handler collected.
        Metadata metadata = new Metadata();
        HtmlContentHandler contentHandler = new HtmlContentHandler();
        InputStream inputStream = null;
        try {
            inputStream = new ByteArrayInputStream(page.getContentData());
            htmlParser.parse(inputStream, contentHandler, metadata, parseContext);
        } catch (Exception e) {
            logger.error(e.getMessage() + ", while parsing: " + page.getWebURL().getURL());
        } finally {
            try {
                if (inputStream != null) {
                    inputStream.close();
                }
            } catch (IOException e) {
                logger.error(e.getMessage() + ", while parsing: " + page.getWebURL().getURL());
            }
        }
        // Back-fill the charset from what Tika detected, if the HTTP layer
        // didn't supply one.
        if (page.getContentCharset() == null) {
            page.setContentCharset(metadata.get("Content-Encoding"));
        }
        HtmlParseData parseData = new HtmlParseData();
        parseData.setText(contentHandler.getBodyText().trim());
        parseData.setTitle(metadata.get(DublinCore.TITLE));
        List<WebURL> outgoingUrls = new ArrayList<>();
        // A <base href> in the document replaces the caller-supplied context URL.
        String baseURL = contentHandler.getBaseUrl();
        if (baseURL != null) {
            contextURL = baseURL;
        }
        int urlCount = 0;
        for (ExtractedUrlAnchorPair urlAnchorPair : contentHandler.getOutgoingUrls()) {
            String href = urlAnchorPair.getHref();
            href = href.trim();
            if (href.length() == 0) {
                continue;
            }
            String hrefWithoutProtocol = href.toLowerCase();
            if (href.startsWith("http://")) {
                hrefWithoutProtocol = href.substring(7);
            }
            // Skip javascript:, mailto: and inline email addresses.
            if (!hrefWithoutProtocol.contains("javascript:") && !hrefWithoutProtocol.contains("mailto:")
                    && !hrefWithoutProtocol.contains("@")) {
                String url = URLCanonicalizer.getCanonicalURL(href, contextURL);
                if (url != null) {
                    WebURL webURL = new WebURL();
                    webURL.setURL(url);
                    webURL.setAnchor(urlAnchorPair.getAnchor());
                    outgoingUrls.add(webURL);
                    urlCount++;
                    // NOTE(review): this check runs after the add, so up to
                    // max + 1 links can be collected — looks like an off-by-one
                    // vs. getMaxOutgoingLinksToFollow; confirm intended semantics.
                    if (urlCount > config.getMaxOutgoingLinksToFollow()) {
                        break;
                    }
                }
            }
        }
        parseData.setOutgoingUrls(outgoingUrls);
        try {
            if (page.getContentCharset() == null) {
                parseData.setHtml(new String(page.getContentData()));
            } else {
                parseData.setHtml(new String(page.getContentData(), page.getContentCharset()));
            }
        } catch (UnsupportedEncodingException e) {
            e.printStackTrace();
            return false;
        }
        page.setParseData(parseData);
        return true;
    }
}
Java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package edu.uci.ics.crawler4j.parser;

/**
 * {@link ParseData} for pages served as plain text: just the decoded text.
 */
public class TextParseData implements ParseData {

    // The page's decoded text content.
    private String textContent;

    public void setTextContent(String textContent) {
        this.textContent = textContent;
    }

    public String getTextContent() {
        return textContent;
    }

    /** The string form of text parse data is the text itself. */
    @Override
    public String toString() {
        return textContent;
    }
}
Java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package edu.uci.ics.crawler4j.parser;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;

/**
 * SAX handler that collects outgoing links (a/area/link/img/iframe/frame/embed,
 * meta refresh/location), the first &lt;base href&gt;, and the body text of an
 * HTML document.
 */
public class HtmlContentHandler extends DefaultHandler {

    // Anchor text longer than this is truncated with an ellipsis.
    private final int MAX_ANCHOR_LENGTH = 100;

    private enum Element {
        A, AREA, LINK, IFRAME, FRAME, EMBED, IMG, BASE, META, BODY
    }

    // Maps lowercase tag names to the elements we care about; unknown tags map to null.
    private static class HtmlFactory {
        private static Map<String, Element> name2Element;
        static {
            name2Element = new HashMap<>();
            for (Element element : Element.values()) {
                name2Element.put(element.toString().toLowerCase(), element);
            }
        }

        public static Element getElement(String name) {
            return name2Element.get(name);
        }
    }

    private String base;            // first <base href> seen, or null
    private String metaRefresh;     // URL from <meta http-equiv="refresh">, or null
    private String metaLocation;    // URL from <meta http-equiv="location">, or null

    private boolean isWithinBodyElement;
    private StringBuilder bodyText;
    private List<ExtractedUrlAnchorPair> outgoingUrls;

    // Link currently being processed; its anchor text is filled on endElement.
    private ExtractedUrlAnchorPair curUrl = null;
    private boolean anchorFlag = false;
    private StringBuilder anchorText = new StringBuilder();

    public HtmlContentHandler() {
        isWithinBodyElement = false;
        bodyText = new StringBuilder();
        outgoingUrls = new ArrayList<>();
    }

    @Override
    public void startElement(String uri, String localName, String qName, Attributes attributes) throws SAXException {
        Element element = HtmlFactory.getElement(localName);

        if (element == Element.A || element == Element.AREA || element == Element.LINK) {
            String href = attributes.getValue("href");
            if (href != null) {
                anchorFlag = true;
                curUrl = new ExtractedUrlAnchorPair();
                curUrl.setHref(href);
                outgoingUrls.add(curUrl);
            }
            return;
        }

        if (element == Element.IMG) {
            String imgSrc = attributes.getValue("src");
            if (imgSrc != null) {
                curUrl = new ExtractedUrlAnchorPair();
                curUrl.setHref(imgSrc);
                outgoingUrls.add(curUrl);
            }
            return;
        }

        if (element == Element.IFRAME || element == Element.FRAME || element == Element.EMBED) {
            String src = attributes.getValue("src");
            if (src != null) {
                curUrl = new ExtractedUrlAnchorPair();
                curUrl.setHref(src);
                outgoingUrls.add(curUrl);
            }
            return;
        }

        if (element == Element.BASE) {
            // BUG FIX: was (base != null), which could never assign base since
            // base starts out null and is set nowhere else.
            // We only consider the first occurrence of the Base element.
            if (base == null) {
                String href = attributes.getValue("href");
                if (href != null) {
                    base = href;
                }
            }
            return;
        }

        if (element == Element.META) {
            String equiv = attributes.getValue("http-equiv");
            String content = attributes.getValue("content");
            if (equiv != null && content != null) {
                equiv = equiv.toLowerCase();

                // http-equiv="refresh" content="0;URL=http://foo.bar/..."
                if (equiv.equals("refresh") && (metaRefresh == null)) {
                    int pos = content.toLowerCase().indexOf("url=");
                    if (pos != -1) {
                        metaRefresh = content.substring(pos + 4);
                        // BUG FIX: only record a link when a URL was actually
                        // extracted; previously a pair with a null href was
                        // added when the content had no "url=" part.
                        curUrl = new ExtractedUrlAnchorPair();
                        curUrl.setHref(metaRefresh);
                        outgoingUrls.add(curUrl);
                    }
                }

                // http-equiv="location" content="http://foo.bar/..."
                if (equiv.equals("location") && (metaLocation == null)) {
                    metaLocation = content;
                    curUrl = new ExtractedUrlAnchorPair();
                    // BUG FIX: was setHref(metaRefresh) — a copy/paste error;
                    // the location URL is the content value stored in metaLocation.
                    curUrl.setHref(metaLocation);
                    outgoingUrls.add(curUrl);
                }
            }
            return;
        }

        if (element == Element.BODY) {
            isWithinBodyElement = true;
        }
    }

    @Override
    public void endElement(String uri, String localName, String qName) throws SAXException {
        Element element = HtmlFactory.getElement(localName);
        if (element == Element.A || element == Element.AREA || element == Element.LINK) {
            anchorFlag = false;
            if (curUrl != null) {
                // Collapse newlines/tabs, trim, and truncate overly long anchors.
                String anchor = anchorText.toString().replaceAll("\n", " ").replaceAll("\t", " ").trim();
                if (!anchor.isEmpty()) {
                    if (anchor.length() > MAX_ANCHOR_LENGTH) {
                        anchor = anchor.substring(0, MAX_ANCHOR_LENGTH) + "...";
                    }
                    curUrl.setAnchor(anchor);
                }
                anchorText.delete(0, anchorText.length());
            }
            curUrl = null;
        }
        if (element == Element.BODY) {
            isWithinBodyElement = false;
        }
    }

    @Override
    public void characters(char ch[], int start, int length) throws SAXException {
        if (isWithinBodyElement) {
            bodyText.append(ch, start, length);
            if (anchorFlag) {
                // Append the char range directly (avoids an intermediate String).
                anchorText.append(ch, start, length);
            }
        }
    }

    public String getBodyText() {
        return bodyText.toString();
    }

    public List<ExtractedUrlAnchorPair> getOutgoingUrls() {
        return outgoingUrls;
    }

    public String getBaseUrl() {
        return base;
    }
}
Java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package edu.uci.ics.crawler4j.parser;

/**
 * Contract for the data produced by a page parser. Implementations must
 * supply a meaningful string rendering of their parsed contents.
 */
public interface ParseData {

    /**
     * A human-readable rendering of the parsed data.
     */
    @Override
    String toString();
}
Java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package edu.uci.ics.crawler4j.fetcher;

import java.util.concurrent.TimeUnit;

import org.apache.http.impl.conn.PoolingClientConnectionManager;

/**
 * Background thread that wakes every five seconds and evicts expired and
 * long-idle (30s+) connections from the shared HTTP connection pool.
 * Stop it via {@link #shutdown()}.
 */
public class IdleConnectionMonitorThread extends Thread {

    private final PoolingClientConnectionManager connMgr;
    // volatile: written by the shutting-down thread, read by the monitor loop.
    private volatile boolean shutdown;

    public IdleConnectionMonitorThread(PoolingClientConnectionManager connMgr) {
        super("Connection Manager");
        this.connMgr = connMgr;
    }

    @Override
    public void run() {
        try {
            while (!shutdown) {
                synchronized (this) {
                    wait(5000);
                    // Close expired connections
                    connMgr.closeExpiredConnections();
                    // Optionally, close connections
                    // that have been idle longer than 30 sec
                    connMgr.closeIdleConnections(30, TimeUnit.SECONDS);
                }
            }
        } catch (InterruptedException ex) {
            // BUGFIX: restore the interrupt status before terminating so code
            // joining on this thread can still observe the interruption.
            Thread.currentThread().interrupt();
        }
    }

    /** Signals the monitor loop to exit and wakes it up immediately. */
    public void shutdown() {
        shutdown = true;
        synchronized (this) {
            notifyAll();
        }
    }
}
Java
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"; you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.uci.ics.crawler4j.fetcher; import org.apache.http.HttpStatus; /** * @author Yasser Ganjisaffar <lastname at gmail dot com> */ public class CustomFetchStatus { public static final int PageTooBig = 1001; public static final int FatalTransportError = 1005; public static final int UnknownError = 1006; public static String getStatusDescription(int code) { switch (code) { case HttpStatus.SC_OK: return "OK"; case HttpStatus.SC_CREATED: return "Created"; case HttpStatus.SC_ACCEPTED: return "Accepted"; case HttpStatus.SC_NO_CONTENT: return "No Content"; case HttpStatus.SC_MOVED_PERMANENTLY: return "Moved Permanently"; case HttpStatus.SC_MOVED_TEMPORARILY: return "Moved Temporarily"; case HttpStatus.SC_NOT_MODIFIED: return "Not Modified"; case HttpStatus.SC_BAD_REQUEST: return "Bad Request"; case HttpStatus.SC_UNAUTHORIZED: return "Unauthorized"; case HttpStatus.SC_FORBIDDEN: return "Forbidden"; case HttpStatus.SC_NOT_FOUND: return "Not Found"; case HttpStatus.SC_INTERNAL_SERVER_ERROR: return "Internal Server Error"; case HttpStatus.SC_NOT_IMPLEMENTED: return "Not Implemented"; case HttpStatus.SC_BAD_GATEWAY: return "Bad Gateway"; case HttpStatus.SC_SERVICE_UNAVAILABLE: return "Service 
Unavailable"; case HttpStatus.SC_CONTINUE: return "Continue"; case HttpStatus.SC_TEMPORARY_REDIRECT: return "Temporary Redirect"; case HttpStatus.SC_METHOD_NOT_ALLOWED: return "Method Not Allowed"; case HttpStatus.SC_CONFLICT: return "Conflict"; case HttpStatus.SC_PRECONDITION_FAILED: return "Precondition Failed"; case HttpStatus.SC_REQUEST_TOO_LONG: return "Request Too Long"; case HttpStatus.SC_REQUEST_URI_TOO_LONG: return "Request-URI Too Long"; case HttpStatus.SC_UNSUPPORTED_MEDIA_TYPE: return "Unsupported Media Type"; case HttpStatus.SC_MULTIPLE_CHOICES: return "Multiple Choices"; case HttpStatus.SC_SEE_OTHER: return "See Other"; case HttpStatus.SC_USE_PROXY: return "Use Proxy"; case HttpStatus.SC_PAYMENT_REQUIRED: return "Payment Required"; case HttpStatus.SC_NOT_ACCEPTABLE: return "Not Acceptable"; case HttpStatus.SC_PROXY_AUTHENTICATION_REQUIRED: return "Proxy Authentication Required"; case HttpStatus.SC_REQUEST_TIMEOUT: return "Request Timeout"; case PageTooBig: return "Page size was too big"; case FatalTransportError: return "Fatal transport error"; case UnknownError: return "Unknown error"; default: return "(" + code + ")"; } } }
Java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package edu.uci.ics.crawler4j.fetcher;

import java.io.IOException;
import java.io.InputStream;
import java.util.Date;
import java.util.zip.GZIPInputStream;

import org.apache.http.Header;
import org.apache.http.HeaderElement;
import org.apache.http.HttpEntity;
import org.apache.http.HttpException;
import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.HttpResponseInterceptor;
import org.apache.http.HttpStatus;
import org.apache.http.HttpVersion;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.params.ClientPNames;
import org.apache.http.client.params.CookiePolicy;
import org.apache.http.conn.params.ConnRoutePNames;
import org.apache.http.conn.scheme.PlainSocketFactory;
import org.apache.http.conn.scheme.Scheme;
import org.apache.http.conn.scheme.SchemeRegistry;
import org.apache.http.conn.ssl.SSLSocketFactory;
import org.apache.http.entity.HttpEntityWrapper;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.impl.conn.PoolingClientConnectionManager;
import org.apache.http.params.BasicHttpParams;
import org.apache.http.params.CoreConnectionPNames;
import org.apache.http.params.CoreProtocolPNames;
import org.apache.http.params.HttpParams;
import org.apache.http.params.HttpProtocolParamBean;
import org.apache.http.protocol.HttpContext;
import org.apache.log4j.Logger;

import edu.uci.ics.crawler4j.crawler.Configurable;
import edu.uci.ics.crawler4j.crawler.CrawlConfig;
import edu.uci.ics.crawler4j.url.URLCanonicalizer;
import edu.uci.ics.crawler4j.url.WebURL;

/**
 * Fetches pages over HTTP with a politeness delay between requests, transparent
 * gzip decompression, optional proxy support and connection pooling.
 *
 * @author Yasser Ganjisaffar <lastname at gmail dot com>
 */
public class PageFetcher extends Configurable {

    protected static final Logger logger = Logger.getLogger(PageFetcher.class);

    protected PoolingClientConnectionManager connectionManager;

    protected DefaultHttpClient httpClient;

    // Guards lastFetchTime so the politeness delay is enforced across threads.
    protected final Object mutex = new Object();

    protected long lastFetchTime = 0;

    protected IdleConnectionMonitorThread connectionMonitorThread = null;

    public PageFetcher(CrawlConfig config) {
        super(config);

        HttpParams params = new BasicHttpParams();
        HttpProtocolParamBean paramsBean = new HttpProtocolParamBean(params);
        paramsBean.setVersion(HttpVersion.HTTP_1_1);
        paramsBean.setContentCharset("UTF-8");
        paramsBean.setUseExpectContinue(false);

        params.setParameter(ClientPNames.COOKIE_POLICY, CookiePolicy.BROWSER_COMPATIBILITY);
        params.setParameter(CoreProtocolPNames.USER_AGENT, config.getUserAgentString());
        params.setIntParameter(CoreConnectionPNames.SO_TIMEOUT, config.getSocketTimeout());
        params.setIntParameter(CoreConnectionPNames.CONNECTION_TIMEOUT, config.getConnectionTimeout());

        // Redirects are handled manually in fetchHeader (see SC_MOVED_* below).
        params.setBooleanParameter("http.protocol.handle-redirects", false);

        SchemeRegistry schemeRegistry = new SchemeRegistry();
        schemeRegistry.register(new Scheme("http", 80, PlainSocketFactory.getSocketFactory()));
        if (config.isIncludeHttpsPages()) {
            schemeRegistry.register(new Scheme("https", 443, SSLSocketFactory.getSocketFactory()));
        }

        connectionManager = new PoolingClientConnectionManager(schemeRegistry);
        connectionManager.setMaxTotal(config.getMaxTotalConnections());
        connectionManager.setDefaultMaxPerRoute(config.getMaxConnectionsPerHost());

        httpClient = new DefaultHttpClient(connectionManager, params);

        if (config.getProxyHost() != null) {
            if (config.getProxyUsername() != null) {
                httpClient.getCredentialsProvider().setCredentials(
                        new AuthScope(config.getProxyHost(), config.getProxyPort()),
                        new UsernamePasswordCredentials(config.getProxyUsername(), config.getProxyPassword()));
            }
            HttpHost proxy = new HttpHost(config.getProxyHost(), config.getProxyPort());
            httpClient.getParams().setParameter(ConnRoutePNames.DEFAULT_PROXY, proxy);
        }

        // Transparently unwrap gzip-encoded response bodies.
        httpClient.addResponseInterceptor(new HttpResponseInterceptor() {
            @Override
            public void process(final HttpResponse response, final HttpContext context)
                    throws HttpException, IOException {
                HttpEntity entity = response.getEntity();
                // BUGFIX: responses such as 204/304 carry no entity; the
                // original dereferenced entity unconditionally and could NPE.
                if (entity == null) {
                    return;
                }
                Header contentEncoding = entity.getContentEncoding();
                if (contentEncoding != null) {
                    HeaderElement[] codecs = contentEncoding.getElements();
                    for (HeaderElement codec : codecs) {
                        if (codec.getName().equalsIgnoreCase("gzip")) {
                            response.setEntity(new GzipDecompressingEntity(response.getEntity()));
                            return;
                        }
                    }
                }
            }
        });

        if (connectionMonitorThread == null) {
            connectionMonitorThread = new IdleConnectionMonitorThread(connectionManager);
        }
        connectionMonitorThread.start();
    }

    /**
     * Fetches the headers (and entity handle) of the given URL, honoring the
     * configured politeness delay. The returned result carries the status
     * code, response headers, redirect target (if any) and the entity for the
     * caller to consume or discard.
     */
    public PageFetchResult fetchHeader(WebURL webUrl) {
        PageFetchResult fetchResult = new PageFetchResult();
        String toFetchURL = webUrl.getURL();
        HttpGet get = null;
        try {
            get = new HttpGet(toFetchURL);
            // Enforce the politeness delay between consecutive fetches.
            synchronized (mutex) {
                long now = (new Date()).getTime();
                if (now - lastFetchTime < config.getPolitenessDelay()) {
                    Thread.sleep(config.getPolitenessDelay() - (now - lastFetchTime));
                }
                lastFetchTime = (new Date()).getTime();
            }
            get.addHeader("Accept-Encoding", "gzip");
            HttpResponse response = httpClient.execute(get);
            fetchResult.setEntity(response.getEntity());
            fetchResult.setResponseHeaders(response.getAllHeaders());

            int statusCode = response.getStatusLine().getStatusCode();
            if (statusCode != HttpStatus.SC_OK) {
                if (statusCode != HttpStatus.SC_NOT_FOUND) {
                    if (statusCode == HttpStatus.SC_MOVED_PERMANENTLY
                            || statusCode == HttpStatus.SC_MOVED_TEMPORARILY) {
                        Header header = response.getFirstHeader("Location");
                        if (header != null) {
                            String movedToUrl = header.getValue();
                            movedToUrl = URLCanonicalizer.getCanonicalURL(movedToUrl, toFetchURL);
                            fetchResult.setMovedToUrl(movedToUrl);
                        }
                        fetchResult.setStatusCode(statusCode);
                        return fetchResult;
                    }
                    logger.info("Failed: " + response.getStatusLine().toString() + ", while fetching " + toFetchURL);
                }
                fetchResult.setStatusCode(response.getStatusLine().getStatusCode());
                return fetchResult;
            }

            // Record the final URL if the request URI differs canonically.
            fetchResult.setFetchedUrl(toFetchURL);
            String uri = get.getURI().toString();
            if (!uri.equals(toFetchURL)) {
                if (!URLCanonicalizer.getCanonicalURL(uri).equals(toFetchURL)) {
                    fetchResult.setFetchedUrl(uri);
                }
            }

            if (fetchResult.getEntity() != null) {
                long size = fetchResult.getEntity().getContentLength();
                if (size == -1) {
                    Header length = response.getLastHeader("Content-Length");
                    if (length == null) {
                        length = response.getLastHeader("Content-length");
                    }
                    if (length != null) {
                        size = Integer.parseInt(length.getValue());
                    } else {
                        size = -1;
                    }
                }
                if (size > config.getMaxDownloadSize()) {
                    fetchResult.setStatusCode(CustomFetchStatus.PageTooBig);
                    get.abort();
                    return fetchResult;
                }

                fetchResult.setStatusCode(HttpStatus.SC_OK);
                return fetchResult;
            }

            get.abort();
        } catch (IOException e) {
            logger.error("Fatal transport error: " + e.getMessage() + " while fetching " + toFetchURL
                    + " (link found in doc #" + webUrl.getParentDocid() + ")");
            fetchResult.setStatusCode(CustomFetchStatus.FatalTransportError);
            return fetchResult;
        } catch (IllegalStateException e) {
            // ignoring exceptions that occur because of not registering https
            // and other schemes
        } catch (Exception e) {
            if (e.getMessage() == null) {
                logger.error("Error while fetching " + webUrl.getURL());
            } else {
                logger.error(e.getMessage() + " while fetching " + webUrl.getURL());
            }
        } finally {
            try {
                // Release the connection if nobody is going to consume the entity.
                if (fetchResult.getEntity() == null && get != null) {
                    get.abort();
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
        fetchResult.setStatusCode(CustomFetchStatus.UnknownError);
        return fetchResult;
    }

    /** Shuts down the connection pool and stops the idle-connection monitor. */
    public synchronized void shutDown() {
        if (connectionMonitorThread != null) {
            connectionManager.shutdown();
            connectionMonitorThread.shutdown();
        }
    }

    public HttpClient getHttpClient() {
        return httpClient;
    }

    /** Entity wrapper that gunzips content on the fly. */
    private static class GzipDecompressingEntity extends HttpEntityWrapper {

        public GzipDecompressingEntity(final HttpEntity entity) {
            super(entity);
        }

        @Override
        public InputStream getContent() throws IOException, IllegalStateException {
            // the wrapped entity's getContent() decides about repeatability
            InputStream wrappedin = wrappedEntity.getContent();
            return new GZIPInputStream(wrappedin);
        }

        @Override
        public long getContentLength() {
            // length of ungzipped content is not known
            return -1;
        }
    }
}
Java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package edu.uci.ics.crawler4j.fetcher;

import java.io.EOFException;
import java.io.IOException;

import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.util.EntityUtils;
import org.apache.log4j.Logger;

import edu.uci.ics.crawler4j.crawler.Page;

/**
 * Holds the outcome of a single fetch: status code, response headers, the
 * (possibly unconsumed) entity, the final fetched URL and any redirect target.
 *
 * @author Yasser Ganjisaffar <lastname at gmail dot com>
 */
public class PageFetchResult {

    protected static final Logger logger = Logger.getLogger(PageFetchResult.class);

    protected int statusCode;
    protected HttpEntity entity = null;
    protected Header[] responseHeaders = null;
    protected String fetchedUrl = null;
    protected String movedToUrl = null;

    public int getStatusCode() {
        return statusCode;
    }

    public void setStatusCode(int statusCode) {
        this.statusCode = statusCode;
    }

    public HttpEntity getEntity() {
        return entity;
    }

    public void setEntity(HttpEntity entity) {
        this.entity = entity;
    }

    public Header[] getResponseHeaders() {
        return responseHeaders;
    }

    public void setResponseHeaders(Header[] responseHeaders) {
        this.responseHeaders = responseHeaders;
    }

    public String getFetchedUrl() {
        return fetchedUrl;
    }

    public void setFetchedUrl(String fetchedUrl) {
        this.fetchedUrl = fetchedUrl;
    }

    /**
     * Loads the fetched entity and response headers into the given page.
     *
     * @return true on success, false if reading the content failed.
     */
    public boolean fetchContent(Page page) {
        try {
            page.load(entity);
            page.setFetchResponseHeaders(responseHeaders);
            return true;
        } catch (Exception e) {
            logger.info("Exception while fetching content for: " + page.getWebURL().getURL() + " ["
                    + e.getMessage() + "]");
        }
        return false;
    }

    /**
     * Consumes and discards the entity (if any) so the underlying connection
     * can be returned to the pool. Benign stream errors are ignored.
     */
    public void discardContentIfNotConsumed() {
        try {
            if (entity != null) {
                EntityUtils.consume(entity);
            }
        } catch (EOFException e) {
            // We can ignore this exception. It can happen on compressed streams
            // which are not repeatable
        } catch (IOException e) {
            // We can ignore this exception. It can happen if the stream is
            // closed.
        } catch (Exception e) {
            // BUGFIX: log through the class logger instead of printStackTrace().
            logger.warn("Unexpected error while discarding content", e);
        }
    }

    public String getMovedToUrl() {
        return movedToUrl;
    }

    public void setMovedToUrl(String movedToUrl) {
        this.movedToUrl = movedToUrl;
    }
}
Java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package edu.uci.ics.crawler4j.url;

import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.util.HashMap;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;

/**
 * See http://en.wikipedia.org/wiki/URL_normalization for a reference Note: some
 * parts of the code are adapted from: http://stackoverflow.com/a/4057470/405418
 *
 * @author Yasser Ganjisaffar <lastname at gmail dot com>
 */
public class URLCanonicalizer {

    public static String getCanonicalURL(String url) {
        return getCanonicalURL(url, null);
    }

    /**
     * Resolves href against the optional context URL and returns its canonical
     * form (lower-cased scheme/host, normalized path, sorted query parameters,
     * default port dropped), or null if the URL is invalid.
     */
    public static String getCanonicalURL(String href, String context) {
        try {
            URL canonicalURL = new URL(UrlResolver.resolveUrl(context == null ? "" : context, href));

            String host = canonicalURL.getHost().toLowerCase();
            // BUGFIX: the original compared with == (reference equality), which
            // is always false for a freshly built string, so invalid URLs were
            // never rejected here.
            if (host.isEmpty()) {
                // This is an invalid Url.
                return null;
            }

            String path = canonicalURL.getPath();

            /*
             * Normalize: no empty segments (i.e., "//"), no segments equal to
             * ".", and no segments equal to ".." that are preceded by a segment
             * not equal to "..".
             */
            path = new URI(path).normalize().toString();

            /*
             * Convert '//' -> '/'
             */
            int idx = path.indexOf("//");
            while (idx >= 0) {
                path = path.replace("//", "/");
                idx = path.indexOf("//");
            }

            /*
             * Drop starting '/../'
             */
            while (path.startsWith("/../")) {
                path = path.substring(3);
            }

            /*
             * Trim
             */
            path = path.trim();

            final SortedMap<String, String> params = createParameterMap(canonicalURL.getQuery());
            final String queryString;
            if (params != null && params.size() > 0) {
                String canonicalParams = canonicalize(params);
                queryString = (canonicalParams.isEmpty() ? "" : "?" + canonicalParams);
            } else {
                queryString = "";
            }

            /*
             * Add starting slash if needed
             */
            if (path.length() == 0) {
                path = "/" + path;
            }

            /*
             * Drop default port: example.com:80 -> example.com
             */
            int port = canonicalURL.getPort();
            if (port == canonicalURL.getDefaultPort()) {
                port = -1;
            }

            String protocol = canonicalURL.getProtocol().toLowerCase();
            String pathAndQueryString = normalizePath(path) + queryString;

            URL result = new URL(protocol, host, port, pathAndQueryString);
            return result.toExternalForm();

        } catch (MalformedURLException ex) {
            return null;
        } catch (URISyntaxException ex) {
            return null;
        }
    }

    /**
     * Takes a query string, separates the constituent name-value pairs, and
     * stores them in a SortedMap ordered by lexicographical order.
     *
     * @return Null if there is no query string.
     */
    private static SortedMap<String, String> createParameterMap(final String queryString) {
        if (queryString == null || queryString.isEmpty()) {
            return null;
        }

        final String[] pairs = queryString.split("&");
        final Map<String, String> params = new HashMap<>(pairs.length);

        for (final String pair : pairs) {
            if (pair.length() == 0) {
                continue;
            }

            String[] tokens = pair.split("=", 2);
            switch (tokens.length) {
            case 1:
                if (pair.charAt(0) == '=') {
                    params.put("", tokens[0]);
                } else {
                    params.put(tokens[0], "");
                }
                break;
            case 2:
                params.put(tokens[0], tokens[1]);
                break;
            }
        }
        return new TreeMap<>(params);
    }

    /**
     * Canonicalize the query string.
     *
     * @param sortedParamMap
     *            Parameter name-value pairs in lexicographical order.
     * @return Canonical form of query string.
     */
    private static String canonicalize(final SortedMap<String, String> sortedParamMap) {
        if (sortedParamMap == null || sortedParamMap.isEmpty()) {
            return "";
        }

        // StringBuilder instead of the legacy synchronized StringBuffer;
        // this method is single-threaded over a local buffer.
        final StringBuilder sb = new StringBuilder(100);
        for (Map.Entry<String, String> pair : sortedParamMap.entrySet()) {
            final String key = pair.getKey().toLowerCase();
            // Session ids are dropped so equivalent pages canonicalize equally.
            if (key.equals("jsessionid") || key.equals("phpsessid") || key.equals("aspsessionid")) {
                continue;
            }
            if (sb.length() > 0) {
                sb.append('&');
            }
            sb.append(percentEncodeRfc3986(pair.getKey()));
            if (!pair.getValue().isEmpty()) {
                sb.append('=');
                sb.append(percentEncodeRfc3986(pair.getValue()));
            }
        }
        return sb.toString();
    }

    /**
     * Percent-encode values according the RFC 3986. The built-in Java
     * URLEncoder does not encode according to the RFC, so we make the extra
     * replacements.
     *
     * @param string
     *            Decoded string.
     * @return Encoded string per RFC 3986.
     */
    private static String percentEncodeRfc3986(String string) {
        try {
            string = string.replace("+", "%2B");
            string = URLDecoder.decode(string, "UTF-8");
            string = URLEncoder.encode(string, "UTF-8");
            return string.replace("+", "%20").replace("*", "%2A").replace("%7E", "~");
        } catch (Exception e) {
            return string;
        }
    }

    private static String normalizePath(final String path) {
        return path.replace("%7E", "~").replace(" ", "%20");
    }
}
Java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package edu.uci.ics.crawler4j.url;

import java.io.Serializable;

import com.sleepycat.persist.model.Entity;
import com.sleepycat.persist.model.PrimaryKey;

/**
 * A crawl-frontier URL: the URL string itself plus crawl metadata (document
 * ids, parent link, depth, priority) and derived parts (domain, subdomain,
 * path). Persisted in Berkeley DB with the URL string as primary key.
 *
 * @author Yasser Ganjisaffar <lastname at gmail dot com>
 */
@Entity
public class WebURL implements Serializable {

    private static final long serialVersionUID = 1L;

    @PrimaryKey
    private String url;

    private int docid;
    private int parentDocid;
    private String parentUrl;
    private short depth;
    private String domain;
    private String subDomain;
    private String path;
    private String anchor;
    private byte priority;

    /**
     * Returns the unique document id assigned to this Url.
     */
    public int getDocid() {
        return docid;
    }

    public void setDocid(int docid) {
        this.docid = docid;
    }

    @Override
    public int hashCode() {
        return url.hashCode();
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }

        WebURL otherUrl = (WebURL) o;
        return url != null && url.equals(otherUrl.getURL());
    }

    @Override
    public String toString() {
        return url;
    }

    /**
     * Returns the Url string
     */
    public String getURL() {
        return url;
    }

    /**
     * Sets the URL and derives domain, subdomain and path from it.
     */
    public void setURL(String url) {
        this.url = url;

        int domainStartIdx = url.indexOf("//") + 2;
        int domainEndIdx = url.indexOf('/', domainStartIdx);
        // BUGFIX: URLs with no path component (e.g. "http://example.com")
        // yielded domainEndIdx == -1 and a StringIndexOutOfBoundsException in
        // the substring below; treat the end of the string as the domain end.
        domainEndIdx = (domainEndIdx > domainStartIdx) ? domainEndIdx : url.length();
        domain = url.substring(domainStartIdx, domainEndIdx);
        subDomain = "";
        String[] parts = domain.split("\\.");
        if (parts.length > 2) {
            domain = parts[parts.length - 2] + "." + parts[parts.length - 1];
            int limit = 2;
            // Handle registered suffixes like "co.uk" by keeping one more label.
            if (TLDList.getInstance().contains(domain)) {
                domain = parts[parts.length - 3] + "." + domain;
                limit = 3;
            }
            for (int i = 0; i < parts.length - limit; i++) {
                if (subDomain.length() > 0) {
                    subDomain += ".";
                }
                subDomain += parts[i];
            }
        }
        path = url.substring(domainEndIdx);
        int pathEndIdx = path.indexOf('?');
        if (pathEndIdx >= 0) {
            path = path.substring(0, pathEndIdx);
        }
    }

    /**
     * Returns the unique document id of the parent page. The parent page is the
     * page in which the Url of this page is first observed.
     */
    public int getParentDocid() {
        return parentDocid;
    }

    public void setParentDocid(int parentDocid) {
        this.parentDocid = parentDocid;
    }

    /**
     * Returns the url of the parent page. The parent page is the page in which
     * the Url of this page is first observed.
     */
    public String getParentUrl() {
        return parentUrl;
    }

    public void setParentUrl(String parentUrl) {
        this.parentUrl = parentUrl;
    }

    /**
     * Returns the crawl depth at which this Url is first observed. Seed Urls
     * are at depth 0. Urls that are extracted from seed Urls are at depth 1,
     * etc.
     */
    public short getDepth() {
        return depth;
    }

    public void setDepth(short depth) {
        this.depth = depth;
    }

    /**
     * Returns the domain of this Url. For 'http://www.example.com/sample.htm',
     * domain will be 'example.com'
     */
    public String getDomain() {
        return domain;
    }

    public String getSubDomain() {
        return subDomain;
    }

    /**
     * Returns the path of this Url. For 'http://www.example.com/sample.htm',
     * domain will be 'sample.htm'
     */
    public String getPath() {
        return path;
    }

    public void setPath(String path) {
        this.path = path;
    }

    /**
     * Returns the anchor string. For example, in <a href="example.com">A sample anchor</a>
     * the anchor string is 'A sample anchor'
     */
    public String getAnchor() {
        return anchor;
    }

    public void setAnchor(String anchor) {
        this.anchor = anchor;
    }

    /**
     * Returns the priority for crawling this URL.
     * A lower number results in higher priority.
     */
    public byte getPriority() {
        return priority;
    }

    public void setPriority(byte priority) {
        this.priority = priority;
    }
}
Java
/**
 * This class is adopted from Htmlunit with the following copyright:
 *
 * Copyright (c) 2002-2012 Gargoyle Software Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package edu.uci.ics.crawler4j.url;

/**
 * Resolves relative URLs against a base URL using the algorithm of
 * <a href="http://www.faqs.org/rfcs/rfc1808.html">RFC 1808</a> (Sections 2.4 and 4).
 * All methods are static and the class holds no state.
 *
 * NOTE(review): utility class without a private constructor — cannot be changed
 * here without touching code tokens, but worth fixing upstream.
 */
public final class UrlResolver {
  /**
   * Resolves a given relative URL against a base URL. See
   * <a href="http://www.faqs.org/rfcs/rfc1808.html">RFC1808</a>
   * Section 4 for more details.
   *
   * @param baseUrl The base URL in which to resolve the specification.
   * @param relativeUrl The relative URL to resolve against the base URL.
   * @return the resolved specification.
   * @throws IllegalArgumentException if either argument is null.
   */
  public static String resolveUrl(final String baseUrl, final String relativeUrl) {
    if (baseUrl == null) {
      throw new IllegalArgumentException("Base URL must not be null");
    }
    if (relativeUrl == null) {
      throw new IllegalArgumentException("Relative URL must not be null");
    }
    // Both inputs are trimmed before parsing; resolution happens on parsed parts.
    final Url url = resolveUrl(parseUrl(baseUrl.trim()), relativeUrl.trim());
    return url.toString();
  }

  /**
   * Returns the index within {@code s} of the first occurrence of
   * {@code searchChar} in the half-open range [beginIndex, endIndex),
   * or {@code -1} if the character does not occur there.
   *
   * @param s the string to search
   * @param searchChar the character to search for
   * @param beginIndex the index at which to start the search (inclusive)
   * @param endIndex the index at which to stop the search (exclusive)
   * @return the index of the first occurrence, or -1
   */
  private static int indexOf(final String s, final char searchChar, final int beginIndex, final int endIndex) {
    for (int i = beginIndex; i < endIndex; i++) {
      if (s.charAt(i) == searchChar) {
        return i;
      }
    }
    return -1;
  }

  /**
   * Parses a specification into its components using the algorithm of
   * RFC 1808, Section 2.4 ("Parsing a URL"). The parse string is consumed
   * from both ends via the {@code startIndex}/{@code endIndex} cursors;
   * nothing is copied until a component is extracted.
   *
   * @param spec The specification to parse.
   * @return the parsed specification.
   */
  private static Url parseUrl(final String spec) {
    final Url url = new Url();
    int startIndex = 0;
    int endIndex = spec.length();

    // Section 2.4.1: fragment — everything after the first "#" (the fragment
    // is not strictly part of the URL, but is carried along for re-assembly).
    final int crosshatchIndex = indexOf(spec, '#', startIndex, endIndex);
    if (crosshatchIndex >= 0) {
      url.fragment_ = spec.substring(crosshatchIndex + 1, endIndex);
      endIndex = crosshatchIndex;
    }

    // Section 2.4.2: scheme — characters before the first ":" form the scheme
    // only if they are all legal scheme characters (letter first, then
    // alphanumerics plus "+", ".", "-"); otherwise the colon is left in place.
    final int colonIndex = indexOf(spec, ':', startIndex, endIndex);
    if (colonIndex > 0) {
      final String scheme = spec.substring(startIndex, colonIndex);
      if (isValidScheme(scheme)) {
        url.scheme_ = scheme;
        startIndex = colonIndex + 1;
      }
    }

    // Section 2.4.3: network location/login — after a leading "//", up to but
    // not including the next "/". Beyond RFC 1808, "?" and ";" are also
    // accepted later as net_loc delimiters when no "/" follows.
    final int locationStartIndex;
    int locationEndIndex;
    if (spec.startsWith("//", startIndex)) {
      locationStartIndex = startIndex + 2;
      locationEndIndex = indexOf(spec, '/', locationStartIndex, endIndex);
      if (locationEndIndex >= 0) {
        startIndex = locationEndIndex;
      }
    } else {
      locationStartIndex = -1;
      locationEndIndex = -1;
    }

    // Section 2.4.4: query — everything after the first "?". If the net_loc
    // was started but never closed by "/", the "?" closes it.
    final int questionMarkIndex = indexOf(spec, '?', startIndex, endIndex);
    if (questionMarkIndex >= 0) {
      if ((locationStartIndex >= 0) && (locationEndIndex < 0)) {
        locationEndIndex = questionMarkIndex;
        startIndex = questionMarkIndex;
      }
      url.query_ = spec.substring(questionMarkIndex + 1, endIndex);
      endIndex = questionMarkIndex;
    }

    // Section 2.4.5: parameters — everything after the first ";". Same
    // net_loc-closing rule as for the query above.
    final int semicolonIndex = indexOf(spec, ';', startIndex, endIndex);
    if (semicolonIndex >= 0) {
      if ((locationStartIndex >= 0) && (locationEndIndex < 0)) {
        locationEndIndex = semicolonIndex;
        startIndex = semicolonIndex;
      }
      url.parameters_ = spec.substring(semicolonIndex + 1, endIndex);
      endIndex = semicolonIndex;
    }

    // Section 2.4.6: path — whatever remains, including any leading "/"
    // (kept so later steps can distinguish absolute from relative paths).
    if ((locationStartIndex >= 0) && (locationEndIndex < 0)) {
      // Net_loc was opened and never closed: the whole remainder is net_loc.
      locationEndIndex = endIndex;
    } else if (startIndex < endIndex) {
      url.path_ = spec.substring(startIndex, endIndex);
    }

    // Assign the network location/login, if one was delimited above.
    if ((locationStartIndex >= 0) && (locationEndIndex >= 0)) {
      url.location_ = spec.substring(locationStartIndex, locationEndIndex);
    }
    return url;
  }

  /**
   * Returns true if the specified string is a valid scheme name per
   * RFC 1808: a letter followed by letters, digits, ".", "+", or "-".
   */
  private static boolean isValidScheme(final String scheme) {
    final int length = scheme.length();
    if (length < 1) {
      return false;
    }
    char c = scheme.charAt(0);
    if (!Character.isLetter(c)) {
      return false;
    }
    for (int i = 1; i < length; i++) {
      c = scheme.charAt(i);
      if (!Character.isLetterOrDigit(c) && c != '.'
          && c != '+' && c != '-') {
        return false;
      }
    }
    return true;
  }

  /**
   * Resolves a relative URL against an already-parsed base URL using the
   * algorithm of RFC 1808, Section 4. Steps are labeled inline; the order
   * of the inheritance/short-circuit steps is significant and must not be
   * rearranged.
   *
   * @param baseUrl The base URL in which to resolve the specification.
   * @param relativeUrl The relative URL to resolve against the base URL.
   * @return the resolved specification.
   */
  private static Url resolveUrl(final Url baseUrl, final String relativeUrl) {
    final Url url = parseUrl(relativeUrl);

    // Step 1: no base URL => the embedded URL is taken as absolute.
    if (baseUrl == null) {
      return url;
    }

    // Step 2a: entirely empty embedded URL inherits the whole base URL.
    if (relativeUrl.length() == 0) {
      return new Url(baseUrl);
    }
    // Step 2b: an embedded URL with its own scheme is absolute — done.
    if (url.scheme_ != null) {
      return url;
    }
    // Step 2c: otherwise inherit the base URL's scheme.
    url.scheme_ = baseUrl.scheme_;

    // Step 3: a non-empty embedded net_loc means skip to step 7 (return);
    // otherwise inherit the base URL's net_loc.
    if (url.location_ != null) {
      return url;
    }
    url.location_ = baseUrl.location_;

    // Step 4: a path starting with "/" is absolute — skip to step 7.
    if ((url.path_ != null) && ((url.path_.length() > 0) && ('/' == url.path_.charAt(0)))) {
      url.path_ = removeLeadingSlashPoints(url.path_);
      return url;
    }

    // Step 5: empty embedded path inherits base path, then params (5a) and
    // query (5b), each short-circuiting if the embedded URL supplies its own.
    if (url.path_ == null) {
      url.path_ = baseUrl.path_;
      if (url.parameters_ != null) {
        return url;
      }
      url.parameters_ = baseUrl.parameters_;
      if (url.query_ != null) {
        return url;
      }
      url.query_ = baseUrl.query_;
      return url;
    }

    // Step 6: drop the last segment of the base path (everything after the
    // rightmost "/"), append the embedded path, then normalize it.
    // Note: the resulting path always begins with "/" (either the kept base
    // prefix ends with "/", or "/" is used when the base has no path).
    final String basePath = baseUrl.path_;
    String path = "";
    if (basePath != null) {
      final int lastSlashIndex = basePath.lastIndexOf('/');
      if (lastSlashIndex >= 0) {
        path = basePath.substring(0, lastSlashIndex + 1);
      }
    } else {
      path = "/";
    }
    path = path.concat(url.path_);

    // Step 6a: remove all "./" segments.
    int pathSegmentIndex;
    while ((pathSegmentIndex = path.indexOf("/./")) >= 0) {
      path = path.substring(0, pathSegmentIndex + 1).concat(path.substring(pathSegmentIndex + 3));
    }
    // Step 6b: remove a trailing "." segment.
    if (path.endsWith("/.")) {
      path = path.substring(0, path.length() - 1);
    }
    // Step 6c: iteratively remove "<segment>/../" (leftmost match first).
    // NOTE(review): the `continue` branch does not modify `path`, so a match
    // whose prefix contains no "/" would loop forever. This appears
    // unreachable because `path` starts with "/" here (see step 6 note), but
    // confirm before exposing this method to arbitrary inputs.
    // NOTE(review): pathSegment.substring(slashIndex) keeps the leading "/",
    // so it can never equal ".." — the guard against collapsing a ".."
    // segment looks ineffective (inherited from HtmlUnit); verify intent.
    while ((pathSegmentIndex = path.indexOf("/../")) > 0) {
      final String pathSegment = path.substring(0, pathSegmentIndex);
      final int slashIndex = pathSegment.lastIndexOf('/');
      if (slashIndex < 0) {
        continue;
      }
      if (!"..".equals(pathSegment.substring(slashIndex))) {
        path = path.substring(0, slashIndex + 1).concat(path.substring(pathSegmentIndex + 4));
      }
    }
    // Step 6d: remove a trailing "<segment>/.." (segment not equal to "..").
    if (path.endsWith("/..")) {
      final String pathSegment = path.substring(0, path.length() - 3);
      final int slashIndex = pathSegment.lastIndexOf('/');
      if (slashIndex >= 0) {
        path = path.substring(0, slashIndex + 1);
      }
    }

    path = removeLeadingSlashPoints(path);

    url.path_ = path;

    // Step 7: recombination happens in Url#toString().
    return url;
  }

  /**
   * Removes leading "/.." occurrences, matching browser behavior
   * (this normalization is not part of RFC 1808).
   */
  private static String removeLeadingSlashPoints(String path) {
    while (path.startsWith("/..")) {
      path = path.substring(3);
    }
    return path;
  }

  /**
   * Class <tt>Url</tt> represents a Uniform Resource Locator broken into the
   * RFC 1808 components: scheme, net_loc, path, params, query, fragment.
   * Any component may be null (absent).
   *
   * @author Martin Tamme
   */
  private static class Url {

    String scheme_;
    String location_;
    String path_;
    String parameters_;
    String query_;
    String fragment_;

    /**
     * Creates an empty <tt>Url</tt> object (all components absent).
     */
    public Url() {
    }

    /**
     * Creates a <tt>Url</tt> object as a shallow copy of the specified
     * <tt>Url</tt> object (components are immutable Strings, so a shallow
     * copy is a full copy).
     *
     * @param url a <tt>Url</tt> object.
     */
    public Url(final Url url) {
      scheme_ = url.scheme_;
      location_ = url.location_;
      path_ = url.path_;
      parameters_ = url.parameters_;
      query_ = url.query_;
      fragment_ = url.fragment_;
    }

    /**
     * Recombines the components into a string form (RFC 1808, Step 7):
     * scheme ":", "//" net_loc, path, ";" params, "?" query, "#" fragment —
     * each emitted only when present.
     *
     * @return a string representation of the <tt>Url</tt> object.
     */
    @Override
    public String toString() {
      final StringBuilder sb = new StringBuilder();

      if (scheme_ != null) {
        sb.append(scheme_);
        sb.append(':');
      }
      if (location_ != null) {
        sb.append("//");
        sb.append(location_);
      }
      if (path_ != null) {
        sb.append(path_);
      }
      if (parameters_ != null) {
        sb.append(';');
        sb.append(parameters_);
      }
      if (query_ != null) {
        sb.append('?');
        sb.append(query_);
      }
      if (fragment_ != null) {
        sb.append('#');
        sb.append(fragment_);
      }
      return sb.toString();
    }
  }
}
Java
package edu.uci.ics.crawler4j.url;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.HashSet;
import java.util.Set;

/**
 * Singleton holding the set of known top-level domain names, loaded once from
 * the classpath resource {@code tld-names.txt}. Each line is trimmed; blank
 * lines and lines starting with {@code //} are skipped.
 *
 * Thread-safety: the set is populated fully during class initialization and
 * never mutated afterwards, so reads via {@link #contains(String)} are safe.
 */
public class TLDList {

    private static final String TLD_NAMES_FILE_NAME = "tld-names.txt";

    private final Set<String> tldSet = new HashSet<>();

    private static final TLDList instance = new TLDList();

    private TLDList() {
        InputStream stream = this.getClass().getClassLoader().getResourceAsStream(TLD_NAMES_FILE_NAME);
        if (stream == null) {
            // Fail fast with an exception rather than System.exit(-1): exiting the
            // whole JVM from a static initializer is hostile to embedding
            // applications and untestable. Callers see an ExceptionInInitializerError.
            throw new IllegalStateException("Couldn't find " + TLD_NAMES_FILE_NAME + " on the classpath");
        }
        // try-with-resources guarantees the reader (and underlying stream) is
        // closed even on failure; the charset is pinned so parsing does not
        // depend on the platform default encoding.
        try (BufferedReader reader =
                 new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                line = line.trim();
                if (line.isEmpty() || line.startsWith("//")) {
                    continue; // skip blanks and comment lines
                }
                tldSet.add(line);
            }
        } catch (IOException e) {
            // Previously printStackTrace() left a silently incomplete set;
            // an explicit failure is safer for a correctness-critical lookup table.
            throw new IllegalStateException("Error while reading " + TLD_NAMES_FILE_NAME, e);
        }
    }

    /** Returns the lazily-class-initialized singleton instance. */
    public static TLDList getInstance() {
        return instance;
    }

    /**
     * Returns {@code true} if the given string is a known TLD
     * (exact, case-sensitive match against the loaded list).
     */
    public boolean contains(String str) {
        return tldSet.contains(str);
    }
}
Java
package com.xyz.practice.jdbc.databasemetadata;

import java.io.Serializable;

/**
 * Serializable value object for one schema entry as reported by
 * {@code DatabaseMetaData.getSchemas()}: the schema name plus its
 * owning catalog (which a driver may report as null).
 */
public class Schema implements Serializable {

    private static final long serialVersionUID = -6086978758296528123L;

    /** Schema name (TABLE_SCHEM). */
    private String tableSchem;

    /** Catalog name (TABLE_CATALOG); may be null. */
    private String tableCatalog;

    Schema() {
    }

    String getTableSchem() {
        return tableSchem;
    }

    void setTableSchem(String tableSchem) {
        this.tableSchem = tableSchem;
    }

    String getTableCatalog() {
        return tableCatalog;
    }

    void setTableCatalog(String tableCatalog) {
        this.tableCatalog = tableCatalog;
    }
}
Java
package com.xyz.practice.jdbc.databasemetadata; import java.io.Serializable; public class Column implements Serializable { private static final long serialVersionUID = 4257926755376078999L; Column() { } /** * table catalog (may be null) */ private String tableCat; /** * table schema (may be null) */ private String tableSchem; /** * table name */ private String tableName; /** * column name */ private String columnName; /** * SQL type from java.sql.Types */ private int dataType; /** * Data source dependent type name, for a UDT the type name is fully qualified */ private String typeName; /** * column size */ private int columnSize; /** * the number of fractional digits. Null is returned for data types where DECIMAL_DIGITS is not * applicable. */ private int decimalDigits; /** * Radix (typically either 10 or 2) */ private int numPrecRadix; /** * is NULL allowed columnNoNulls - might not allow NULL values columnNullable - definitely * allows NULL values columnNullableUnknown - nullability unknown */ private int nullable; /** * comment describing column (may be null) */ private String remarks; /** * efault value for the column, which should be interpreted as a string when the value is * enclosed in single quotes (may be null) */ private String columnDef; /** * for char types the maximum number of bytes in the column */ private int charOctetLength; /** * index of column in table (starting at 1) */ private int originalPosition; /** * ISO rules are used to determine the nullability for a column. 
YES --- if the parameter can * include NULLs NO --- if the parameter cannot include NULLs empty string --- if the * nullability for the parameter is unknown */ private String isNullable; /** * catalog of table that is the scope of a reference attribute (null if DATA_TYPE isn't REF) */ private String scopeCatlog; /** * schema of table that is the scope of a reference attribute (null if the DATA_TYPE isn't REF) */ private String scopeSchema; /** * table name that this the scope of a reference attribure (null if the DATA_TYPE isn't REF) */ private String scopeTable; /** * source type of a distinct type or user-generated Ref type, SQL type from java.sql.Types (null * if DATA_TYPE isn't DISTINCT or user-generated REF) */ private short sourceDataType; /** * Indicates whether this column is auto incremented YES --- if the column is auto incremented * NO --- if the column is not auto incremented empty string --- if it cannot be determined * whether the column is auto incremented parameter is unknown */ private String isAutoincrement; String getTableCat() { return tableCat; } void setTableCat( String tableCat ) { this.tableCat = tableCat; } String getTableSchem() { return tableSchem; } void setTableSchem( String tableSchem ) { this.tableSchem = tableSchem; } String getTableName() { return tableName; } void setTableName( String tableName ) { this.tableName = tableName; } String getColumnName() { return columnName; } void setColumnName( String columnName ) { this.columnName = columnName; } int getDataType() { return dataType; } void setDataType( int dataType ) { this.dataType = dataType; } String getTypeName() { return typeName; } void setTypeName( String typeName ) { this.typeName = typeName; } int getColumnSize() { return columnSize; } void setColumnSize( int columnSize ) { this.columnSize = columnSize; } int getDecimalDigits() { return decimalDigits; } void setDecimalDigits( int decimalDigits ) { this.decimalDigits = decimalDigits; } int getNumPrecRadix() { return 
numPrecRadix; } void setNumPrecRadix( int numPrecRadix ) { this.numPrecRadix = numPrecRadix; } int getNullable() { return nullable; } void setNullable( int nullable ) { this.nullable = nullable; } String getRemarks() { return remarks; } void setRemarks( String remarks ) { this.remarks = remarks; } String getColumnDef() { return columnDef; } void setColumnDef( String columnDef ) { this.columnDef = columnDef; } int getCharOctetLength() { return charOctetLength; } void setCharOctetLength( int charOctetLength ) { this.charOctetLength = charOctetLength; } int getOriginalPosition() { return originalPosition; } void setOriginalPosition( int originalPosition ) { this.originalPosition = originalPosition; } String getIsNullable() { return isNullable; } void setIsNullable( String isNullable ) { this.isNullable = isNullable; } String getScopeCatlog() { return scopeCatlog; } void setScopeCatlog( String scopeCatlog ) { this.scopeCatlog = scopeCatlog; } String getScopeSchema() { return scopeSchema; } void setScopeSchema( String scopeSchema ) { this.scopeSchema = scopeSchema; } String getScopeTable() { return scopeTable; } void setScopeTable( String scopeTable ) { this.scopeTable = scopeTable; } short getSourceDataType() { return sourceDataType; } void setSourceDataType( short sourceDataType ) { this.sourceDataType = sourceDataType; } String getIsAutoincrement() { return isAutoincrement; } void setIsAutoincrement( String isAutoincrement ) { this.isAutoincrement = isAutoincrement; } }
Java
package com.xyz.practice.jdbc.databasemetadata;

import java.io.Serializable;

/**
 * Serializable value object mirroring one row of
 * {@code DatabaseMetaData.getFunctionColumns()}: the JDBC description of a
 * function parameter or return/result column. Field names follow the JDBC
 * result-set column names. Accessors are package-private.
 */
public class FunctionColumn implements Serializable {

    private static final long serialVersionUID = 2416865956602345916L;

    FunctionColumn() {
    }

    /** Function catalog (may be null). */
    private String functionCat;
    /** Function schema (may be null). */
    private String functionSchem;
    /** Function name; the name used to invoke the function. */
    private String functionName;
    /** Column/parameter name. */
    private String columnName;
    /**
     * Kind of column/parameter: functionColumnUnknown, functionColumnIn,
     * functionColumnInOut, functionColumnOut, functionColumnReturn (function
     * return value), or functionColumnResult (ResultSet column).
     */
    private short columnType;
    /** SQL type from java.sql.Types. */
    private int dataType;
    /** SQL type name; for a UDT the type name is fully qualified. */
    private String typeName;
    /** Precision. */
    private int precision;
    /** Length in bytes of data. */
    private int length;
    /** Scale; meaningless for types where SCALE is not applicable. */
    private short scale;
    /** Radix. */
    private short radix;
    /**
     * Nullability: functionNoNulls, functionNullable, or
     * functionNullableUnknown.
     */
    private short nullable;
    /** Comment describing the column/parameter. */
    private String remarks;
    /**
     * Maximum length of binary and character based parameters or columns;
     * for any other data type the reported value is NULL.
     */
    private int charOctetLength;
    /**
     * Ordinal position, starting from 1, of input and output parameters;
     * 0 when the row describes the function's return value. For result-set
     * columns, the 1-based position of the column in the result set.
     */
    private int ordinalPosition;
    /**
     * ISO nullability: "YES" if NULLs are allowed, "NO" if not, or the empty
     * string when unknown.
     */
    private String isNullable;
    /**
     * Name that uniquely identifies this function within its schema; user-
     * specified or DBMS-generated, and may differ from FUNCTION_NAME
     * (e.g. for overloaded functions).
     */
    private String specificName;

    String getFunctionCat() { return functionCat; }

    void setFunctionCat(String functionCat) { this.functionCat = functionCat; }

    String getFunctionSchem() { return functionSchem; }

    void setFunctionSchem(String functionSchem) { this.functionSchem = functionSchem; }

    String getFunctionName() { return functionName; }

    void setFunctionName(String functionName) { this.functionName = functionName; }

    String getColumnName() { return columnName; }

    void setColumnName(String columnName) { this.columnName = columnName; }

    short getColumnType() { return columnType; }

    void setColumnType(short columnType) { this.columnType = columnType; }

    int getDataType() { return dataType; }

    void setDataType(int dataType) { this.dataType = dataType; }

    String getTypeName() { return typeName; }

    void setTypeName(String typeName) { this.typeName = typeName; }

    int getPrecision() { return precision; }

    void setPrecision(int precision) { this.precision = precision; }

    int getLength() { return length; }

    void setLength(int length) { this.length = length; }

    short getScale() { return scale; }

    void setScale(short scale) { this.scale = scale; }

    short getRadix() { return radix; }

    void setRadix(short radix) { this.radix = radix; }

    short getNullable() { return nullable; }

    void setNullable(short nullable) { this.nullable = nullable; }

    String getRemarks() { return remarks; }

    void setRemarks(String remarks) { this.remarks = remarks; }

    int getCharOctetLength() { return charOctetLength; }

    void setCharOctetLength(int charOctetLength) { this.charOctetLength = charOctetLength; }

    int getOrdinalPosition() { return ordinalPosition; }

    void setOrdinalPosition(int ordinalPosition) { this.ordinalPosition = ordinalPosition; }

    String getIsNullable() { return isNullable; }

    void setIsNullable(String isNullable) { this.isNullable = isNullable; }

    String getSpecificName() { return specificName; }

    void setSpecificName(String specificName) { this.specificName = specificName; }
}
Java
package com.xyz.practice.jdbc.databasemetadata; import java.io.Serializable; import java.sql.RowIdLifetime; public class Database implements Serializable { private static final long serialVersionUID = -1225713110410721872L; /** * Retrieves whether the current user can call all the procedures returned by the method * getProcedures. */ private boolean allProceduresAreCallable; /** * Retrieves whether the current user can use all the tables returned by the method getTables in * a SELECT statement. */ private boolean allTablesAreSelectable; /** * Retrieves whether a SQLException while autoCommit is true inidcates that all open ResultSets * are closed, even ones that are holdable. When a SQLException occurs while autocommit is true, * it is vendor specific whether the JDBC driver responds with a commit operation, a rollback * operation, or by doing neither a commit nor a rollback. A potential result of this difference * is in whether or not holdable ResultSets are closed. */ private boolean autoCommitFailureClosesAllResultSets; /** * Retrieves whether a data definition statement within a transaction forces the transaction to * commit */ private boolean dataDefinitionCausesTransactionCommit; /** * Retrieves whether this database ignores a data definition statement within a transaction */ private boolean dataDefinitionIgnoredInTransactions; /** * Retrieves whether or not a visible row delete can be detected by calling the method * ResultSet.rowDeleted. 
If the method deletesAreDetected returns false, it means that deleted * rows are removed from the result set */ private boolean deletesAreDetected; /** * Retrieves whether the return value for the method getMaxRowSize includes the SQL data types * LONGVARCHAR and LONGVARBINARY */ private boolean doesMaxRowSizeIncludeBlobs; /** * Retrieves the String that this database uses as the separator between a catalog and table * name */ private String catalogSeparator; /** * Retrieves the database vendor's preferred term for "catalog". */ private String catalogTerm; /** * Retrieves the major version number of the underlying database. */ private int databaseMajorVersion; /** * Retrieves the minor version number of the underlying database. */ private int databaseMinorVersion; /** * Retrieves the name of this database product. */ private String databaseProductName; /** * Retrieves the version number of this database product. */ private String databaseProductVersion; /** * Retrieves this database's default transaction isolation level. The possible values are * defined in java.sql.Connection */ private int defaultTransactionIsolation; /** * Retrieves this JDBC driver's major version number. */ private int driverMajorVersion; /** * Retrieves this JDBC driver's minor version number */ private int driverMinorVersion; /** * Retrieves the name of this JDBC driver */ private String driverName; /** * Retrieves the version number of this JDBC driver as a String. */ private String driverVersion; /** * Retrieves all the "extra" characters that can be used in unquoted identifier names (those * beyond a-z, A-Z, 0-9 and _) */ private String extraNameCharacters; /** * Retrieves the string used to quote SQL identifiers. 
This method returns a space " " if * identifier quoting is not supported */ private String identifierQuoteString; /** * Retrieves the major JDBC version number for this driver */ private int jdbcMajorVersion; /** * Retrieves the minor JDBC version number for this driver */ private int jdbcMinorVersion; /** * Retrieves the maximum number of hex characters this database allows in an inline binary * literal */ private int maxBinaryLiteralLength; /** * Retrieves the maximum number of characters that this database allows in a catalog name */ private int maxCatalogNameLength; /** * Retrieves the maximum number of characters this database allows for a character literal. */ private int maxCharLiteralLength; /** * Retrieves the maximum number of characters this database allows for a column name. */ private int maxColumnNameLength; /** * Retrieves the maximum number of columns this database allows in a GROUP BY clause. */ private int maxColumnsInGroupBy; /** * Retrieves the maximum number of columns this database allows in an index. */ private int maxColumnsInIndex; /** * Retrieves the maximum number of columns this database allows in an ORDER BY clause. */ private int maxColumnsInOrderBy; /** * Retrieves the maximum number of columns this database allows in a SELECT list. */ private int maxColumnsInSelect; /** * Retrieves the maximum number of columns this database allows in a table. */ private int maxColumnsInTable; /** * Retrieves the maximum number of concurrent connections to this database that are possible. */ private int maxConnections; /** * Retrieves the maximum number of characters that this database allows in a cursor name. */ private int maxCursorNameLength; /** * Retrieves the maximum number of bytes this database allows for an index, including all of the * parts of the index. */ private int maxIndexLength; /** * Retrieves the maximum number of characters that this database allows in a procedure name. 
*/ private int maxProcedureNameLength; /** * Retrieves the maximum number of bytes this database allows in a single row. */ private int maxRowSize; /** * Retrieves the maximum number of characters that this database allows in a schema name. */ private int maxSchemaNameLength; /** * Retrieves the maximum number of characters this database allows in an SQL statement. */ private int maxStatementLength; /** * Retrieves the maximum number of active statements to this database that can be open at the * same time. */ private int maxStatements; /** * Retrieves the maximum number of characters this database allows in a table name. */ private int maxTableNameLength; /** * Retrieves the maximum number of tables this database allows in a SELECT statement. */ private int maxTablesInSelect; /** * Retrieves the maximum number of characters this database allows in a user name. */ private int maxUserNameLength; /** * Retrieves a comma-separated list of math functions available with this database. These are * the Open /Open CLI math function names used in the JDBC function escape clause. */ private String numericFunctions; /** * Retrieves the database vendor's preferred term for "procedure". */ private String procedureTerm; /** * Retrieves this database's default holdability for ResultSet objects. */ private int resultSetHoldability; /** * Indicates whether or not this data source supports the SQL ROWID type, and if so the lifetime * for which a RowId object remains valid. */ private RowIdLifetime rowIdLifetime; /** * Retrieves the database vendor's preferred term for "schema". */ private String schemaTerm; /** * Retrieves the string that can be used to escape wildcard characters. This is the string that * can be used to escape '_' or '%' in the catalog search parameters that are a pattern (and * therefore use one of the wildcard characters). 
*/ private String searchStringEscape; /** * Retrieves a comma-separated list of all of this database's SQL keywords that are NOT also * SQL:2003 keywords. */ private String sqlKeywords; /** * Indicates whether the SQLSTATE returned by SQLException.getSQLState is X/Open (now known as * Open Group) SQL CLI or SQL:2003. */ private int sqlStateType; /** * Retrieves a comma-separated list of string functions available with this database. These are * the Open Group CLI string function names used in the JDBC function escape clause. */ private String stringFunctions; /** * Retrieves a comma-separated list of system functions available with this database. These are * the Open Group CLI system function names used in the JDBC function escape clause. */ private String systemFunctions; /** * Retrieves a comma-separated list of the time and date functions available with this database. */ private String timeDateFunctions; /** * Retrieves the URL for this DBMS. */ private String url; /** * Retrieves the user name as known to this database. */ private String userName; /** * Retrieves whether or not a visible row insert can be detected by calling the method * ResultSet.rowInserted. */ private boolean insertsAreDetected; /** * Retrieves whether a catalog appears at the start of a fully qualified table name. If not, the * catalog appears at the end. */ private boolean isCatalogAtStart; /** * Retrieves whether this database is in read-only mode. */ private boolean isReadOnly; /** * Indicates whether updates made to a LOB are made on a copy or directly to the LOB. */ private boolean locatorsUpdateCopy; /** * Retrieves whether this database supports concatenations between NULL and non-NULL values * being NULL. */ private boolean nullPlusNonNullIsNull; /** * Retrieves whether NULL values are sorted at the end regardless of sort order. */ private boolean nullsAreSortedAtEnd; /** * Retrieves whether NULL values are sorted at the start regardless of sort order. 
*/ private boolean nullsAreSortedAtStart; /** * Retrieves whether NULL values are sorted high. Sorted high means that NULL values sort higher * than any other value in a domain. In an ascending order, if this method returns true, NULL * values will appear at the end. By contrast, the method nullsAreSortedAtEnd indicates whether * NULL values are sorted at the end regardless of sort order. */ private boolean nullsAreSortedHigh; /** * Retrieves whether NULL values are sorted low. Sorted low means that NULL values sort lower * than any other value in a domain. In an ascending order, if this method returns true, NULL * values will appear at the beginning. By contrast, the method nullsAreSortedAtStart indicates * whether NULL values are sorted at the beginning regardless of sort order. */ private boolean nullsAreSortedLow; /** * Retrieves whether deletes made by others are visible. */ private boolean othersDeletesAreVisible; /** * Retrieves whether inserts made by others are visible. */ private boolean othersInsertsAreVisible; /** * Retrieves whether updates made by others are visible. */ private boolean othersUpdatesAreVisible; /** * Retrieves whether a result set's own deletes are visible. */ private boolean ownDeletesAreVisible; /** * Retrieves whether a result set's own inserts are visible. */ private boolean ownInsertsAreVisible; /** * Retrieves whether for the given type of ResultSet object, the result set's own updates are * visible. */ private boolean ownUpdatesAreVisible; /** * Retrieves whether this database treats mixed case unquoted SQL identifiers as case * insensitive and stores them in lower case. */ private boolean storesLowerCaseIdentifiers; /** * Retrieves whether this database treats mixed case quoted SQL identifiers as case insensitive * and stores them in lower case. 
*/ private boolean storesLowerCaseQuotedIdentifiers; /** * Retrieves whether this database treats mixed case unquoted SQL identifiers as case * insensitive and stores them in mixed case. */ private boolean storesMixedCaseIdentifiers; /** * Retrieves whether this database treats mixed case quoted SQL identifiers as case insensitive * and stores them in mixed case. */ private boolean storesMixedCaseQuotedIdentifiers; /** * Retrieves whether this database treats mixed case unquoted SQL identifiers as case * insensitive and stores them in upper case. */ private boolean storesUpperCaseIdentifiers; /** * Retrieves whether this database supports ALTER TABLE with add column. */ private boolean supportsAlterTableWithAddColumn; /** * Retrieves whether this database supports ALTER TABLE with drop column. */ private boolean supportsAlterTableWithDropColumn; /** * Retrieves whether this database supports the ANSI92 entry level SQL grammar. */ private boolean supportsANSI92EntryLevelSQL; /** * Retrieves whether this database supports the ANSI92 full SQL grammar supported. */ private boolean supportsANSI92FullSQL; /** * Retrieves whether this database supports the ANSI92 intermediate SQL grammar supported. */ private boolean supportsANSI92IntermediateSQL; /** * Retrieves whether this database supports batch updates. */ private boolean supportsBatchUpdates; /** * Retrieves whether a catalog name can be used in a data manipulation statement. */ private boolean supportsCatalogsInDataManipulation; /** * Retrieves whether a catalog name can be used in an index definition statement. */ private boolean supportsCatalogsInIndexDefinitions; /** * Retrieves whether a catalog name can be used in a privilege definition statement. */ private boolean supportsCatalogsInPrivilegeDefinitions; /** * Retrieves whether a catalog name can be used in a procedure call statement. 
*/ private boolean supportsCatalogsInProcedureCalls; /** * Retrieves whether a catalog name can be used in a table definition statement. */ private boolean supportsCatalogsInTableDefinitions; /** * Retrieves whether this database supports column aliasing. */ private boolean supportsColumnAliasing; /** * Retrieves whether this database supports the JDBC scalar function CONVERT for the conversion * of one JDBC type to another. The JDBC types are the generic SQL data types defined in * java.sql.Types. */ private boolean supportsConvert; /** * */ }
Java
package com.xyz.practice.jdbc.databasemetadata;

import java.io.Serializable;

/**
 * Value object holding one row of the result of
 * {@code java.sql.DatabaseMetaData.getProcedureColumns(...)}: a single
 * column or parameter of a stored procedure.
 */
public class ProcedureColumn implements Serializable {

    private static final long serialVersionUID = 1886695116931613838L;

    ProcedureColumn() {
    }

    /** Procedure catalog (may be {@code null}). */
    private String procedureCat;

    /** Procedure schema (may be {@code null}). */
    private String procedureSchem;

    /** Procedure name. */
    private String procedureName;

    /** Column/parameter name. */
    private String columnName;

    /**
     * Kind of column/parameter:
     * {@code procedureColumnUnknown} - nobody knows,
     * {@code procedureColumnIn} - IN parameter,
     * {@code procedureColumnInOut} - INOUT parameter,
     * {@code procedureColumnOut} - OUT parameter,
     * {@code procedureColumnReturn} - procedure return value,
     * {@code procedureColumnResult} - result column in ResultSet.
     */
    private short columnType;

    /** SQL type from {@code java.sql.Types}. */
    private int dataType;

    /** SQL type name; for a UDT the type name is fully qualified. */
    private String typeName;

    /** Precision. */
    private int precision;

    /** Length in bytes of data. */
    private int length;

    /** Scale; null is returned for data types where SCALE is not applicable. */
    private short scale;

    /** Radix. */
    private short radix;

    /**
     * Whether NULL is allowed:
     * {@code procedureNoNulls} - does not allow NULL values,
     * {@code procedureNullable} - allows NULL values,
     * {@code procedureNullableUnknown} - nullability unknown.
     */
    private short nullable;

    /** Comment describing the parameter/column. */
    private String remarks;

    /**
     * Default value for the column, interpreted as a string when the value is
     * enclosed in single quotes (may be {@code null}).
     * The string NULL (not enclosed in quotes) - if NULL was specified as the default value.
     * TRUNCATE (not enclosed in quotes) - if the specified default value cannot be
     * represented without truncation.
     * NULL - if a default value was not specified.
     */
    private String columnDef;

    /** Reserved for future use. */
    private int sqlDataType;

    /** Reserved for future use. */
    private int sqlDatetimeSub;

    /**
     * Maximum length of binary and character based columns; for any other
     * datatype the returned value is a NULL.
     */
    private int charOctetLength;

    /**
     * Ordinal position, starting from 1, of the input and output parameters of
     * a procedure. A value of 0 describes the procedure's return value. For
     * result set columns, it is the ordinal position of the column in the
     * result set starting from 1; with multiple result sets, the ordinal
     * positions are implementation defined.
     */
    private int ordinalPosition;

    /**
     * ISO nullability for the column:
     * YES - the parameter can include NULLs,
     * NO - the parameter cannot include NULLs,
     * empty string - the nullability of the parameter is unknown.
     */
    private String isNullable;

    /** Name uniquely identifying this procedure within its schema. */
    private String specificName;

    String getProcedureCat() {
        return procedureCat;
    }

    void setProcedureCat(String procedureCat) {
        this.procedureCat = procedureCat;
    }

    String getProcedureSchem() {
        return procedureSchem;
    }

    void setProcedureSchem(String procedureSchem) {
        this.procedureSchem = procedureSchem;
    }

    String getProcedureName() {
        return procedureName;
    }

    void setProcedureName(String procedureName) {
        this.procedureName = procedureName;
    }

    String getColumnName() {
        return columnName;
    }

    void setColumnName(String columnName) {
        this.columnName = columnName;
    }

    short getColumnType() {
        return columnType;
    }

    void setColumnType(short columnType) {
        this.columnType = columnType;
    }

    int getDataType() {
        return dataType;
    }

    void setDataType(int dataType) {
        this.dataType = dataType;
    }

    String getTypeName() {
        return typeName;
    }

    void setTypeName(String typeName) {
        this.typeName = typeName;
    }

    int getPrecision() {
        return precision;
    }

    void setPrecision(int precision) {
        this.precision = precision;
    }

    int getLength() {
        return length;
    }

    void setLength(int length) {
        this.length = length;
    }

    short getScale() {
        return scale;
    }

    void setScale(short scale) {
        this.scale = scale;
    }

    short getRadix() {
        return radix;
    }

    void setRadix(short radix) {
        this.radix = radix;
    }

    short getNullable() {
        return nullable;
    }

    void setNullable(short nullable) {
        this.nullable = nullable;
    }

    String getRemarks() {
        return remarks;
    }

    void setRemarks(String remarks) {
        this.remarks = remarks;
    }

    String getColumnDef() {
        return columnDef;
    }

    void setColumnDef(String columnDef) {
        this.columnDef = columnDef;
    }

    int getSqlDataType() {
        return sqlDataType;
    }

    void setSqlDataType(int sqlDataType) {
        this.sqlDataType = sqlDataType;
    }

    int getSqlDatetimeSub() {
        return sqlDatetimeSub;
    }

    void setSqlDatetimeSub(int sqlDatetimeSub) {
        this.sqlDatetimeSub = sqlDatetimeSub;
    }

    int getCharOctetLength() {
        return charOctetLength;
    }

    void setCharOctetLength(int charOctetLength) {
        this.charOctetLength = charOctetLength;
    }

    int getOrdinalPosition() {
        return ordinalPosition;
    }

    void setOrdinalPosition(int ordinalPosition) {
        this.ordinalPosition = ordinalPosition;
    }

    String getIsNullable() {
        return isNullable;
    }

    void setIsNullable(String isNullable) {
        this.isNullable = isNullable;
    }

    String getSpecificName() {
        return specificName;
    }

    void setSpecificName(String specificName) {
        this.specificName = specificName;
    }
}
Java
package com.xyz.practice.jdbc.databasemetadata;

import java.io.Serializable;

/**
 * Value object holding one row of the result of
 * {@code java.sql.DatabaseMetaData.getClientInfoProperties()}: a single
 * client info property supported by the driver.
 */
public class ClientInfoProperty implements Serializable {

    private static final long serialVersionUID = 2238063041885637942L;

    ClientInfoProperty() {
    }

    /** Name of the client info property. */
    private String name;

    /** Maximum length of the value for the property. */
    private int maxLen;

    /** Default value of the property. */
    private String defaultValue;

    /**
     * Description of the property. Typically contains information as to where
     * this property is stored in the database.
     */
    private String description;

    String getName() {
        return name;
    }

    void setName(String name) {
        this.name = name;
    }

    int getMaxLen() {
        return maxLen;
    }

    void setMaxLen(int maxLen) {
        this.maxLen = maxLen;
    }

    String getDefaultValue() {
        return defaultValue;
    }

    void setDefaultValue(String defaultValue) {
        this.defaultValue = defaultValue;
    }

    String getDescription() {
        return description;
    }

    void setDescription(String description) {
        this.description = description;
    }
}
Java
package com.xyz.practice.jdbc.databasemetadata;

import java.io.Serializable;

/**
 * Value object holding one row of the result of
 * {@code java.sql.DatabaseMetaData.getTables(...)}.
 *
 * <p>Changes from the previous revision: all ten fields are now declared
 * together at the top of the class (four of them — {@code typeSchem},
 * {@code typeName}, {@code selfReferencingColName}, {@code refGeneration} —
 * were stranded below the accessor block), and a package-private no-arg
 * constructor is added for consistency with the sibling metadata beans
 * (ProcedureColumn, ClientInfoProperty, ColumnPrivilege, Function).
 * Serialization compatibility is unaffected: {@code serialVersionUID} is
 * unchanged and Java serialization matches fields by name, not by
 * declaration order.
 */
public class Table implements Serializable {

    private static final long serialVersionUID = 3363512786615751411L;

    Table() {
    }

    /** Table catalog (may be {@code null}). */
    private String tableCat;

    /** Table schema (may be {@code null}). */
    private String tableSchem;

    /** Table name. */
    private String tableName;

    /**
     * Table type. Typical types are "TABLE", "VIEW", "SYSTEM TABLE",
     * "GLOBAL TEMPORARY", "LOCAL TEMPORARY", "ALIAS", "SYNONYM".
     */
    private TableType tableType;

    /** Explanatory comment on the table. */
    private String remarks;

    /** The types catalog (may be {@code null}). */
    private String typeCat;

    /** The types schema (may be {@code null}). */
    private String typeSchem;

    /** Type name (may be {@code null}). */
    private String typeName;

    /**
     * Name of the designated "identifier" column of a typed table
     * (may be {@code null}).
     */
    private String selfReferencingColName;

    /**
     * Specifies how values in SELF_REFERENCING_COL_NAME are created.
     * Values are "SYSTEM", "USER", "DERIVED". (may be {@code null})
     */
    private String refGeneration;

    String getTableCat() {
        return tableCat;
    }

    void setTableCat(String tableCat) {
        this.tableCat = tableCat;
    }

    String getTableSchem() {
        return tableSchem;
    }

    void setTableSchem(String tableSchem) {
        this.tableSchem = tableSchem;
    }

    String getTableName() {
        return tableName;
    }

    void setTableName(String tableName) {
        this.tableName = tableName;
    }

    TableType getTableType() {
        return tableType;
    }

    void setTableType(TableType tableType) {
        this.tableType = tableType;
    }

    String getRemarks() {
        return remarks;
    }

    void setRemarks(String remarks) {
        this.remarks = remarks;
    }

    String getTypeCat() {
        return typeCat;
    }

    void setTypeCat(String typeCat) {
        this.typeCat = typeCat;
    }

    String getTypeSchem() {
        return typeSchem;
    }

    void setTypeSchem(String typeSchem) {
        this.typeSchem = typeSchem;
    }

    String getTypeName() {
        return typeName;
    }

    void setTypeName(String typeName) {
        this.typeName = typeName;
    }

    String getSelfReferencingColName() {
        return selfReferencingColName;
    }

    void setSelfReferencingColName(String selfReferencingColName) {
        this.selfReferencingColName = selfReferencingColName;
    }

    String getRefGeneration() {
        return refGeneration;
    }

    void setRefGeneration(String refGeneration) {
        this.refGeneration = refGeneration;
    }
}
Java
package com.xyz.practice.jdbc.databasemetadata;

import java.io.Serializable;

/**
 * Pre-Java-5 "type-safe enum" of JDBC table types, with shared constants for
 * the typical values returned by {@code DatabaseMetaData.getTableTypes()}.
 *
 * <p>Fix: the class is {@link Serializable} but previously had no
 * {@code readResolve}, so deserialization minted fresh instances that failed
 * identity ({@code ==}) comparison against the shared constants. It also had
 * no value-based {@code equals}/{@code hashCode}. Both are added below; the
 * external interface (constants, constructor, accessors) is unchanged.
 */
public class TableType implements Serializable {

    private static final long serialVersionUID = 4214894369464157382L;

    static final TableType table_type_table = new TableType("TABLE");
    static final TableType table_type_view = new TableType("VIEW");
    static final TableType table_type_system_table = new TableType("SYSTEM TABLE");
    static final TableType table_type_global_temporary = new TableType("GLOBAL TEMPORARY");
    static final TableType table_type_local_temporary = new TableType("LOCAL TEMPORARY");
    static final TableType table_type_alias = new TableType("ALIAS");
    static final TableType table_type_synonym = new TableType("SYNONYM");

    /** Canonical instances, used by readResolve to restore identity. Must be declared after the constants above. */
    private static final TableType[] CANONICAL = {
            table_type_table, table_type_view, table_type_system_table,
            table_type_global_temporary, table_type_local_temporary,
            table_type_alias, table_type_synonym
    };

    /** The raw table-type string, e.g. "TABLE" or "VIEW". */
    private String tableType;

    TableType(String tableType) {
        this.tableType = tableType;
    }

    String getTableType() {
        return tableType;
    }

    void setTableType(String tableType) {
        this.tableType = tableType;
    }

    /**
     * Maps a deserialized instance back to the matching shared constant so
     * that identity comparisons against the constants keep working after a
     * serialization round trip. Non-standard type strings (possible because
     * the constructor and setter accept arbitrary values) keep the
     * deserialized instance.
     */
    private Object readResolve() {
        for (TableType candidate : CANONICAL) {
            if (candidate.tableType.equals(tableType)) {
                return candidate;
            }
        }
        return this;
    }

    /**
     * Value equality on the underlying type string. NOTE: the field is
     * mutable via {@link #setTableType(String)}, so instances are unsafe as
     * hash keys if mutated after insertion.
     */
    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (!(obj instanceof TableType)) {
            return false;
        }
        TableType other = (TableType) obj;
        return tableType == null ? other.tableType == null : tableType.equals(other.tableType);
    }

    @Override
    public int hashCode() {
        return tableType == null ? 0 : tableType.hashCode();
    }
}
Java
package com.xyz.practice.jdbc.databasemetadata;

import java.io.Serializable;

/**
 * Value object holding one row of the result of
 * {@code java.sql.DatabaseMetaData.getColumnPrivileges(...)}: one access
 * right granted on a single table column.
 */
public class ColumnPrivilege implements Serializable {

    private static final long serialVersionUID = 8532093440653495540L;

    ColumnPrivilege() {
    }

    /** Table catalog (may be {@code null}). */
    private String tableCat;

    /** Table schema (may be {@code null}). */
    private String tableSchem;

    /** Table name. */
    private String tableName;

    /** Column name. */
    private String columnName;

    /** Grantor of access (may be {@code null}). */
    private String grantor;

    /** Grantee of access. */
    private String grantee;

    /** Name of access (SELECT, INSERT, UPDATE, REFERENCES, ...). */
    private String privilege;

    /**
     * "YES" if the grantee is permitted to grant to others; "NO" if not;
     * {@code null} if unknown.
     */
    private String isGrantable;

    String getTableCat() {
        return tableCat;
    }

    void setTableCat(String tableCat) {
        this.tableCat = tableCat;
    }

    String getTableSchem() {
        return tableSchem;
    }

    void setTableSchem(String tableSchem) {
        this.tableSchem = tableSchem;
    }

    String getTableName() {
        return tableName;
    }

    void setTableName(String tableName) {
        this.tableName = tableName;
    }

    String getColumnName() {
        return columnName;
    }

    void setColumnName(String columnName) {
        this.columnName = columnName;
    }

    String getGrantor() {
        return grantor;
    }

    void setGrantor(String grantor) {
        this.grantor = grantor;
    }

    String getGrantee() {
        return grantee;
    }

    void setGrantee(String grantee) {
        this.grantee = grantee;
    }

    String getPrivilege() {
        return privilege;
    }

    void setPrivilege(String privilege) {
        this.privilege = privilege;
    }

    String getIsGrantable() {
        return isGrantable;
    }

    void setIsGrantable(String isGrantable) {
        this.isGrantable = isGrantable;
    }
}
Java
package com.xyz.practice.jdbc.databasemetadata;

import java.io.Serializable;

/**
 * Value object holding one row of the result of
 * {@code java.sql.DatabaseMetaData.getFunctions(...)}: a single
 * user or system function available in the catalog.
 */
public class Function implements Serializable {

    private static final long serialVersionUID = 7337130883767894365L;

    Function() {
    }

    /** Function catalog (may be {@code null}). */
    private String functionCat;

    /** Function schema (may be {@code null}). */
    private String functionSchema;

    /** Function name. This is the name used to invoke the function. */
    private String functionName;

    /** Explanatory comment on the function. */
    private String remarks;

    /**
     * Kind of function:
     * functionResultUnknown - cannot determine whether a return value or a
     * table will be returned,
     * functionNoTable - does not return a table,
     * functionReturnsTable - returns a table.
     */
    private short functionType;

    /**
     * Name uniquely identifying this function within its schema. This is a
     * user-specified, or DBMS-generated, name that may differ from
     * FUNCTION_NAME, for example with overloaded functions.
     */
    private String specificName;

    String getFunctionCat() {
        return functionCat;
    }

    void setFunctionCat(String functionCat) {
        this.functionCat = functionCat;
    }

    String getFunctionSchema() {
        return functionSchema;
    }

    void setFunctionSchema(String functionSchema) {
        this.functionSchema = functionSchema;
    }

    String getFunctionName() {
        return functionName;
    }

    void setFunctionName(String functionName) {
        this.functionName = functionName;
    }

    String getRemarks() {
        return remarks;
    }

    void setRemarks(String remarks) {
        this.remarks = remarks;
    }

    short getFunctionType() {
        return functionType;
    }

    void setFunctionType(short functionType) {
        this.functionType = functionType;
    }

    String getSpecificName() {
        return specificName;
    }

    void setSpecificName(String specificName) {
        this.specificName = specificName;
    }
}
Java