def __cluster_distance(self, cluster1, cluster2):
    """!
    @brief Calculate minimal distance between clusters using representative points.

    @param[in] cluster1 (cure_cluster): The first cluster.
    @param[in] cluster2 (cure_cluster): The second cluster.

    @return (double) Distance between the two clusters, defined as the minimum distance between their representative points (the squared Euclidean distance is used in fast mode).

    """
    distance = float('inf')
    for i in range(0, len(cluster1.rep)):
        for k in range(0, len(cluster2.rep)):
            dist = euclidean_distance_square(cluster1.rep[i], cluster2.rep[k])    # fast mode
            # dist = euclidean_distance(cluster1.rep[i], cluster2.rep[k])        # slow mode
            if dist < distance:
                distance = dist

    return distance

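# A minimal standalone sketch of the same minimum-link computation, with plain
# lists standing in for cure_cluster.rep and a local stand-in for
# euclidean_distance_square; the data below is hypothetical.
def _euclidean_distance_square(a, b):
    return sum((x - y) ** 2 for x, y in zip(a, b))

rep1 = [[0.0, 0.0], [1.0, 0.0]]    # representative points of the first cluster
rep2 = [[3.0, 0.0], [4.0, 1.0]]    # representative points of the second cluster

# Minimum squared distance over all representative-point pairs: 4.0 here.
print(min(_euclidean_distance_square(p, q) for p in rep1 for q in rep2))
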
def allocate_observation_matrix(self):
    """!
    @brief Allocates observation matrix in line with output dynamic of the network.
    @details Matrix where the state of each neuron on each iteration is encoded as zero/one via the Heaviside function.

    @return (list) Observation matrix of the network dynamic.

    """
    number_neurons = len(self.output[0])
    observation_matrix = []

    for iteration in range(len(self.output)):
        observation_column = []
        for index_neuron in range(number_neurons):
            observation_column.append(heaviside(self.output[iteration][index_neuron]))

        observation_matrix.append(observation_column)

    return observation_matrix

def __allocate_neuron_patterns(self, start_iteration, stop_iteration):
    """!
    @brief Allocates transposed observation matrix of neurons that is limited by the specified periods of simulation.
    @details Matrix where the state of each neuron on each iteration is encoded as zero/one via the Heaviside function.

    @return (list) Transposed observation matrix that is limited by the specified periods of simulation.

    """
    pattern_matrix = []
    for index_neuron in range(len(self.output[0])):
        pattern_neuron = []
        for iteration in range(start_iteration, stop_iteration):
            pattern_neuron.append(heaviside(self.output[iteration][index_neuron]))

        pattern_matrix.append(pattern_neuron)

    return pattern_matrix

def allocate_sync_ensembles(self, steps):
    """!
    @brief Allocate clusters in line with ensembles of synchronous neurons, where each synchronous ensemble corresponds to only one cluster.

    @param[in] steps (double): Number of steps from the end of the simulation that is used for analysis. During the specified period the chaotic neural network should have stable output, otherwise incorrect results are allocated.

    @return (list) Groups (lists) of indexes of synchronous oscillators.
            For example, [ [index_osc1, index_osc3], [index_osc2], [index_osc4, index_osc5] ].

    """
    iterations = steps
    if iterations >= len(self.output):
        iterations = len(self.output)

    ensembles = []

    start_iteration = len(self.output) - iterations
    end_iteration = len(self.output)

    pattern_matrix = self.__allocate_neuron_patterns(start_iteration, end_iteration)

    ensembles.append([0])

    for index_neuron in range(1, len(self.output[0])):
        neuron_pattern = pattern_matrix[index_neuron][:]

        neuron_assigned = False
        for ensemble in ensembles:
            ensemble_pattern = pattern_matrix[ensemble[0]][:]

            if neuron_pattern == ensemble_pattern:
                ensemble.append(index_neuron)
                neuron_assigned = True
                break

        if neuron_assigned is False:
            ensembles.append([index_neuron])

    return ensembles

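# A toy illustration of the grouping rule above: neurons whose 0/1 Heaviside
# patterns match over the analysed window fall into the same ensemble. This is
# a standalone sketch with hypothetical data, not the network class itself.
pattern_matrix = [[1, 0, 1], [1, 0, 1], [0, 1, 1], [1, 0, 1]]

ensembles = [[0]]
for index_neuron in range(1, len(pattern_matrix)):
    for ensemble in ensembles:
        if pattern_matrix[index_neuron] == pattern_matrix[ensemble[0]]:
            ensemble.append(index_neuron)
            break
    else:
        ensembles.append([index_neuron])

print(ensembles)    # [[0, 1, 3], [2]] - neurons 0, 1 and 3 share one pattern
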
def show_dynamic_matrix(cnn_output_dynamic):
    """!
    @brief Shows output dynamic as a matrix in grey colors.
    @details This type of visualization is convenient for observing allocated clusters.

    @param[in] cnn_output_dynamic (cnn_dynamic): Output dynamic of the chaotic neural network.

    @see show_output_dynamic
    @see show_observation_matrix

    """
    network_dynamic = numpy.array(cnn_output_dynamic.output)

    plt.imshow(network_dynamic.T, cmap=plt.get_cmap('gray'), interpolation='None', vmin=0.0, vmax=1.0)
    plt.show()

def show_observation_matrix(cnn_output_dynamic):
    """!
    @brief Shows observation matrix as black/white blocks.
    @details This type of visualization is convenient for observing allocated clusters.

    @param[in] cnn_output_dynamic (cnn_dynamic): Output dynamic of the chaotic neural network.

    @see show_output_dynamic
    @see show_dynamic_matrix

    """
    observation_matrix = numpy.array(cnn_output_dynamic.allocate_observation_matrix())

    plt.imshow(observation_matrix.T, cmap=plt.get_cmap('gray'), interpolation='None', vmin=0.0, vmax=1.0)
    plt.show()

def simulate(self, steps, stimulus):
    """!
    @brief Simulates the chaotic neural network with external stimulus during the specified number of steps.
    @details Stimuli are treated as coordinates of neurons, and connection weights are initialized accordingly.

    @param[in] steps (uint): Number of steps for simulation.
    @param[in] stimulus (list): Stimuli that are used for simulation.

    @return (cnn_dynamic) Output dynamic of the chaotic neural network.

    """
    self.__create_weights(stimulus)
    self.__location = stimulus

    dynamic = cnn_dynamic([], [])
    dynamic.output.append(self.__output)
    dynamic.time.append(0)

    for step in range(1, steps, 1):
        self.__output = self.__calculate_states()

        dynamic.output.append(self.__output)
        dynamic.time.append(step)

    return dynamic

def __calculate_states(self):
    """!
    @brief Calculates new state of each neuron.
    @details No assignment is performed here; the new states are only returned.

    @return (list) Returns new states (output).

    """
    output = [0.0 for _ in range(self.__num_osc)]

    for i in range(self.__num_osc):
        output[i] = self.__neuron_evolution(i)

    return output

def __neuron_evolution(self, index):
    """!
    @brief Calculates state of the neuron with specified index.

    @param[in] index (uint): Index of neuron in the network.

    @return (double) New output of the specified neuron.

    """
    value = 0.0

    for index_neighbor in range(self.__num_osc):
        value += self.__weights[index][index_neighbor] * (1.0 - 2.0 * (self.__output[index_neighbor] ** 2))

    return value / self.__weights_summary[index]

def __create_weights(self, stimulus):
    """!
    @brief Create weights between neurons in line with stimulus.

    @param[in] stimulus (list): External stimulus for the chaotic neural network.

    """
    self.__average_distance = average_neighbor_distance(stimulus, self.__amount_neighbors)

    self.__weights = [[0.0 for _ in range(len(stimulus))] for _ in range(len(stimulus))]
    self.__weights_summary = [0.0 for _ in range(self.__num_osc)]

    if self.__conn_type == type_conn.ALL_TO_ALL:
        self.__create_weights_all_to_all(stimulus)
    elif self.__conn_type == type_conn.TRIANGULATION_DELAUNAY:
        self.__create_weights_delaunay_triangulation(stimulus)

def __create_weights_all_to_all(self, stimulus):
    """!
    @brief Create all-to-all weight structure between neurons in line with stimulus.

    @param[in] stimulus (list): External stimulus for the chaotic neural network.

    """
    for i in range(len(stimulus)):
        for j in range(i + 1, len(stimulus)):
            weight = self.__calculate_weight(stimulus[i], stimulus[j])

            self.__weights[i][j] = weight
            self.__weights[j][i] = weight

            self.__weights_summary[i] += weight
            self.__weights_summary[j] += weight

def __create_weights_delaunay_triangulation(self, stimulus):
    """!
    @brief Create Delaunay-triangulation weight structure between neurons in line with stimulus.

    @param[in] stimulus (list): External stimulus for the chaotic neural network.

    """
    points = numpy.array(stimulus)
    triangulation = Delaunay(points)

    for triangle in triangulation.simplices:
        for index_tri_point1 in range(len(triangle)):
            for index_tri_point2 in range(index_tri_point1 + 1, len(triangle)):
                index_point1 = triangle[index_tri_point1]
                index_point2 = triangle[index_tri_point2]

                weight = self.__calculate_weight(stimulus[index_point1], stimulus[index_point2])

                self.__weights[index_point1][index_point2] = weight
                self.__weights[index_point2][index_point1] = weight

                self.__weights_summary[index_point1] += weight
                self.__weights_summary[index_point2] += weight

def __calculate_weight(self, stimulus1, stimulus2):
    """!
    @brief Calculate weight between neurons that have external stimulus1 and stimulus2.

    @param[in] stimulus1 (list): External stimulus of the first neuron.
    @param[in] stimulus2 (list): External stimulus of the second neuron.

    @return (double) Weight between neurons that are under the specified stimulus.

    """
    distance = euclidean_distance_square(stimulus1, stimulus2)
    return math.exp(-distance / (2.0 * self.__average_distance))

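# The weight is a Gaussian kernel of the squared distance between stimuli,
# scaled by the average neighbour distance. A standalone check with
# hypothetical values:
import math

def calculate_weight(stimulus1, stimulus2, average_distance):
    distance = sum((a - b) ** 2 for a, b in zip(stimulus1, stimulus2))
    return math.exp(-distance / (2.0 * average_distance))

# Closer stimuli yield weights near 1.0; distant stimuli decay towards 0.0.
print(calculate_weight([0.0, 0.0], [0.5, 0.0], average_distance=1.0))    # ~0.8825
print(calculate_weight([0.0, 0.0], [3.0, 0.0], average_distance=1.0))    # ~0.0111
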
def show_network(self):
    """!
    @brief Shows structure of the network: neurons and connections between them.

    """
    dimension = len(self.__location[0])
    if (dimension != 3) and (dimension != 2):
        raise NameError('Network that is not located in 2-D or 3-D space cannot be represented')

    (fig, axes) = self.__create_surface(dimension)

    for i in range(0, self.__num_osc, 1):
        if dimension == 2:
            axes.plot(self.__location[i][0], self.__location[i][1], 'bo')
            for j in range(i, self.__num_osc, 1):    # draw connection between two points only one time
                if self.__weights[i][j] > 0.0:
                    axes.plot([self.__location[i][0], self.__location[j][0]],
                              [self.__location[i][1], self.__location[j][1]],
                              'b-', linewidth=0.5)
        elif dimension == 3:
            axes.scatter(self.__location[i][0], self.__location[i][1], self.__location[i][2], c='b', marker='o')
            for j in range(i, self.__num_osc, 1):    # draw connection between two points only one time
                if self.__weights[i][j] > 0.0:
                    axes.plot([self.__location[i][0], self.__location[j][0]],
                              [self.__location[i][1], self.__location[j][1]],
                              [self.__location[i][2], self.__location[j][2]],
                              'b-', linewidth=0.5)

    plt.grid()
    plt.show()

def __create_surface(self, dimension):
    """!
    @brief Prepares surface for showing network structure in line with the specified dimension.

    @param[in] dimension (uint): Dimension of processed data (external stimulus).

    @return (tuple) Description of surface for drawing network structure.

    """
    rcParams['font.sans-serif'] = ['Arial']
    rcParams['font.size'] = 12

    fig = plt.figure()
    axes = None
    if dimension == 2:
        axes = fig.add_subplot(111)
    elif dimension == 3:
        axes = fig.add_subplot(111, projection='3d')

    surface_font = FontProperties()
    surface_font.set_name('Arial')
    surface_font.set_size('12')

    return (fig, axes)

def show_pattern(syncpr_output_dynamic, image_height, image_width):
    """!
    @brief Displays evolution of phase oscillators as a set of patterns where the last one is the final result of recognition.

    @param[in] syncpr_output_dynamic (syncpr_dynamic): Output dynamic of a syncpr network.
    @param[in] image_height (uint): Height of the pattern (image_height * image_width should be equal to the number of oscillators).
    @param[in] image_width (uint): Width of the pattern.

    """
    number_pictures = len(syncpr_output_dynamic)
    iteration_math_step = 1.0
    if number_pictures > 50:
        iteration_math_step = number_pictures / 50.0
        number_pictures = 50

    number_cols = int(numpy.ceil(number_pictures ** 0.5))
    number_rows = int(numpy.ceil(number_pictures / number_cols))

    real_index = 0, 0
    double_indexer = True
    if (number_cols == 1) or (number_rows == 1):
        real_index = 0
        double_indexer = False

    (_, axarr) = plt.subplots(number_rows, number_cols)

    if number_pictures > 1:
        plt.setp([ax for ax in axarr], visible=False)

    iteration_display = 0.0
    for iteration in range(len(syncpr_output_dynamic)):
        if iteration >= iteration_display:
            iteration_display += iteration_math_step

            ax_handle = axarr
            if number_pictures > 1:
                ax_handle = axarr[real_index]

            syncpr_visualizer.__show_pattern(ax_handle, syncpr_output_dynamic, image_height, image_width, iteration)

            if double_indexer is True:
                real_index = real_index[0], real_index[1] + 1
                if real_index[1] >= number_cols:
                    real_index = real_index[0] + 1, 0
            else:
                real_index += 1

    plt.show()

def animate_pattern_recognition(syncpr_output_dynamic, image_height, image_width, animation_velocity=75, title=None, save_movie=None):
    """!
    @brief Shows animation of the pattern recognition process that has been performed by the oscillatory network.

    @param[in] syncpr_output_dynamic (syncpr_dynamic): Output dynamic of a syncpr network.
    @param[in] image_height (uint): Height of the pattern (image_height * image_width should be equal to the number of oscillators).
    @param[in] image_width (uint): Width of the pattern.
    @param[in] animation_velocity (uint): Interval between frames in milliseconds.
    @param[in] title (string): Title of the animation that is displayed on the figure if it is specified.
    @param[in] save_movie (string): If specified, the animation will be stored in the file whose name is given by this parameter.

    """
    figure = plt.figure()

    def init_frame():
        return frame_generation(0)

    def frame_generation(index_dynamic):
        figure.clf()

        if title is not None:
            figure.suptitle(title, fontsize=26, fontweight='bold')

        ax1 = figure.add_subplot(121, projection='polar')
        ax2 = figure.add_subplot(122)

        dynamic = syncpr_output_dynamic.output[index_dynamic]

        artist1, = ax1.plot(dynamic, [1.0] * len(dynamic), marker='o', color='blue', ls='')
        artist2 = syncpr_visualizer.__show_pattern(ax2, syncpr_output_dynamic, image_height, image_width, index_dynamic)

        return [artist1, artist2]

    cluster_animation = animation.FuncAnimation(figure, frame_generation, len(syncpr_output_dynamic),
                                                interval=animation_velocity, init_func=init_frame, repeat_delay=5000)

    if save_movie is not None:
        # plt.rcParams['animation.ffmpeg_path'] = 'C:\\Users\\annoviko\\programs\\ffmpeg-win64-static\\bin\\ffmpeg.exe'
        # ffmpeg_writer = animation.FFMpegWriter()
        # cluster_animation.save(save_movie, writer = ffmpeg_writer, fps = 15)
        cluster_animation.save(save_movie, writer='ffmpeg', fps=15, bitrate=1500)
    else:
        plt.show()

def __show_pattern(ax_handle, syncpr_output_dynamic, image_height, image_width, iteration):
    """!
    @brief Draws pattern on the specified axis.

    @param[in] ax_handle (Axis): Axis where the pattern should be drawn.
    @param[in] syncpr_output_dynamic (syncpr_dynamic): Output dynamic of a syncpr network.
    @param[in] image_height (uint): Height of the pattern (image_height * image_width should be equal to the number of oscillators).
    @param[in] image_width (uint): Width of the pattern.
    @param[in] iteration (uint): Simulation iteration that should be used for extracting the pattern.

    @return (matplotlib.artist) Artist (pattern) that is rendered on the canvas.

    """
    current_dynamic = syncpr_output_dynamic.output[iteration]
    stage_picture = [(255, 255, 255)] * (image_height * image_width)
    for index_phase in range(len(current_dynamic)):
        phase = current_dynamic[index_phase]

        pixel_color = math.floor(phase * (255 / (2 * math.pi)))
        stage_picture[index_phase] = (pixel_color, pixel_color, pixel_color)

    stage = numpy.array(stage_picture, numpy.uint8)
    stage = numpy.reshape(stage, (image_height, image_width, 3))    # the third dimension is the RGB size

    image_cluster = Image.fromarray(stage)

    artist = ax_handle.imshow(image_cluster, interpolation='none')
    plt.setp(ax_handle, visible=True)

    ax_handle.xaxis.set_ticklabels([])
    ax_handle.yaxis.set_ticklabels([])
    ax_handle.xaxis.set_ticks_position('none')
    ax_handle.yaxis.set_ticks_position('none')

    return artist

def train(self, samples):
    """!
    @brief Trains the syncpr network using the Hebbian rule for adjusting the strength of connections between oscillators during training.

    @param[in] samples (list): List of patterns, where each pattern is represented by a list of features each equal to -1 or +1.

    """
    # Verify patterns for learning
    for pattern in samples:
        self.__validate_pattern(pattern)

    if self._ccore_network_pointer is not None:
        return wrapper.syncpr_train(self._ccore_network_pointer, samples)

    length = len(self)
    number_samples = len(samples)

    for i in range(length):
        for j in range(i + 1, len(self), 1):
            # go through all patterns
            for p in range(number_samples):
                value1 = samples[p][i]
                value2 = samples[p][j]

                self._coupling[i][j] += value1 * value2

            self._coupling[i][j] /= length
            self._coupling[j][i] = self._coupling[i][j]

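# The update above is the classic Hebbian rule C[i][j] = (1/N) * sum_p(x_p[i] * x_p[j])
# over patterns x_p with features of -1 or +1, where N is the network size.
# A standalone sketch of the same computation for two hypothetical patterns:
samples = [[1, -1, 1, -1], [1, 1, -1, -1]]
length = 4

coupling = [[0.0] * length for _ in range(length)]
for i in range(length):
    for j in range(i + 1, length):
        for pattern in samples:
            coupling[i][j] += pattern[i] * pattern[j]
        coupling[i][j] /= length
        coupling[j][i] = coupling[i][j]

print(coupling[0][1])    # (1*-1 + 1*1) / 4 = 0.0
print(coupling[0][3])    # (1*-1 + 1*-1) / 4 = -0.5
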
def simulate_dynamic(self, pattern, order=0.998, solution=solve_type.RK4, collect_dynamic=False, step=0.1, int_step=0.01, threshold_changes=0.0000001):
    """!
    @brief Performs dynamic simulation of the network until the stop condition is reached.
    @details In other words, the network performs pattern recognition during simulation.
             The stop condition is defined by the input argument 'order', which represents memory order; however,
             the simulation can also be stopped if the convergence rate is low, whose threshold is defined
             by the argument 'threshold_changes'.

    @param[in] pattern (list): Pattern for recognition represented by a list of features each equal to -1 or +1.
    @param[in] order (double): Order of process synchronization, distributed in the range (0, 1].
    @param[in] solution (solve_type): Type of solution.
    @param[in] collect_dynamic (bool): If True - returns the whole dynamic of the oscillatory network, otherwise returns only the last values of the dynamic.
    @param[in] step (double): Time step of one iteration of simulation.
    @param[in] int_step (double): Integration step, should be less than step.
    @param[in] threshold_changes (double): Additional stop condition that helps prevent infinite simulation; defines the limit of changes of oscillators between the current and the previous step.

    @return (list) Dynamic of the oscillatory network. If the argument 'collect_dynamic' = True, then the dynamic for the whole simulation time is returned,
            otherwise only the last values (the last step of simulation) of the dynamic are returned.

    @see simulate()
    @see simulate_static()

    """
    self.__validate_pattern(pattern)

    if self._ccore_network_pointer is not None:
        ccore_instance_dynamic = wrapper.syncpr_simulate_dynamic(self._ccore_network_pointer, pattern, order, solution, collect_dynamic, step)
        return syncpr_dynamic(None, None, ccore_instance_dynamic)

    for i in range(0, len(pattern), 1):
        if pattern[i] > 0.0:
            self._phases[i] = 0.0
        else:
            self._phases[i] = math.pi / 2.0

    # For statistics and integration
    time_counter = 0

    # Prevent infinite loop. It's possible when the required state cannot be reached.
    previous_order = 0
    current_order = self.__calculate_memory_order(pattern)

    # If input dynamic is requested
    dyn_phase = []
    dyn_time = []
    if collect_dynamic == True:
        dyn_phase.append(self._phases)
        dyn_time.append(0)

    # Execute until the sync state is reached
    while current_order < order:
        # update states of oscillators
        self._phases = self._calculate_phases(solution, time_counter, step, int_step)

        # update time
        time_counter += step

        # if input dynamic is requested
        if collect_dynamic == True:
            dyn_phase.append(self._phases)
            dyn_time.append(time_counter)

        # update orders
        previous_order = current_order
        current_order = self.__calculate_memory_order(pattern)

        # hang prevention
        if abs(current_order - previous_order) < threshold_changes:
            break

    if collect_dynamic != True:
        dyn_phase.append(self._phases)
        dyn_time.append(time_counter)

    output_sync_dynamic = syncpr_dynamic(dyn_phase, dyn_time, None)
    return output_sync_dynamic

def simulate_static(self, steps, time, pattern, solution=solve_type.FAST, collect_dynamic=False):
    """!
    @brief Performs static simulation of the syncpr oscillatory network.
    @details In other words, the network performs pattern recognition during simulation.

    @param[in] steps (uint): Number of simulation steps.
    @param[in] time (double): Time of simulation.
    @param[in] pattern (list): Pattern for recognition represented by a list of features each equal to -1 or +1.
    @param[in] solution (solve_type): Type of solution.
    @param[in] collect_dynamic (bool): If True - returns the whole dynamic of the oscillatory network, otherwise returns only the last values of the dynamic.

    @return (list) Dynamic of the oscillatory network. If the argument 'collect_dynamic' = True, then the dynamic for the whole simulation time is returned,
            otherwise only the last values (the last step of simulation) of the dynamic are returned.

    @see simulate()
    @see simulate_dynamic()

    """
    self.__validate_pattern(pattern)

    if self._ccore_network_pointer is not None:
        ccore_instance_dynamic = wrapper.syncpr_simulate_static(self._ccore_network_pointer, steps, time, pattern, solution, collect_dynamic)
        return syncpr_dynamic(None, None, ccore_instance_dynamic)

    for i in range(0, len(pattern), 1):
        if pattern[i] > 0.0:
            self._phases[i] = 0.0
        else:
            self._phases[i] = math.pi / 2.0

    return super().simulate_static(steps, time, solution, collect_dynamic)

def memory_order(self, pattern):
    """!
    @brief Calculates the memory order function for the memorized pattern.
    @details Throws an exception if the length of the pattern is not equal to the size of the network or if it contains a feature whose value is not -1 or +1.

    @param[in] pattern (list): Pattern for recognition represented by a list of features each equal to -1 or +1.

    @return (double) Order of memory for the specified pattern.

    """
    self.__validate_pattern(pattern)

    if self._ccore_network_pointer is not None:
        return wrapper.syncpr_memory_order(self._ccore_network_pointer, pattern)
    else:
        return self.__calculate_memory_order(pattern)

def __calculate_memory_order(self, pattern):
    """!
    @brief Calculates the memory order function for the memorized pattern without any pattern validation.

    @param[in] pattern (list): Pattern for recognition represented by a list of features each equal to -1 or +1.

    @return (double) Order of memory for the specified pattern.

    """
    memory_order = 0.0
    for index in range(len(self)):
        memory_order += pattern[index] * cmath.exp(1j * self._phases[index])

    memory_order /= len(self)
    return abs(memory_order)

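# The memory order is the magnitude of the pattern-weighted complex mean of
# oscillator phases, m = |(1/N) * sum_i(x_i * exp(1j * phase_i))|. It reaches
# 1.0 when +1 features sit at phase 0 and -1 features at phase pi (anti-phase).
# A standalone check with hypothetical phases:
import cmath

def calc_memory_order(pattern, phases):
    total = sum(p * cmath.exp(1j * phase) for p, phase in zip(pattern, phases))
    return abs(total / len(pattern))

pattern = [1, -1, 1, -1]
print(calc_memory_order(pattern, [0.0, cmath.pi, 0.0, cmath.pi]))    # 1.0 - perfect recall
print(calc_memory_order(pattern, [0.0, 0.0, 0.0, 0.0]))              # 0.0 - the terms cancel
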
def _phase_kuramoto(self, teta, t, argv):
    """!
    @brief Returns result of phase calculation for the specified oscillator in the network.

    @param[in] teta (double): Phase of the oscillator that is differentiated.
    @param[in] t (double): Current time of simulation.
    @param[in] argv (tuple): Index of the oscillator in the list.

    @return (double) New phase for the specified oscillator (don't assign it here).

    """
    index = argv
    phase = 0.0
    term = 0.0

    for k in range(0, self._num_osc):
        if k != index:
            phase_delta = self._phases[k] - teta

            phase += self._coupling[index][k] * math.sin(phase_delta)

            term1 = self._increase_strength1 * math.sin(2.0 * phase_delta)
            term2 = self._increase_strength2 * math.sin(3.0 * phase_delta)

            term += (term1 - term2)

    return (phase + term / len(self))

def __validate_pattern(self, pattern):
    """!
    @brief Validates pattern.
    @details Throws an exception if the length of the pattern is not equal to the size of the network or if it contains a feature whose value is not -1 or +1.

    @param[in] pattern (list): Pattern for recognition represented by a list of features each equal to -1 or +1.

    """
    if len(pattern) != len(self):
        raise NameError('syncpr: length of the pattern (' + str(len(pattern)) + ') should be equal to the size of the network')

    for feature in pattern:
        if (feature != -1.0) and (feature != 1.0):
            raise NameError('syncpr: pattern feature (' + str(feature) + ') should be equal to -1 or +1')

def process(self):
    """!
    @brief Performs cluster analysis in line with the rules of the K-Medians algorithm.

    @return (kmedians) Returns itself (K-Medians instance).

    @remark Results of clustering can be obtained using corresponding get methods.

    @see get_clusters()
    @see get_medians()

    """
    if self.__ccore is True:
        ccore_metric = metric_wrapper.create_instance(self.__metric)
        self.__clusters, self.__medians = wrapper.kmedians(self.__pointer_data, self.__medians, self.__tolerance, self.__itermax, ccore_metric.get_pointer())
    else:
        changes = float('inf')

        # Check for dimension
        if len(self.__pointer_data[0]) != len(self.__medians[0]):
            raise NameError('Dimension of the input data and dimension of the initial medians must be equal.')

        iterations = 0
        while changes > self.__tolerance and iterations < self.__itermax:
            self.__clusters = self.__update_clusters()
            updated_centers = self.__update_medians()

            changes = max([self.__metric(self.__medians[index], updated_centers[index]) for index in range(len(updated_centers))])

            self.__medians = updated_centers
            iterations += 1

    return self

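# Typical usage mirrors the other pyclustering clusterers: construct with data
# and initial medians, call process(), then read the results. A hedged sketch
# assuming the standard kmedians(data, initial_medians) constructor:
from pyclustering.cluster.kmedians import kmedians

sample = [[1.0, 1.0], [1.2, 0.9], [5.0, 5.2], [4.8, 5.1]]
initial_medians = [[1.0, 1.0], [5.0, 5.0]]

kmedians_instance = kmedians(sample, initial_medians)
kmedians_instance.process()

print(kmedians_instance.get_clusters())    # e.g. [[0, 1], [2, 3]]
print(kmedians_instance.get_medians())
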
def __update_clusters(self):
    """!
    @brief Calculate the distance (using the configured metric) from each point to each median.
    @details Each point is captured by its nearest median, and as a result the clusters are updated.

    @return (list) Updated clusters as a list of clusters, where each cluster contains indexes of objects from the data.

    """
    clusters = [[] for i in range(len(self.__medians))]
    for index_point in range(len(self.__pointer_data)):
        index_optim = -1
        dist_optim = 0.0

        for index in range(len(self.__medians)):
            dist = self.__metric(self.__pointer_data[index_point], self.__medians[index])

            if (dist < dist_optim) or (index == 0):
                index_optim = index
                dist_optim = dist

        clusters[index_optim].append(index_point)

    # If a cluster does not capture any object, it should be removed
    clusters = [cluster for cluster in clusters if len(cluster) > 0]

    return clusters

def __update_medians(self):
    """!
    @brief Calculate medians of clusters in line with contained objects.

    @return (list) List of medians for the current number of clusters.

    """
    medians = [[] for i in range(len(self.__clusters))]

    for index in range(len(self.__clusters)):
        medians[index] = [0.0 for i in range(len(self.__pointer_data[0]))]
        length_cluster = len(self.__clusters[index])

        for index_dimension in range(len(self.__pointer_data[0])):
            sorted_cluster = sorted(self.__clusters[index], key=lambda x: self.__pointer_data[x][index_dimension])

            relative_index_median = int(math.floor((length_cluster - 1) / 2))
            index_median = sorted_cluster[relative_index_median]

            if (length_cluster % 2) == 0:
                index_median_second = sorted_cluster[relative_index_median + 1]
                medians[index][index_dimension] = (self.__pointer_data[index_median][index_dimension] + self.__pointer_data[index_median_second][index_dimension]) / 2.0
            else:
                medians[index][index_dimension] = self.__pointer_data[index_median][index_dimension]

    return medians

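# The per-dimension median above (averaging the two middle values for
# even-sized clusters) matches numpy.median applied column-wise; a quick
# cross-check of that equivalence on four hypothetical 2-d points:
import numpy

points = numpy.array([[1.0, 10.0], [3.0, 30.0], [2.0, 20.0], [8.0, 40.0]])

# Component-wise median; an even cluster size averages the two middle values.
print(numpy.median(points, axis=0))    # [ 2.5  25. ]
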
def cleanup_old_versions(
    src, keep_last_versions,
    config_file='config.yaml', profile_name=None,
):
    """Deletes old deployed versions of the function in AWS Lambda.

    Won't delete $Latest and any aliased version.

    :param str src:
        The path to your Lambda ready project (folder must contain a valid
        config.yaml and handler module, e.g.: service.py).
    :param int keep_last_versions:
        The number of recent versions to keep and not delete.
    """
    if keep_last_versions <= 0:
        print("Won't delete all versions. Please do this manually")
    else:
        path_to_config_file = os.path.join(src, config_file)
        cfg = read_cfg(path_to_config_file, profile_name)

        profile_name = cfg.get('profile')
        aws_access_key_id = cfg.get('aws_access_key_id')
        aws_secret_access_key = cfg.get('aws_secret_access_key')

        client = get_client(
            'lambda', profile_name, aws_access_key_id, aws_secret_access_key,
            cfg.get('region'),
        )

        response = client.list_versions_by_function(
            FunctionName=cfg.get('function_name'),
        )
        versions = response.get('Versions')
        if len(versions) < keep_last_versions:
            print('Nothing to delete. (Too few versions published)')
        else:
            version_numbers = [
                elem.get('Version') for elem in versions[1:-keep_last_versions]
            ]
            for version_number in version_numbers:
                try:
                    client.delete_function(
                        FunctionName=cfg.get('function_name'),
                        Qualifier=version_number,
                    )
                except botocore.exceptions.ClientError as e:
                    print('Skipping Version {}: {}'
                          .format(version_number, e))

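# A hedged usage sketch (the project path and version count are hypothetical):
# keep the three most recent published versions and delete the older ones.
cleanup_old_versions('path/to/my-lambda-project', keep_last_versions=3)
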
def deploy(
    src, requirements=None, local_package=None,
    config_file='config.yaml', profile_name=None,
    preserve_vpc=False,
):
    """Deploys a new function to AWS Lambda.

    :param str src:
        The path to your Lambda ready project (folder must contain a valid
        config.yaml and handler module, e.g.: service.py).
    :param str local_package:
        The path to a local package which should be included in the deploy as
        well (and/or is not available on PyPI).
    """
    # Load and parse the config file.
    path_to_config_file = os.path.join(src, config_file)
    cfg = read_cfg(path_to_config_file, profile_name)

    # Copy all the pip dependencies required to run your code into a temporary
    # folder, then add the handler file in the root of this directory.
    # Zip the contents of this folder into a single file and output to the
    # dist directory.
    path_to_zip_file = build(
        src, config_file=config_file,
        requirements=requirements,
        local_package=local_package,
    )

    existing_config = get_function_config(cfg)
    if existing_config:
        update_function(cfg, path_to_zip_file, existing_config, preserve_vpc=preserve_vpc)
    else:
        create_function(cfg, path_to_zip_file)

def deploy_s3(
    src, requirements=None, local_package=None,
    config_file='config.yaml', profile_name=None,
    preserve_vpc=False,
):
    """Deploys a new function via AWS S3.

    :param str src:
        The path to your Lambda ready project (folder must contain a valid
        config.yaml and handler module, e.g.: service.py).
    :param str local_package:
        The path to a local package which should be included in the deploy as
        well (and/or is not available on PyPI).
    """
    # Load and parse the config file.
    path_to_config_file = os.path.join(src, config_file)
    cfg = read_cfg(path_to_config_file, profile_name)

    # Copy all the pip dependencies required to run your code into a temporary
    # folder, then add the handler file in the root of this directory.
    # Zip the contents of this folder into a single file and output to the
    # dist directory.
    path_to_zip_file = build(
        src, config_file=config_file, requirements=requirements,
        local_package=local_package,
    )

    use_s3 = True
    s3_file = upload_s3(cfg, path_to_zip_file, use_s3)
    existing_config = get_function_config(cfg)
    if existing_config:
        update_function(cfg, path_to_zip_file, existing_config, use_s3=use_s3,
                        s3_file=s3_file, preserve_vpc=preserve_vpc)
    else:
        create_function(cfg, path_to_zip_file, use_s3=use_s3, s3_file=s3_file)

def upload(
    src, requirements=None, local_package=None,
    config_file='config.yaml', profile_name=None,
):
    """Uploads a new function to AWS S3.

    :param str src:
        The path to your Lambda ready project (folder must contain a valid
        config.yaml and handler module, e.g.: service.py).
    :param str local_package:
        The path to a local package which should be included in the deploy as
        well (and/or is not available on PyPI).
    """
    # Load and parse the config file.
    path_to_config_file = os.path.join(src, config_file)
    cfg = read_cfg(path_to_config_file, profile_name)

    # Copy all the pip dependencies required to run your code into a temporary
    # folder, then add the handler file in the root of this directory.
    # Zip the contents of this folder into a single file and output to the
    # dist directory.
    path_to_zip_file = build(
        src, config_file=config_file, requirements=requirements,
        local_package=local_package,
    )

    upload_s3(cfg, path_to_zip_file)

def invoke(
    src, event_file='event.json',
    config_file='config.yaml', profile_name=None,
    verbose=False,
):
    """Simulates a call to your function.

    :param str src:
        The path to your Lambda ready project (folder must contain a valid
        config.yaml and handler module, e.g.: service.py).
    :param str event_file:
        An optional argument to override which event file to use.
    :param bool verbose:
        Whether to print out verbose details.
    """
    # Load and parse the config file.
    path_to_config_file = os.path.join(src, config_file)
    cfg = read_cfg(path_to_config_file, profile_name)

    # Set AWS_PROFILE environment variable based on `--profile` option.
    if profile_name:
        os.environ['AWS_PROFILE'] = profile_name

    # Load environment variables from the config file into the actual
    # environment.
    env_vars = cfg.get('environment_variables')
    if env_vars:
        for key, value in env_vars.items():
            os.environ[key] = get_environment_variable_value(value)

    # Load and parse the event file.
    path_to_event_file = os.path.join(src, event_file)
    event = read(path_to_event_file, loader=json.loads)

    # Tweak to allow the module to import local modules.
    try:
        sys.path.index(src)
    except ValueError:
        sys.path.append(src)

    handler = cfg.get('handler')
    # Inspect the handler string (<module>.<function name>) and translate it
    # into a function we can execute.
    fn = get_callable_handler_function(src, handler)

    timeout = cfg.get('timeout')
    if timeout:
        context = LambdaContext(cfg.get('function_name'), timeout)
    else:
        context = LambdaContext(cfg.get('function_name'))

    start = time.time()
    results = fn(event, context)
    end = time.time()

    print('{0}'.format(results))
    if verbose:
        print('\nexecution time: {:.8f}s\nfunction execution '
              'timeout: {:2}s'.format(end - start, cfg.get('timeout', 15)))

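# Taken together, a typical local workflow with these helpers might look like
# the following sketch (the project path is hypothetical; init, invoke and
# deploy are the functions defined in this module):
src = 'path/to/my-lambda-project'

init(src)                                             # scaffold config.yaml, service.py, event.json
invoke(src, event_file='event.json', verbose=True)    # run the handler locally against the sample event
deploy(src)                                           # bundle and create/update the function in AWS
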
def init(src, minimal=False):
    """Copies template files to a given directory.

    :param str src:
        The path to output the template lambda project files.
    :param bool minimal:
        Minimal possible template files (excludes event.json).
    """
    templates_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), 'project_templates',
    )
    for filename in os.listdir(templates_path):
        if (minimal and filename == 'event.json') or filename.endswith('.pyc'):
            continue

        dest_path = os.path.join(templates_path, filename)

        if not os.path.isdir(dest_path):
            copy(dest_path, src)

def build(
    src, requirements=None, local_package=None,
    config_file='config.yaml', profile_name=None,
):
    """Builds the file bundle.

    :param str src:
        The path to your Lambda ready project (folder must contain a valid
        config.yaml and handler module, e.g.: service.py).
    :param str local_package:
        The path to a local package which should be included in the deploy as
        well (and/or is not available on PyPI).
    """
    # Load and parse the config file.
    path_to_config_file = os.path.join(src, config_file)
    cfg = read_cfg(path_to_config_file, profile_name)

    # Get the absolute path to the output directory and create it if it
    # doesn't already exist.
    dist_directory = cfg.get('dist_directory', 'dist')
    path_to_dist = os.path.join(src, dist_directory)
    mkdir(path_to_dist)

    # Combine the name of the Lambda function with the current timestamp to
    # use for the output filename.
    function_name = cfg.get('function_name')
    output_filename = '{0}-{1}.zip'.format(timestamp(), function_name)

    path_to_temp = mkdtemp(prefix='aws-lambda')
    pip_install_to_target(
        path_to_temp,
        requirements=requirements,
        local_package=local_package,
    )

    # Hack for Zope.
    if 'zope' in os.listdir(path_to_temp):
        print(
            'Zope packages detected; fixing Zope package paths to '
            'make them importable.',
        )
        # Touch.
        with open(os.path.join(path_to_temp, 'zope/__init__.py'), 'wb'):
            pass

    # Gracefully handle whether ".zip" was included in the filename or not.
    output_filename = (
        '{0}.zip'.format(output_filename)
        if not output_filename.endswith('.zip')
        else output_filename
    )

    # Allow definition of source code directories we want to build into our
    # zipped package.
    build_config = defaultdict(**cfg.get('build', {}))
    build_source_directories = build_config.get('source_directories', '')
    build_source_directories = (
        build_source_directories
        if build_source_directories is not None
        else ''
    )
    source_directories = [
        d.strip() for d in build_source_directories.split(',')
    ]

    files = []
    for filename in os.listdir(src):
        if os.path.isfile(os.path.join(src, filename)):
            if filename == '.DS_Store':
                continue
            if filename == config_file:
                continue
            print('Bundling: %r' % filename)
            files.append(os.path.join(src, filename))
        elif os.path.isdir(os.path.join(src, filename)) and filename in source_directories:
            print('Bundling directory: %r' % filename)
            files.append(os.path.join(src, filename))

    # "cd" into the `temp_path` directory.
    os.chdir(path_to_temp)
    for f in files:
        if os.path.isfile(f):
            _, filename = os.path.split(f)

            # Copy handler file into root of the packages folder.
            copyfile(f, os.path.join(path_to_temp, filename))
            copystat(f, os.path.join(path_to_temp, filename))
        elif os.path.isdir(f):
            destination_folder = os.path.join(path_to_temp, f[len(src) + 1:])
            copytree(f, destination_folder)

    # Zip them together into a single file.
    # TODO: Delete the temp directory created once the archive has been compiled.
    path_to_zip_file = archive('./', path_to_dist, output_filename)
    return path_to_zip_file

def get_callable_handler_function(src, handler):
    """Translate a string of the form "module.function" into a callable
    function.

    :param str src:
        The path to your Lambda project containing a valid handler file.
    :param str handler:
        A dot delimited string representing the `<module>.<function name>`.
    """
    # "cd" into the `src` directory.
    os.chdir(src)

    module_name, function_name = handler.split('.')
    filename = get_handler_filename(handler)

    path_to_module_file = os.path.join(src, filename)
    module = load_source(module_name, path_to_module_file)
    return getattr(module, function_name)

def _install_packages(path, packages):
    """Install all packages listed to the target directory.

    Ignores any package that includes Python itself and python-lambda as well,
    since it's only needed for deploying and not running the code.

    :param str path:
        Path to copy installed pip packages to.
    :param list packages:
        A list of packages to be installed via pip.
    """
    def _filter_blacklist(package):
        blacklist = ['-i', '#', 'Python==', 'python-lambda==']
        return all(package.startswith(entry) is False for entry in blacklist)

    filtered_packages = filter(_filter_blacklist, packages)
    for package in filtered_packages:
        if package.startswith('-e '):
            package = package.replace('-e ', '')

        print('Installing {package}'.format(package=package))
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', package, '-t', path, '--ignore-installed'])
    print('Install directory contents are now: {directory}'.format(directory=os.listdir(path)))

def pip_install_to_target(path, requirements=None, local_package=None):
    """For a given active virtualenv, gather all installed pip packages then
    copy (re-install) them to the path provided.

    :param str path:
        Path to copy installed pip packages to.
    :param str requirements:
        If set, only the packages in the supplied requirements file are
        installed.
        If not set then installs all packages found via pip freeze.
    :param str local_package:
        The path to a local package which should be included in the deploy as
        well (and/or is not available on PyPI).
    """
    packages = []
    if not requirements:
        print('Gathering pip packages')
        pkgStr = subprocess.check_output([sys.executable, '-m', 'pip', 'freeze'])
        packages.extend(pkgStr.decode('utf-8').splitlines())
    else:
        if os.path.exists(requirements):
            print('Gathering requirement packages')
            data = read(requirements)
            packages.extend(data.splitlines())

    if not packages:
        print('No dependency packages installed!')

    if local_package is not None:
        if not isinstance(local_package, (list, tuple)):
            local_package = [local_package]
        for l_package in local_package:
            packages.append(l_package)

    _install_packages(path, packages)

def get_role_name(region, account_id, role):
    """Shortcut to insert the `account_id` and `role` into the IAM string."""
    prefix = ARN_PREFIXES.get(region, 'aws')
    return 'arn:{0}:iam::{1}:role/{2}'.format(prefix, account_id, role)

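# Under the default 'aws' partition (regions not listed in ARN_PREFIXES), this
# yields the familiar IAM role ARN; the account id below is illustrative.
print(get_role_name('us-east-1', '123456789012', 'lambda_basic_execution'))
# arn:aws:iam::123456789012:role/lambda_basic_execution
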
def get_account_id(
    profile_name, aws_access_key_id, aws_secret_access_key,
    region=None,
):
    """Query STS for the user's account_id."""
    client = get_client(
        'sts', profile_name, aws_access_key_id, aws_secret_access_key,
        region,
    )
    return client.get_caller_identity().get('Account')

def get_client(
    client, profile_name, aws_access_key_id, aws_secret_access_key,
    region=None,
):
    """Shortcut for getting an initialized instance of the boto3 client."""
    boto3.setup_default_session(
        profile_name=profile_name,
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        region_name=region,
    )
    return boto3.client(client)

def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
    """Register and upload a function to AWS Lambda."""
    print('Creating your new Lambda function')
    byte_stream = read(path_to_zip_file, binary_file=True)
    profile_name = cfg.get('profile')
    aws_access_key_id = cfg.get('aws_access_key_id')
    aws_secret_access_key = cfg.get('aws_secret_access_key')

    account_id = get_account_id(
        profile_name, aws_access_key_id, aws_secret_access_key, cfg.get(
            'region',
        ),
    )
    role = get_role_name(
        cfg.get('region'), account_id,
        cfg.get('role', 'lambda_basic_execution'),
    )

    client = get_client(
        'lambda', profile_name, aws_access_key_id, aws_secret_access_key,
        cfg.get('region'),
    )

    # Do we prefer the development variable over the config?
    buck_name = (
        os.environ.get('S3_BUCKET_NAME') or cfg.get('bucket_name')
    )
    func_name = (
        os.environ.get('LAMBDA_FUNCTION_NAME') or cfg.get('function_name')
    )
    print('Creating lambda function with name: {}'.format(func_name))

    if use_s3:
        kwargs = {
            'FunctionName': func_name,
            'Runtime': cfg.get('runtime', 'python2.7'),
            'Role': role,
            'Handler': cfg.get('handler'),
            'Code': {
                'S3Bucket': '{}'.format(buck_name),
                'S3Key': '{}'.format(s3_file),
            },
            'Description': cfg.get('description', ''),
            'Timeout': cfg.get('timeout', 15),
            'MemorySize': cfg.get('memory_size', 512),
            'VpcConfig': {
                'SubnetIds': cfg.get('subnet_ids', []),
                'SecurityGroupIds': cfg.get('security_group_ids', []),
            },
            'Publish': True,
        }
    else:
        kwargs = {
            'FunctionName': func_name,
            'Runtime': cfg.get('runtime', 'python2.7'),
            'Role': role,
            'Handler': cfg.get('handler'),
            'Code': {'ZipFile': byte_stream},
            'Description': cfg.get('description', ''),
            'Timeout': cfg.get('timeout', 15),
            'MemorySize': cfg.get('memory_size', 512),
            'VpcConfig': {
                'SubnetIds': cfg.get('subnet_ids', []),
                'SecurityGroupIds': cfg.get('security_group_ids', []),
            },
            'Publish': True,
        }

    if 'tags' in cfg:
        kwargs.update(
            Tags={
                key: str(value)
                for key, value in cfg.get('tags').items()
            }
        )

    if 'environment_variables' in cfg:
        kwargs.update(
            Environment={
                'Variables': {
                    key: get_environment_variable_value(value)
                    for key, value
                    in cfg.get('environment_variables').items()
                },
            },
        )

    client.create_function(**kwargs)

    concurrency = get_concurrency(cfg)
    if concurrency > 0:
        client.put_function_concurrency(FunctionName=func_name, ReservedConcurrentExecutions=concurrency)

def update_function(
    cfg, path_to_zip_file, existing_cfg, use_s3=False, s3_file=None, preserve_vpc=False,
):
    """Updates the code of an existing Lambda function."""
    print('Updating your Lambda function')
    byte_stream = read(path_to_zip_file, binary_file=True)
    profile_name = cfg.get('profile')
    aws_access_key_id = cfg.get('aws_access_key_id')
    aws_secret_access_key = cfg.get('aws_secret_access_key')

    account_id = get_account_id(
        profile_name, aws_access_key_id, aws_secret_access_key, cfg.get(
            'region',
        ),
    )
    role = get_role_name(
        cfg.get('region'), account_id,
        cfg.get('role', 'lambda_basic_execution'),
    )

    client = get_client(
        'lambda', profile_name, aws_access_key_id, aws_secret_access_key,
        cfg.get('region'),
    )

    # Do we prefer the development variable over the config?
    buck_name = (
        os.environ.get('S3_BUCKET_NAME') or cfg.get('bucket_name')
    )

    if use_s3:
        client.update_function_code(
            FunctionName=cfg.get('function_name'),
            S3Bucket='{}'.format(buck_name),
            S3Key='{}'.format(s3_file),
            Publish=True,
        )
    else:
        client.update_function_code(
            FunctionName=cfg.get('function_name'),
            ZipFile=byte_stream,
            Publish=True,
        )

    kwargs = {
        'FunctionName': cfg.get('function_name'),
        'Role': role,
        'Runtime': cfg.get('runtime'),
        'Handler': cfg.get('handler'),
        'Description': cfg.get('description', ''),
        'Timeout': cfg.get('timeout', 15),
        'MemorySize': cfg.get('memory_size', 512),
    }

    if preserve_vpc:
        kwargs['VpcConfig'] = existing_cfg.get('Configuration', {}).get('VpcConfig')
        if kwargs['VpcConfig'] is None:
            kwargs['VpcConfig'] = {
                'SubnetIds': cfg.get('subnet_ids', []),
                'SecurityGroupIds': cfg.get('security_group_ids', []),
            }
        else:
            del kwargs['VpcConfig']['VpcId']
    else:
        kwargs['VpcConfig'] = {
            'SubnetIds': cfg.get('subnet_ids', []),
            'SecurityGroupIds': cfg.get('security_group_ids', []),
        }

    if 'environment_variables' in cfg:
        kwargs.update(
            Environment={
                'Variables': {
                    key: str(get_environment_variable_value(value))
                    for key, value
                    in cfg.get('environment_variables').items()
                },
            },
        )

    ret = client.update_function_configuration(**kwargs)

    concurrency = get_concurrency(cfg)
    if concurrency > 0:
        client.put_function_concurrency(FunctionName=cfg.get('function_name'), ReservedConcurrentExecutions=concurrency)
    elif 'Concurrency' in existing_cfg:
        client.delete_function_concurrency(FunctionName=cfg.get('function_name'))

    if 'tags' in cfg:
        tags = {
            key: str(value)
            for key, value in cfg.get('tags').items()
        }
        if tags != existing_cfg.get('Tags'):
            if existing_cfg.get('Tags'):
                client.untag_resource(Resource=ret['FunctionArn'],
                                      TagKeys=list(existing_cfg['Tags'].keys()))
            client.tag_resource(Resource=ret['FunctionArn'], Tags=tags)

def upload_s3(cfg, path_to_zip_file, *use_s3):
    """Upload a function to AWS S3."""
    print('Uploading your new Lambda function')
    profile_name = cfg.get('profile')
    aws_access_key_id = cfg.get('aws_access_key_id')
    aws_secret_access_key = cfg.get('aws_secret_access_key')
    client = get_client(
        's3', profile_name, aws_access_key_id, aws_secret_access_key,
        cfg.get('region'),
    )
    byte_stream = b''
    with open(path_to_zip_file, mode='rb') as fh:
        byte_stream = fh.read()
    s3_key_prefix = cfg.get('s3_key_prefix', '/dist')
    checksum = hashlib.new('md5', byte_stream).hexdigest()
    timestamp = str(time.time())
    filename = '{prefix}{checksum}-{ts}.zip'.format(
        prefix=s3_key_prefix, checksum=checksum, ts=timestamp,
    )

    # Do we prefer the development variable over the config?
    buck_name = (
        os.environ.get('S3_BUCKET_NAME') or cfg.get('bucket_name')
    )
    func_name = (
        os.environ.get('LAMBDA_FUNCTION_NAME') or cfg.get('function_name')
    )
    kwargs = {
        'Bucket': '{}'.format(buck_name),
        'Key': '{}'.format(filename),
        'Body': byte_stream,
    }

    client.put_object(**kwargs)
    print('Finished uploading {} to S3 bucket {}'.format(func_name, buck_name))
    if use_s3:
        return filename

def get_function_config(cfg):
    """Check whether a function exists or not and return its config."""
    function_name = cfg.get('function_name')
    profile_name = cfg.get('profile')
    aws_access_key_id = cfg.get('aws_access_key_id')
    aws_secret_access_key = cfg.get('aws_secret_access_key')
    client = get_client(
        'lambda', profile_name, aws_access_key_id, aws_secret_access_key,
        cfg.get('region'),
    )

    try:
        return client.get_function(FunctionName=function_name)
    except client.exceptions.ResourceNotFoundException as e:
        if 'Function not found' in str(e):
            return False

def cached_download(url, name):
    """Download the data at a URL, and cache it under the given name.

    The file is stored under `pyav/test` with the given name in the directory
    :envvar:`PYAV_TESTDATA_DIR`, or the first that is writeable of:

    - the current virtualenv
    - ``/usr/local/share``
    - ``/usr/local/lib``
    - ``/usr/share``
    - ``/usr/lib``
    - the user's home

    """
    clean_name = os.path.normpath(name)
    if clean_name != name:
        raise ValueError("{} is not normalized.".format(name))

    for dir_ in iter_data_dirs():
        path = os.path.join(dir_, name)
        if os.path.exists(path):
            return path

    dir_ = next(iter_data_dirs(True))
    path = os.path.join(dir_, name)

    log.info("Downloading {} to {}".format(url, path))

    response = urlopen(url)
    if response.getcode() != 200:
        raise ValueError("HTTP {}".format(response.getcode()))

    dir_ = os.path.dirname(path)
    try:
        os.makedirs(dir_)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    tmp_path = path + '.tmp'
    with open(tmp_path, 'wb') as fh:
        while True:
            chunk = response.read(8196)
            if chunk:
                fh.write(chunk)
            else:
                break

    os.rename(tmp_path, path)
    return path

def fate(name):
    """Download and return a path to a sample from the FFmpeg test suite.

    Data is handled by :func:`cached_download`.

    See the `FFmpeg Automated Test Environment <https://www.ffmpeg.org/fate.html>`_.

    """
    return cached_download('http://fate.ffmpeg.org/fate-suite/' + name,
                           os.path.join('fate-suite', name.replace('/', os.path.sep)))

def curated(name):
    """Download and return a path to a sample that is curated by the PyAV developers.

    Data is handled by :func:`cached_download`.

    """
    return cached_download('https://docs.mikeboers.com/pyav/samples/' + name,
                           os.path.join('pyav-curated', name.replace('/', os.path.sep)))

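# Usage is a single call: the first run downloads into the cache directory and
# later runs return the cached path. The sample name below is illustrative of
# the FATE suite layout, not a guaranteed entry.
path = fate('h264/interlaced_crop.mp4')
print(path)    # .../fate-suite/h264/interlaced_crop.mp4
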
def get_library_config(name):
    """Get distutils-compatible extension extras for the given library.

    This requires ``pkg-config``.

    """
    try:
        proc = Popen(['pkg-config', '--cflags', '--libs', name], stdout=PIPE, stderr=PIPE)
    except OSError:
        print('pkg-config is required for building PyAV')
        exit(1)

    raw_cflags, err = proc.communicate()
    if proc.wait():
        return

    known, unknown = parse_cflags(raw_cflags.decode('utf8'))
    if unknown:
        print("pkg-config returned flags we don't understand: {}".format(unknown))
        exit(1)

    return known

def update_extend(dst, src):
    """Update the `dst` with the `src`, extending values where they are lists.

    Primarily useful for integrating results from `get_library_config`.

    """
    for k, v in src.items():
        existing = dst.setdefault(k, [])
        for x in v:
            if x not in existing:
                existing.append(x)

def dump_config():
    """Print out all the config information we have so far (for debugging)."""
    print('PyAV:', version, git_commit or '(unknown commit)')
    print('Python:', sys.version.encode('unicode_escape' if PY3 else 'string-escape'))
    print('platform:', platform.platform())
    print('extension_extra:')
    for k, vs in extension_extra.items():
        print('\t%s: %s' % (k, [x.encode('utf8') for x in vs]))
    print('config_macros:')
    for x in sorted(config_macros.items()):
        print('\t%s=%s' % x)

def _CCompiler_spawn_silent(cmd, dry_run=None):
    """Spawn a process, and eat the stdio."""
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate()
    if proc.returncode:
        raise DistutilsExecError(err)

def new_compiler(*args, **kwargs):
    """Create a C compiler.

    :param bool silent: Eat all stdio? Defaults to ``True``.

    All other arguments are passed to ``distutils.ccompiler.new_compiler``.

    """
    make_silent = kwargs.pop('silent', True)
    cc = _new_compiler(*args, **kwargs)
    # If MSVC10, initialize the compiler here and add /MANIFEST to linker flags.
    # See Python issue 4431 (https://bugs.python.org/issue4431)
    if is_msvc(cc):
        from distutils.msvc9compiler import get_build_version
        if get_build_version() == 10:
            cc.initialize()
            for ldflags in [cc.ldflags_shared, cc.ldflags_shared_debug]:
                unique_extend(ldflags, ['/MANIFEST'])
        # If MSVC14, do not silence. As msvc14 requires some custom
        # steps before the process is spawned, we can't monkey-patch this.
        elif get_build_version() == 14:
            make_silent = False
    # Monkey-patch the compiler to suppress stdout and stderr.
    if make_silent:
        cc.spawn = _CCompiler_spawn_silent
    return cc

def iter_cython(path):
    '''Yield all ``.pyx`` and ``.pxd`` files in the given root.'''
    for dir_path, dir_names, file_names in os.walk(path):
        for file_name in file_names:
            if file_name.startswith('.'):
                continue
            if os.path.splitext(file_name)[1] not in ('.pyx', '.pxd'):
                continue
            yield os.path.join(dir_path, file_name)

def cleanup_text (text):
    """
    It scrubs the garbled from its stream...
    Or it gets the debugger again.
    """
    x = " ".join(map(lambda s: s.strip(), text.split("\n"))).strip()

    x = x.replace('“', '"').replace('”', '"')
    x = x.replace("‘", "'").replace("’", "'").replace("`", "'")
    x = x.replace('…', '...').replace('–', '-')

    x = str(unicodedata.normalize('NFKD', x).encode('ascii', 'ignore').decode('ascii'))

    # some content returns text in bytes rather than as a str ?
    try:
        assert type(x).__name__ == 'str'
    except AssertionError:
        print("not a string?", type(x), x)

    return x

def split_grafs (lines):
    """
    segment the raw text into paragraphs
    """
    graf = []

    for line in lines:
        line = line.strip()

        if len(line) < 1:
            if len(graf) > 0:
                yield "\n".join(graf)
                graf = []
        else:
            graf.append(line)

    if len(graf) > 0:
        yield "\n".join(graf)

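# For example, blank lines act as paragraph boundaries:
lines = ["First graf, line one.", "line two.", "", "Second graf."]
print(list(split_grafs(lines)))
# ['First graf, line one.\nline two.', 'Second graf.']
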
def filter_quotes (text, is_email=True):
    """
    filter the quoted text out of a message
    """
    global DEBUG
    global PAT_FORWARD, PAT_REPLIED, PAT_UNSUBSC

    if is_email:
        # keep only printable characters (join the result, since filter()
        # returns an iterator on Python 3)
        text = "".join(filter(lambda x: x in string.printable, text))

        if DEBUG:
            print("text:", text)

        # strip off quoted text in a forward
        m = PAT_FORWARD.split(text)

        if m and len(m) > 1:
            text = m[0]

        # strip off quoted text in a reply
        m = PAT_REPLIED.split(text)

        if m and len(m) > 1:
            text = m[0]

        # strip off any trailing unsubscription notice
        m = PAT_UNSUBSC.split(text)

        if m:
            text = m[0]

    # replace any remaining quoted text with blank lines
    lines = []

    for line in text.split("\n"):
        if line.startswith(">"):
            lines.append("")
        else:
            lines.append(line)

    return list(split_grafs(lines))

def get_word_id (root):
    """
    lookup/assign a unique identifier for each word root
    """
    global UNIQ_WORDS

    # in practice, this should use a microservice via some robust
    # distributed cache, e.g., Redis, Cassandra, etc.

    if root not in UNIQ_WORDS:
        UNIQ_WORDS[root] = len(UNIQ_WORDS)

    return UNIQ_WORDS[root]

def fix_microsoft (foo):
    """
    fix special case for `c#`, `f#`, etc.; thanks Microsoft
    """
    i = 0
    bar = []

    while i < len(foo):
        text, lemma, pos, tag = foo[i]

        if (text == "#") and (i > 0):
            prev_tok = bar[-1]

            prev_tok[0] += "#"
            prev_tok[1] += "#"

            bar[-1] = prev_tok
        else:
            bar.append(foo[i])

        i += 1

    return bar

def fix_hypenation (foo):
    """
    fix hyphenation in the word list for a parsed sentence
    """
    i = 0
    bar = []

    while i < len(foo):
        text, lemma, pos, tag = foo[i]

        if (tag == "HYPH") and (i > 0) and (i < len(foo) - 1):
            prev_tok = bar[-1]
            next_tok = foo[i + 1]

            prev_tok[0] += "-" + next_tok[0]
            prev_tok[1] += "-" + next_tok[1]

            bar[-1] = prev_tok
            i += 2
        else:
            bar.append(foo[i])
            i += 1

    return bar

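# Each token is a [text, lemma, pos, tag] list; a HYPH token merges its two
# neighbours into a single hyphenated token, e.g.:
tokens = [
    ["state", "state", "NOUN", "NN"],
    ["-", "-", "PUNCT", "HYPH"],
    ["of", "of", "ADP", "IN"],
]
print(fix_hypenation(tokens))
# [['state-of', 'state-of', 'NOUN', 'NN']]
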
def parse_graf (doc_id, graf_text, base_idx, spacy_nlp=None):
    """
    CORE ALGORITHM: parse and markup sentences in the given paragraph
    """
    global DEBUG
    global POS_KEEPS, POS_LEMMA, SPACY_NLP

    # set up the spaCy NLP parser
    if not spacy_nlp:
        if not SPACY_NLP:
            SPACY_NLP = spacy.load("en")

        spacy_nlp = SPACY_NLP

    markup = []
    new_base_idx = base_idx
    doc = spacy_nlp(graf_text, parse=True)

    for span in doc.sents:
        graf = []
        digest = hashlib.sha1()

        if DEBUG:
            print(span)

        # build a word list, on which to apply corrections
        word_list = []

        for tag_idx in range(span.start, span.end):
            token = doc[tag_idx]

            if DEBUG:
                print("IDX", tag_idx, token.text, token.tag_, token.pos_)
                print("reg", is_not_word(token.text))

            word_list.append([token.text, token.lemma_, token.pos_, token.tag_])

        # scan the parsed sentence, annotating as a list of `WordNode`
        corrected_words = fix_microsoft(fix_hypenation(word_list))

        for tok_text, tok_lemma, tok_pos, tok_tag in corrected_words:
            word = WordNode(word_id=0, raw=tok_text, root=tok_text.lower(), pos=tok_tag, keep=0, idx=new_base_idx)

            if is_not_word(tok_text) or (tok_tag == "SYM"):
                # a punctuation mark, or some other symbol
                pos_family = '.'
                word = word._replace(pos=pos_family)
            else:
                pos_family = tok_tag.lower()[0]

            if pos_family in POS_LEMMA:
                # can this word be lemmatized?
                word = word._replace(root=tok_lemma)

            if pos_family in POS_KEEPS:
                word = word._replace(word_id=get_word_id(word.root), keep=1)

            digest.update(word.root.encode('utf-8'))

            # schema: word_id, raw, root, pos, keep, idx
            if DEBUG:
                print(word)

            graf.append(list(word))
            new_base_idx += 1

        markup.append(ParsedGraf(id=doc_id, sha1=digest.hexdigest(), graf=graf))

    return markup, new_base_idx

def parse_doc (json_iter):
    """
    parse one document to prep for TextRank
    """
    global DEBUG

    for meta in json_iter:
        base_idx = 0

        for graf_text in filter_quotes(meta["text"], is_email=False):
            if DEBUG:
                print("graf_text:", graf_text)

            grafs, new_base_idx = parse_graf(meta["id"], graf_text, base_idx)
            base_idx = new_base_idx

            for graf in grafs:
                yield graf

def get_tiles (graf, size=3):
    """
    generate word pairs for the TextRank graph
    """
    keeps = list(filter(lambda w: w.word_id > 0, graf))
    keeps_len = len(keeps)

    for i in iter(range(0, keeps_len - 1)):
        w0 = keeps[i]

        for j in iter(range(i + 1, min(keeps_len, i + 1 + size))):
            w1 = keeps[j]

            if (w1.idx - w0.idx) <= size:
                yield (w0.root, w1.root,)

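# The tiles are skip-gram pairs of kept words within a window of `size`. The
# sketch below uses a stand-in namedtuple with the WordNode schema
# (word_id, raw, root, pos, keep, idx) seen in parse_graf; tokens are hypothetical.
from collections import namedtuple

Tok = namedtuple('Tok', 'word_id raw root pos keep idx')

graf = [
    Tok(1, 'Cats', 'cat', 'nns', 1, 0),
    Tok(0, ',', ',', '.', 0, 1),            # punctuation: word_id == 0, filtered out
    Tok(2, 'chase', 'chase', 'vbp', 1, 2),
    Tok(3, 'mice', 'mouse', 'nns', 1, 3),
]

print(list(get_tiles(graf, size=3)))
# [('cat', 'chase'), ('cat', 'mouse'), ('chase', 'mouse')]
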
def build_graph (json_iter):
    """
    construct the TextRank graph from parsed paragraphs
    """
    global DEBUG, WordNode

    graph = nx.DiGraph()

    for meta in json_iter:
        if DEBUG:
            print(meta["graf"])

        for pair in get_tiles(map(WordNode._make, meta["graf"])):
            if DEBUG:
                print(pair)

            for word_id in pair:
                if not graph.has_node(word_id):
                    graph.add_node(word_id)

            try:
                # NB: `graph.edge[u][v]` is the NetworkX 1.x API;
                # NetworkX 2.x exposes the same mapping as `graph[u][v]`
                graph.edge[pair[0]][pair[1]]["weight"] += 1.0
            except KeyError:
                graph.add_edge(pair[0], pair[1], weight=1.0)

    return graph

def write_dot (graph, ranks, path="graph.dot"):
    """
    output the graph in Dot file format
    """
    dot = Digraph()

    for node in graph.nodes():
        dot.node(node, "%s %0.3f" % (node, ranks[node]))

    for edge in graph.edges():
        dot.edge(edge[0], edge[1], constraint="false")

    with open(path, 'w') as f:
        f.write(dot.source)

def render_ranks (graph, ranks, dot_file="graph.dot"):
    """
    render the TextRank graph for visual formats
    """
    if dot_file:
        write_dot(graph, ranks, path=dot_file)

def text_rank (path):
    """
    run the TextRank algorithm
    """
    graph = build_graph(json_iter(path))
    ranks = nx.pagerank(graph)

    return graph, ranks

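# End to end, the pipeline reads parsed grafs from a JSON-lines file, builds
# the graph, and runs PageRank; the file path below is hypothetical.
graph, ranks = text_rank('dat/parsed_grafs.json')
render_ranks(graph, ranks, dot_file='graph.dot')

for node, rank in sorted(ranks.items(), key=lambda kv: kv[1], reverse=True)[:5]:
    print('%0.4f %s' % (rank, node))
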
def find_chunk (phrase, np):
"""
leverage noun phrase chunking
"""
for i in iter(range(0, len(phrase))):
parsed_np = find_chunk_sub(phrase, np, i)
if parsed_np:
return parsed_np |
def enumerate_chunks (phrase, spacy_nlp):
"""
iterate through the noun phrases
"""
if (len(phrase) > 1):
found = False
text = " ".join([rl.text for rl in phrase])
doc = spacy_nlp(text.strip(), parse=True)
for np in doc.noun_chunks:
if np.text != text:
found = True
yield np.text, find_chunk(phrase, np.text.split(" "))
if not found and all([rl.pos[0] != "v" for rl in phrase]):
yield text, phrase |
def collect_keyword (sent, ranks, stopwords):
"""
iterator for collecting the single-word keyphrases
"""
for w in sent:
if (w.word_id > 0) and (w.root in ranks) and (w.pos[0] in "NV") and (w.root not in stopwords):
rl = RankedLexeme(text=w.raw.lower(), rank=ranks[w.root]/2.0, ids=[w.word_id], pos=w.pos.lower(), count=1)
if DEBUG:
print(rl)
yield rl |
def collect_entities (sent, ranks, stopwords, spacy_nlp):
"""
iterator for collecting the named-entities
"""
global DEBUG
sent_text = " ".join([w.raw for w in sent])
if DEBUG:
print("sent:", sent_text)
for ent in spacy_nlp(sent_text).ents:
if DEBUG:
print("NER:", ent.label_, ent.text)
if (ent.label_ not in ["CARDINAL"]) and (ent.text.lower() not in stopwords):
w_ranks, w_ids = find_entity(sent, ranks, ent.text.split(" "), 0)
if w_ranks and w_ids:
rl = RankedLexeme(text=ent.text.lower(), rank=w_ranks, ids=w_ids, pos="np", count=1)
if DEBUG:
print(rl)
yield rl |
def collect_phrases (sent, ranks, spacy_nlp):
"""
iterator for collecting the noun phrases
"""
tail = 0
last_idx = sent[0].idx - 1
phrase = []
while tail < len(sent):
w = sent[tail]
if (w.word_id > 0) and (w.root in ranks) and ((w.idx - last_idx) == 1):
# keep collecting...
rl = RankedLexeme(text=w.raw.lower(), rank=ranks[w.root], ids=w.word_id, pos=w.pos.lower(), count=1)
phrase.append(rl)
else:
# just hit a phrase boundary
for text, p in enumerate_chunks(phrase, spacy_nlp):
if p:
id_list = [rl.ids for rl in p]
rank_list = [rl.rank for rl in p]
np_rl = RankedLexeme(text=text, rank=rank_list, ids=id_list, pos="np", count=1)
if DEBUG:
print(np_rl)
yield np_rl
phrase = []
last_idx = w.idx
tail += 1 |
def normalize_key_phrases (path, ranks, stopwords=None, spacy_nlp=None, skip_ner=True):
"""
collect keyphrases, named entities, etc., while removing stop words
"""
global STOPWORDS, SPACY_NLP
# set up the stop words
if (type(stopwords) is list) or (type(stopwords) is set):
# explicit conversion to a set, for better performance
stopwords = set(stopwords)
else:
if not STOPWORDS:
STOPWORDS = load_stopwords(stopwords)
stopwords = STOPWORDS
# set up the spaCy NLP parser
if not spacy_nlp:
if not SPACY_NLP:
SPACY_NLP = spacy.load("en")
spacy_nlp = SPACY_NLP
# collect keyphrases
single_lex = {}
phrase_lex = {}
if isinstance(path, str):
path = json_iter(path)
for meta in path:
sent = [w for w in map(WordNode._make, meta["graf"])]
for rl in collect_keyword(sent, ranks, stopwords):
id = str(rl.ids)
if id not in single_lex:
single_lex[id] = rl
else:
prev_lex = single_lex[id]
single_lex[id] = rl._replace(count = prev_lex.count + 1)
if not skip_ner:
for rl in collect_entities(sent, ranks, stopwords, spacy_nlp):
id = str(rl.ids)
if id not in phrase_lex:
phrase_lex[id] = rl
else:
prev_lex = phrase_lex[id]
phrase_lex[id] = rl._replace(count = prev_lex.count + 1)
for rl in collect_phrases(sent, ranks, spacy_nlp):
id = str(rl.ids)
if id not in phrase_lex:
phrase_lex[id] = rl
else:
prev_lex = phrase_lex[id]
phrase_lex[id] = rl._replace(count = prev_lex.count + 1)
# normalize ranks across single keywords and longer phrases:
# * boost the noun phrases based on their length
# * penalize the noun phrases for repeated words
rank_list = [rl.rank for rl in single_lex.values()]
if len(rank_list) < 1:
max_single_rank = 0
else:
max_single_rank = max(rank_list)
repeated_roots = {}
for rl in sorted(phrase_lex.values(), key=lambda rl: len(rl.ids), reverse=True):  # longest phrases first; len(rl) itself is a constant for a namedtuple
rank_list = []
for i in iter(range(0, len(rl.ids))):
id = rl.ids[i]
if not id in repeated_roots:
repeated_roots[id] = 1.0
rank_list.append(rl.rank[i])
else:
repeated_roots[id] += 1.0
rank_list.append(rl.rank[i] / repeated_roots[id])
phrase_rank = calc_rms(rank_list)
single_lex[str(rl.ids)] = rl._replace(rank = phrase_rank)
# scale all the ranks together, so they sum to 1.0
sum_ranks = sum([rl.rank for rl in single_lex.values()])
for rl in sorted(single_lex.values(), key=lambda rl: rl.rank, reverse=True):
if sum_ranks > 0.0:
rl = rl._replace(rank=rl.rank / sum_ranks)
elif rl.rank == 0.0:
rl = rl._replace(rank=0.1)
rl = rl._replace(text=re.sub(r"\s([\.\,\-\+\:\@])\s", r"\1", rl.text))
yield rl |
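Taken together with text_rank above, a hedged sketch of this stage (the file name is invented; it should be the JSON-lines output of the parsing stage):
graph, ranks = text_rank("o1.json")
for rl in normalize_key_phrases("o1.json", ranks):
    print(rl.text, rl.rank, rl.count)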
def mh_digest (data):
"""
create a MinHash digest
"""
num_perm = 512
m = MinHash(num_perm)
for d in data:
m.update(d.encode('utf8'))
return m |
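mh_digest wraps datasketch's MinHash; a quick sketch of comparing two digests (the word-id strings are invented):
m1 = mh_digest(["101", "102", "103"])
m2 = mh_digest(["102", "103", "104"])
print(m1.jaccard(m2))  # estimated Jaccard similarity, close to the true 0.5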
def rank_kernel (path):
"""
return a list (matrix-ish) of the key phrases and their ranks
"""
kernel = []
if isinstance(path, str):
path = json_iter(path)
for meta in path:
if not isinstance(meta, RankedLexeme):
rl = RankedLexeme(**meta)
else:
rl = meta
m = mh_digest(map(lambda x: str(x), rl.ids))
kernel.append((rl, m,))
return kernel |
def top_sentences (kernel, path):
"""
determine distance for each sentence
"""
key_sent = {}
i = 0
if isinstance(path, str):
path = json_iter(path)
for meta in path:
graf = meta["graf"]
tagged_sent = [WordNode._make(x) for x in graf]
text = " ".join([w.raw for w in tagged_sent])
m_sent = mh_digest([str(w.word_id) for w in tagged_sent])
dist = sum([m_sent.jaccard(m) * rl.rank for rl, m in kernel])
key_sent[text] = (dist, i)
i += 1
for text, (dist, i) in sorted(key_sent.items(), key=lambda x: x[1][0], reverse=True):
yield SummarySent(dist=dist, idx=i, text=text) |
def limit_keyphrases (path, phrase_limit=20):
"""
iterator for the most significant key phrases
"""
rank_thresh = None
if isinstance(path, str):
lex = []
for meta in json_iter(path):
rl = RankedLexeme(**meta)
lex.append(rl)
else:
lex = path
if len(lex) > 0:
rank_thresh = statistics.mean([rl.rank for rl in lex])
else:
rank_thresh = 0
used = 0
for rl in lex:
if rl.pos[0] != "v":
if (used >= phrase_limit) or (rl.rank < rank_thresh):  # ">" would yield one phrase too many
return
used += 1
yield rl.text.replace(" - ", "-") |
def limit_sentences (path, word_limit=100):
"""
iterator for the most significant sentences, up to a specified limit
"""
word_count = 0
if isinstance(path, str):
path = json_iter(path)
for meta in path:
if not isinstance(meta, SummarySent):
p = SummarySent(**meta)
else:
p = meta
sent_text = p.text.strip().split(" ")
sent_len = len(sent_text)
if (word_count + sent_len) > word_limit:
break
else:
word_count += sent_len
yield sent_text, p.idx |
def make_sentence (sent_text):
"""
construct a sentence text, with proper spacing
"""
lex = []
idx = 0
for word in sent_text:
if len(word) > 0:
if (idx > 0) and not (word[0] in ",.:;!?-\"'"):
lex.append(" ")
lex.append(word)
idx += 1
return "".join(lex) |
def json_iter (path):
"""
iterator for JSON-per-line in a file pattern
"""
with open(path, 'r') as f:
for line in f:  # iterate lazily instead of loading the whole file via readlines()
yield json.loads(line) |
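json_iter expects one JSON object per line; a tiny round-trip sketch (the file name is invented):
import json

with open("docs.json", "w") as f:
    f.write(json.dumps({"id": "777", "text": "Compatibility of systems of linear constraints"}) + "\n")

for meta in json_iter("docs.json"):
    print(meta["id"])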
def pretty_print (obj, indent=False):
"""
pretty print a JSON object
"""
if indent:
return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '))
else:
return json.dumps(obj, sort_keys=True) |
def get_object(cls, api_token, snapshot_id):
"""
Class method that will return a Snapshot object by ID.
"""
snapshot = cls(token=api_token, id=snapshot_id)
snapshot.load()
return snapshot |
def load(self):
"""
Fetch data about the tag.
"""
tags = self.get_data("tags/%s" % self.name)
tag = tags['tag']
for attr in tag.keys():
setattr(self, attr, tag[attr])
return self |
def create(self, **kwargs):
"""
Create the tag.
"""
for attr in kwargs.keys():
setattr(self, attr, kwargs[attr])
params = {"name": self.name}
output = self.get_data("tags", type="POST", params=params)
if output:
self.name = output['tag']['name']
self.resources = output['tag']['resources'] |
def __get_resources(self, resources, method):
""" Method used to talk directly to the API (TAGs' Resources) """
tagged = self.get_data(
'tags/%s/resources' % self.name, params={
"resources": resources
},
type=method,
)
return tagged |
def __extract_resources_from_droplets(self, data):
"""
Private method to extract the resources from a value.
It will check the type of each object in the provided list and
build the right structure for the API.
"""
resources = []
if not isinstance(data, list): return data
for a_droplet in data:
res = {}
try:
if isinstance(a_droplet, unicode):
res = {"resource_id": a_droplet, "resource_type": "droplet"}
except NameError:
pass
if isinstance(a_droplet, str) or isinstance(a_droplet, int):
res = {"resource_id": str(a_droplet), "resource_type": "droplet"}
elif isinstance(a_droplet, Droplet):
res = {"resource_id": str(a_droplet.id), "resource_type": "droplet"}
if len(res) > 0:
resources.append(res)
return resources |
def add_droplets(self, droplet):
"""
Add the Tag to a Droplet.
Args:
    droplet: a list of strings (IDs), ints, or Droplet objects.
"""
droplets = droplet
if not isinstance(droplets, list):
droplets = [droplet]
# Extracting data from the Droplet object
resources = self.__extract_resources_from_droplets(droplets)
if len(resources) > 0:
return self.__add_resources(resources)
return False |
def remove_droplets(self, droplet):
"""
Remove the Tag from the Droplet.
Args:
    droplet: a list of strings (IDs), ints, or Droplet objects.
"""
droplets = droplet
if not isinstance(droplets, list):
droplets = [droplet]
# Extracting data from the Droplet object
resources = self.__extract_resources_from_droplets(droplets)
if len(resources) > 0:
return self.__remove_resources(resources)
return False |
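A hedged usage sketch for the tag methods above (the token and IDs are placeholders):
import digitalocean

tag = digitalocean.Tag(token="YOUR_API_TOKEN", name="staging")
tag.create()
tag.add_droplets(["12345", 67890])  # strings, ints, or Droplet objects all work
tag.remove_droplets([67890])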
def get_object(cls, api_token, action_id):
"""
Class method that will return a Action object by ID.
"""
action = cls(token=api_token, id=action_id)
action.load_directly()
return action |
def wait(self, update_every_seconds=1):
"""
Wait until the action is marked as completed or with an error.
It will return True in case of success, otherwise False.
Optional Args:
update_every_seconds - int : number of seconds to wait before
checking if the action is completed.
"""
while self.status == u'in-progress':
sleep(update_every_seconds)
self.load()
return self.status == u'completed' |
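A sketch of polling an action to completion (token and ID are placeholders):
import digitalocean

action = digitalocean.Action.get_object(api_token="YOUR_API_TOKEN", action_id=123456)
if action.wait(update_every_seconds=2):
    print("completed")
else:
    print("ended with status:", action.status)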
def get_object(cls, api_token, droplet_id):
"""Class method that will return a Droplet object by ID.
Args:
api_token (str): token
droplet_id (int): droplet id
"""
droplet = cls(token=api_token, id=droplet_id)
droplet.load()
return droplet |
def get_data(self, *args, **kwargs):
"""
Customized version of get_data to perform __check_actions_in_data
"""
data = super(Droplet, self).get_data(*args, **kwargs)
if "type" in kwargs:
if kwargs["type"] == POST:
self.__check_actions_in_data(data)
return data |
def load(self):
"""
Fetch data about droplet - use this instead of get_data()
"""
droplets = self.get_data("droplets/%s" % self.id)
droplet = droplets['droplet']
for attr in droplet.keys():
setattr(self, attr, droplet[attr])
for net in self.networks['v4']:
if net['type'] == 'private':
self.private_ip_address = net['ip_address']
if net['type'] == 'public':
self.ip_address = net['ip_address']
if self.networks['v6']:
self.ip_v6_address = self.networks['v6'][0]['ip_address']
if "backups" in self.features:
self.backups = True
else:
self.backups = False
if "ipv6" in self.features:
self.ipv6 = True
else:
self.ipv6 = False
if "private_networking" in self.features:
self.private_networking = True
else:
self.private_networking = False
if "tags" in droplets:
self.tags = droplets["tags"]
return self |
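After load(), the convenience attributes set above are available; a sketch (token and ID are placeholders; private_ip_address is only set when the droplet has a private network interface):
import digitalocean

droplet = digitalocean.Droplet.get_object(api_token="YOUR_API_TOKEN", droplet_id=3164444)
print(droplet.ip_address)
print(droplet.backups, droplet.ipv6, droplet.private_networking)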
def _perform_action(self, params, return_dict=True):
"""
Perform a droplet action.
Args:
params (dict): parameters of the action
Optional Args:
return_dict (bool): Return a dict when True (default),
otherwise return an Action.
Returns dict or Action
"""
action = self.get_data(
"droplets/%s/actions/" % self.id,
type=POST,
params=params
)
if return_dict:
return action
else:
action = action[u'action']
return_action = Action(token=self.token)
# Loading attributes
for attr in action.keys():
setattr(return_action, attr, action[attr])
return return_action |
def resize(self, new_size_slug, return_dict=True, disk=True):
"""Resize the droplet to a new size slug.
https://developers.digitalocean.com/documentation/v2/#resize-a-droplet
Args:
new_size_slug (str): name of new size
Optional Args:
return_dict (bool): Return a dict when True (default),
otherwise return an Action.
disk (bool): If a permanent resize, with disk changes included.
Returns dict or Action
"""
options = {"type": "resize", "size": new_size_slug}
if disk: options["disk"] = "true"
return self._perform_action(options, return_dict) |
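For example, a hedged resize call on a loaded droplet (the slug is taken from DigitalOcean's published size list), asking for an Action back so it can be awaited:
action = droplet.resize("s-2vcpu-2gb", return_dict=False, disk=True)
action.wait()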
def take_snapshot(self, snapshot_name, return_dict=True, power_off=False):
"""Take a snapshot!
Args:
snapshot_name (str): name of snapshot
Optional Args:
return_dict (bool): Return a dict when True (default),
otherwise return an Action.
power_off (bool): Before taking the snapshot the droplet will be
    turned off with another API call, and the method will wait
    until the droplet has powered off.
Returns dict or Action
"""
if power_off is True and self.status != "off":
action = self.power_off(return_dict=False)
action.wait()
self.load()
return self._perform_action(
{"type": "snapshot", "name": snapshot_name},
return_dict
) |
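Similarly, a hedged snapshot call (the snapshot name is invented); power_off=True makes the method block until the droplet is off before snapshotting:
action = droplet.take_snapshot("pre-upgrade-backup", return_dict=False, power_off=True)
action.wait()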
def rebuild(self, image_id=None, return_dict=True):
"""Restore the droplet to an image ( snapshot or backup )
Args:
image_id (int): id of image
Optional Args:
return_dict (bool): Return a dict when True (default),
otherwise return an Action.
Returns dict or Action
"""
if not image_id:
image_id = self.image['id']
return self._perform_action(
{"type": "rebuild", "image": image_id},
return_dict
) |
def change_kernel(self, kernel, return_dict=True):
"""Change the kernel to a new one
Args:
kernel : instance of digitalocean.Kernel.Kernel
Optional Args:
return_dict (bool): Return a dict when True (default),
otherwise return an Action.
Returns dict or Action
"""
if type(kernel) != Kernel:
raise BadKernelObject("Use Kernel object")
return self._perform_action(
{'type': 'change_kernel', 'kernel': kernel.id},
return_dict
) |
def __get_ssh_keys_id_or_fingerprint(ssh_keys, token, name):
"""
Check and return a list of SSH key IDs or fingerprints according
to DigitalOcean's API. This method is used to check and create a
droplet with the correct SSH keys.
"""
ssh_keys_id = list()
for ssh_key in ssh_keys:
if type(ssh_key) in [int, type(2 ** 64)]:
ssh_keys_id.append(int(ssh_key))
elif type(ssh_key) == SSHKey:
ssh_keys_id.append(ssh_key.id)
elif type(ssh_key) in [type(u''), type('')]:
# ssh_key could either be a fingerprint or a public key
#
# type(u'') and type('') is the same in python 3 but
# different in 2. See:
# https://github.com/koalalorenzo/python-digitalocean/issues/80
# a fingerprint is 16 colon-separated hex pairs
regexp_of_fingerprint = '([0-9a-fA-F]{2}:){15}[0-9a-fA-F]{2}'
match = re.match(regexp_of_fingerprint, ssh_key)
if match is not None and match.end() == len(ssh_key):
ssh_keys_id.append(ssh_key)
else:
key = SSHKey()
key.token = token
results = key.load_by_pub_key(ssh_key)
if results is None:
key.public_key = ssh_key
key.name = "SSH Key %s" % name
key.create()
else:
key = results
ssh_keys_id.append(key.id)
else:
raise BadSSHKeyFormat(
"Droplet.ssh_keys should be a list of IDs, public keys"
+ " or fingerprints."
)
return ssh_keys_id |
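The fingerprint branch above matches 16 colon-separated hex pairs; a quick sanity sketch (the fingerprint value is invented):
import re

fp = "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa"
assert re.match('([0-9a-fA-F]{2}:){15}[0-9a-fA-F]{2}', fp).end() == len(fp)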
def create(self, *args, **kwargs):
"""
Create the droplet with object properties.
Note: Every argument and parameter given to this method will be
assigned to the object.
"""
for attr in kwargs.keys():
setattr(self, attr, kwargs[attr])
# Provide backwards compatibility
if not self.size_slug and self.size:
self.size_slug = self.size
ssh_keys_id = Droplet.__get_ssh_keys_id_or_fingerprint(self.ssh_keys,
self.token,
self.name)
data = {
"name": self.name,
"size": self.size_slug,
"image": self.image,
"region": self.region,
"ssh_keys": ssh_keys_id,
"backups": bool(self.backups),
"ipv6": bool(self.ipv6),
"private_networking": bool(self.private_networking),
"volumes": self.volumes,
"tags": self.tags,
"monitoring": bool(self.monitoring),
}
if self.user_data:
data["user_data"] = self.user_data
data = self.get_data("droplets/", type=POST, params=data)
if data:
self.id = data['droplet']['id']
action_id = data['links']['actions'][0]['id']
self.action_ids = []
self.action_ids.append(action_id) |
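An end-to-end creation sketch (all values are placeholders; slugs follow DigitalOcean's public naming):
import digitalocean

droplet = digitalocean.Droplet(
    token="YOUR_API_TOKEN",
    name="example.com",
    region="nyc3",
    image="ubuntu-20-04-x64",
    size_slug="s-1vcpu-1gb",
    ssh_keys=[512190],  # IDs, fingerprints, SSHKey objects, or public keys
    backups=False,
)
droplet.create()
print(droplet.id, droplet.action_ids)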