text
stringlengths
4
1.02M
meta
dict
"""superlists URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.8/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Add an import: from blog import urls as blog_urls 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls)) """ from django.conf.urls import url from lists import views urlpatterns = [ url(r'^(\d+)/$', 'lists.views.view_list', name='view_list'), url(r'^new$', 'lists.views.new_list', name='new_list'), ]
{ "content_hash": "41b3c2b5d0d5142a14f6f12998cc4ae6", "timestamp": "", "source": "github", "line_count": 23, "max_line_length": 77, "avg_line_length": 34.47826086956522, "alnum_prop": 0.7036569987389659, "repo_name": "rmelchorv/TDD-Cuervos", "id": "f05df358e10775ffa272981369998abe464479cf", "size": "793", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "lists/urls.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "70" }, { "name": "HTML", "bytes": "1537" }, { "name": "Python", "bytes": "27857" } ], "symlink_target": "" }
"""Leetcode 657. Robot Return to Origin Easy URL: https://leetcode.com/problems/robot-return-to-origin/ There is a robot starting at position (0, 0), the origin, on a 2D plane. Given a sequence of its moves, judge if this robot ends up at (0, 0) after it completes its moves. The move sequence is represented by a string, and the character moves[i] represents its ith move. Valid moves are R (right), L (left), U (up), and D (down). If the robot returns to the origin after it finishes all of its moves, return true. Otherwise, return false. Note: The way that the robot is "facing" is irrelevant. "R" will always make the robot move to the right once, "L" will always make it move left, etc. Also, assume that the magnitude of the robot's movement is the same for each move. Example 1: Input: "UD" Output: true Explanation: The robot moves up once, and then down once. All moves have the same magnitude, so it ended up at the origin where it started. Therefore, we return true. Example 2: Input: "LL" Output: false Explanation: The robot moves left twice. It ends up two "moves" to the left of the origin. We return false because it is not at the origin at the end of its moves. """ class SolutionDict(object): def judgeCircle(self, moves): """ :type moves: str :rtype: bool Time complexity: O(n), where n is the number of moves. Space complexity: O(1). """ # Since L(U) needs to be complemented by R(D), vice versa, # use a dict to count the number of different directions, # and check balances of U & D and L & R. from collections import defaultdict dirs_d = defaultdict(int) for c in moves: dirs_d[c] += 1 if dirs_d['U'] == dirs_d['D'] and dirs_d['L'] == dirs_d['R']: return True else: return False class SolutionTwoCounters(object): def judgeCircle(self, moves): """ :type moves: str :rtype: bool Time complexity: O(n), where n is the number of moves. Space complexity: O(1). """ # Since L(U) needs to be complemented by R(D), vice versa, # use two counters to count the number of U/D and L/R. 
# and check balances of U & D and L & R. ud_counter = 0 lr_counter = 0 for c in moves: if c == 'U': ud_counter += 1 elif c == 'D': ud_counter -= 1 elif c == 'L': lr_counter += 1 elif c == 'R': lr_counter -= 0 if ud_counter == 0 and lr_counter == 0: return True else: return False def main(): import time print 'By dict:' start_time = time.time() # Output: True moves = "UD" print SolutionDict().judgeCircle(moves) # Output: False moves = "LL" print SolutionDict().judgeCircle(moves) print 'Time: {}'.format(time.time() - start_time) print 'By two conters:' start_time = time.time() # Output: True moves = "UD" print SolutionTwoCounters().judgeCircle(moves) # Output: False moves = "LL" print SolutionTwoCounters().judgeCircle(moves) print 'Time: {}'.format(time.time() - start_time) if __name__ == '__main__': main()
{ "content_hash": "cf9145c1ee5045bb199adce645de01c8", "timestamp": "", "source": "github", "line_count": 119, "max_line_length": 83, "avg_line_length": 27.941176470588236, "alnum_prop": 0.6006015037593985, "repo_name": "bowen0701/algorithms_data_structures", "id": "2a63a34c9bb25ec3aba76213d987c5935f8ba7af", "size": "3325", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "lc0657_robot_return_to_origin.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "108750" } ], "symlink_target": "" }
import unittest
import os

import gistey.gistey as gistey


class gisteyTest(unittest.TestCase):
    """Exercise gistey's file processing and gist payload construction."""

    def setUp(self):
        # Create an empty fixture file, then parse CLI args that refer
        # to it.
        with open("test_file.py", 'a'):
            pass
        self.args = gistey.parser.parse_args(
            ["-s", "--files", "test_file.py",
             "--description", "This is the description"])

    def tearDown(self):
        # Remove the fixture file created in setUp.
        os.remove("test_file.py")

    def test_process_files(self):
        # The empty fixture file maps to an empty content entry.
        expected = {"test_file.py": {"content": ""}}
        self.assertEqual(gistey.process_files(self.args), expected)

    def test_construct_data(self):
        # -s makes the gist secret, i.e. public is False.
        expected = {
            "public": False,
            "description": "This is the description",
            "files": gistey.process_files(self.args),
        }
        self.assertEqual(gistey.construct_data(self.args), expected)


if __name__ == "__main__":
    unittest.main()
{ "content_hash": "6567640b9a190fb101362fc85048b911", "timestamp": "", "source": "github", "line_count": 37, "max_line_length": 77, "avg_line_length": 27.91891891891892, "alnum_prop": 0.4937076476282672, "repo_name": "meetmangukiya/gistey", "id": "5b1defb16e5a63d902d0622494a502179d877f50", "size": "1033", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_gistey.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "5195" }, { "name": "Shell", "bytes": "302" } ], "symlink_target": "" }
""" Platform for Ecobee Thermostats. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/climate.ecobee/ """ import logging from os import path import voluptuous as vol from homeassistant.components import ecobee from homeassistant.components.climate import ( DOMAIN, STATE_COOL, STATE_HEAT, STATE_AUTO, STATE_IDLE, ClimateDevice, ATTR_TARGET_TEMP_LOW, ATTR_TARGET_TEMP_HIGH) from homeassistant.const import ( ATTR_ENTITY_ID, STATE_OFF, STATE_ON, ATTR_TEMPERATURE, TEMP_FAHRENHEIT) from homeassistant.config import load_yaml_config_file import homeassistant.helpers.config_validation as cv _CONFIGURING = {} _LOGGER = logging.getLogger(__name__) ATTR_FAN_MIN_ON_TIME = 'fan_min_on_time' ATTR_RESUME_ALL = 'resume_all' DEFAULT_RESUME_ALL = False TEMPERATURE_HOLD = 'temp' VACATION_HOLD = 'vacation' DEPENDENCIES = ['ecobee'] SERVICE_SET_FAN_MIN_ON_TIME = 'ecobee_set_fan_min_on_time' SERVICE_RESUME_PROGRAM = 'ecobee_resume_program' SET_FAN_MIN_ON_TIME_SCHEMA = vol.Schema({ vol.Optional(ATTR_ENTITY_ID): cv.entity_ids, vol.Required(ATTR_FAN_MIN_ON_TIME): vol.Coerce(int), }) RESUME_PROGRAM_SCHEMA = vol.Schema({ vol.Optional(ATTR_ENTITY_ID): cv.entity_ids, vol.Optional(ATTR_RESUME_ALL, default=DEFAULT_RESUME_ALL): cv.boolean, }) def setup_platform(hass, config, add_devices, discovery_info=None): """Set up the Ecobee Thermostat Platform.""" if discovery_info is None: return data = ecobee.NETWORK hold_temp = discovery_info['hold_temp'] _LOGGER.info( "Loading ecobee thermostat component with hold_temp set to %s", hold_temp) devices = [Thermostat(data, index, hold_temp) for index in range(len(data.ecobee.thermostats))] add_devices(devices) def fan_min_on_time_set_service(service): """Set the minimum fan on time on the target thermostats.""" entity_id = service.data.get(ATTR_ENTITY_ID) fan_min_on_time = service.data[ATTR_FAN_MIN_ON_TIME] if entity_id: target_thermostats = [device for device in devices if device.entity_id in 
entity_id] else: target_thermostats = devices for thermostat in target_thermostats: thermostat.set_fan_min_on_time(str(fan_min_on_time)) thermostat.schedule_update_ha_state(True) def resume_program_set_service(service): """Resume the program on the target thermostats.""" entity_id = service.data.get(ATTR_ENTITY_ID) resume_all = service.data.get(ATTR_RESUME_ALL) if entity_id: target_thermostats = [device for device in devices if device.entity_id in entity_id] else: target_thermostats = devices for thermostat in target_thermostats: thermostat.resume_program(resume_all) thermostat.schedule_update_ha_state(True) descriptions = load_yaml_config_file( path.join(path.dirname(__file__), 'services.yaml')) hass.services.register( DOMAIN, SERVICE_SET_FAN_MIN_ON_TIME, fan_min_on_time_set_service, descriptions.get(SERVICE_SET_FAN_MIN_ON_TIME), schema=SET_FAN_MIN_ON_TIME_SCHEMA) hass.services.register( DOMAIN, SERVICE_RESUME_PROGRAM, resume_program_set_service, descriptions.get(SERVICE_RESUME_PROGRAM), schema=RESUME_PROGRAM_SCHEMA) class Thermostat(ClimateDevice): """A thermostat class for Ecobee.""" def __init__(self, data, thermostat_index, hold_temp): """Initialize the thermostat.""" self.data = data self.thermostat_index = thermostat_index self.thermostat = self.data.ecobee.get_thermostat( self.thermostat_index) self._name = self.thermostat['name'] self.hold_temp = hold_temp self.vacation = None self._climate_list = self.climate_list self._operation_list = ['auto', 'auxHeatOnly', 'cool', 'heat', 'off'] self.update_without_throttle = False def update(self): """Get the latest state from the thermostat.""" if self.update_without_throttle: self.data.update(no_throttle=True) self.update_without_throttle = False else: self.data.update() self.thermostat = self.data.ecobee.get_thermostat( self.thermostat_index) @property def name(self): """Return the name of the Ecobee Thermostat.""" return self.thermostat['name'] @property def temperature_unit(self): """Return the unit of 
measurement.""" return TEMP_FAHRENHEIT @property def current_temperature(self): """Return the current temperature.""" return self.thermostat['runtime']['actualTemperature'] / 10 @property def target_temperature_low(self): """Return the lower bound temperature we try to reach.""" if self.current_operation == STATE_AUTO: return int(self.thermostat['runtime']['desiredHeat'] / 10) return None @property def target_temperature_high(self): """Return the upper bound temperature we try to reach.""" if self.current_operation == STATE_AUTO: return int(self.thermostat['runtime']['desiredCool'] / 10) return None @property def target_temperature(self): """Return the temperature we try to reach.""" if self.current_operation == STATE_AUTO: return None if self.current_operation == STATE_HEAT: return int(self.thermostat['runtime']['desiredHeat'] / 10) elif self.current_operation == STATE_COOL: return int(self.thermostat['runtime']['desiredCool'] / 10) return None @property def desired_fan_mode(self): """Return the desired fan mode of operation.""" return self.thermostat['runtime']['desiredFanMode'] @property def fan(self): """Return the current fan state.""" if 'fan' in self.thermostat['equipmentStatus']: return STATE_ON return STATE_OFF @property def current_hold_mode(self): """Return current hold mode.""" events = self.thermostat['events'] for event in events: if event['running']: if event['type'] == 'hold': if event['holdClimateRef'] == 'away': if int(event['endDate'][0:4]) - \ int(event['startDate'][0:4]) <= 1: # A temporary hold from away climate is a hold return 'away' # A permanent hold from away climate is away_mode return None elif event['holdClimateRef'] != "": # Any other hold based on climate return event['holdClimateRef'] # Any hold not based on a climate is a temp hold return TEMPERATURE_HOLD elif event['type'].startswith('auto'): # All auto modes are treated as holds return event['type'][4:].lower() elif event['type'] == 'vacation': self.vacation = event['name'] 
return VACATION_HOLD return None @property def current_operation(self): """Return current operation.""" if self.operation_mode == 'auxHeatOnly' or \ self.operation_mode == 'heatPump': return STATE_HEAT return self.operation_mode @property def operation_list(self): """Return the operation modes list.""" return self._operation_list @property def operation_mode(self): """Return current operation ie. heat, cool, idle.""" return self.thermostat['settings']['hvacMode'] @property def mode(self): """Return current mode, as the user-visible name.""" cur = self.thermostat['program']['currentClimateRef'] climates = self.thermostat['program']['climates'] current = list(filter(lambda x: x['climateRef'] == cur, climates)) return current[0]['name'] @property def fan_min_on_time(self): """Return current fan minimum on time.""" return self.thermostat['settings']['fanMinOnTime'] @property def device_state_attributes(self): """Return device specific state attributes.""" # Move these to Thermostat Device and make them global status = self.thermostat['equipmentStatus'] operation = None if status == '': operation = STATE_IDLE elif 'Cool' in status: operation = STATE_COOL elif 'auxHeat' in status: operation = STATE_HEAT elif 'heatPump' in status: operation = STATE_HEAT else: operation = status return { "actual_humidity": self.thermostat['runtime']['actualHumidity'], "fan": self.fan, "mode": self.mode, "operation": operation, "climate_list": self.climate_list, "fan_min_on_time": self.fan_min_on_time } @property def is_away_mode_on(self): """Return true if away mode is on.""" return self.current_hold_mode == 'away' @property def is_aux_heat_on(self): """Return true if aux heater.""" return 'auxHeat' in self.thermostat['equipmentStatus'] def turn_away_mode_on(self): """Turn away on.""" self.set_hold_mode('away') def turn_away_mode_off(self): """Turn away off.""" self.set_hold_mode(None) def set_hold_mode(self, hold_mode): """Set hold mode (away, home, temp, sleep, etc.).""" hold = 
self.current_hold_mode if hold == hold_mode: # no change, so no action required return elif hold_mode == 'None' or hold_mode is None: if hold == VACATION_HOLD: self.data.ecobee.delete_vacation( self.thermostat_index, self.vacation) else: self.data.ecobee.resume_program(self.thermostat_index) else: if hold_mode == TEMPERATURE_HOLD: self.set_temp_hold(int(self.current_temperature)) else: self.data.ecobee.set_climate_hold( self.thermostat_index, hold_mode, self.hold_preference()) self.update_without_throttle = True def set_auto_temp_hold(self, heat_temp, cool_temp): """Set temperature hold in auto mode.""" self.data.ecobee.set_hold_temp(self.thermostat_index, cool_temp, heat_temp, self.hold_preference()) _LOGGER.debug("Setting ecobee hold_temp to: heat=%s, is=%s, " "cool=%s, is=%s", heat_temp, isinstance( heat_temp, (int, float)), cool_temp, isinstance(cool_temp, (int, float))) self.update_without_throttle = True def set_temp_hold(self, temp): """Set temperature hold in modes other than auto.""" # Set arbitrary range when not in auto mode if self.current_operation == STATE_HEAT: heat_temp = temp cool_temp = temp + 20 elif self.current_operation == STATE_COOL: heat_temp = temp - 20 cool_temp = temp self.data.ecobee.set_hold_temp(self.thermostat_index, cool_temp, heat_temp, self.hold_preference()) _LOGGER.debug("Setting ecobee hold_temp to: low=%s, is=%s, " "cool=%s, is=%s", heat_temp, isinstance( heat_temp, (int, float)), cool_temp, isinstance(cool_temp, (int, float))) self.update_without_throttle = True def set_temperature(self, **kwargs): """Set new target temperature.""" low_temp = kwargs.get(ATTR_TARGET_TEMP_LOW) high_temp = kwargs.get(ATTR_TARGET_TEMP_HIGH) temp = kwargs.get(ATTR_TEMPERATURE) if self.current_operation == STATE_AUTO and low_temp is not None \ and high_temp is not None: self.set_auto_temp_hold(int(low_temp), int(high_temp)) elif temp is not None: self.set_temp_hold(int(temp)) else: _LOGGER.error( "Missing valid arguments for set_temperature in %s", 
kwargs) def set_operation_mode(self, operation_mode): """Set HVAC mode (auto, auxHeatOnly, cool, heat, off).""" self.data.ecobee.set_hvac_mode(self.thermostat_index, operation_mode) self.update_without_throttle = True def set_fan_min_on_time(self, fan_min_on_time): """Set the minimum fan on time.""" self.data.ecobee.set_fan_min_on_time( self.thermostat_index, fan_min_on_time) self.update_without_throttle = True def resume_program(self, resume_all): """Resume the thermostat schedule program.""" self.data.ecobee.resume_program( self.thermostat_index, str(resume_all).lower()) self.update_without_throttle = True def hold_preference(self): """Return user preference setting for hold time.""" # Values returned from thermostat are 'useEndTime4hour', # 'useEndTime2hour', 'nextTransition', 'indefinite', 'askMe' default = self.thermostat['settings']['holdAction'] if default == 'nextTransition': return default # add further conditions if other hold durations should be # supported; note that this should not include 'indefinite' # as an indefinite away hold is interpreted as away_mode return 'nextTransition' @property def climate_list(self): """Return the list of climates currently available.""" climates = self.thermostat['program']['climates'] return list(map((lambda x: x['name']), climates))
{ "content_hash": "a0e26fa0091f3778e521742fffb97980", "timestamp": "", "source": "github", "line_count": 386, "max_line_length": 77, "avg_line_length": 36.74611398963731, "alnum_prop": 0.5948956570783982, "repo_name": "LinuxChristian/home-assistant", "id": "6780d3745f05e35d0037d61dc58b58b2797eefcf", "size": "14184", "binary": false, "copies": "4", "ref": "refs/heads/dev", "path": "homeassistant/components/climate/ecobee.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "13788" }, { "name": "HTML", "bytes": "1733802" }, { "name": "JavaScript", "bytes": "15192" }, { "name": "Python", "bytes": "7415265" }, { "name": "Ruby", "bytes": "517" }, { "name": "Shell", "bytes": "15154" } ], "symlink_target": "" }
import eventlet
from eventlet import Timeout
from mock import create_autospec

from nameko.containers import ServiceContainer
from nameko.testing.utils import wait_for_call
from nameko.timer import Timer


def spawn_managed_thread(fn, identifier=None):
    # Run managed threads as plain greenthreads; the identifier is unused.
    return eventlet.spawn(fn)


def _make_container():
    """Return an autospecced ServiceContainer with a service name set."""
    container = create_autospec(ServiceContainer)
    container.service_name = "service"
    return container


def test_provider():
    container = _make_container()
    container.spawn_managed_thread = spawn_managed_thread

    timer = Timer(interval=0.1).bind(container, "method")
    timer.setup()
    timer.start()

    assert timer.interval == 0.1

    with wait_for_call(1, container.spawn_worker) as spawn_worker:
        with Timeout(1):
            timer.stop()

    # the timer should have stopped and should only have spawned
    # a single worker
    spawn_worker.assert_called_once_with(timer, (), {})
    assert timer.gt.dead


def test_stop_timer_immediately():
    container = _make_container()
    container.config = {}

    timer = Timer(interval=5).bind(container, "method")
    timer.setup()
    timer.start()

    eventlet.sleep(0.1)
    timer.stop()

    assert container.spawn_worker.call_count == 0
    assert timer.gt.dead


def test_kill_stops_timer():
    container = _make_container()
    container.spawn_managed_thread = spawn_managed_thread

    timer = Timer(interval=0).bind(container, "method")
    timer.setup()
    timer.start()

    with wait_for_call(1, container.spawn_worker):
        timer.kill()

    # unless the timer is dead, the following nap would cause a timer
    # to trigger
    eventlet.sleep(0.1)
    assert container.spawn_worker.call_count == 1
{ "content_hash": "50b0cd53b2f4f99afd4a61d51e04527d", "timestamp": "", "source": "github", "line_count": 66, "max_line_length": 69, "avg_line_length": 26.333333333333332, "alnum_prop": 0.6973532796317606, "repo_name": "Alecto3-D/testable-greeter", "id": "b04ce64eebbbbd8e6cbf900441123007aa6d9848", "size": "1738", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "nameko/test/test_timers.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "1340" }, { "name": "JavaScript", "bytes": "6003191" }, { "name": "Makefile", "bytes": "7521" }, { "name": "Python", "bytes": "4833445" }, { "name": "RAML", "bytes": "62192" }, { "name": "Shell", "bytes": "3682" } ], "symlink_target": "" }
""" This script generates tests text-emphasis-position-property-001 ~ 006 which cover all possible values of text-emphasis-position property with all combination of three main writing modes and two orientations. Only test files are generated by this script. It also outputs a list of all tests it generated in the format of Mozilla reftest.list to the stdout. """ from __future__ import unicode_literals, print_function, absolute_import import itertools TEST_FILE = 'text-emphasis-position-property-{:03}{}.html' REF_FILE = 'text-emphasis-position-property-{:03}-ref.html' TEST_TEMPLATE = '''<!DOCTYPE html> <meta charset="utf-8"> <!-- This file was generated automatically by the script ./support/generate-text-emphasis-position-property-tests.py --> <title>CSS Test: text-emphasis-position: {value}, {title}</title> <link rel="author" title="Xidorn Quan" href="https://www.upsuper.org"> <link rel="author" title="Mozilla" href="https://www.mozilla.org"> <link rel="help" href="https://drafts.csswg.org/css-text-decor-3/#text-emphasis-position-property"> <meta name="assert" content="'text-emphasis-position: {value}' with 'writing-mode: {wm}' puts emphasis marks {position} the text."> <link rel="match" href="text-emphasis-position-property-{index:03}-ref.html"> <p>Pass if the emphasis marks are {position} the text below:</p> <div lang="ja" style="line-height: 5; text-emphasis: circle; writing-mode: {wm}; text-orientation: {orient}; text-emphasis-position: {value}">試験テスト</div> ''' SUFFIXES = ['', 'a', 'b', 'c', 'd', 'e', 'f', 'g'] WRITING_MODES = ["horizontal-tb", "vertical-rl", "vertical-lr"] POSITION_HORIZONTAL = ["over", "under"] POSITION_VERTICAL = ["right", "left"] REF_MAP_MIXED = { "over": 1, "under": 2, "right": 3, "left": 4 } REF_MAP_SIDEWAYS = { "right": 5, "left": 6 } POSITION_TEXT = { "over": "over", "under": "under", "right": "to the right of", "left": "to the left of" } suffixes = [iter(SUFFIXES) for i in range(6)] reftest_items = [] def write_file(filename, content): 
with open(filename, 'wb') as f: f.write(content.encode('UTF-8')) def write_test_file(idx, suffix, wm, orient, value, position): filename = TEST_FILE.format(idx, suffix) write_file(filename, TEST_TEMPLATE.format( value=value, wm=wm, orient=orient, index=idx, position=position, title=(wm if orient == "mixed" else "{}, {}".format(wm, orient)))) reftest_items.append("== {} {}".format(filename, REF_FILE.format(idx))) def write_test_files(wm, orient, pos1, pos2): idx = (REF_MAP_MIXED if orient == "mixed" else REF_MAP_SIDEWAYS)[pos1] position = POSITION_TEXT[pos1] suffix = suffixes[idx - 1] write_test_file(idx, next(suffix), wm, orient, pos1 + " " + pos2, position) write_test_file(idx, next(suffix), wm, orient, pos2 + " " + pos1, position) for wm in WRITING_MODES: if wm == "horizontal-tb": effective_pos = POSITION_HORIZONTAL ineffective_pos = POSITION_VERTICAL else: effective_pos = POSITION_VERTICAL ineffective_pos = POSITION_HORIZONTAL for pos1, pos2 in itertools.product(effective_pos, ineffective_pos): write_test_files(wm, "mixed", pos1, pos2) if wm != "horizontal-tb": write_test_files(wm, "sideways", pos1, pos2) print("# START tests from {}".format(__file__)) reftest_items.sort() for item in reftest_items: print(item) print("# END tests from {}".format(__file__))
{ "content_hash": "020415b6918ca6b98a75f938aed2df41", "timestamp": "", "source": "github", "line_count": 78, "max_line_length": 153, "avg_line_length": 44.243589743589745, "alnum_prop": 0.6731382208055636, "repo_name": "nwjs/chromium.src", "id": "527959068762f5a7185bf11bdc2536054d2db258", "size": "3512", "binary": false, "copies": "25", "ref": "refs/heads/nw70", "path": "third_party/blink/web_tests/external/wpt/css/css-text-decor/tools/generate-text-emphasis-position-property-tests.py", "mode": "33261", "license": "bsd-3-clause", "language": [], "symlink_target": "" }
""" homeassistant.components.light.blinksticklight ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Support for Blinkstick lights. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/light.blinksticklight/ """ import logging from homeassistant.components.light import ATTR_RGB_COLOR, Light _LOGGER = logging.getLogger(__name__) REQUIREMENTS = ["blinkstick==1.1.7"] # pylint: disable=unused-argument def setup_platform(hass, config, add_devices_callback, discovery_info=None): """ Add device specified by serial number. """ from blinkstick import blinkstick stick = blinkstick.find_by_serial(config['serial']) add_devices_callback([BlinkStickLight(stick, config['name'])]) class BlinkStickLight(Light): """ Represents a BlinkStick light. """ def __init__(self, stick, name): self._stick = stick self._name = name self._serial = stick.get_serial() self._rgb_color = stick.get_color() @property def should_poll(self): """ Polling needed. """ return True @property def name(self): """ The name of the light. """ return self._name @property def rgb_color(self): """ Read back the color of the light. """ return self._rgb_color @property def is_on(self): """ Check whether any of the LEDs colors are non-zero. """ return sum(self._rgb_color) > 0 def update(self): """ Read back the device state """ self._rgb_color = self._stick.get_color() def turn_on(self, **kwargs): """ Turn the device on. """ if ATTR_RGB_COLOR in kwargs: self._rgb_color = kwargs[ATTR_RGB_COLOR] else: self._rgb_color = [255, 255, 255] self._stick.set_color(red=self._rgb_color[0], green=self._rgb_color[1], blue=self._rgb_color[2]) def turn_off(self, **kwargs): """ Turn the device off """ self._stick.turn_off()
{ "content_hash": "486ae8d04f8c17754da8abfa238e9eb1", "timestamp": "", "source": "github", "line_count": 75, "max_line_length": 76, "avg_line_length": 27.6, "alnum_prop": 0.591304347826087, "repo_name": "nnic/home-assistant", "id": "5e2f026aa90f6c11b48db1cfcd5a8c0b9a9d20a4", "size": "2070", "binary": false, "copies": "2", "ref": "refs/heads/dev", "path": "homeassistant/components/light/blinksticklight.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "1482064" }, { "name": "Python", "bytes": "1790232" }, { "name": "Shell", "bytes": "3570" } ], "symlink_target": "" }
from kansha.cardextension.tests import CardExtensionTestCase

from .comp import CardWeightEditor


class CardWeightTest(CardExtensionTestCase):
    """Tests for the card weight extension."""

    extension_name = 'weight'
    extension_class = CardWeightEditor

    def test_copy(self):
        # Set and persist a weight, then check the stored value.
        editor = self.extension
        editor.weight(u'25')
        editor.commit()
        self.assertEqual(editor.data.weight, 25)
        # A copy of the extension must carry the same weight.
        duplicate = self.extension_copy
        self.assertEqual(editor.weight(), duplicate.weight())
{ "content_hash": "4283fe6394437ddcacd6b7eb792ebb0a", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 63, "avg_line_length": 29, "alnum_prop": 0.7176724137931034, "repo_name": "bcroq/kansha", "id": "c8a9b488a4be9f1ea58262c50c91c869f3929952", "size": "709", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "kansha/card_addons/weight/tests.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "90495" }, { "name": "HTML", "bytes": "25077" }, { "name": "JavaScript", "bytes": "342746" }, { "name": "Mako", "bytes": "412" }, { "name": "Python", "bytes": "584771" }, { "name": "Shell", "bytes": "234" } ], "symlink_target": "" }
__author__ = "esemi" from collections import deque import numpy from scipy.cluster import * from scipy.spatial.distance import cdist from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt from shared import get_data def kmeans_export(centroids, data, labels): """Export kmeans result""" res = [[] for i in xrange(len(centroids))] d = cdist(numpy.array(data), centroids, 'euclidean') for i, l in enumerate(d): res[l.tolist().index(l.min())].append((labels[i], data[i])) return res def kmeans_draw(clusters): """Drawing kmeans clustering result""" colors = deque(['r', 'g', 'b', 'c', 'm', 'y', 'k']) fig = plt.figure() # Prior to version 1.0.0, the method of creating a 3D axes was different. For those using older versions of matplotlib, # change ax = fig.add_subplot(111, projection='3d') to ax = Axes3D(fig). ax = Axes3D(fig) for cluster in clusters: color = colors.popleft() for name, coord in cluster: x, y, z = coord ax.plot3D([x], [y], [z], marker='o', c=color) ax.set_xlabel(u'Белки') ax.set_ylabel(u'Жиры') ax.set_zlabel(u'Углеводы') plt.show() if __name__ == '__main__': names, data = get_data() centroids = vq.kmeans(numpy.array(data), 7, iter=200)[0] K_res = kmeans_export(centroids, data, names) kmeans_draw(K_res)
{ "content_hash": "4ac79b9e41256f5b9489b362c303119c", "timestamp": "", "source": "github", "line_count": 56, "max_line_length": 123, "avg_line_length": 24.839285714285715, "alnum_prop": 0.6232925952552121, "repo_name": "esemi/blogpost_clustering", "id": "ff1757fdbaf78b9b9d778681926d5a3edc83fc92", "size": "1456", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/kmeans.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "4292" } ], "symlink_target": "" }
from telemetry.page import page_benchmark_results


class CsvPageBenchmarkResults(page_benchmark_results.PageBenchmarkResults):
  """Accumulates page benchmark results and emits them as CSV rows.

  The header row lists every measurement name (with its units) that has been
  seen; each subsequent row holds one page's values.  Depending on
  output_after_every_page, rows are either streamed as each page finishes or
  written in one batch by PrintSummary.
  """

  def __init__(self, results_writer, output_after_every_page):
    # results_writer: a csv.writer-like object exposing writerow().
    # output_after_every_page: if True, emit a CSV row as soon as each page is
    # measured (this requires every page to output the same measurement
    # names); if False, buffer everything and emit it in PrintSummary.
    super(CsvPageBenchmarkResults, self).__init__()
    self._results_writer = results_writer
    self._did_output_header = False
    # Measurement names written to the header, in header column order.
    self._header_names_written_to_writer = None
    self._output_after_every_page = output_after_every_page

  def DidMeasurePage(self):
    """Finishes the current page; in streaming mode, writes its CSV row."""
    assert self.values_for_current_page, 'Failed to call WillMeasurePage'

    if not self._output_after_every_page:
      # Batch mode: just let the base class archive the page's values.
      super(CsvPageBenchmarkResults, self).DidMeasurePage()
      return

    # Streaming mode: the first page defines the header; later pages must
    # produce exactly the same measurement names.
    if not self._did_output_header:
      self._OutputHeader()
    else:
      self._ValidateOutputNamesForCurrentPage()

    self._OutputValuesForPage(self.values_for_current_page)
    super(CsvPageBenchmarkResults, self).DidMeasurePage()

  def PrintSummary(self, trace_tag):
    """In batch mode, writes the header plus one row per measured page."""
    if not self._output_after_every_page:
      self._OutputHeader()
      for page_values in self.page_results:
        self._OutputValuesForPage(page_values)
    super(CsvPageBenchmarkResults, self).PrintSummary(trace_tag)

  def _ValidateOutputNamesForCurrentPage(self):
    """Asserts the current page produced the same names as the header."""
    assert self._did_output_header
    current_page_measurement_names = \
        set(self.values_for_current_page.measurement_names)
    header_names_written_to_writer = \
        set(self._header_names_written_to_writer)
    if header_names_written_to_writer == current_page_measurement_names:
      return
    assert False, """To use CsvPageBenchmarkResults, you must add the same
result names for every page. In this case, first page output:
%s

Thus, all subsequent pages must output this as well. Instead, the current page
output:
%s

Change your test to produce the same thing each time, or modify
PageBenchmark.results_are_the_same_on_every_page to return False.
""" % (repr(header_names_written_to_writer),
       repr(current_page_measurement_names))

  def _OutputHeader(self):
    """Writes the CSV header row: 'url' plus every measurement seen so far."""
    assert not self._did_output_header
    # Sort so the column order is deterministic across runs.
    all_measurement_names = list(
        self.all_measurements_that_have_been_seen.keys())
    all_measurement_names.sort()

    self._did_output_header = True
    self._header_names_written_to_writer = list(all_measurement_names)

    row = ['url']
    for measurement_name in all_measurement_names:
      measurement_data = \
          self.all_measurements_that_have_been_seen[measurement_name]
      row.append('%s (%s)' % (measurement_name, measurement_data['units']))
    self._results_writer.writerow(row)

  def _OutputValuesForPage(self, page_values):
    """Writes one CSV row for a page, in header column order."""
    row = [page_values.page.display_url]
    for measurement_name in self._header_names_written_to_writer:
      value = page_values.FindValueByMeasurementName(measurement_name)
      if value:
        row.append('%s' % value.output_value)
      else:
        # '-' marks a measurement the page did not produce.
        row.append('-')
    self._results_writer.writerow(row)
{ "content_hash": "5205ccf600562b88a3902ef96a399b28", "timestamp": "", "source": "github", "line_count": 78, "max_line_length": 78, "avg_line_length": 37.217948717948715, "alnum_prop": 0.7144333448157079, "repo_name": "codenote/chromium-test", "id": "16df18e5fb2066c05654bba6c6e525e25fc1888d", "size": "3069", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tools/telemetry/telemetry/page/csv_page_benchmark_results.py", "mode": "33188", "license": "bsd-3-clause", "language": [], "symlink_target": "" }
""" Test settings for Big Data For Education project. - Used to run tests fast on the continuous integration server and locally """ from .base import * # noqa # DEBUG # ------------------------------------------------------------------------------ # Turn debug off so tests run faster DEBUG = False TEMPLATES[0]['OPTIONS']['debug'] = False # SECRET CONFIGURATION # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key # Note: This key only used for development and testing. SECRET_KEY = env('DJANGO_SECRET_KEY', default='CHANGEME!!!') # Mail settings # ------------------------------------------------------------------------------ EMAIL_HOST = 'localhost' EMAIL_PORT = 1025 # In-memory email backend stores messages in django.core.mail.outbox # for unit testing purposes EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend' # CACHING # ------------------------------------------------------------------------------ # Speed advantages of in-memory caching without having to run Memcached CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': '' } } # TESTING # ------------------------------------------------------------------------------ TEST_RUNNER = 'django.test.runner.DiscoverRunner' # PASSWORD HASHING # ------------------------------------------------------------------------------ # Use fast password hasher so tests run faster PASSWORD_HASHERS = [ 'django.contrib.auth.hashers.MD5PasswordHasher', ] # TEMPLATE LOADERS # ------------------------------------------------------------------------------ # Keep templates in memory so tests run faster TEMPLATES[0]['OPTIONS']['loaders'] = [ ['django.template.loaders.cached.Loader', [ 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ], ], ]
{ "content_hash": "592d05a7ccf833d874dbffbf0f193ffe", "timestamp": "", "source": "github", "line_count": 61, "max_line_length": 80, "avg_line_length": 32.0655737704918, "alnum_prop": 0.5086912065439673, "repo_name": "phoebeargon/BigDataForEducation", "id": "4708e45c338c3f7256f8d9e31a3ec72a576d6935", "size": "1956", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "config/settings/test.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "1409" }, { "name": "HTML", "bytes": "22021" }, { "name": "JavaScript", "bytes": "1806" }, { "name": "Python", "bytes": "47142" }, { "name": "Shell", "bytes": "8981" } ], "symlink_target": "" }
""" Pelix remote services: Specifications handling utility methods :author: Thomas Calmant :copyright: Copyright 2014, isandlaTech :license: Apache License 2.0 :version: 0.5.7 :status: Beta .. Copyright 2014 isandlaTech Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ # Module version __version_info__ = (0, 5, 7) __version__ = ".".join(str(x) for x in __version_info__) # Documentation strings format __docformat__ = "restructuredtext en" # ------------------------------------------------------------------------------ # Pelix from pelix.utilities import is_string import pelix.constants import pelix.ldapfilter import pelix.remote import pelix.utilities # Standard library try: # Python 3 # pylint: disable=F0401,E0611 from urllib.parse import urlparse except ImportError: # Python 2 # pylint: disable=F0401 from urlparse import urlparse # ------------------------------------------------------------------------------ PYTHON_LANGUAGE = "python" """ Prefix to use for the Python specifications """ # ------------------------------------------------------------------------------ class ExportEndpoint(object): """ Represents an export end point (one per group of configuration types) """ def __init__(self, uid, fw_uid, configurations, name, svc_ref, service, properties): """ Sets up the members :param uid: Unique identified of the end point :param fw_uid: The framework UID :param configurations: Kinds of end point (xmlrpc, ...) 
:param name: Name of the end point :param svc_ref: ServiceReference of the exported service :param service: Instance of the exported service :param properties: Extra properties :raise ValueError: Invalid UID or the end point exports nothing (all specifications have been filtered) """ if not uid: raise ValueError("Invalid UID") # Given information self.__uid = uid self.__fw_uid = fw_uid self.__instance = service self.__reference = svc_ref self.__configurations = configurations self.__name = name # Normalize extra properties if not isinstance(properties, dict): self.__properties = {} else: self.__properties = properties # Normalize the list of configurations if is_string(configurations): self.__configurations = (configurations,) else: self.__configurations = tuple(configurations) # Exported specifications self.__exported_specs = [] exported_specs = compute_exported_specifications(svc_ref) if exported_specs: # Transform the specifications for export (add the language prefix) self.__exported_specs = format_specifications(exported_specs) else: raise ValueError("Endpoint {0}, {1}, exports nothing" .format(self.__uid, self.__name)) def __hash__(self): """ Custom hash, as we override equality tests """ return hash(self.__uid) def __eq__(self, other): """ Equality checked by UID """ return self.__uid == other.uid def __ne__(self, other): """ Inequality checked by UID """ return self.__uid != other.uid def __str__(self): """ String representation """ return "ExportEndpoint(uid={0}, types={1}, specs={2})" \ .format(self.__uid, self.__configurations, self.__exported_specs) def get_properties(self): """ Returns merged properties :return: Endpoint merged properties """ # Get service properties properties = self.__reference.get_properties() # Merge with local properties properties.update(self.__properties) # Some properties can't be merged for key in (pelix.constants.OBJECTCLASS, pelix.constants.SERVICE_ID): properties[key] = self.__reference.get_property(key) # Force the 
exported configurations properties[pelix.remote.PROP_EXPORTED_CONFIGS] = self.configurations return properties def make_import_properties(self): """ Returns the properties of this endpoint where export properties have been replaced by import ones :return: A dictionary with import properties """ # Convert merged properties props = to_import_properties(self.get_properties()) # Add the framework UID props[pelix.remote.PROP_ENDPOINT_FRAMEWORK_UUID] = self.__fw_uid return props def rename(self, new_name): """ Updates the endpoint name :param new_name: The new name of the endpoint """ if new_name: # Update the name only if the new one is valid self.__name = new_name # Access to the service @property def instance(self): """ Service instance """ return self.__instance @property def reference(self): """ Service reference """ return self.__reference # End point properties @property def uid(self): """ End point unique identifier """ return self.__uid @property def framework(self): """ Framework UID """ return self.__fw_uid @property def configurations(self): """ Configurations of this end point """ return self.__configurations @property def name(self): """ Name of the end point """ return self.__name @property def specifications(self): """ Returns the exported specifications """ return self.__exported_specs # ------------------------------------------------------------------------------ class ImportEndpoint(object): """ Represents an end point to access an imported service """ def __init__(self, uid, framework, configurations, name, specifications, properties): """ Sets up the members :param uid: Unique identified of the end point :param framework: UID of the framework exporting the end point (can be None) :param configurations: Kinds of end point (xmlrpc, ...) 
:param name: Name of the end point :param specifications: Specifications of the exported service :param properties: Properties of the service """ self.__uid = uid self.__fw_uid = framework or None self.__name = name self.__properties = properties.copy() if properties else {} # Normalize list of configurations if is_string(configurations): self.__configurations = (configurations,) else: self.__configurations = tuple(configurations) # Extract the language prefix in specifications self.__specifications = extract_specifications(specifications, self.__properties) # Public variable: the source server, # set up by a Pelix discovery service self.server = None def __str__(self): """ String representation of the end point """ return "ImportEndpoint(uid={0}, framework={1}, configurations={2}, " \ "specs={3})".format(self.__uid, self.__fw_uid, self.__configurations, self.__specifications) # Access to the service informations @property def specifications(self): """ Specifications of the service """ return self.__specifications @property def properties(self): """ Properties of the imported service """ return self.__properties @properties.setter def properties(self, properties): """ Sets the properties of the imported service """ # Keep a copy of the new properties self.__properties = properties.copy() if properties else {} # End point properties @property def uid(self): """ End point unique identifier """ return self.__uid @property def framework(self): """ UID of the framework exporting this end point """ return self.__fw_uid @property def configurations(self): """ Kind of end point """ return self.__configurations @property def name(self): """ Name of the end point """ return self.__name # ------------------------------------------------------------------------------ class EndpointDescription(object): """ Endpoint description bean, according to OSGi specifications: http://www.osgi.org/javadoc/r4v42/org/osgi/service/remoteserviceadmin/ EndpointDescription.html This is an 
importer-side description """ def __init__(self, svc_ref, properties): """ Sets up the description with the given properties :raise ValueError: Invalid properties """ # Set up properties all_properties = {} if svc_ref is not None: all_properties.update(svc_ref.get_properties()) if properties: all_properties.update(properties) # Add some properties if the service reference is given if svc_ref is not None: # Service ID all_properties[pelix.remote.PROP_ENDPOINT_SERVICE_ID] = \ svc_ref.get_property(pelix.constants.SERVICE_ID) # Convert properties self.__properties = to_import_properties(all_properties) # Check their validity self.__check_properties(self.__properties) # Keep a copy of the endpoint ID self.__endpoint_id = self.get_id() def __hash__(self): """ Custom hash, as we override equality tests """ return hash(self.__endpoint_id) def __eq__(self, other): """ Equality checked by UID """ return self.__endpoint_id == other.__endpoint_id def __ne__(self, other): """ Inequality checked by UID """ return self.__endpoint_id != other.__endpoint_id def __str__(self): """ String representation """ return "EndpointDescription(id={0}; endpoint.service.id={1}; " \ "framework.uuid={2})".format(self.get_id(), self.get_service_id(), self.get_framework_uuid()) def __check_properties(self, props): """ Checks that the given dictionary doesn't have export keys and has import keys :param props: Properties to validate :raise ValueError: Invalid properties """ # Mandatory properties mandatory = (pelix.remote.PROP_ENDPOINT_ID, pelix.remote.PROP_IMPORTED_CONFIGS, pelix.constants.OBJECTCLASS) for key in mandatory: if key not in props: raise ValueError("Missing property: {0}".format(key)) # Export/Import properties props_export = (pelix.remote.PROP_EXPORTED_CONFIGS, pelix.remote.PROP_EXPORTED_INTERFACES) for key in props_export: if key in props: raise ValueError("Export property found: {0}".format(key)) def get_configuration_types(self): """ Returns the configuration types. 
A distribution provider exports a service with an endpoint. This endpoint uses some kind of communications protocol with a set of configuration parameters. There are many different types but each endpoint is configured by only one configuration type. However, a distribution provider can be aware of different configuration types and provide synonyms to increase the change a receiving distribution provider can create a connection to this endpoint. This value of the configuration types is stored in the pelix.remote.PROP_IMPORTED_CONFIGS service property. :return: The configuration types (list of str) """ # Return a copy of the list return self.__properties[pelix.remote.PROP_IMPORTED_CONFIGS][:] def get_framework_uuid(self): """ Returns the UUID of the framework exporting this endpoint, or None :return: A framework UUID (str) or None """ return self.__properties.get(pelix.remote.PROP_ENDPOINT_FRAMEWORK_UUID) def get_id(self): """ Returns the endpoint's id. """ return self.__properties[pelix.remote.PROP_ENDPOINT_ID] def get_intents(self): """ Returns the list of intents implemented by this endpoint. The intents are based on the service.intents on an imported service, except for any intents that are additionally provided by the importing distribution provider. All qualified intents must have been expanded. This value of the intents is stored in the pelix.remote.PROP_INTENTS service property. :return: A list of intents (list of str) """ # Return a copy of the list try: return self.__properties[pelix.remote.PROP_INTENTS][:] except KeyError: return [] def get_interfaces(self): """ Provides the list of interfaces implemented by the exported service. :return: A list of specifications (list of str) """ return self.__properties[pelix.constants.OBJECTCLASS][:] def get_package_version(self, package): """ Provides the version of the given package name. 
:param package: The name of the package :return: The version of the specified package as a tuple or (0,0,0) """ name = "{0}{1}".format(pelix.remote.PROP_ENDPOINT_PACKAGE_VERSION_, package) try: # Get the version string version = self.__properties[name] # Split dots ('.') return tuple(version.split('.')) except KeyError: # No version return 0, 0, 0 def get_properties(self): """ Returns all endpoint properties. :return: A copy of the endpoint properties """ return self.__properties.copy() def get_service_id(self): """ Returns the service id for the service exported through this endpoint. :return: The ID of service on the exporter side, or 0 """ try: return self.__properties[pelix.remote.PROP_ENDPOINT_SERVICE_ID] except KeyError: # Not found return 0 def is_same_service(self, endpoint): """ Tests if this endpoint and the given one have the same framework UUID and service ID :param endpoint: Another endpoint :return: True if both endpoints represent the same remote service """ return self.get_framework_uuid() == endpoint.get_framework_uuid() \ and self.get_service_id() == endpoint.get_service_id() def matches(self, ldap_filter): """ Tests the properties of this EndpointDescription against the given filter :param ldap_filter: A filter :return: True if properties matches the filter """ return pelix.ldapfilter.get_ldap_filter(ldap_filter) \ .matches(self.__properties) def to_import(self): """ Converts an EndpointDescription bean to an ImportEndpoint :return: An ImportEndpoint bean """ # Properties properties = self.get_properties() # Framework UUID fw_uid = self.get_framework_uuid() # Endpoint name try: # From Pelix UID name = properties[pelix.remote.PROP_ENDPOINT_NAME] except KeyError: # Generated name = '{0}.{1}'.format(fw_uid, self.get_service_id()) # Configuration / kind configurations = self.get_configuration_types() # Interfaces specifications = self.get_interfaces() return ImportEndpoint(self.get_id(), fw_uid, configurations, name, specifications, properties) 
@classmethod def from_export(cls, endpoint): """ Converts an ExportEndpoint bean to an EndpointDescription :param endpoint: An ExportEndpoint bean :return: An EndpointDescription bean """ assert isinstance(endpoint, ExportEndpoint) # Service properties properties = endpoint.get_properties() # Set import keys properties[pelix.remote.PROP_ENDPOINT_ID] = endpoint.uid properties[pelix.remote.PROP_IMPORTED_CONFIGS] = \ endpoint.configurations properties[pelix.remote.PROP_EXPORTED_INTERFACES] = \ endpoint.specifications # Remove export keys for key in (pelix.remote.PROP_EXPORTED_CONFIGS, pelix.remote.PROP_EXPORTED_INTERFACES, pelix.remote.PROP_EXPORTED_INTENTS, pelix.remote.PROP_EXPORTED_INTENTS_EXTRA): try: del properties[key] except KeyError: pass # Other information properties[pelix.remote.PROP_ENDPOINT_NAME] = endpoint.name properties[pelix.remote.PROP_ENDPOINT_FRAMEWORK_UUID] = \ endpoint.framework return EndpointDescription(None, properties) # ------------------------------------------------------------------------------ def to_import_properties(properties): """ Returns a dictionary where export properties have been replaced by import ones :param properties: A dictionary of service properties (with export keys) :return: A dictionary with import properties """ # Copy the given dictionary props = properties.copy() # Add the "imported" property props[pelix.remote.PROP_IMPORTED] = True # Remote service ID try: props[pelix.remote.PROP_ENDPOINT_SERVICE_ID] = \ props.pop(pelix.constants.SERVICE_ID) except KeyError: # No service ID pass # Replace the "export configs" configs = props.pop(pelix.remote.PROP_EXPORTED_CONFIGS, None) if configs: props[pelix.remote.PROP_IMPORTED_CONFIGS] = configs # Clear other export properties for key in (pelix.remote.PROP_EXPORTED_INTENTS, pelix.remote.PROP_EXPORTED_INTENTS_EXTRA, pelix.remote.PROP_EXPORTED_INTERFACES): try: del props[key] except KeyError: # Key wasn't there pass return props # 
"--- Specification handling helpers ---"


def compute_exported_specifications(svc_ref):
    """
    Computes the list of specifications exported by the given service

    :param svc_ref: A ServiceReference
    :return: The list of exported specifications (or an empty list)
    """
    specs = svc_ref.get_property(pelix.constants.OBJECTCLASS)
    exported_specs = svc_ref.get_property(
        pelix.remote.PROP_EXPORTED_INTERFACES)
    rejected_specs = pelix.utilities.to_iterable(
        svc_ref.get_property(pelix.remote.PROP_EXPORT_REJECT), False)

    if exported_specs and exported_specs != "*":
        # An explicit subset of specifications is exported: keep only the
        # declared objectClass entries that are in that subset
        iterable_exports = pelix.utilities.to_iterable(exported_specs, False)
        all_exported_specs = [spec for spec in specs
                              if spec in iterable_exports]
    else:
        # "*" (or nothing): export every declared specification
        all_exported_specs = pelix.utilities.to_iterable(specs)

    # Finally, drop the explicitly rejected specifications
    return [spec for spec in all_exported_specs
            if spec not in rejected_specs]


def extract_specifications(specifications, properties):
    """
    Converts "python:/name" specifications to "name". Keeps the other
    specifications as is.

    :param specifications: The specifications found in a remote registration
    :param properties: Service properties
    :return: The filtered specifications (as a list)
    """
    all_specs = set(pelix.utilities.to_iterable(specifications))
    try:
        # Merge in the synonym specifications, if any
        synonyms = pelix.utilities.to_iterable(
            properties[pelix.remote.PROP_SYNONYMS], False)
        all_specs.update(synonyms)
    except KeyError:
        # No synonyms property
        pass

    filtered_specs = set()
    for original in all_specs:
        try:
            # Extract language and interface name
            lang, spec = _extract_specification_parts(original)
            if lang == PYTHON_LANGUAGE:
                # Language match: keep the name only
                filtered_specs.add(spec)
            else:
                # Foreign language: keep the formatted name as is
                filtered_specs.add(original)
        except ValueError:
            # Ignore invalid specifications
            pass

    return list(filtered_specs)


def format_specifications(specifications):
    """
    Transforms the interfaces names into URI strings, with the interface
    implementation language as a scheme.

    :param specifications: Specifications to transform
    :return: The transformed names
    """
    transformed = set()
    for original in specifications:
        try:
            lang, spec = _extract_specification_parts(original)
            transformed.add(_format_specification(lang, spec))
        except ValueError:
            # Ignore invalid specifications
            pass

    return list(transformed)


def _extract_specification_parts(specification):
    """
    Extract the language and the interface from a "language:/interface"
    interface name

    :param specification: The formatted interface name
    :return: A (language, interface name) tuple
    :raise ValueError: Invalid specification content
    """
    try:
        # Parse the URI-like string
        parsed = urlparse(specification)
    except Exception:
        # Narrowed from a bare "except:": a bare clause would also swallow
        # SystemExit and KeyboardInterrupt
        raise ValueError("Invalid specification URL: {0}"
                         .format(specification))

    # Extract the interface name
    interface = parsed.path

    # Extract the language, if given
    language = parsed.scheme
    if not language:
        # Simple name, without scheme: assume a Python specification
        language = PYTHON_LANGUAGE
    else:
        # Formatted name: un-escape it, without the starting '/'
        interface = _unescape_specification(interface[1:])

    return language, interface


def _format_specification(language, specification):
    """
    Formats a "language:/interface" string

    :param language: Specification language
    :param specification: Specification name
    :return: A formatted string
    """
    return "{0}:/{1}".format(language, _escape_specification(specification))


def _escape_specification(specification):
    """
    Escapes the interface string: replaces slashes '/' by '%2F'

    :param specification: Specification name
    :return: The escaped name
    """
    return specification.replace('/', '%2F')


def _unescape_specification(specification):
    """
    Unescapes the interface string: replaces '%2F' by slashes '/'

    :param specification: Specification name
    :return: The unescaped name
    """
    return specification.replace('%2F', '/')
{ "content_hash": "a7488e63598992a9c6982d0ff6d77370", "timestamp": "", "source": "github", "line_count": 808, "max_line_length": 80, "avg_line_length": 30.183168316831683, "alnum_prop": 0.5895112350336231, "repo_name": "ahmadshahwan/cohorte-runtime", "id": "4a8145d404617349304bb0ec2d1ee18a8c53c06e", "size": "24442", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "python/src/lib/python/pelix/remote/beans.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "5939" }, { "name": "CSS", "bytes": "15319" }, { "name": "HTML", "bytes": "12092" }, { "name": "Java", "bytes": "857542" }, { "name": "JavaScript", "bytes": "43722" }, { "name": "Python", "bytes": "3678527" }, { "name": "Shell", "bytes": "800" } ], "symlink_target": "" }
from collections import deque
from threading import Condition

from SeaIceConnector import *


class ScopedSeaIceConnector (SeaIceConnector):
  """ A SeaIce DB Connector which is released to the pool from whence it came
      when it goes out of scope.

      This type of connector is produced by
      :func:`seaice.ConnectorPool.SeaIceConnectorPool.getScoped` and should
      not be used directly.

      NOTE: release relies on ``__del__``, i.e. on the garbage collector
      reclaiming the object; under CPython's reference counting this happens
      as soon as the connector goes out of scope.

  :param pool: The pool from which this connector originates. When the
               destructor is called, the connection is enqueued into the pool.
  :type pool: seaice.ConnectorPool.SeaIceConnectorPool
  :param db_con: The connector.
  :type db_con: seaice.SeaIceConnector.SeaIceConnector
  """

  def __init__(self, pool, db_con):
    self.con = db_con.con
    self.db_con = db_con
    self.pool = pool

  def __del__(self):
    # Hand the wrapped connector back to the pool when this wrapper dies.
    self.pool.enqueue(self.db_con)


class ConnectorPool:
  """ A thread-safe FIFO connection pool.

      Implemented with a deque so that connectors are reused in the order
      they were released (this used to be a list treated as a stack; the
      old TODO asking for an actual queue is now resolved).
  """

  def __init__(self, Connector, count=20, user=None, password=None, db=None):
    # Pre-create ``count`` connectors; deque gives O(1) pops from the left.
    self.pool = deque(Connector(user, password, db) for _ in range(count))
    # Condition guarding ``self.pool``; consumers wait on it when empty.
    self.C_pool = Condition()

  def dequeue(self):
    """ Get connector, blocking until one is available.

    :rtype: seaice.SeaIceConnector.SeaIceConnector
    """
    with self.C_pool:
      while not self.pool:
        self.C_pool.wait()
      # Oldest released connector first (FIFO).
      return self.pool.popleft()

  def enqueue(self, db_con):
    """ Release connector back into the pool.

    :param db_con: The connector.
    :type db_con: seaice.SeaIceConnector.SeaIceConnector
    """
    with self.C_pool:
      self.pool.append(db_con)
      # Wake one waiter, if any.
      self.C_pool.notify()


class SeaIceConnectorPool (ConnectorPool):
  """ A thread-safe connection pool which can produce scoped SeaIce
      connectors.

  :param count: Size of the pool.
  :type count: int
  :param user: Name of DB role (see
               :class:`seaice.SeaIceConnector.SeaIceConnector` for default
               behavior).
  :type user: str
  :param password: User's password.
  :type password: str
  :param db: Name of database.
  :type db: str
  """

  def __init__(self, count=20, user=None, password=None, db=None):
    ConnectorPool.__init__(self, SeaIceConnector, count, user, password, db)

  def getScoped(self):
    """ Return a scoped connector from the pool.

    :rtype: seaice.SeaIceConnector.SeaIceConnector
    """
    return ScopedSeaIceConnector(self, self.dequeue())
{ "content_hash": "493d455e37a9a34e0315f5286ab297a4", "timestamp": "", "source": "github", "line_count": 90, "max_line_length": 90, "avg_line_length": 28.155555555555555, "alnum_prop": 0.6610102604577742, "repo_name": "nassar/yamz", "id": "ca42c244e6a3387a77d54bd7c4e4d1e2f444922a", "size": "4342", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "seaice/ConnectorPool.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "16424" }, { "name": "HTML", "bytes": "663413" }, { "name": "JavaScript", "bytes": "96004" }, { "name": "Python", "bytes": "171061" }, { "name": "Shell", "bytes": "10637" } ], "symlink_target": "" }
"""Tests for the Lutron Caseta integration.""" from homeassistant.const import STATE_ON from homeassistant.core import HomeAssistant from homeassistant.helpers import entity_registry as er from . import MockBridge, async_setup_integration async def test_light_unique_id(hass: HomeAssistant) -> None: """Test a light unique id.""" await async_setup_integration(hass, MockBridge) ra3_entity_id = "light.basement_bedroom_main_lights" caseta_entity_id = "light.kitchen_main_lights" entity_registry = er.async_get(hass) # Assert that RA3 lights will have the bridge serial hash and the zone id as the uniqueID assert entity_registry.async_get(ra3_entity_id).unique_id == "000004d2_801" # Assert that Caseta lights will have the serial number as the uniqueID assert entity_registry.async_get(caseta_entity_id).unique_id == "5442321" state = hass.states.get(ra3_entity_id) assert state.state == STATE_ON
{ "content_hash": "f3d8ddf2c32ef1550bb48892a8f9ebac", "timestamp": "", "source": "github", "line_count": 27, "max_line_length": 93, "avg_line_length": 35.22222222222222, "alnum_prop": 0.7392218717139852, "repo_name": "nkgilley/home-assistant", "id": "6449ce048328b08f12856250032feba3ad28a88b", "size": "951", "binary": false, "copies": "3", "ref": "refs/heads/dev", "path": "tests/components/lutron_caseta/test_light.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2963" }, { "name": "PLSQL", "bytes": "840" }, { "name": "Python", "bytes": "51597279" }, { "name": "Shell", "bytes": "6252" } ], "symlink_target": "" }
import re, json, sys, traceback from urlparse import urlparse from hashlib import sha1 from datetime import datetime from base64 import b64encode from twisted.internet.defer import inlineCallbacks from vumi.transports import Transport from vumi.utils import http_request_full from vumi import log from vusion.error import MissingData class CioecHttp(Transport): transport_type = 'http_api' def setup_transport(self): log.msg("Setup Embolivia http transport %s" % self.config) self.transport_metadata = {'transport_type': self.transport_type} def teardown_transport(self): log.msg("Stop forward http transport") def build_data(self, message, labels_to_add): data = {} for label_to_add in labels_to_add: if label_to_add == 'phone': data['phone'] = message['transport_metadata']['participant_phone'] elif label_to_add == 'message': data['message'] = message['content'] else: self.extract_data_from_profile( data, message['transport_metadata']['participant_profile'], label_to_add) return {'data': [data]} def extract_data_from_profile(self, data, participant_profile, label_rule): label = None default = None if isinstance(label_rule, dict): label = label_rule['label'] default = label_rule['default'] else: label = label_rule item = [x for x in participant_profile if label == x['label']] if item == []: if default is None: raise MissingData("%s is missing" % label) else: data[label] = default else: data[label] = item[0]['value'] def get_date(self): return datetime.now().strftime('%Y-%m-%d') @inlineCallbacks def handle_outbound_message(self, message): log.msg("Outboung message to be processed %s" % repr(message)) try: url = message['to_addr'] url = urlparse(url) forward_url = "%s://%s%s" % (url.scheme, url.netloc, url.path) data = {} if url.path in self.config['api']: data = self.build_data(message, self.config['api'][url.path]) auth = sha1('%s%s%s' % (self.config['api_key'], self.config['salt'], self.get_date())) auth = b64encode("%s:api_token" % auth.hexdigest()) log.msg('Hitting %s 
with %s' % (forward_url, json.dumps(data))) response = yield http_request_full( forward_url.encode('ASCII'), json.dumps(data), {'User-Agent': ['Vusion Cioec Transport'], 'Content-Type': ['application/json,charset=UTF-8'], 'Authorization': ['Basic %s' % auth]}, 'POST') if response.code != 200: reason = "HTTP ERROR %s - %s" % (response.code, response.delivered_body) log.error(reason) yield self.publish_nack( message['message_id'], reason, transport_metadata=self.transport_metadata) return response_body = json.loads(response.delivered_body) if response_body['status'] == 'fail': reason = "SERVICE ERROR %s - %s" % (response_body['error'], response_body['message']) log.error(reason) yield self.publish_nack( message['message_id'], reason, transport_metadata=self.transport_metadata) return yield self.publish_ack( user_message_id=message['message_id'], sent_message_id=message['message_id'], transport_metadata=self.transport_metadata) except MissingData as ex: reason = "MISSING DATA %s" % ex.message yield self.publish_nack( message['message_id'], reason, transport_metadata=self.transport_metadata) except Exception as ex: exc_type, exc_value, exc_traceback = sys.exc_info() log.error( "TRANSPORT ERROR: %r" % traceback.format_exception(exc_type, exc_value, exc_traceback)) reason = "TRANSPORT ERROR %s" % (ex.message) yield self.publish_nack( message['message_id'], reason, transport_metadata=self.transport_metadata)
{ "content_hash": "ddafe4b0dab450d2842956e56656031d", "timestamp": "", "source": "github", "line_count": 121, "max_line_length": 101, "avg_line_length": 38.32231404958678, "alnum_prop": 0.5415139098555101, "repo_name": "texttochange/vusion-backend", "id": "7795955496d6b8c8df0d147f869feb3af1759c4d", "size": "4637", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "transports/http_forward/cioec_http.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "JavaScript", "bytes": "1510" }, { "name": "Python", "bytes": "1204678" }, { "name": "Shell", "bytes": "798" } ], "symlink_target": "" }
"""Tests for tensorflow.ops.tf.scatter.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf def _AsType(v, vtype): return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v) def _NumpyAdd(ref, indices, updates): # Since numpy advanced assignment does not support repeated indices, # we run a simple loop to perform scatter_add. for i, indx in np.ndenumerate(indices): ref[indx] += updates[i] def _NumpySub(ref, indices, updates): for i, indx in np.ndenumerate(indices): ref[indx] -= updates[i] class ScatterTest(tf.test.TestCase): def _VariableRankTest(self, np_scatter, tf_scatter, vtype, itype, use_gpu, repeat_indices=False): np.random.seed(8) with self.test_session(use_gpu=use_gpu): for indices_shape in (), (2,), (3, 7), (3, 4, 7): for extra_shape in (), (5,), (5, 9): # Generate random indices with no duplicates for easy numpy comparison size = np.prod(indices_shape, dtype=itype) first_dim = 3 * size indices = np.arange(first_dim) np.random.shuffle(indices) indices = indices[:size] if size > 1 and repeat_indices: # Add some random repeats. indices = indices[:size // 2] for _ in range(size - size // 2): # Randomly append some repeats. 
indices = np.append(indices, indices[np.random.randint(size // 2)]) np.random.shuffle(indices) indices = indices.reshape(indices_shape) updates = _AsType(np.random.randn(*(indices_shape + extra_shape)), vtype) old = _AsType(np.random.randn(*((first_dim,) + extra_shape)), vtype) # Scatter via numpy new = old.copy() np_scatter(new, indices, updates) # Scatter via tensorflow ref = tf.Variable(old) ref.initializer.run() tf_scatter(ref, indices, updates).eval() # Compare self.assertAllClose(ref.eval(), new) def _VariableRankTests(self, np_scatter, tf_scatter): for vtype in (np.float32, np.float64): for itype in (np.int32, np.int64): for use_gpu in (False, True): self._VariableRankTest(np_scatter, tf_scatter, vtype, itype, use_gpu) def testVariableRankUpdate(self): def update(ref, indices, updates): ref[indices] = updates self._VariableRankTests(update, tf.scatter_update) def testVariableRankAdd(self): self._VariableRankTests(_NumpyAdd, tf.scatter_add) def testVariableRankSub(self): self._VariableRankTests(_NumpySub, tf.scatter_sub) def _ScatterRepeatIndicesTest(self, np_scatter, tf_scatter): for vtype in (np.float32, np.float64): for itype in (np.int32, np.int64): for use_gpu in (False, True): self._VariableRankTest(np_scatter, tf_scatter, vtype, itype, use_gpu, repeat_indices=True) def testScatterRepeatIndices(self): """This tests scatter_add using indices that repeat.""" self._ScatterRepeatIndicesTest(_NumpyAdd, tf.scatter_add) self._ScatterRepeatIndicesTest(_NumpySub, tf.scatter_sub) def testBooleanScatterUpdate(self): with self.test_session(use_gpu=False) as session: var = tf.Variable([True, False]) update0 = tf.scatter_update(var, 1, True) update1 = tf.scatter_update(var, tf.constant(0, dtype=tf.int64), False) var.initializer.run() session.run([update0, update1]) self.assertAllEqual([False, True], var.eval()) def testScatterOutOfRangeCpu(self): for op in (tf.scatter_add, tf.scatter_sub, tf.scatter_update): params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32) 
updates = np.array([-3, -4, -5]).astype(np.float32) with self.test_session(use_gpu=False): ref = tf.Variable(params) ref.initializer.run() # Indices all in range, no problem. indices = np.array([2, 0, 5]) op(ref, indices, updates).eval() # Test some out of range errors. indices = np.array([-1, 0, 5]) with self.assertRaisesOpError('indices is out of range'): op(ref, indices, updates).eval() indices = np.array([2, 0, 6]) with self.assertRaisesOpError('indices is out of range'): op(ref, indices, updates).eval() # TODO(fpmc): Re-enable this test when gpu_pip test actually runs on a GPU. def _disabledTestScatterOutOfRangeGpu(self): if not tf.test.IsBuiltWithCuda(): return for op in (tf.scatter_add, tf.scatter_sub, tf.scatter_update): params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32) updates = np.array([-3, -4, -5]).astype(np.float32) # With GPU, the code ignores indices that are out of range. # We don't test the implementation; just test there's no failures. with self.test_session(force_gpu=True): ref = tf.Variable(params) ref.initializer.run() # Indices all in range, no problem. indices = np.array([2, 0, 5]) op(ref, indices, updates).eval() # Indicies out of range should not fail. indices = np.array([-1, 0, 5]) op(ref, indices, updates).eval() indices = np.array([2, 0, 6]) op(ref, indices, updates).eval() if __name__ == "__main__": tf.test.main()
{ "content_hash": "7bffc58561a45499d74d0434ec42333f", "timestamp": "", "source": "github", "line_count": 148, "max_line_length": 95, "avg_line_length": 41.722972972972975, "alnum_prop": 0.5475303643724696, "repo_name": "DailyActie/Surrogate-Model", "id": "0bbf0b9ce2dd112f814d4e3094578876da08498a", "size": "6853", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "01-codes/tensorflow-master/tensorflow/python/kernel_tests/scatter_ops_test.py", "mode": "33261", "license": "mit", "language": [ { "name": "Awk", "bytes": "345" }, { "name": "Batchfile", "bytes": "18746" }, { "name": "C", "bytes": "13004913" }, { "name": "C++", "bytes": "14692003" }, { "name": "CMake", "bytes": "72831" }, { "name": "CSS", "bytes": "303488" }, { "name": "Fortran", "bytes": "7339415" }, { "name": "HTML", "bytes": "854774" }, { "name": "Java", "bytes": "38854" }, { "name": "JavaScript", "bytes": "2432846" }, { "name": "Jupyter Notebook", "bytes": "829689" }, { "name": "M4", "bytes": "1379" }, { "name": "Makefile", "bytes": "48708" }, { "name": "Matlab", "bytes": "4346" }, { "name": "Objective-C", "bytes": "567" }, { "name": "PHP", "bytes": "93585" }, { "name": "Pascal", "bytes": "1449" }, { "name": "Perl", "bytes": "1152272" }, { "name": "PowerShell", "bytes": "17042" }, { "name": "Python", "bytes": "34668203" }, { "name": "Roff", "bytes": "5925" }, { "name": "Ruby", "bytes": "92498" }, { "name": "Shell", "bytes": "94698" }, { "name": "TeX", "bytes": "156540" }, { "name": "TypeScript", "bytes": "41691" } ], "symlink_target": "" }
from __future__ import absolute_import, unicode_literals from contextlib import contextmanager from .base import AppData, ContentStore class AppDataDisabled(AppData): """No application cache available (most likely as we don't have write permissions)""" def __init__(self): pass error = RuntimeError("no app data folder available, probably no write access to the folder") def close(self): """do nothing""" def reset(self): """do nothing""" def py_info(self, path): return ContentStoreNA() def embed_update_log(self, distribution, for_py_version): return ContentStoreNA() def extract(self, path, to_folder): raise self.error @contextmanager def locked(self, path): """do nothing""" yield @property def house(self): raise self.error def wheel_image(self, for_py_version, name): raise self.error @property def transient(self): return True def py_info_clear(self): """""" class ContentStoreNA(ContentStore): def exists(self): return False def read(self): """""" return None def write(self, content): """""" def remove(self): """""" @contextmanager def locked(self): yield
{ "content_hash": "57c74f08dcf059b571ce605ca97c00f3", "timestamp": "", "source": "github", "line_count": 67, "max_line_length": 96, "avg_line_length": 19.71641791044776, "alnum_prop": 0.6010598031794095, "repo_name": "TeamSPoon/logicmoo_workspace", "id": "937aa9a47436800f6ba221bcb022ee57530d87c4", "size": "1321", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "packs_web/butterfly/lib/python3.7/site-packages/virtualenv/app_data/na.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "342" }, { "name": "C", "bytes": "1" }, { "name": "C++", "bytes": "1" }, { "name": "CSS", "bytes": "126627" }, { "name": "HTML", "bytes": "839172" }, { "name": "Java", "bytes": "11116" }, { "name": "JavaScript", "bytes": "238700" }, { "name": "PHP", "bytes": "42253" }, { "name": "Perl 6", "bytes": "23" }, { "name": "Prolog", "bytes": "440882" }, { "name": "PureBasic", "bytes": "1334" }, { "name": "Rich Text Format", "bytes": "3436542" }, { "name": "Roff", "bytes": "42" }, { "name": "Shell", "bytes": "61603" }, { "name": "TeX", "bytes": "99504" } ], "symlink_target": "" }
from __future__ import division, print_function """ Scripts to automate running the DEIMOS spec1d pipeline (in IDL) """ import os import sys import time invoke_spec2d_templ = "et_domask,'{planfn}'\nexit\n" def invoke_spec2d(path, maskname): """ Runs spec2d in the given path, assuming there's a {maskname}.plan file Note that you have to manually close the returned proc.stdout! """ import subprocess planfn = os.path.abspath(os.path.join(path, maskname + '.plan')) logfn = os.path.abspath(os.path.join(path, maskname + '.log')) if not os.path.isdir(path): raise IOError('"{0}" is not a directory!'.format(path)) if not os.path.isfile(planfn): raise IOError('Plan file "{0}" does not exist!'.format(planfn)) logf = open(logfn, 'w') proc = subprocess.Popen('idl', cwd=path, stdin=subprocess.PIPE, stdout=logf, stderr=subprocess.STDOUT) proc.stdin.write(invoke_spec2d_templ.format(**locals())) # proc = subprocess.Popen('ls', cwd=path, stdin=None, # stdout=logf, stderr=subprocess.STDOUT) proc.maskname = maskname return proc def try_finish_spec2d(proc): if proc.poll() is None: return False else: if proc.returncode != 0: print('The process for plan file "{0}" returned {1}... ' 'possible problem? 
Check logs.'.format(proc.maskname, proc.returncode)) if proc.stdout is not None and not proc.stdout.closed: proc.stdout.close() if proc.stderr is not None and not proc.stderr.closed: proc.stderr.close() return True def find_unfinished_planfiles(msknames): planfiles = [] for nm in msknames: if os.path.isfile(nm): planfiles.append(nm) elif os.path.isdir(nm): path, name = os.path.split(nm) if name == '': nm = path path, name = os.path.split(nm) planfiles.append(os.path.join(path, name, name + '.plan')) for i, pf in reversed(list(enumerate(planfiles))): path, name = os.path.split(pf) if os.path.isfile(os.path.join(path, 'doneprocessing.txt')): print("doneprocessing was found for", name, 'skipping!') del planfiles[i] return planfiles def scatter_spec2ds(planpaths, maxtorun=2, waittime=1, verbose=True): """ `planpaths` is list of planfiles `maxtorun` is the number of simultaneous processes to run `waittime` is the time in sec to wait between polling """ procsdone = [] procsrunning = [] toinvoke = [] for plp in planpaths: if plp.endswith('.plan'): plp = plp[:-5] path, name = os.path.split(plp) toinvoke.append((path, name)) sleepsdone = 0 while len(toinvoke) > 0 or len(procsrunning) > 0: #first check if any are running that have finished for i, p in reversed(list(enumerate(procsrunning))): if try_finish_spec2d(p): # True -> proc done if verbose: print('\nFinished spec2d for', p.maskname) del procsrunning[i] procsdone.append(p) sleepsdone = 0 #now try to invoke any that remain to be invoked rem_from_toinvoke = [] for i, (path, name) in enumerate(toinvoke): if len(procsrunning) < maxtorun: if verbose: print('\nInvoking spec2d for', path, name) procsrunning.append(invoke_spec2d(path, name)) rem_from_toinvoke.append(i) sleepsdone = 0 for i in reversed(sorted(rem_from_toinvoke)): del toinvoke[i] if verbose: sys.stdout.write('Sleeping for {0} sec\r'.format(waittime*sleepsdone)) sys.stdout.flush() time.sleep(waittime) sleepsdone += 1 return dict([(p.maskname, p) for p in 
procsdone])
{ "content_hash": "45d934996b514c5fd34f17ea2bec16e7", "timestamp": "", "source": "github", "line_count": 125, "max_line_length": 89, "avg_line_length": 31.984, "alnum_prop": 0.587543771885943, "repo_name": "eteq/erikutils", "id": "03a3c82deb60efec7c2d63092846b716cdf877ba", "size": "3998", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "erikutils/spec2d_runner.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "87716" } ], "symlink_target": "" }
import asyncore import unittest import select import os import socket import sys import time import warnings import errno from test import test_support from test.test_support import TESTFN, run_unittest, unlink from StringIO import StringIO try: import threading except ImportError: threading = None HOST = test_support.HOST class dummysocket: def __init__(self): self.closed = False def close(self): self.closed = True def fileno(self): return 42 class dummychannel: def __init__(self): self.socket = dummysocket() def close(self): self.socket.close() class exitingdummy: def __init__(self): pass def handle_read_event(self): raise asyncore.ExitNow() handle_write_event = handle_read_event handle_close = handle_read_event handle_expt_event = handle_read_event class crashingdummy: def __init__(self): self.error_handled = False def handle_read_event(self): raise Exception() handle_write_event = handle_read_event handle_close = handle_read_event handle_expt_event = handle_read_event def handle_error(self): self.error_handled = True # used when testing senders; just collects what it gets until newline is sent def capture_server(evt, buf, serv): try: serv.listen(5) conn, addr = serv.accept() except socket.timeout: pass else: n = 200 while n > 0: r, w, e = select.select([conn], [], []) if r: data = conn.recv(10) # keep everything except for the newline terminator buf.write(data.replace('\n', '')) if '\n' in data: break n -= 1 time.sleep(0.01) conn.close() finally: serv.close() evt.set() class HelperFunctionTests(unittest.TestCase): def test_readwriteexc(self): # Check exception handling behavior of read, write and _exception # check that ExitNow exceptions in the object handler method # bubbles all the way up through asyncore read/write/_exception calls tr1 = exitingdummy() self.assertRaises(asyncore.ExitNow, asyncore.read, tr1) self.assertRaises(asyncore.ExitNow, asyncore.write, tr1) self.assertRaises(asyncore.ExitNow, asyncore._exception, tr1) # check that an exception other 
than ExitNow in the object handler # method causes the handle_error method to get called tr2 = crashingdummy() asyncore.read(tr2) self.assertEqual(tr2.error_handled, True) tr2 = crashingdummy() asyncore.write(tr2) self.assertEqual(tr2.error_handled, True) tr2 = crashingdummy() asyncore._exception(tr2) self.assertEqual(tr2.error_handled, True) # asyncore.readwrite uses constants in the select module that # are not present in Windows systems (see this thread: # http://mail.python.org/pipermail/python-list/2001-October/109973.html) # These constants should be present as long as poll is available @unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required') def test_readwrite(self): # Check that correct methods are called by readwrite() attributes = ('read', 'expt', 'write', 'closed', 'error_handled') expected = ( (select.POLLIN, 'read'), (select.POLLPRI, 'expt'), (select.POLLOUT, 'write'), (select.POLLERR, 'closed'), (select.POLLHUP, 'closed'), (select.POLLNVAL, 'closed'), ) class testobj: def __init__(self): self.read = False self.write = False self.closed = False self.expt = False self.error_handled = False def handle_read_event(self): self.read = True def handle_write_event(self): self.write = True def handle_close(self): self.closed = True def handle_expt_event(self): self.expt = True def handle_error(self): self.error_handled = True for flag, expectedattr in expected: tobj = testobj() self.assertEqual(getattr(tobj, expectedattr), False) asyncore.readwrite(tobj, flag) # Only the attribute modified by the routine we expect to be # called should be True. 
for attr in attributes: self.assertEqual(getattr(tobj, attr), attr==expectedattr) # check that ExitNow exceptions in the object handler method # bubbles all the way up through asyncore readwrite call tr1 = exitingdummy() self.assertRaises(asyncore.ExitNow, asyncore.readwrite, tr1, flag) # check that an exception other than ExitNow in the object handler # method causes the handle_error method to get called tr2 = crashingdummy() self.assertEqual(tr2.error_handled, False) asyncore.readwrite(tr2, flag) self.assertEqual(tr2.error_handled, True) def test_closeall(self): self.closeall_check(False) def test_closeall_default(self): self.closeall_check(True) def closeall_check(self, usedefault): # Check that close_all() closes everything in a given map l = [] testmap = {} for i in range(10): c = dummychannel() l.append(c) self.assertEqual(c.socket.closed, False) testmap[i] = c if usedefault: socketmap = asyncore.socket_map try: asyncore.socket_map = testmap asyncore.close_all() finally: testmap, asyncore.socket_map = asyncore.socket_map, socketmap else: asyncore.close_all(testmap) self.assertEqual(len(testmap), 0) for c in l: self.assertEqual(c.socket.closed, True) def test_compact_traceback(self): try: raise Exception("I don't like spam!") except: real_t, real_v, real_tb = sys.exc_info() r = asyncore.compact_traceback() else: self.fail("Expected exception") (f, function, line), t, v, info = r self.assertEqual(os.path.split(f)[-1], 'test_asyncore.py') self.assertEqual(function, 'test_compact_traceback') self.assertEqual(t, real_t) self.assertEqual(v, real_v) self.assertEqual(info, '[%s|%s|%s]' % (f, function, line)) class DispatcherTests(unittest.TestCase): def setUp(self): pass def tearDown(self): asyncore.close_all() def test_basic(self): d = asyncore.dispatcher() self.assertEqual(d.readable(), True) self.assertEqual(d.writable(), True) def test_repr(self): d = asyncore.dispatcher() self.assertEqual(repr(d), '<asyncore.dispatcher at %#x>' % id(d)) def test_log(self): d = 
asyncore.dispatcher() # capture output of dispatcher.log() (to stderr) fp = StringIO() stderr = sys.stderr l1 = "Lovely spam! Wonderful spam!" l2 = "I don't like spam!" try: sys.stderr = fp d.log(l1) d.log(l2) finally: sys.stderr = stderr lines = fp.getvalue().splitlines() self.assertEqual(lines, ['log: %s' % l1, 'log: %s' % l2]) def test_log_info(self): d = asyncore.dispatcher() # capture output of dispatcher.log_info() (to stdout via print) fp = StringIO() stdout = sys.stdout l1 = "Have you got anything without spam?" l2 = "Why can't she have egg bacon spam and sausage?" l3 = "THAT'S got spam in it!" try: sys.stdout = fp d.log_info(l1, 'EGGS') d.log_info(l2) d.log_info(l3, 'SPAM') finally: sys.stdout = stdout lines = fp.getvalue().splitlines() expected = ['EGGS: %s' % l1, 'info: %s' % l2, 'SPAM: %s' % l3] self.assertEqual(lines, expected) def test_unhandled(self): d = asyncore.dispatcher() d.ignore_log_types = () # capture output of dispatcher.log_info() (to stdout via print) fp = StringIO() stdout = sys.stdout try: sys.stdout = fp d.handle_expt() d.handle_read() d.handle_write() d.handle_connect() d.handle_accept() finally: sys.stdout = stdout lines = fp.getvalue().splitlines() expected = ['warning: unhandled incoming priority event', 'warning: unhandled read event', 'warning: unhandled write event', 'warning: unhandled connect event', 'warning: unhandled accept event'] self.assertEqual(lines, expected) def test_issue_8594(self): # XXX - this test is supposed to be removed in next major Python # version d = asyncore.dispatcher(socket.socket()) # make sure the error message no longer refers to the socket # object but the dispatcher instance instead self.assertRaisesRegexp(AttributeError, 'dispatcher instance', getattr, d, 'foo') # cheap inheritance with the underlying socket is supposed # to still work but a DeprecationWarning is expected with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") family = d.family self.assertEqual(family, 
socket.AF_INET) self.assertEqual(len(w), 1) self.assertTrue(issubclass(w[0].category, DeprecationWarning)) def test_strerror(self): # refers to bug #8573 err = asyncore._strerror(errno.EPERM) if hasattr(os, 'strerror'): self.assertEqual(err, os.strerror(errno.EPERM)) err = asyncore._strerror(-1) self.assertTrue(err != "") class dispatcherwithsend_noread(asyncore.dispatcher_with_send): def readable(self): return False def handle_connect(self): pass class DispatcherWithSendTests(unittest.TestCase): usepoll = False def setUp(self): pass def tearDown(self): asyncore.close_all() @unittest.skipUnless(threading, 'Threading required for this test.') @test_support.reap_threads def test_send(self): evt = threading.Event() sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.settimeout(3) port = test_support.bind_port(sock) cap = StringIO() args = (evt, cap, sock) t = threading.Thread(target=capture_server, args=args) t.start() try: # wait a little longer for the server to initialize (it sometimes # refuses connections on slow machines without this wait) time.sleep(0.2) data = "Suppose there isn't a 16-ton weight?" d = dispatcherwithsend_noread() d.create_socket(socket.AF_INET, socket.SOCK_STREAM) d.connect((HOST, port)) # give time for socket to connect time.sleep(0.1) d.send(data) d.send(data) d.send('\n') n = 1000 while d.out_buffer and n > 0: asyncore.poll() n -= 1 evt.wait() self.assertEqual(cap.getvalue(), data*2) finally: t.join() class DispatcherWithSendTests_UsePoll(DispatcherWithSendTests): usepoll = True @unittest.skipUnless(hasattr(asyncore, 'file_wrapper'), 'asyncore.file_wrapper required') class FileWrapperTest(unittest.TestCase): def setUp(self): self.d = "It's not dead, it's sleeping!" 
with file(TESTFN, 'w') as h: h.write(self.d) def tearDown(self): unlink(TESTFN) def test_recv(self): fd = os.open(TESTFN, os.O_RDONLY) w = asyncore.file_wrapper(fd) os.close(fd) self.assertNotEqual(w.fd, fd) self.assertNotEqual(w.fileno(), fd) self.assertEqual(w.recv(13), "It's not dead") self.assertEqual(w.read(6), ", it's") w.close() self.assertRaises(OSError, w.read, 1) def test_send(self): d1 = "Come again?" d2 = "I want to buy some cheese." fd = os.open(TESTFN, os.O_WRONLY | os.O_APPEND) w = asyncore.file_wrapper(fd) os.close(fd) w.write(d1) w.send(d2) w.close() self.assertEqual(file(TESTFN).read(), self.d + d1 + d2) @unittest.skipUnless(hasattr(asyncore, 'file_dispatcher'), 'asyncore.file_dispatcher required') def test_dispatcher(self): fd = os.open(TESTFN, os.O_RDONLY) data = [] class FileDispatcher(asyncore.file_dispatcher): def handle_read(self): data.append(self.recv(29)) s = FileDispatcher(fd) os.close(fd) asyncore.loop(timeout=0.01, use_poll=True, count=2) self.assertEqual(b"".join(data), self.d) class BaseTestHandler(asyncore.dispatcher): def __init__(self, sock=None): asyncore.dispatcher.__init__(self, sock) self.flag = False def handle_accept(self): raise Exception("handle_accept not supposed to be called") def handle_connect(self): raise Exception("handle_connect not supposed to be called") def handle_expt(self): raise Exception("handle_expt not supposed to be called") def handle_close(self): raise Exception("handle_close not supposed to be called") def handle_error(self): raise class TCPServer(asyncore.dispatcher): """A server which listens on an address and dispatches the connection to a handler. 
""" def __init__(self, handler=BaseTestHandler, host=HOST, port=0): asyncore.dispatcher.__init__(self) self.create_socket(socket.AF_INET, socket.SOCK_STREAM) self.set_reuse_addr() self.bind((host, port)) self.listen(5) self.handler = handler @property def address(self): return self.socket.getsockname()[:2] def handle_accept(self): sock, addr = self.accept() self.handler(sock) def handle_error(self): raise class BaseClient(BaseTestHandler): def __init__(self, address): BaseTestHandler.__init__(self) self.create_socket(socket.AF_INET, socket.SOCK_STREAM) self.connect(address) def handle_connect(self): pass class BaseTestAPI(unittest.TestCase): def tearDown(self): asyncore.close_all() def loop_waiting_for_flag(self, instance, timeout=5): timeout = float(timeout) / 100 count = 100 while asyncore.socket_map and count > 0: asyncore.loop(timeout=0.01, count=1, use_poll=self.use_poll) if instance.flag: return count -= 1 time.sleep(timeout) self.fail("flag not set") def test_handle_connect(self): # make sure handle_connect is called on connect() class TestClient(BaseClient): def handle_connect(self): self.flag = True server = TCPServer() client = TestClient(server.address) self.loop_waiting_for_flag(client) def test_handle_accept(self): # make sure handle_accept() is called when a client connects class TestListener(BaseTestHandler): def __init__(self): BaseTestHandler.__init__(self) self.create_socket(socket.AF_INET, socket.SOCK_STREAM) self.bind((HOST, 0)) self.listen(5) self.address = self.socket.getsockname()[:2] def handle_accept(self): self.flag = True server = TestListener() client = BaseClient(server.address) self.loop_waiting_for_flag(server) def test_handle_read(self): # make sure handle_read is called on data received class TestClient(BaseClient): def handle_read(self): self.flag = True class TestHandler(BaseTestHandler): def __init__(self, conn): BaseTestHandler.__init__(self, conn) self.send('x' * 1024) server = TCPServer(TestHandler) client = 
TestClient(server.address) self.loop_waiting_for_flag(client) def test_handle_write(self): # make sure handle_write is called class TestClient(BaseClient): def handle_write(self): self.flag = True server = TCPServer() client = TestClient(server.address) self.loop_waiting_for_flag(client) def test_handle_close(self): # make sure handle_close is called when the other end closes # the connection class TestClient(BaseClient): def handle_read(self): # in order to make handle_close be called we are supposed # to make at least one recv() call self.recv(1024) def handle_close(self): self.flag = True self.close() class TestHandler(BaseTestHandler): def __init__(self, conn): BaseTestHandler.__init__(self, conn) self.close() server = TCPServer(TestHandler) client = TestClient(server.address) self.loop_waiting_for_flag(client) @unittest.skipIf(sys.platform.startswith("sunos"), "OOB support is broken on Solaris") def test_handle_expt(self): # Make sure handle_expt is called on OOB data received. # Note: this might fail on some platforms as OOB data is # tenuously supported and rarely used. 
class TestClient(BaseClient): def handle_expt(self): self.flag = True class TestHandler(BaseTestHandler): def __init__(self, conn): BaseTestHandler.__init__(self, conn) self.socket.send(chr(244), socket.MSG_OOB) server = TCPServer(TestHandler) client = TestClient(server.address) self.loop_waiting_for_flag(client) def test_handle_error(self): class TestClient(BaseClient): def handle_write(self): 1.0 / 0 def handle_error(self): self.flag = True try: raise except ZeroDivisionError: pass else: raise Exception("exception not raised") server = TCPServer() client = TestClient(server.address) self.loop_waiting_for_flag(client) def test_connection_attributes(self): server = TCPServer() client = BaseClient(server.address) # we start disconnected self.assertFalse(server.connected) self.assertTrue(server.accepting) # this can't be taken for granted across all platforms #self.assertFalse(client.connected) self.assertFalse(client.accepting) # execute some loops so that client connects to server asyncore.loop(timeout=0.01, use_poll=self.use_poll, count=100) self.assertFalse(server.connected) self.assertTrue(server.accepting) self.assertTrue(client.connected) self.assertFalse(client.accepting) # disconnect the client client.close() self.assertFalse(server.connected) self.assertTrue(server.accepting) self.assertFalse(client.connected) self.assertFalse(client.accepting) # stop serving server.close() self.assertFalse(server.connected) self.assertFalse(server.accepting) def test_create_socket(self): s = asyncore.dispatcher() s.create_socket(socket.AF_INET, socket.SOCK_STREAM) self.assertEqual(s.socket.family, socket.AF_INET) self.assertEqual(s.socket.type, socket.SOCK_STREAM) def test_bind(self): s1 = asyncore.dispatcher() s1.create_socket(socket.AF_INET, socket.SOCK_STREAM) s1.bind((HOST, 0)) s1.listen(5) port = s1.socket.getsockname()[1] s2 = asyncore.dispatcher() s2.create_socket(socket.AF_INET, socket.SOCK_STREAM) # EADDRINUSE indicates the socket was correctly bound 
self.assertRaises(socket.error, s2.bind, (HOST, port)) def test_set_reuse_addr(self): sock = socket.socket() try: sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) except socket.error: unittest.skip("SO_REUSEADDR not supported on this platform") else: # if SO_REUSEADDR succeeded for sock we expect asyncore # to do the same s = asyncore.dispatcher(socket.socket()) self.assertFalse(s.socket.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)) s.create_socket(socket.AF_INET, socket.SOCK_STREAM) s.set_reuse_addr() self.assertTrue(s.socket.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)) finally: sock.close() class TestAPI_UseSelect(BaseTestAPI): use_poll = False @unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required') class TestAPI_UsePoll(BaseTestAPI): use_poll = True def test_main(): tests = [HelperFunctionTests, DispatcherTests, DispatcherWithSendTests, DispatcherWithSendTests_UsePoll, TestAPI_UseSelect, TestAPI_UsePoll, FileWrapperTest] run_unittest(*tests) if __name__ == "__main__": test_main()
{ "content_hash": "22478c4ca92c4e34b2db458340f1e2c0", "timestamp": "", "source": "github", "line_count": 722, "max_line_length": 78, "avg_line_length": 31.734072022160664, "alnum_prop": 0.5639402932960894, "repo_name": "ktan2020/legacy-automation", "id": "c28bb7d4eb8a7da654b5957d8a5ba74545139a4c", "size": "22912", "binary": false, "copies": "9", "ref": "refs/heads/master", "path": "win/Lib/test/test_asyncore.py", "mode": "33261", "license": "mit", "language": [ { "name": "ActionScript", "bytes": "913" }, { "name": "Ada", "bytes": "289" }, { "name": "Assembly", "bytes": "687" }, { "name": "Boo", "bytes": "540" }, { "name": "C", "bytes": "40116" }, { "name": "C#", "bytes": "474" }, { "name": "C++", "bytes": "393" }, { "name": "CSS", "bytes": "70883" }, { "name": "ColdFusion", "bytes": "1012" }, { "name": "Common Lisp", "bytes": "1034" }, { "name": "D", "bytes": "1858" }, { "name": "Eiffel", "bytes": "426" }, { "name": "Erlang", "bytes": "9243" }, { "name": "FORTRAN", "bytes": "1810" }, { "name": "Forth", "bytes": "182" }, { "name": "Groovy", "bytes": "2366" }, { "name": "Haskell", "bytes": "816" }, { "name": "Haxe", "bytes": "455" }, { "name": "Java", "bytes": "1155" }, { "name": "JavaScript", "bytes": "69444" }, { "name": "Lua", "bytes": "795" }, { "name": "Matlab", "bytes": "1278" }, { "name": "OCaml", "bytes": "350" }, { "name": "Objective-C++", "bytes": "885" }, { "name": "PHP", "bytes": "1411" }, { "name": "Pascal", "bytes": "388" }, { "name": "Perl", "bytes": "252651" }, { "name": "Pike", "bytes": "589" }, { "name": "Python", "bytes": "42085780" }, { "name": "R", "bytes": "1156" }, { "name": "Ruby", "bytes": "480" }, { "name": "Scheme", "bytes": "282" }, { "name": "Shell", "bytes": "30518" }, { "name": "Smalltalk", "bytes": "926" }, { "name": "Squirrel", "bytes": "697" }, { "name": "Stata", "bytes": "302" }, { "name": "SystemVerilog", "bytes": "3145" }, { "name": "Tcl", "bytes": "1039" }, { "name": "TeX", "bytes": "1746" }, { "name": "VHDL", "bytes": "985" }, { 
"name": "Vala", "bytes": "664" }, { "name": "Verilog", "bytes": "439" }, { "name": "Visual Basic", "bytes": "2142" }, { "name": "XSLT", "bytes": "152770" }, { "name": "ooc", "bytes": "890" }, { "name": "xBase", "bytes": "769" } ], "symlink_target": "" }
def attributesFromDict(d, obj=None, objName="self"):
    """Set each (name, value) pair in *d* as an attribute on *obj*.

    If *obj* is None, it is popped from *d* under the key *objName* --
    typically the ``self`` entry of a ``locals()`` dict -- so the target
    object never becomes an attribute of itself.

    Note: when *obj* is not supplied, *d* is mutated (the *objName* key
    is removed).
    """
    if obj is None:
        obj = d.pop(objName)
    # dict.items() works on both Python 2 and 3; the original used the
    # Python-2-only iteritems(), which raises AttributeError on Python 3.
    for n, v in d.items():
        setattr(obj, n, v)


class Before:
    """Conventional __init__ that assigns each argument explicitly."""

    def __init__(self, foo, bar, baz, boom=1, bang=2):
        self.foo = foo
        self.bar = bar
        self.baz = baz
        self.boom = boom
        self.bang = bang


class After:
    """Equivalent to Before, but using attributesFromDict(locals())."""

    def __init__(self, foo, bar, baz, boom=1, bang=2):
        attributesFromDict(locals())
{ "content_hash": "01002fc08dd9960c1b0fab480656552b", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 54, "avg_line_length": 26.764705882352942, "alnum_prop": 0.5560439560439561, "repo_name": "ActiveState/code", "id": "59b749397a635a1a9828400d9e9aa56fb9587407", "size": "455", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "recipes/Python/280381_Cleinit_methods_that_contaonly_attribute/recipe-280381.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "35894" }, { "name": "C", "bytes": "56048" }, { "name": "C++", "bytes": "90880" }, { "name": "HTML", "bytes": "11656" }, { "name": "Java", "bytes": "57468" }, { "name": "JavaScript", "bytes": "181218" }, { "name": "PHP", "bytes": "250144" }, { "name": "Perl", "bytes": "37296" }, { "name": "Perl 6", "bytes": "9914" }, { "name": "Python", "bytes": "17387779" }, { "name": "Ruby", "bytes": "40233" }, { "name": "Shell", "bytes": "190732" }, { "name": "Tcl", "bytes": "674650" } ], "symlink_target": "" }
"""Constant-bias current-vs-time (I-t) logger.

Applies a fixed voltage with a Keithley 2636A and repeatedly records
(timestamp, temperature, current) rows into a local SQLite database
until interrupted.  On exit (including Ctrl-C) the source output is
switched off and the number of recorded points is written back to the
``params`` row.

NOTE(review): the ``params`` and ``IT`` table schemas are assumed to
already exist in the database file -- confirm against the schema setup.
"""
# Std libs
import os
import sqlite3
import time
# Non-std libs
import visa
# My libs
from instr.ke2636a import Keithley2636A
from instr.sci9700 import Sci9700

## Configurations ---------------------------------------------------------------
debug_mode = False  # Set True while development without instruments.
#sci_rsrc_name = 'GPIB0::1::INSTR'
#sci_timeout_sec = 1
ke_rsrc_name = 'GPIB0::26::INSTR'   # VISA address of the Keithley 2636A
sci_rsrc_name = 'GPIB0::1::INSTR'   # VISA address of the Sci 9700 controller
ke_timeout_sec = 30
sci_timeout_sec = 1
# Database file and the metadata stored in the ``params`` table.
sqlite3_file_name = os.path.expanduser('~') + '/Documents/instr_data/IT.sqlite3'
sample = 'dummy_sample'
voltage = 10e-3       # bias applied by the SourceMeter (assumed volts)
compliance = 100e-6   # compliance limit (assumed amperes)
instrument = '304B Keithley 2636A'
comment = None

# Initialize -------------------------------------------------------------------
if debug_mode:
    # No hardware attached; presumably the instr drivers tolerate a None
    # resource when debug_mode is set -- TODO confirm in instr.*.
    # NOTE(review): ap_rsrc appears unused in this script (leftover?).
    ap_rsrc = None
    ke_rsrc = None
    sci_rsrc = None
else:
    rm = visa.ResourceManager()
    print(rm.list_resources())
    ke_rsrc = rm.open_resource(ke_rsrc_name)
    sci_rsrc = rm.open_resource(sci_rsrc_name)

ke = Keithley2636A(debug_mode, ke_rsrc, ke_timeout_sec)
sci = Sci9700(debug_mode, sci_rsrc, sci_timeout_sec)

# Connect to database ----------------------------------------------------------
sqlite3_connection = sqlite3.connect(sqlite3_file_name)
cursor = sqlite3_connection.cursor()

# Measure ----------------------------------------------------------------------
try:
    # Timestamps double as row keys: integer seconds as YYYYMMDDHHMMSS.
    t0 = int(time.strftime('%Y%m%d%H%M%S'))
    cursor.execute('INSERT INTO params VALUES(?,?,?,?,?,?,?)',
                   (t0, sample, None, voltage, compliance, instrument, comment))
    sqlite3_connection.commit()
    t = t0
    points = 0
    # Switch the source on at the configured bias; it stays on for the
    # whole acquisition loop and is switched off in the finally block.
    ke.read_single_on(voltage, compliance)
    while(True):  # run until interrupted (e.g. Ctrl-C)
        pass  # (no-op)
        new_t = int(time.strftime('%Y%m%d%H%M%S'))
        if new_t == t:  # for SQL unique constraint.
            # Same 1-second timestamp as the previous row: wait so the
            # new row gets a distinct key.
            time.sleep(1)
            new_t = int(time.strftime('%Y%m%d%H%M%S'))
        t = new_t
        temp = sci.read_temp('A')   # temperature from controller channel A
        I = ke.read_single_read()   # current measured at the applied bias
        points += 1
        # Commit each point so data survives an abrupt interruption.
        cursor.execute('INSERT INTO IT VALUES(?,?,?,?)', (t, t0, temp, I))
        sqlite3_connection.commit()
finally:
    # Always record how many points were taken and turn the source off,
    # even when the loop is interrupted or an instrument call fails.
    cursor.execute('UPDATE params SET points=? WHERE t0=?',(points, t0))
    ke.read_single_off()
    sqlite3_connection.commit()
    cursor.close()
{ "content_hash": "7b0270176ed561eed1d90ffccd00cf2c", "timestamp": "", "source": "github", "line_count": 79, "max_line_length": 81, "avg_line_length": 28.82278481012658, "alnum_prop": 0.556873078612209, "repo_name": "wataash/Instr", "id": "ea8a1f39897758d74937663eaffce20e4e55b1bf", "size": "2279", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mag_ti.py", "mode": "33261", "license": "mit", "language": [ { "name": "C#", "bytes": "43577" }, { "name": "Python", "bytes": "65761" } ], "symlink_target": "" }
from OpenGL.GL import *
import pywavefront.parser as parser
import pywavefront.texture as texture


class Material(object):
    """A wavefront .mtl material: lighting components, shininess, an
    optional texture, and an interleaved GL_T2F_N3F_V3F vertex array
    that draw() renders."""

    def __init__(self, name):
        self.name = name
        # RGBA lighting components, as mutable 4-float lists.
        self.diffuse = [.8, .8, .8, 1.]
        self.ambient = [.2, .2, .2, 1.]
        self.specular = [0., 0., 0., 1.]
        self.emissive = [0., 0., 0., 1.]
        self.shininess = 0.
        self.texture = None

        # Interleaved array of floats in GL_T2F_N3F_V3F format
        self.vertices = []
        # Lazily-built ctypes copy of self.vertices (see draw()).
        self.gl_floats = None

    def pad_light(self, values):
        """Accept an array of up to 4 values, and return an array of 4
        values. If the input array is less than length 4, pad it with
        zeroes until it is length 4. Also ensure each value is a float.

        The input sequence is left unmodified.  (The previous version
        appended the padding onto the caller's list in place, which --
        combined with the mutable-default ``values=[]`` arguments of the
        set_* methods below -- mutated the shared default list.)
        """
        padded = list(values)
        while len(padded) < 4:
            padded.append(0.)
        return [float(v) for v in padded]

    def set_alpha(self, alpha):
        """Set alpha/last value on all four lighting attributes."""
        alpha = float(alpha)
        self.diffuse[3] = alpha
        self.ambient[3] = alpha
        self.specular[3] = alpha
        self.emissive[3] = alpha

    # NOTE: ``values=None`` replaces the former mutable default
    # ``values=[]``; calling with no argument still yields four zeroes.
    def set_diffuse(self, values=None):
        self.diffuse = self.pad_light(values if values is not None else [])

    def set_ambient(self, values=None):
        self.ambient = self.pad_light(values if values is not None else [])

    def set_specular(self, values=None):
        self.specular = self.pad_light(values if values is not None else [])

    def set_emissive(self, values=None):
        self.emissive = self.pad_light(values if values is not None else [])

    def set_texture(self, path):
        self.texture = texture.Texture(path)

    def unset_texture(self):
        self.texture = None

    def gl_light(self, lighting):
        """Return a GLfloat with length 4, containing the 4 lighting values."""
        return (GLfloat * 4)(*lighting)

    def draw(self, face=GL_FRONT_AND_BACK):
        """Bind this material's lighting/texture state and draw its
        triangles from the interleaved T2F_N3F_V3F vertex array."""
        glEnable(GL_TEXTURE_2D)
        glColor4f(1, 1, 1, 1)

        glMaterialfv(face, GL_DIFFUSE, self.gl_light(self.diffuse))
        glMaterialfv(face, GL_AMBIENT, self.gl_light(self.ambient))
        glMaterialfv(face, GL_SPECULAR, self.gl_light(self.specular))
        glMaterialfv(face, GL_EMISSION, self.gl_light(self.emissive))
        glMaterialf(face, GL_SHININESS, self.shininess)

        if self.texture:
            self.texture.draw()

        if self.gl_floats is None:
            # Build the ctypes float array once.  T2F_N3F_V3F packs
            # 8 floats per vertex, hence the division by 8.
            # NOTE(review): despite its name, triangle_count is the
            # *vertex* count, which is what glDrawArrays expects.
            self.gl_floats = (GLfloat * len(self.vertices))(*self.vertices)
            self.triangle_count = len(self.vertices) / 8

        glInterleavedArrays(GL_T2F_N3F_V3F, 0, self.gl_floats)
        glDrawArrays(GL_TRIANGLES, 0, int(self.triangle_count))
        glDisable(GL_TEXTURE_2D)


class MaterialParser(parser.Parser):
    """Object to parse lines of a materials definition file."""

    def __init__(self, file_path, path):
        self.materials = {}
        self.path = path
        self.this_material = None
        self.read_file(file_path)

    def parse_newmtl(self, args):
        # Start a new material; subsequent parse_* calls apply to it.
        [newmtl] = args
        self.this_material = Material(newmtl)
        self.materials[self.this_material.name] = self.this_material

    def parse_Kd(self, args):
        self.this_material.set_diffuse(args)

    def parse_Ka(self, args):
        self.this_material.set_ambient(args)

    def parse_Ks(self, args):
        self.this_material.set_specular(args)

    def parse_Ke(self, args):
        self.this_material.set_emissive(args)

    def parse_Ns(self, args):
        [Ns] = args
        self.this_material.shininess = float(Ns)

    def parse_d(self, args):
        [d] = args
        self.this_material.set_alpha(d)

    def parse_map_Kd(self, args):
        # Texture paths are resolved relative to the model's directory.
        [Kd] = args
        self.this_material.set_texture(self.path + "textures/" + Kd)

    def parse_Ni(self, args):
        # unimplemented
        return

    def parse_illum(self, args):
        # unimplemented
        return
{ "content_hash": "c7016f8f93a4cba21dd2f934e37c18d7", "timestamp": "", "source": "github", "line_count": 121, "max_line_length": 79, "avg_line_length": 31.355371900826448, "alnum_prop": 0.6038481813389562, "repo_name": "elgrandt/ShooterInc", "id": "f72bd8ae65fb7e4bb2f7694fe3f07aad90af90c7", "size": "5513", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pywavefront/material.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "57199" } ], "symlink_target": "" }
"""Handles all requests relating to volumes.""" import collections import datetime import functools from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import timeutils from oslo_utils import uuidutils import six from cinder.api import common from cinder import context from cinder.db import base from cinder import exception from cinder import flow_utils from cinder.i18n import _, _LE, _LI, _LW from cinder.image import glance from cinder import keymgr from cinder import objects from cinder.objects import base as objects_base import cinder.policy from cinder import quota from cinder import quota_utils from cinder.scheduler import rpcapi as scheduler_rpcapi from cinder import utils from cinder.volume.flows.api import create_volume from cinder.volume.flows.api import manage_existing from cinder.volume import qos_specs from cinder.volume import rpcapi as volume_rpcapi from cinder.volume import utils as volume_utils from cinder.volume import volume_types allow_force_upload = cfg.BoolOpt('enable_force_upload', default=False, help='Enables the Force option on ' 'upload_to_image. 
This enables ' 'running upload_volume on in-use ' 'volumes for backends that support it.') volume_host_opt = cfg.BoolOpt('snapshot_same_host', default=True, help='Create volume from snapshot at the host ' 'where snapshot resides') volume_same_az_opt = cfg.BoolOpt('cloned_volume_same_az', default=True, help='Ensure that the new volumes are the ' 'same AZ as snapshot or source volume') az_cache_time_opt = cfg.IntOpt('az_cache_duration', default=3600, help='Cache volume availability zones in ' 'memory for the provided duration in ' 'seconds') CONF = cfg.CONF CONF.register_opt(allow_force_upload) CONF.register_opt(volume_host_opt) CONF.register_opt(volume_same_az_opt) CONF.register_opt(az_cache_time_opt) CONF.import_opt('glance_core_properties', 'cinder.image.glance') LOG = logging.getLogger(__name__) QUOTAS = quota.QUOTAS def wrap_check_policy(func): """Check policy corresponding to the wrapped methods prior to execution This decorator requires the first 3 args of the wrapped function to be (self, context, volume) """ @functools.wraps(func) def wrapped(self, context, target_obj, *args, **kwargs): check_policy(context, func.__name__, target_obj) return func(self, context, target_obj, *args, **kwargs) return wrapped def check_policy(context, action, target_obj=None): target = { 'project_id': context.project_id, 'user_id': context.user_id, } if isinstance(target_obj, objects_base.CinderObject): # Turn object into dict so target.update can work target.update( target_obj.obj_to_primitive()['versioned_object.data'] or {}) else: target.update(target_obj or {}) _action = 'volume:%s' % action cinder.policy.enforce(context, _action, target) class API(base.Base): """API for interacting with the volume manager.""" def __init__(self, db_driver=None, image_service=None): self.image_service = (image_service or glance.get_default_image_service()) self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() self.volume_rpcapi = volume_rpcapi.VolumeAPI() self.availability_zones = [] 
self.availability_zones_last_fetched = None self.key_manager = keymgr.API() super(API, self).__init__(db_driver) def list_availability_zones(self, enable_cache=False): """Describe the known availability zones :retval tuple of dicts, each with a 'name' and 'available' key """ refresh_cache = False if enable_cache: if self.availability_zones_last_fetched is None: refresh_cache = True else: cache_age = timeutils.delta_seconds( self.availability_zones_last_fetched, timeutils.utcnow()) if cache_age >= CONF.az_cache_duration: refresh_cache = True if refresh_cache or not enable_cache: topic = CONF.volume_topic ctxt = context.get_admin_context() services = self.db.service_get_all_by_topic(ctxt, topic) az_data = [(s['availability_zone'], s['disabled']) for s in services] disabled_map = {} for (az_name, disabled) in az_data: tracked_disabled = disabled_map.get(az_name, True) disabled_map[az_name] = tracked_disabled and disabled azs = [{'name': name, 'available': not disabled} for (name, disabled) in disabled_map.items()] if refresh_cache: now = timeutils.utcnow() self.availability_zones = azs self.availability_zones_last_fetched = now LOG.debug("Availability zone cache updated, next update will" " occur around %s.", now + datetime.timedelta( seconds=CONF.az_cache_duration)) else: azs = self.availability_zones LOG.info(_LI("Availability Zones retrieved successfully.")) return tuple(azs) def _retype_is_possible(self, context, first_type_id, second_type_id, first_type=None, second_type=None): safe = False if len(self.db.service_get_all_by_topic(context, 'cinder-volume', disabled=True)) == 1: safe = True else: type_a = first_type or volume_types.get_volume_type( context, first_type_id) type_b = second_type or volume_types.get_volume_type( context, second_type_id) if(volume_utils.matching_backend_name(type_a['extra_specs'], type_b['extra_specs'])): safe = True return safe def create(self, context, size, name, description, snapshot=None, image_id=None, volume_type=None, 
metadata=None, availability_zone=None, source_volume=None, scheduler_hints=None, source_replica=None, consistencygroup=None, cgsnapshot=None, multiattach=False, source_cg=None): # NOTE(jdg): we can have a create without size if we're # doing a create from snap or volume. Currently # the taskflow api will handle this and pull in the # size from the source. # NOTE(jdg): cinderclient sends in a string representation # of the size value. BUT there is a possibility that somebody # could call the API directly so the is_int_like check # handles both cases (string representation of true float or int). if size and (not utils.is_int_like(size) or int(size) <= 0): msg = _('Invalid volume size provided for create request: %s ' '(size argument must be an integer (or string ' 'representation of an integer) and greater ' 'than zero).') % size raise exception.InvalidInput(reason=msg) if consistencygroup and (not cgsnapshot and not source_cg): if not volume_type: msg = _("volume_type must be provided when creating " "a volume in a consistency group.") raise exception.InvalidInput(reason=msg) cg_voltypeids = consistencygroup.get('volume_type_id') if volume_type.get('id') not in cg_voltypeids: msg = _("Invalid volume_type provided: %s (requested " "type must be supported by this consistency " "group).") % volume_type raise exception.InvalidInput(reason=msg) if source_volume and volume_type: if volume_type['id'] != source_volume['volume_type_id']: if not self._retype_is_possible( context, volume_type['id'], source_volume['volume_type_id'], volume_type): msg = _("Invalid volume_type provided: %s (requested type " "is not compatible; either match source volume, " "or omit type argument).") % volume_type['id'] raise exception.InvalidInput(reason=msg) # When cloning replica (for testing), volume type must be omitted if source_replica and volume_type: msg = _("No volume_type should be provided when creating test " "replica.") raise exception.InvalidInput(reason=msg) if snapshot and 
volume_type: if volume_type['id'] != snapshot['volume_type_id']: if not self._retype_is_possible(context, volume_type['id'], snapshot['volume_type_id'], volume_type): msg = _("Invalid volume_type provided: %s (requested " "type is not compatible; recommend omitting " "the type argument).") % volume_type['id'] raise exception.InvalidInput(reason=msg) # Determine the valid availability zones that the volume could be # created in (a task in the flow will/can use this information to # ensure that the availability zone requested is valid). raw_zones = self.list_availability_zones(enable_cache=True) availability_zones = set([az['name'] for az in raw_zones]) if CONF.storage_availability_zone: availability_zones.add(CONF.storage_availability_zone) create_what = { 'context': context, 'raw_size': size, 'name': name, 'description': description, 'snapshot': snapshot, 'image_id': image_id, 'raw_volume_type': volume_type, 'metadata': metadata or {}, 'raw_availability_zone': availability_zone, 'source_volume': source_volume, 'scheduler_hints': scheduler_hints, 'key_manager': self.key_manager, 'source_replica': source_replica, 'optional_args': {'is_quota_committed': False}, 'consistencygroup': consistencygroup, 'cgsnapshot': cgsnapshot, 'multiattach': multiattach, } try: sched_rpcapi = (self.scheduler_rpcapi if (not cgsnapshot and not source_cg) else None) volume_rpcapi = (self.volume_rpcapi if (not cgsnapshot and not source_cg) else None) flow_engine = create_volume.get_flow(self.db, self.image_service, availability_zones, create_what, sched_rpcapi, volume_rpcapi) except Exception: msg = _('Failed to create api volume flow.') LOG.exception(msg) raise exception.CinderException(msg) # Attaching this listener will capture all of the notifications that # taskflow sends out and redirect them to a more useful log for # cinders debugging (or error reporting) usage. 
with flow_utils.DynamicLogListener(flow_engine, logger=LOG): flow_engine.run() vref = flow_engine.storage.fetch('volume') LOG.info(_LI("Volume created successfully."), resource=vref) return vref @wrap_check_policy def delete(self, context, volume, force=False, unmanage_only=False): if context.is_admin and context.project_id != volume.project_id: project_id = volume.project_id else: project_id = context.project_id if not volume.host: volume_utils.notify_about_volume_usage(context, volume, "delete.start") # NOTE(vish): scheduling failed, so delete it # Note(zhiteng): update volume quota reservation try: reserve_opts = {'volumes': -1, 'gigabytes': -volume.size} QUOTAS.add_volume_type_opts(context, reserve_opts, volume.volume_type_id) reservations = QUOTAS.reserve(context, project_id=project_id, **reserve_opts) except Exception: reservations = None LOG.exception(_LE("Failed to update quota while " "deleting volume.")) volume.destroy() if reservations: QUOTAS.commit(context, reservations, project_id=project_id) volume_utils.notify_about_volume_usage(context, volume, "delete.end") LOG.info(_LI("Delete volume request issued successfully."), resource={'type': 'volume', 'id': volume.id}) return if volume.attach_status == "attached": # Volume is still attached, need to detach first LOG.info(_LI('Unable to delete volume: %s, ' 'volume is attached.'), volume.id) raise exception.VolumeAttached(volume_id=volume.id) if not force and volume.status not in ["available", "error", "error_restoring", "error_extending"]: msg = _("Volume status must be available or error, " "but current status is: %s.") % volume.status LOG.info(_LI('Unable to delete volume: %(vol_id)s, ' 'volume must be available or ' 'error, but is %(vol_status)s.'), {'vol_id': volume.id, 'vol_status': volume.status}) raise exception.InvalidVolume(reason=msg) if volume['migration_status'] not in (None, 'deleting'): # Volume is migrating, wait until done LOG.info(_LI('Unable to delete volume: %s, ' 'volume is currently 
migrating.'), volume.id) msg = _("Volume cannot be deleted while migrating") raise exception.InvalidVolume(reason=msg) if volume.consistencygroup_id is not None: msg = _("Volume cannot be deleted while in a consistency group.") LOG.info(_LI('Unable to delete volume: %s, ' 'volume is currently part of a ' 'consistency group.'), volume.id) raise exception.InvalidVolume(reason=msg) snapshots = objects.SnapshotList.get_all_for_volume(context, volume.id) if len(snapshots): LOG.info(_LI('Unable to delete volume: %s, ' 'volume currently has snapshots.'), volume.id) msg = _("Volume still has %d dependent " "snapshots.") % len(snapshots) raise exception.InvalidVolume(reason=msg) # If the volume is encrypted, delete its encryption key from the key # manager. This operation makes volume deletion an irreversible process # because the volume cannot be decrypted without its key. encryption_key_id = volume.get('encryption_key_id', None) if encryption_key_id is not None: self.key_manager.delete_key(context, encryption_key_id) volume.status = 'deleting' volume.terminated_at = timeutils.utcnow() volume.save() self.volume_rpcapi.delete_volume(context, volume, unmanage_only) LOG.info(_LI("Delete volume request issued successfully."), resource=volume) @wrap_check_policy def update(self, context, volume, fields): volume.update(fields) volume.save() LOG.info(_LI("Volume updated successfully."), resource=volume) def get(self, context, volume_id, viewable_admin_meta=False): volume = objects.Volume.get_by_id(context, volume_id) if viewable_admin_meta: ctxt = context.elevated() admin_metadata = self.db.volume_admin_metadata_get(ctxt, volume_id) volume.admin_metadata = admin_metadata volume.obj_reset_changes() try: check_policy(context, 'get', volume) except exception.PolicyNotAuthorized: # raise VolumeNotFound instead to make sure Cinder behaves # as it used to raise exception.VolumeNotFound(volume_id=volume_id) LOG.info(_LI("Volume info retrieved successfully."), resource=volume) return 
volume def _get_all_tenants_value(self, filters): """Returns a Boolean for the value of filters['all_tenants']. False is returned if 'all_tenants' is not in the filters dictionary. An InvalidInput exception is thrown for invalid values. """ b = False if 'all_tenants' in filters: val = six.text_type(filters['all_tenants']).lower() if val in ['true', '1']: b = True elif val in ['false', '0']: b = False else: msg = _('all_tenants param must be 0 or 1') raise exception.InvalidInput(reason=msg) return b def get_all(self, context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, viewable_admin_meta=False, offset=None): check_policy(context, 'get_all') if filters is None: filters = {} allTenants = self._get_all_tenants_value(filters) try: if limit is not None: limit = int(limit) if limit < 0: msg = _('limit param must be positive') raise exception.InvalidInput(reason=msg) except ValueError: msg = _('limit param must be an integer') raise exception.InvalidInput(reason=msg) # Non-admin shouldn't see temporary target of a volume migration, add # unique filter data to reflect that only volumes with a NULL # 'migration_status' or a 'migration_status' that does not start with # 'target:' should be returned (processed in db/sqlalchemy/api.py) if not context.is_admin: filters['no_migration_targets'] = True if filters: LOG.debug("Searching by: %s.", six.text_type(filters)) if context.is_admin and allTenants: # Need to remove all_tenants to pass the filtering below. 
del filters['all_tenants'] volumes = objects.VolumeList.get_all(context, marker, limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, offset=offset) else: if viewable_admin_meta: context = context.elevated() volumes = objects.VolumeList.get_all_by_project( context, context.project_id, marker, limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, offset=offset) LOG.info(_LI("Get all volumes completed successfully.")) return volumes def get_snapshot(self, context, snapshot_id): snapshot = objects.Snapshot.get_by_id(context, snapshot_id) # FIXME(jdg): The objects don't have the db name entries # so build the resource tag manually for now. LOG.info(_LI("Snapshot retrieved successfully."), resource={'type': 'snapshot', 'id': snapshot.id}) return snapshot def get_volume(self, context, volume_id): check_policy(context, 'get_volume') volume = objects.Volume.get_by_id(context, volume_id) LOG.info(_LI("Volume retrieved successfully."), resource=volume) return volume def get_all_snapshots(self, context, search_opts=None): check_policy(context, 'get_all_snapshots') search_opts = search_opts or {} if (context.is_admin and 'all_tenants' in search_opts): # Need to remove all_tenants to pass the filtering below. 
del search_opts['all_tenants'] snapshots = objects.SnapshotList.get_all(context, search_opts) else: snapshots = objects.SnapshotList.get_all_by_project( context, context.project_id, search_opts) LOG.info(_LI("Get all snaphsots completed successfully.")) return snapshots @wrap_check_policy def reserve_volume(self, context, volume): # NOTE(jdg): check for Race condition bug 1096983 # explicitly get updated ref and check if volume['status'] == 'available': self.update(context, volume, {"status": "attaching"}) elif volume['status'] == 'in-use': if volume['multiattach']: self.update(context, volume, {"status": "attaching"}) else: msg = _("Volume must be multiattachable to reserve again.") LOG.error(msg) raise exception.InvalidVolume(reason=msg) else: msg = _("Volume status must be available to reserve.") LOG.error(msg) raise exception.InvalidVolume(reason=msg) LOG.info(_LI("Reserve volume completed successfully."), resource=volume) @wrap_check_policy def unreserve_volume(self, context, volume): if volume['status'] == 'attaching': attaches = self.db.volume_attachment_get_used_by_volume_id( context, volume['id']) if attaches: self.update(context, volume, {"status": "in-use"}) else: self.update(context, volume, {"status": "available"}) LOG.info(_LI("Unreserve volume completed successfully."), resource=volume) @wrap_check_policy def begin_detaching(self, context, volume): # NOTE(vbala): The volume status might be 'detaching' already due to # a previous begin_detaching call. Get updated volume status so that # we fail such cases. volume.refresh() # If we are in the middle of a volume migration, we don't want the user # to see that the volume is 'detaching'. Having 'migration_status' set # will have the same effect internally. if volume.migration_status: return if (volume.status != 'in-use' or volume.attach_status != 'attached'): msg = (_("Unable to detach volume. Volume status must be 'in-use' " "and attach_status must be 'attached' to detach. 
" "Currently: status: '%(status)s', " "attach_status: '%(attach_status)s.'") % {'status': volume.status, 'attach_status': volume.attach_status}) LOG.error(msg) raise exception.InvalidVolume(reason=msg) self.update(context, volume, {"status": "detaching"}) LOG.info(_LI("Begin detaching volume completed successfully."), resource=volume) @wrap_check_policy def roll_detaching(self, context, volume): if volume['status'] == "detaching": self.update(context, volume, {"status": "in-use"}) LOG.info(_LI("Roll detaching of volume completed successfully."), resource=volume) @wrap_check_policy def attach(self, context, volume, instance_uuid, host_name, mountpoint, mode): volume_metadata = self.get_volume_admin_metadata(context.elevated(), volume) if 'readonly' not in volume_metadata: # NOTE(zhiyan): set a default value for read-only flag to metadata. self.update_volume_admin_metadata(context.elevated(), volume, {'readonly': 'False'}) volume_metadata['readonly'] = 'False' if volume_metadata['readonly'] == 'True' and mode != 'ro': raise exception.InvalidVolumeAttachMode(mode=mode, volume_id=volume['id']) attach_results = self.volume_rpcapi.attach_volume(context, volume, instance_uuid, host_name, mountpoint, mode) LOG.info(_LI("Attach volume completed successfully."), resource=volume) return attach_results @wrap_check_policy def detach(self, context, volume, attachment_id): detach_results = self.volume_rpcapi.detach_volume(context, volume, attachment_id) LOG.info(_LI("Detach volume completed successfully."), resource=volume) return detach_results @wrap_check_policy def initialize_connection(self, context, volume, connector): init_results = self.volume_rpcapi.initialize_connection(context, volume, connector) LOG.info(_LI("Initialize volume connection completed successfully."), resource=volume) return init_results @wrap_check_policy def terminate_connection(self, context, volume, connector, force=False): self.unreserve_volume(context, volume) results = 
self.volume_rpcapi.terminate_connection(context, volume, connector, force) LOG.info(_LI("Terminate volume connection completed successfully."), resource=volume) return results @wrap_check_policy def accept_transfer(self, context, volume, new_user, new_project): results = self.volume_rpcapi.accept_transfer(context, volume, new_user, new_project) LOG.info(_LI("Transfer volume completed successfully."), resource=volume) return results def _create_snapshot(self, context, volume, name, description, force=False, metadata=None, cgsnapshot_id=None): snapshot = self.create_snapshot_in_db( context, volume, name, description, force, metadata, cgsnapshot_id) self.volume_rpcapi.create_snapshot(context, volume, snapshot) return snapshot def create_snapshot_in_db(self, context, volume, name, description, force, metadata, cgsnapshot_id): check_policy(context, 'create_snapshot', volume) if volume['migration_status'] is not None: # Volume is migrating, wait until done msg = _("Snapshot cannot be created while volume is migrating.") raise exception.InvalidVolume(reason=msg) if volume['status'].startswith('replica_'): # Can't snapshot secondary replica msg = _("Snapshot of secondary replica is not allowed.") raise exception.InvalidVolume(reason=msg) if ((not force) and (volume['status'] != "available")): msg = _("Volume %(vol_id)s status must be available, " "but current status is: " "%(vol_status)s.") % {'vol_id': volume['id'], 'vol_status': volume['status']} raise exception.InvalidVolume(reason=msg) try: if CONF.no_snapshot_gb_quota: reserve_opts = {'snapshots': 1} else: reserve_opts = {'snapshots': 1, 'gigabytes': volume['size']} QUOTAS.add_volume_type_opts(context, reserve_opts, volume.get('volume_type_id')) reservations = QUOTAS.reserve(context, **reserve_opts) except exception.OverQuota as e: overs = e.kwargs['overs'] usages = e.kwargs['usages'] quotas = e.kwargs['quotas'] def _consumed(name): return (usages[name]['reserved'] + usages[name]['in_use']) for over in overs: if 
'gigabytes' in over: msg = _LW("Quota exceeded for %(s_pid)s, tried to create " "%(s_size)sG snapshot (%(d_consumed)dG of " "%(d_quota)dG already consumed).") LOG.warning(msg, {'s_pid': context.project_id, 's_size': volume['size'], 'd_consumed': _consumed(over), 'd_quota': quotas[over]}) raise exception.VolumeSizeExceedsAvailableQuota( requested=volume['size'], consumed=_consumed('gigabytes'), quota=quotas['gigabytes']) elif 'snapshots' in over: msg = _LW("Quota exceeded for %(s_pid)s, tried to create " "snapshot (%(d_consumed)d snapshots " "already consumed).") LOG.warning(msg, {'s_pid': context.project_id, 'd_consumed': _consumed(over)}) raise exception.SnapshotLimitExceeded( allowed=quotas[over]) self._check_metadata_properties(metadata) snapshot = None try: kwargs = { 'volume_id': volume['id'], 'cgsnapshot_id': cgsnapshot_id, 'user_id': context.user_id, 'project_id': context.project_id, 'status': 'creating', 'progress': '0%', 'volume_size': volume['size'], 'display_name': name, 'display_description': description, 'volume_type_id': volume['volume_type_id'], 'encryption_key_id': volume['encryption_key_id'], 'metadata': metadata or {} } snapshot = objects.Snapshot(context=context, **kwargs) snapshot.create() QUOTAS.commit(context, reservations) except Exception: with excutils.save_and_reraise_exception(): try: if hasattr(snapshot, 'id'): snapshot.destroy() finally: QUOTAS.rollback(context, reservations) return snapshot def create_snapshots_in_db(self, context, volume_list, name, description, force, cgsnapshot_id): snapshot_list = [] for volume in volume_list: self._create_snapshot_in_db_validate(context, volume, force) reservations = self._create_snapshots_in_db_reserve( context, volume_list) options_list = [] for volume in volume_list: options = self._create_snapshot_in_db_options( context, volume, name, description, cgsnapshot_id) options_list.append(options) try: for options in options_list: snapshot = objects.Snapshot(context=context, **options) 
snapshot.create() snapshot_list.append(snapshot) QUOTAS.commit(context, reservations) except Exception: with excutils.save_and_reraise_exception(): try: for snap in snapshot_list: snap.destroy() finally: QUOTAS.rollback(context, reservations) return snapshot_list def _create_snapshot_in_db_validate(self, context, volume, force): check_policy(context, 'create_snapshot', volume) if volume['migration_status'] is not None: # Volume is migrating, wait until done msg = _("Snapshot cannot be created while volume is migrating.") raise exception.InvalidVolume(reason=msg) if ((not force) and (volume['status'] != "available")): msg = _("Snapshot cannot be created because volume %(vol_id)s " "is not available, current volume status: " "%(vol_status)s.") % {'vol_id': volume['id'], 'vol_status': volume['status']} raise exception.InvalidVolume(reason=msg) def _create_snapshots_in_db_reserve(self, context, volume_list): reserve_opts_list = [] total_reserve_opts = {} try: for volume in volume_list: if CONF.no_snapshot_gb_quota: reserve_opts = {'snapshots': 1} else: reserve_opts = {'snapshots': 1, 'gigabytes': volume['size']} QUOTAS.add_volume_type_opts(context, reserve_opts, volume.get('volume_type_id')) reserve_opts_list.append(reserve_opts) for reserve_opts in reserve_opts_list: for (key, value) in reserve_opts.items(): if key not in total_reserve_opts.keys(): total_reserve_opts[key] = value else: total_reserve_opts[key] = \ total_reserve_opts[key] + value reservations = QUOTAS.reserve(context, **total_reserve_opts) except exception.OverQuota as e: overs = e.kwargs['overs'] usages = e.kwargs['usages'] quotas = e.kwargs['quotas'] def _consumed(name): return (usages[name]['reserved'] + usages[name]['in_use']) for over in overs: if 'gigabytes' in over: msg = _LW("Quota exceeded for %(s_pid)s, tried to create " "%(s_size)sG snapshot (%(d_consumed)dG of " "%(d_quota)dG already consumed).") LOG.warning(msg, {'s_pid': context.project_id, 's_size': volume['size'], 'd_consumed': 
_consumed(over), 'd_quota': quotas[over]}) raise exception.VolumeSizeExceedsAvailableQuota( requested=volume['size'], consumed=_consumed('gigabytes'), quota=quotas['gigabytes']) elif 'snapshots' in over: msg = _LW("Quota exceeded for %(s_pid)s, tried to create " "snapshot (%(d_consumed)d snapshots " "already consumed).") LOG.warning(msg, {'s_pid': context.project_id, 'd_consumed': _consumed(over)}) raise exception.SnapshotLimitExceeded( allowed=quotas[over]) return reservations def _create_snapshot_in_db_options(self, context, volume, name, description, cgsnapshot_id): options = {'volume_id': volume['id'], 'cgsnapshot_id': cgsnapshot_id, 'user_id': context.user_id, 'project_id': context.project_id, 'status': "creating", 'progress': '0%', 'volume_size': volume['size'], 'display_name': name, 'display_description': description, 'volume_type_id': volume['volume_type_id'], 'encryption_key_id': volume['encryption_key_id']} return options def create_snapshot(self, context, volume, name, description, metadata=None, cgsnapshot_id=None): result = self._create_snapshot(context, volume, name, description, False, metadata, cgsnapshot_id) LOG.info(_LI("Snapshot create request issued successfully."), resource=result) return result def create_snapshot_force(self, context, volume, name, description, metadata=None): result = self._create_snapshot(context, volume, name, description, True, metadata) LOG.info(_LI("Snapshot force create request issued successfully."), resource=result) return result @wrap_check_policy def delete_snapshot(self, context, snapshot, force=False): if not force and snapshot['status'] not in ["available", "error"]: LOG.error(_LE('Unable to delete snapshot: %(snap_id)s, ' 'due to invalid status. 
' 'Status must be available or ' 'error, not %(snap_status)s.'), {'snap_id': snapshot['id'], 'snap_status': snapshot['status']}) msg = _("Volume Snapshot status must be available or error.") raise exception.InvalidSnapshot(reason=msg) cgsnapshot_id = snapshot.get('cgsnapshot_id', None) if cgsnapshot_id: msg = _('Unable to delete snapshot %s because it is part of a ' 'consistency group.') % snapshot['id'] LOG.error(msg) raise exception.InvalidSnapshot(reason=msg) snapshot_obj = self.get_snapshot(context, snapshot['id']) snapshot_obj.status = 'deleting' snapshot_obj.save() volume = objects.Volume.get_by_id(context, snapshot_obj.volume_id) self.volume_rpcapi.delete_snapshot(context, snapshot_obj, volume.host) LOG.info(_LI("Snapshot delete request issued successfully."), resource=snapshot) @wrap_check_policy def update_snapshot(self, context, snapshot, fields): snapshot.update(fields) snapshot.save() @wrap_check_policy def get_volume_metadata(self, context, volume): """Get all metadata associated with a volume.""" rv = self.db.volume_metadata_get(context, volume['id']) LOG.info(_LI("Get volume metadata completed successfully."), resource=volume) return dict(rv) @wrap_check_policy def delete_volume_metadata(self, context, volume, key, meta_type=common.METADATA_TYPES.user): """Delete the given metadata item from a volume.""" self.db.volume_metadata_delete(context, volume['id'], key, meta_type) LOG.info(_LI("Delete volume metadata completed successfully."), resource=volume) def _check_metadata_properties(self, metadata=None): if not metadata: metadata = {} for k, v in metadata.items(): if len(k) == 0: msg = _("Metadata property key blank.") LOG.warning(msg) raise exception.InvalidVolumeMetadata(reason=msg) if len(k) > 255: msg = _("Metadata property key greater than 255 characters.") LOG.warning(msg) raise exception.InvalidVolumeMetadataSize(reason=msg) if len(v) > 255: msg = _("Metadata property value greater than 255 characters.") LOG.warning(msg) raise 
exception.InvalidVolumeMetadataSize(reason=msg) @wrap_check_policy def update_volume_metadata(self, context, volume, metadata, delete=False, meta_type=common.METADATA_TYPES.user): """Updates or creates volume metadata. If delete is True, metadata items that are not specified in the `metadata` argument will be deleted. """ if delete: _metadata = metadata else: if meta_type == common.METADATA_TYPES.user: orig_meta = self.get_volume_metadata(context, volume) elif meta_type == common.METADATA_TYPES.image: try: orig_meta = self.get_volume_image_metadata(context, volume) except exception.GlanceMetadataNotFound: orig_meta = {} else: raise exception.InvalidMetadataType(metadata_type=meta_type, id=volume['id']) _metadata = orig_meta.copy() _metadata.update(metadata) self._check_metadata_properties(_metadata) db_meta = self.db.volume_metadata_update(context, volume['id'], _metadata, delete, meta_type) # TODO(jdg): Implement an RPC call for drivers that may use this info LOG.info(_LI("Update volume metadata completed successfully."), resource=volume) return db_meta def get_volume_metadata_value(self, volume, key): """Get value of particular metadata key.""" metadata = volume.get('volume_metadata') if metadata: for i in volume['volume_metadata']: if i['key'] == key: return i['value'] LOG.info(_LI("Get volume metadata key completed successfully."), resource=volume) return None @wrap_check_policy def get_volume_admin_metadata(self, context, volume): """Get all administration metadata associated with a volume.""" rv = self.db.volume_admin_metadata_get(context, volume['id']) LOG.info(_LI("Get volume admin metadata completed successfully."), resource=volume) return dict(rv) @wrap_check_policy def delete_volume_admin_metadata(self, context, volume, key): """Delete the given administration metadata item from a volume.""" self.db.volume_admin_metadata_delete(context, volume['id'], key) LOG.info(_LI("Delete volume admin metadata completed successfully."), resource=volume) 
@wrap_check_policy def update_volume_admin_metadata(self, context, volume, metadata, delete=False): """Updates or creates volume administration metadata. If delete is True, metadata items that are not specified in the `metadata` argument will be deleted. """ if delete: _metadata = metadata else: orig_meta = self.get_volume_admin_metadata(context, volume) _metadata = orig_meta.copy() _metadata.update(metadata) self._check_metadata_properties(_metadata) self.db.volume_admin_metadata_update(context, volume['id'], _metadata, delete) # TODO(jdg): Implement an RPC call for drivers that may use this info LOG.info(_LI("Update volume admin metadata completed successfully."), resource=volume) return _metadata def get_snapshot_metadata(self, context, snapshot): """Get all metadata associated with a snapshot.""" snapshot_obj = self.get_snapshot(context, snapshot['id']) LOG.info(_LI("Get snapshot metadata completed successfully."), resource=snapshot) return snapshot_obj.metadata def delete_snapshot_metadata(self, context, snapshot, key): """Delete the given metadata item from a snapshot.""" snapshot_obj = self.get_snapshot(context, snapshot['id']) snapshot_obj.delete_metadata_key(context, key) LOG.info(_LI("Delete snapshot metadata completed successfully."), resource=snapshot) def update_snapshot_metadata(self, context, snapshot, metadata, delete=False): """Updates or creates snapshot metadata. If delete is True, metadata items that are not specified in the `metadata` argument will be deleted. 
""" if delete: _metadata = metadata else: orig_meta = snapshot.metadata _metadata = orig_meta.copy() _metadata.update(metadata) self._check_metadata_properties(_metadata) snapshot.metadata = _metadata snapshot.save() # TODO(jdg): Implement an RPC call for drivers that may use this info LOG.info(_LI("Update snapshot metadata completed successfully."), resource=snapshot) return snapshot.metadata def get_snapshot_metadata_value(self, snapshot, key): LOG.info(_LI("Get snapshot metadata value not implemented."), resource=snapshot) # FIXME(jdg): Huh? Pass? pass def get_volumes_image_metadata(self, context): check_policy(context, 'get_volumes_image_metadata') db_data = self.db.volume_glance_metadata_get_all(context) results = collections.defaultdict(dict) for meta_entry in db_data: results[meta_entry['volume_id']].update({meta_entry['key']: meta_entry['value']}) return results @wrap_check_policy def get_volume_image_metadata(self, context, volume): db_data = self.db.volume_glance_metadata_get(context, volume['id']) LOG.info(_LI("Get volume image-metadata completed successfully."), resource=volume) return {meta_entry.key: meta_entry.value for meta_entry in db_data} def _check_volume_availability(self, volume, force): """Check if the volume can be used.""" if volume['status'] not in ['available', 'in-use']: msg = _('Volume %(vol_id)s status must be ' 'available or in-use, but current status is: ' '%(vol_status)s.') % {'vol_id': volume['id'], 'vol_status': volume['status']} raise exception.InvalidVolume(reason=msg) if not force and 'in-use' == volume['status']: msg = _('Volume status is in-use.') raise exception.InvalidVolume(reason=msg) @wrap_check_policy def copy_volume_to_image(self, context, volume, metadata, force): """Create a new image from the specified volume.""" if not CONF.enable_force_upload and force: LOG.info(_LI("Force upload to image is disabled, " "Force option will be ignored."), resource={'type': 'volume', 'id': volume['id']}) force = False 
self._check_volume_availability(volume, force) glance_core_properties = CONF.glance_core_properties if glance_core_properties: try: volume_image_metadata = self.get_volume_image_metadata(context, volume) custom_property_set = (set(volume_image_metadata).difference (set(glance_core_properties))) if custom_property_set: properties = {custom_property: volume_image_metadata[custom_property] for custom_property in custom_property_set} metadata.update(dict(properties=properties)) except exception.GlanceMetadataNotFound: # If volume is not created from image, No glance metadata # would be available for that volume in # volume glance metadata table pass recv_metadata = self.image_service.create(context, metadata) self.update(context, volume, {'status': 'uploading'}) self.volume_rpcapi.copy_volume_to_image(context, volume, recv_metadata) response = {"id": volume['id'], "updated_at": volume['updated_at'], "status": 'uploading', "display_description": volume['display_description'], "size": volume['size'], "volume_type": volume['volume_type'], "image_id": recv_metadata['id'], "container_format": recv_metadata['container_format'], "disk_format": recv_metadata['disk_format'], "image_name": recv_metadata.get('name', None)} LOG.info(_LI("Copy image to volume completed successfully."), resource=volume) return response @wrap_check_policy def extend(self, context, volume, new_size): if volume['status'] != 'available': msg = _('Volume %(vol_id)s status must be available ' 'to extend, but current status is: ' '%(vol_status)s.') % {'vol_id': volume['id'], 'vol_status': volume['status']} raise exception.InvalidVolume(reason=msg) size_increase = (int(new_size)) - volume['size'] if size_increase <= 0: msg = (_("New size for extend must be greater " "than current size. 
(current: %(size)s, " "extended: %(new_size)s).") % {'new_size': new_size, 'size': volume['size']}) raise exception.InvalidInput(reason=msg) try: reserve_opts = {'gigabytes': size_increase} QUOTAS.add_volume_type_opts(context, reserve_opts, volume.get('volume_type_id')) reservations = QUOTAS.reserve(context, project_id=volume['project_id'], **reserve_opts) except exception.OverQuota as exc: usages = exc.kwargs['usages'] quotas = exc.kwargs['quotas'] def _consumed(name): return (usages[name]['reserved'] + usages[name]['in_use']) msg = _LE("Quota exceeded for %(s_pid)s, tried to extend volume " "by %(s_size)sG, (%(d_consumed)dG of %(d_quota)dG " "already consumed).") LOG.error(msg, {'s_pid': context.project_id, 's_size': size_increase, 'd_consumed': _consumed('gigabytes'), 'd_quota': quotas['gigabytes']}) raise exception.VolumeSizeExceedsAvailableQuota( requested=size_increase, consumed=_consumed('gigabytes'), quota=quotas['gigabytes']) self.update(context, volume, {'status': 'extending'}) self.volume_rpcapi.extend_volume(context, volume, new_size, reservations) LOG.info(_LI("Extend volume request issued successfully."), resource=volume) @wrap_check_policy def migrate_volume(self, context, volume, host, force_host_copy): """Migrate the volume to the specified host.""" # We only handle "available" volumes for now if volume['status'] not in ['available', 'in-use']: msg = _('Volume %(vol_id)s status must be available or in-use, ' 'but current status is: ' '%(vol_status)s.') % {'vol_id': volume['id'], 'vol_status': volume['status']} LOG.error(msg) raise exception.InvalidVolume(reason=msg) # Make sure volume is not part of a migration if volume['migration_status'] is not None: msg = _("Volume %s is already part of an active " "migration.") % volume['id'] LOG.error(msg) raise exception.InvalidVolume(reason=msg) # We only handle volumes without snapshots for now snaps = objects.SnapshotList.get_all_for_volume(context, volume['id']) if snaps: msg = _("Volume %s must not have 
snapshots.") % volume['id'] LOG.error(msg) raise exception.InvalidVolume(reason=msg) # We only handle non-replicated volumes for now rep_status = volume['replication_status'] if rep_status is not None and rep_status != 'disabled': msg = _("Volume %s must not be replicated.") % volume['id'] LOG.error(msg) raise exception.InvalidVolume(reason=msg) cg_id = volume.get('consistencygroup_id', None) if cg_id: msg = _("Volume %s must not be part of a consistency " "group.") % volume['id'] LOG.error(msg) raise exception.InvalidVolume(reason=msg) # Make sure the host is in the list of available hosts elevated = context.elevated() topic = CONF.volume_topic services = self.db.service_get_all_by_topic(elevated, topic, disabled=False) found = False for service in services: svc_host = volume_utils.extract_host(host, 'backend') if utils.service_is_up(service) and service['host'] == svc_host: found = True if not found: msg = _('No available service named %s') % host LOG.error(msg) raise exception.InvalidHost(reason=msg) # Make sure the destination host is different than the current one if host == volume['host']: msg = _('Destination host must be different ' 'than the current host.') LOG.error(msg) raise exception.InvalidHost(reason=msg) self.update(context, volume, {'migration_status': 'starting'}) # Call the scheduler to ensure that the host exists and that it can # accept the volume volume_type = {} volume_type_id = volume['volume_type_id'] if volume_type_id: volume_type = volume_types.get_volume_type(context, volume_type_id) request_spec = {'volume_properties': volume, 'volume_type': volume_type, 'volume_id': volume['id']} self.scheduler_rpcapi.migrate_volume_to_host(context, CONF.volume_topic, volume['id'], host, force_host_copy, request_spec) LOG.info(_LI("Migrate volume request issued successfully."), resource=volume) @wrap_check_policy def migrate_volume_completion(self, context, volume, new_volume, error): # This is a volume swap initiated by Nova, not Cinder. 
Nova expects # us to return the new_volume_id. if not (volume['migration_status'] or new_volume['migration_status']): return new_volume['id'] if not volume['migration_status']: msg = _('Source volume not mid-migration.') raise exception.InvalidVolume(reason=msg) if not new_volume['migration_status']: msg = _('Destination volume not mid-migration.') raise exception.InvalidVolume(reason=msg) expected_status = 'target:%s' % volume['id'] if not new_volume['migration_status'] == expected_status: msg = (_('Destination has migration_status %(stat)s, expected ' '%(exp)s.') % {'stat': new_volume['migration_status'], 'exp': expected_status}) raise exception.InvalidVolume(reason=msg) LOG.info(_LI("Migrate volume completion issued successfully."), resource=volume) return self.volume_rpcapi.migrate_volume_completion(context, volume, new_volume, error) @wrap_check_policy def update_readonly_flag(self, context, volume, flag): if volume['status'] != 'available': msg = _('Volume %(vol_id)s status must be available ' 'to update readonly flag, but current status is: ' '%(vol_status)s.') % {'vol_id': volume['id'], 'vol_status': volume['status']} raise exception.InvalidVolume(reason=msg) self.update_volume_admin_metadata(context.elevated(), volume, {'readonly': six.text_type(flag)}) LOG.info(_LI("Update readonly setting on volume " "completed successfully."), resource=volume) @wrap_check_policy def retype(self, context, volume, new_type, migration_policy=None): """Attempt to modify the type associated with an existing volume.""" if volume['status'] not in ['available', 'in-use']: msg = _('Unable to update type due to incorrect status: ' '%(vol_status)s on volume: %(vol_id)s. 
Volume status ' 'must be available or ' 'in-use.') % {'vol_status': volume['status'], 'vol_id': volume['id']} LOG.error(msg) raise exception.InvalidVolume(reason=msg) if volume['migration_status'] is not None: msg = (_("Volume %s is already part of an active migration.") % volume['id']) LOG.error(msg) raise exception.InvalidVolume(reason=msg) if migration_policy and migration_policy not in ['on-demand', 'never']: msg = _('migration_policy must be \'on-demand\' or \'never\', ' 'passed: %s') % new_type LOG.error(msg) raise exception.InvalidInput(reason=msg) cg_id = volume.get('consistencygroup_id', None) if cg_id: msg = _("Volume must not be part of a consistency group.") LOG.error(msg) raise exception.InvalidVolume(reason=msg) # Support specifying volume type by ID or name try: if uuidutils.is_uuid_like(new_type): vol_type = volume_types.get_volume_type(context, new_type) else: vol_type = volume_types.get_volume_type_by_name(context, new_type) except exception.InvalidVolumeType: msg = _('Invalid volume_type passed: %s.') % new_type LOG.error(msg) raise exception.InvalidInput(reason=msg) vol_type_id = vol_type['id'] vol_type_qos_id = vol_type['qos_specs_id'] old_vol_type = None old_vol_type_id = volume['volume_type_id'] old_vol_type_qos_id = None # Error if the original and new type are the same if volume['volume_type_id'] == vol_type_id: msg = _('New volume_type same as original: %s.') % new_type LOG.error(msg) raise exception.InvalidInput(reason=msg) if volume['volume_type_id']: old_vol_type = volume_types.get_volume_type( context, old_vol_type_id) old_vol_type_qos_id = old_vol_type['qos_specs_id'] # We don't support changing encryption requirements yet old_enc = volume_types.get_volume_type_encryption(context, old_vol_type_id) new_enc = volume_types.get_volume_type_encryption(context, vol_type_id) if old_enc != new_enc: msg = _('Retype cannot change encryption requirements.') raise exception.InvalidInput(reason=msg) # We don't support changing QoS at the front-end 
yet for in-use volumes # TODO(avishay): Call Nova to change QoS setting (libvirt has support # - virDomainSetBlockIoTune() - Nova does not have support yet). if (volume['status'] != 'available' and old_vol_type_qos_id != vol_type_qos_id): for qos_id in [old_vol_type_qos_id, vol_type_qos_id]: if qos_id: specs = qos_specs.get_qos_specs(context.elevated(), qos_id) if specs['consumer'] != 'back-end': msg = _('Retype cannot change front-end qos specs for ' 'in-use volume: %s.') % volume['id'] raise exception.InvalidInput(reason=msg) # We're checking here in so that we can report any quota issues as # early as possible, but won't commit until we change the type. We # pass the reservations onward in case we need to roll back. reservations = quota_utils.get_volume_type_reservation(context, volume, vol_type_id) self.update(context, volume, {'status': 'retyping'}) request_spec = {'volume_properties': volume, 'volume_id': volume['id'], 'volume_type': vol_type, 'migration_policy': migration_policy, 'quota_reservations': reservations} self.scheduler_rpcapi.retype(context, CONF.volume_topic, volume['id'], request_spec=request_spec, filter_properties={}) LOG.info(_LI("Retype volume request issued successfully."), resource=volume) def manage_existing(self, context, host, ref, name=None, description=None, volume_type=None, metadata=None, availability_zone=None, bootable=False): if availability_zone is None: elevated = context.elevated() try: svc_host = volume_utils.extract_host(host, 'backend') service = self.db.service_get_by_host_and_topic( elevated, svc_host, CONF.volume_topic) except exception.ServiceNotFound: with excutils.save_and_reraise_exception(): LOG.error(_LE('Unable to find service for given host.')) availability_zone = service.get('availability_zone') manage_what = { 'context': context, 'name': name, 'description': description, 'host': host, 'ref': ref, 'volume_type': volume_type, 'metadata': metadata, 'availability_zone': availability_zone, 'bootable': bootable, } 
try: flow_engine = manage_existing.get_flow(self.scheduler_rpcapi, self.db, manage_what) except Exception: msg = _('Failed to manage api volume flow.') LOG.exception(msg) raise exception.CinderException(msg) # Attaching this listener will capture all of the notifications that # taskflow sends out and redirect them to a more useful log for # cinder's debugging (or error reporting) usage. with flow_utils.DynamicLogListener(flow_engine, logger=LOG): flow_engine.run() vol_ref = flow_engine.storage.fetch('volume') LOG.info(_LI("Manage volume request issued successfully."), resource=vol_ref) return vol_ref class HostAPI(base.Base): def __init__(self): super(HostAPI, self).__init__() """Sub-set of the Volume Manager API for managing host operations.""" def set_host_enabled(self, context, host, enabled): """Sets the specified host's ability to accept new volumes.""" raise NotImplementedError() def get_host_uptime(self, context, host): """Returns the result of calling "uptime" on the target host.""" raise NotImplementedError() def host_power_action(self, context, host, action): raise NotImplementedError() def set_host_maintenance(self, context, host, mode): """Start/Stop host maintenance window. On start, it triggers volume evacuation. """ raise NotImplementedError()
{ "content_hash": "50c57c39b9d8aa54d90254fff9a990a8", "timestamp": "", "source": "github", "line_count": 1508, "max_line_length": 79, "avg_line_length": 44.64655172413793, "alnum_prop": 0.5297726023734906, "repo_name": "tlakshman26/cinder-new-branch", "id": "5eaa64dc1b4a5fedd6f6334742a69709bedbd8a5", "size": "68059", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cinder/volume/api.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "12371447" }, { "name": "Shell", "bytes": "8172" } ], "symlink_target": "" }
import copy import mock import testtools from testtools.matchers import HasLength from ironicclient.common import base from ironicclient.tests import utils from ironicclient.v1 import node NODE1 = {'id': 123, 'uuid': '66666666-7777-8888-9999-000000000000', 'chassis_uuid': 'aaaaaaaa-1111-bbbb-2222-cccccccccccc', 'maintenance': False, 'driver': 'fake', 'driver_info': {'user': 'foo', 'password': 'bar'}, 'properties': {'num_cpu': 4}, 'extra': {}} NODE2 = {'id': 456, 'uuid': '66666666-7777-8888-9999-111111111111', 'instance_uuid': '66666666-7777-8888-9999-222222222222', 'chassis_uuid': 'aaaaaaaa-1111-bbbb-2222-cccccccccccc', 'maintenance': True, 'driver': 'fake too', 'driver_info': {'user': 'foo', 'password': 'bar'}, 'properties': {'num_cpu': 4}, 'extra': {}} PORT = {'id': 456, 'uuid': '11111111-2222-3333-4444-555555555555', 'node_id': 123, 'address': 'AA:AA:AA:AA:AA:AA', 'extra': {}} POWER_STATE = {'power_state': 'power off', 'target_power_state': 'power on'} DRIVER_IFACES = {'deploy': {'result': True}, 'power': {'result': False, 'reason': 'Invalid IPMI username'}, 'console': {'result': None, 'reason': 'not supported'}, 'rescue': {'result': None, 'reason': 'not supported'}} NODE_STATES = {"last_error": None, "power_state": "power on", "provision_state": "active", "target_power_state": None, "target_provision_state": None} CONSOLE_DATA_ENABLED = {'console_enabled': True, 'console_info': {'test-console': 'test-console-data'}} CONSOLE_DATA_DISABLED = {'console_enabled': False, 'console_info': None} BOOT_DEVICE = {'boot_device': 'pxe', 'persistent': False} SUPPORTED_BOOT_DEVICE = {'supported_boot_devices': ['pxe']} CREATE_NODE = copy.deepcopy(NODE1) del CREATE_NODE['id'] del CREATE_NODE['uuid'] del CREATE_NODE['maintenance'] UPDATED_NODE = copy.deepcopy(NODE1) NEW_DRIVER = 'new-driver' UPDATED_NODE['driver'] = NEW_DRIVER CREATE_WITH_UUID = copy.deepcopy(NODE1) del CREATE_WITH_UUID['id'] del CREATE_WITH_UUID['maintenance'] fake_responses = { '/v1/nodes': { 'GET': ( {}, 
{"nodes": [NODE1, NODE2]} ), 'POST': ( {}, CREATE_NODE, ), }, '/v1/nodes/detail': { 'GET': ( {}, {"nodes": [NODE1, NODE2]} ), }, '/v1/nodes/?associated=False': { 'GET': ( {}, {"nodes": [NODE1]}, ) }, '/v1/nodes/?associated=True': { 'GET': ( {}, {"nodes": [NODE2]}, ) }, '/v1/nodes/?maintenance=False': { 'GET': ( {}, {"nodes": [NODE1]}, ) }, '/v1/nodes/?maintenance=True': { 'GET': ( {}, {"nodes": [NODE2]}, ) }, '/v1/nodes/?associated=True&maintenance=True': { 'GET': ( {}, {"nodes": [NODE2]}, ) }, '/v1/nodes/detail?instance_uuid=%s' % NODE2['instance_uuid']: { 'GET': ( {}, {"nodes": [NODE2]}, ) }, '/v1/nodes/%s' % NODE1['uuid']: { 'GET': ( {}, NODE1, ), 'DELETE': ( {}, None, ), 'PATCH': ( {}, UPDATED_NODE, ), }, '/v1/nodes/%s' % NODE2['uuid']: { 'GET': ( {}, NODE2, ), }, '/v1/nodes/%s/ports' % NODE1['uuid']: { 'GET': ( {}, {"ports": [PORT]}, ), }, '/v1/nodes/%s/ports/detail' % NODE1['uuid']: { 'GET': ( {}, {"ports": [PORT]}, ), }, '/v1/nodes/%s/states/power' % NODE1['uuid']: { 'PUT': ( {}, POWER_STATE, ), }, '/v1/nodes/%s/validate' % NODE1['uuid']: { 'GET': ( {}, DRIVER_IFACES, ), }, '/v1/nodes/%s/states/provision' % NODE1['uuid']: { 'PUT': ( {}, None, ), }, '/v1/nodes/%s/states' % NODE1['uuid']: { 'GET': ( {}, NODE_STATES, ), }, '/v1/nodes/%s/states/console' % NODE1['uuid']: { 'GET': ( {}, CONSOLE_DATA_ENABLED, ), 'PUT': ( {'enabled': 'true'}, None, ), }, '/v1/nodes/%s/states/console' % NODE2['uuid']: { 'GET': ( {}, CONSOLE_DATA_DISABLED, ), }, '/v1/nodes/%s/management/boot_device' % NODE1['uuid']: { 'GET': ( {}, BOOT_DEVICE, ), 'PUT': ( {}, None, ), }, '/v1/nodes/%s/management/boot_device/supported' % NODE1['uuid']: { 'GET': ( {}, SUPPORTED_BOOT_DEVICE, ), }, } fake_responses_pagination = { '/v1/nodes': { 'GET': ( {}, {"nodes": [NODE1], "next": "http://127.0.0.1:6385/v1/nodes/?limit=1"} ), }, '/v1/nodes/?limit=1': { 'GET': ( {}, {"nodes": [NODE2]} ), }, '/v1/nodes/?marker=%s' % NODE1['uuid']: { 'GET': ( {}, {"nodes": [NODE2]} ), }, '/v1/nodes/%s/ports?limit=1' % 
NODE1['uuid']: { 'GET': ( {}, {"ports": [PORT]}, ), }, '/v1/nodes/%s/ports?marker=%s' % (NODE1['uuid'], PORT['uuid']): { 'GET': ( {}, {"ports": [PORT]}, ), }, } fake_responses_sorting = { '/v1/nodes/?sort_key=updated_at': { 'GET': ( {}, {"nodes": [NODE2, NODE1]} ), }, '/v1/nodes/?sort_dir=desc': { 'GET': ( {}, {"nodes": [NODE2, NODE1]} ), }, '/v1/nodes/%s/ports?sort_key=updated_at' % NODE1['uuid']: { 'GET': ( {}, {"ports": [PORT]}, ), }, '/v1/nodes/%s/ports?sort_dir=desc' % NODE1['uuid']: { 'GET': ( {}, {"ports": [PORT]}, ), }, } class NodeManagerTest(testtools.TestCase): def setUp(self): super(NodeManagerTest, self).setUp() self.api = utils.FakeAPI(fake_responses) self.mgr = node.NodeManager(self.api) def test_node_list(self): nodes = self.mgr.list() expect = [ ('GET', '/v1/nodes', {}, None), ] self.assertEqual(expect, self.api.calls) self.assertEqual(2, len(nodes)) def test_node_list_limit(self): self.api = utils.FakeAPI(fake_responses_pagination) self.mgr = node.NodeManager(self.api) nodes = self.mgr.list(limit=1) expect = [ ('GET', '/v1/nodes/?limit=1', {}, None) ] self.assertEqual(expect, self.api.calls) self.assertThat(nodes, HasLength(1)) def test_node_list_marker(self): self.api = utils.FakeAPI(fake_responses_pagination) self.mgr = node.NodeManager(self.api) nodes = self.mgr.list(marker=NODE1['uuid']) expect = [ ('GET', '/v1/nodes/?marker=%s' % NODE1['uuid'], {}, None) ] self.assertEqual(expect, self.api.calls) self.assertThat(nodes, HasLength(1)) def test_node_list_pagination_no_limit(self): self.api = utils.FakeAPI(fake_responses_pagination) self.mgr = node.NodeManager(self.api) nodes = self.mgr.list(limit=0) expect = [ ('GET', '/v1/nodes', {}, None), ('GET', '/v1/nodes/?limit=1', {}, None) ] self.assertEqual(expect, self.api.calls) self.assertEqual(2, len(nodes)) def test_node_list_sort_key(self): self.api = utils.FakeAPI(fake_responses_sorting) self.mgr = node.NodeManager(self.api) nodes = self.mgr.list(sort_key='updated_at') expect = [ ('GET', 
'/v1/nodes/?sort_key=updated_at', {}, None) ] self.assertEqual(expect, self.api.calls) self.assertEqual(2, len(nodes)) def test_node_list_sort_dir(self): self.api = utils.FakeAPI(fake_responses_sorting) self.mgr = node.NodeManager(self.api) nodes = self.mgr.list(sort_dir='desc') expect = [ ('GET', '/v1/nodes/?sort_dir=desc', {}, None) ] self.assertEqual(expect, self.api.calls) self.assertEqual(2, len(nodes)) def test_node_list_associated(self): nodes = self.mgr.list(associated=True) expect = [ ('GET', '/v1/nodes/?associated=True', {}, None), ] self.assertEqual(expect, self.api.calls) self.assertThat(nodes, HasLength(1)) self.assertEqual(NODE2['uuid'], getattr(nodes[0], 'uuid')) def test_node_list_unassociated(self): nodes = self.mgr.list(associated=False) expect = [ ('GET', '/v1/nodes/?associated=False', {}, None), ] self.assertEqual(expect, self.api.calls) self.assertThat(nodes, HasLength(1)) self.assertEqual(NODE1['uuid'], getattr(nodes[0], 'uuid')) def test_node_list_maintenance(self): nodes = self.mgr.list(maintenance=True) expect = [ ('GET', '/v1/nodes/?maintenance=True', {}, None), ] self.assertEqual(expect, self.api.calls) self.assertThat(nodes, HasLength(1)) self.assertEqual(NODE2['uuid'], getattr(nodes[0], 'uuid')) def test_node_list_no_maintenance(self): nodes = self.mgr.list(maintenance=False) expect = [ ('GET', '/v1/nodes/?maintenance=False', {}, None), ] self.assertEqual(expect, self.api.calls) self.assertThat(nodes, HasLength(1)) self.assertEqual(NODE1['uuid'], getattr(nodes[0], 'uuid')) def test_node_list_associated_and_maintenance(self): nodes = self.mgr.list(associated=True, maintenance=True) expect = [ ('GET', '/v1/nodes/?associated=True&maintenance=True', {}, None), ] self.assertEqual(expect, self.api.calls) self.assertThat(nodes, HasLength(1)) self.assertEqual(NODE2['uuid'], getattr(nodes[0], 'uuid')) def test_node_list_detail(self): nodes = self.mgr.list(detail=True) expect = [ ('GET', '/v1/nodes/detail', {}, None), ] self.assertEqual(expect, 
self.api.calls) self.assertEqual(2, len(nodes)) self.assertEqual(nodes[0].extra, {}) def test_node_show(self): node = self.mgr.get(NODE1['uuid']) expect = [ ('GET', '/v1/nodes/%s' % NODE1['uuid'], {}, None), ] self.assertEqual(expect, self.api.calls) self.assertEqual(NODE1['uuid'], node.uuid) def test_node_show_by_instance(self): node = self.mgr.get_by_instance_uuid(NODE2['instance_uuid']) expect = [ ('GET', '/v1/nodes/detail?instance_uuid=%s' % NODE2['instance_uuid'], {}, None), ] self.assertEqual(expect, self.api.calls) self.assertEqual(NODE2['uuid'], node.uuid) def test_create(self): node = self.mgr.create(**CREATE_NODE) expect = [ ('POST', '/v1/nodes', {}, CREATE_NODE), ] self.assertEqual(expect, self.api.calls) self.assertTrue(node) def test_create_with_uuid(self): node = self.mgr.create(**CREATE_WITH_UUID) expect = [ ('POST', '/v1/nodes', {}, CREATE_WITH_UUID), ] self.assertEqual(expect, self.api.calls) self.assertTrue(node) def test_delete(self): node = self.mgr.delete(node_id=NODE1['uuid']) expect = [ ('DELETE', '/v1/nodes/%s' % NODE1['uuid'], {}, None), ] self.assertEqual(expect, self.api.calls) self.assertIsNone(node) def test_update(self): patch = {'op': 'replace', 'value': NEW_DRIVER, 'path': '/driver'} node = self.mgr.update(node_id=NODE1['uuid'], patch=patch) expect = [ ('PATCH', '/v1/nodes/%s' % NODE1['uuid'], {}, patch), ] self.assertEqual(expect, self.api.calls) self.assertEqual(NEW_DRIVER, node.driver) def test_node_port_list(self): ports = self.mgr.list_ports(NODE1['uuid']) expect = [ ('GET', '/v1/nodes/%s/ports' % NODE1['uuid'], {}, None), ] self.assertEqual(expect, self.api.calls) self.assertEqual(1, len(ports)) self.assertEqual(PORT['uuid'], ports[0].uuid) self.assertEqual(PORT['address'], ports[0].address) def test_node_port_list_limit(self): self.api = utils.FakeAPI(fake_responses_pagination) self.mgr = node.NodeManager(self.api) ports = self.mgr.list_ports(NODE1['uuid'], limit=1) expect = [ ('GET', '/v1/nodes/%s/ports?limit=1' % 
NODE1['uuid'], {}, None), ] self.assertEqual(expect, self.api.calls) self.assertThat(ports, HasLength(1)) self.assertEqual(PORT['uuid'], ports[0].uuid) self.assertEqual(PORT['address'], ports[0].address) def test_node_port_list_marker(self): self.api = utils.FakeAPI(fake_responses_pagination) self.mgr = node.NodeManager(self.api) ports = self.mgr.list_ports(NODE1['uuid'], marker=PORT['uuid']) expect = [ ('GET', '/v1/nodes/%s/ports?marker=%s' % (NODE1['uuid'], PORT['uuid']), {}, None), ] self.assertEqual(expect, self.api.calls) self.assertThat(ports, HasLength(1)) def test_node_port_list_sort_key(self): self.api = utils.FakeAPI(fake_responses_sorting) self.mgr = node.NodeManager(self.api) ports = self.mgr.list_ports(NODE1['uuid'], sort_key='updated_at') expect = [ ('GET', '/v1/nodes/%s/ports?sort_key=updated_at' % NODE1['uuid'], {}, None), ] self.assertEqual(expect, self.api.calls) self.assertThat(ports, HasLength(1)) self.assertEqual(PORT['uuid'], ports[0].uuid) self.assertEqual(PORT['address'], ports[0].address) def test_node_port_list_sort_dir(self): self.api = utils.FakeAPI(fake_responses_sorting) self.mgr = node.NodeManager(self.api) ports = self.mgr.list_ports(NODE1['uuid'], sort_dir='desc') expect = [ ('GET', '/v1/nodes/%s/ports?sort_dir=desc' % NODE1['uuid'], {}, None), ] self.assertEqual(expect, self.api.calls) self.assertThat(ports, HasLength(1)) self.assertEqual(PORT['uuid'], ports[0].uuid) self.assertEqual(PORT['address'], ports[0].address) def test_node_port_list_detail(self): ports = self.mgr.list_ports(NODE1['uuid'], detail=True) expect = [ ('GET', '/v1/nodes/%s/ports/detail' % NODE1['uuid'], {}, None), ] self.assertEqual(expect, self.api.calls) self.assertEqual(1, len(ports)) def test_node_set_power_state(self): power_state = self.mgr.set_power_state(NODE1['uuid'], "on") body = {'target': 'power on'} expect = [ ('PUT', '/v1/nodes/%s/states/power' % NODE1['uuid'], {}, body), ] self.assertEqual(expect, self.api.calls) self.assertEqual('power on', 
power_state.target_power_state) def test_node_validate(self): ifaces = self.mgr.validate(NODE1['uuid']) expect = [ ('GET', '/v1/nodes/%s/validate' % NODE1['uuid'], {}, None), ] self.assertEqual(expect, self.api.calls) self.assertEqual(DRIVER_IFACES['power'], ifaces.power) self.assertEqual(DRIVER_IFACES['deploy'], ifaces.deploy) self.assertEqual(DRIVER_IFACES['rescue'], ifaces.rescue) self.assertEqual(DRIVER_IFACES['console'], ifaces.console) def test_node_set_provision_state(self): target_state = 'active' self.mgr.set_provision_state(NODE1['uuid'], target_state) body = {'target': target_state} expect = [ ('PUT', '/v1/nodes/%s/states/provision' % NODE1['uuid'], {}, body), ] self.assertEqual(expect, self.api.calls) def test_node_states(self): states = self.mgr.states(NODE1['uuid']) expect = [ ('GET', '/v1/nodes/%s/states' % NODE1['uuid'], {}, None), ] self.assertEqual(expect, self.api.calls) expected_fields = ['last_error', 'power_state', 'provision_state', 'target_power_state', 'target_provision_state'] self.assertEqual(sorted(expected_fields), sorted(states.to_dict().keys())) def test_node_set_console_mode(self): enabled = 'true' self.mgr.set_console_mode(NODE1['uuid'], enabled) body = {'enabled': enabled} expect = [ ('PUT', '/v1/nodes/%s/states/console' % NODE1['uuid'], {}, body), ] self.assertEqual(expect, self.api.calls) def test_node_get_console(self): info = self.mgr.get_console(NODE1['uuid']) expect = [ ('GET', '/v1/nodes/%s/states/console' % NODE1['uuid'], {}, None), ] self.assertEqual(expect, self.api.calls) self.assertEqual(CONSOLE_DATA_ENABLED, info) def test_node_get_console_disabled(self): info = self.mgr.get_console(NODE2['uuid']) expect = [ ('GET', '/v1/nodes/%s/states/console' % NODE2['uuid'], {}, None), ] self.assertEqual(expect, self.api.calls) self.assertEqual(CONSOLE_DATA_DISABLED, info) @mock.patch.object(base.Manager, '_update') def test_vendor_passthru(self, update_mock): # For now just mock the tests because vendor-passthru doesn't return # 
anything to verify. vendor_passthru_args = {'arg1': 'val1'} kwargs = { 'node_id': 'node_uuid', 'method': 'method', 'args': vendor_passthru_args } self.mgr.vendor_passthru(**kwargs) final_path = '/v1/nodes/node_uuid/vendor_passthru/method' update_mock.assert_once_called_with(final_path, vendor_passthru_args, method='POST') def _test_node_set_boot_device(self, boot_device, persistent=False): self.mgr.set_boot_device(NODE1['uuid'], boot_device, persistent) body = {'boot_device': boot_device, 'persistent': persistent} expect = [ ('PUT', '/v1/nodes/%s/management/boot_device' % NODE1['uuid'], {}, body), ] self.assertEqual(expect, self.api.calls) def test_node_set_boot_device(self): self._test_node_set_boot_device('pxe') def test_node_set_boot_device_persistent(self): self._test_node_set_boot_device('pxe', persistent=True) def test_node_get_boot_device(self): boot_device = self.mgr.get_boot_device(NODE1['uuid']) expect = [ ('GET', '/v1/nodes/%s/management/boot_device' % NODE1['uuid'], {}, None), ] self.assertEqual(expect, self.api.calls) self.assertEqual(BOOT_DEVICE, boot_device) def test_node_get_supported_boot_devices(self): boot_device = self.mgr.get_supported_boot_devices(NODE1['uuid']) expect = [ ('GET', '/v1/nodes/%s/management/boot_device/supported' % NODE1['uuid'], {}, None), ] self.assertEqual(expect, self.api.calls) self.assertEqual(SUPPORTED_BOOT_DEVICE, boot_device)
{ "content_hash": "4ab809c2d9f339fc7b4eb5de6784c3e6", "timestamp": "", "source": "github", "line_count": 654, "max_line_length": 79, "avg_line_length": 30.827217125382262, "alnum_prop": 0.5112841624919399, "repo_name": "redhat-openstack/python-ironicclient", "id": "a5b04300c1ac07c5836db51e80f4adec655e0dbf", "size": "20821", "binary": false, "copies": "1", "ref": "refs/heads/master-patches", "path": "ironicclient/tests/v1/test_node.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "268832" } ], "symlink_target": "" }
from telemetry import decorators
from telemetry.page import page
from telemetry.unittest_util import options_for_unittests
from telemetry.unittest_util import page_test_test_case
from telemetry.util import wpr_modes

from measurements import thread_times
from metrics import timeline


class AnimatedPage(page.Page):
  """Local test page that animates; used to generate thread activity."""

  def __init__(self, page_set):
    super(AnimatedPage, self).__init__(
      url='file://animated_page.html', page_set=page_set,
      base_dir=page_set.base_dir)

  def RunPageInteractions(self, action_runner):
    # Let the page run briefly so the trace contains measurable work.
    action_runner.Wait(.2)


class ThreadTimesUnitTest(page_test_test_case.PageTestTestCase):
  """End-to-end checks for the thread_times measurement."""

  def setUp(self):
    # Fresh options per test; disable web-page-replay so pages load from disk.
    self._options = options_for_unittests.GetCopy()
    self._options.browser_options.wpr_mode = wpr_modes.WPR_OFF

  @decorators.Disabled('android')
  def testBasic(self):
    """Runs the measurement and expects exactly one CPU-time value per
    (thread category, interval) pair."""
    ps = self.CreateStorySetFromFileInUnittestDataDir('scrollable_page.html')
    measurement = thread_times.ThreadTimes()
    timeline_options = self._options
    results = self.RunMeasurement(measurement, ps, options = timeline_options)
    self.assertFalse(len(results.failures), results.failures)

    for interval in timeline.IntervalNames:
      for category in timeline.TimelineThreadCategories.values():
        cpu_time_name = timeline.ThreadCpuTimeResultName(category, interval)
        cpu_time = results.FindAllPageSpecificValuesNamed(cpu_time_name)
        self.assertEquals(len(cpu_time), 1)

  @decorators.Disabled('chromeos')  # crbug.com/483212
  def testWithSilkDetails(self):
    """With report_silk_details, expects one per-trace-category value for
    the renderer main thread in every interval."""
    ps = self.CreateStorySetFromFileInUnittestDataDir('scrollable_page.html')
    measurement = thread_times.ThreadTimes(report_silk_details=True)
    results = self.RunMeasurement(measurement, ps, options = self._options)
    self.assertEquals(0, len(results.failures))

    main_thread = "renderer_main"
    expected_trace_categories = ["blink", "cc", "idle"]
    for interval in timeline.IntervalNames:
      for trace_category in expected_trace_categories:
        value_name = timeline.ThreadDetailResultName(
            main_thread, interval, trace_category)
        values = results.FindAllPageSpecificValuesNamed(value_name)
        self.assertEquals(len(values), 1)

  def testCleanUpTrace(self):
    # Shared harness check: the measurement must not leave tracing enabled.
    self.TestTracingCleanedUp(thread_times.ThreadTimes, self._options)
{ "content_hash": "37164474d3b33bc6da5421e20dc853a4", "timestamp": "", "source": "github", "line_count": 57, "max_line_length": 78, "avg_line_length": 40.1578947368421, "alnum_prop": 0.743993010048056, "repo_name": "SaschaMester/delicium", "id": "7e7186c7d86dc250784e41ef25cbbaafa51de32e", "size": "2452", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tools/perf/measurements/thread_times_unittest.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "AppleScript", "bytes": "6973" }, { "name": "Arduino", "bytes": "464" }, { "name": "Assembly", "bytes": "23829" }, { "name": "Batchfile", "bytes": "8451" }, { "name": "C", "bytes": "4171711" }, { "name": "C++", "bytes": "243066171" }, { "name": "CSS", "bytes": "935112" }, { "name": "DM", "bytes": "60" }, { "name": "Groff", "bytes": "2494" }, { "name": "HTML", "bytes": "27211018" }, { "name": "Java", "bytes": "14285999" }, { "name": "JavaScript", "bytes": "20413885" }, { "name": "Makefile", "bytes": "23496" }, { "name": "Objective-C", "bytes": "1725804" }, { "name": "Objective-C++", "bytes": "9880229" }, { "name": "PHP", "bytes": "97817" }, { "name": "PLpgSQL", "bytes": "178732" }, { "name": "Perl", "bytes": "63937" }, { "name": "Protocol Buffer", "bytes": "478406" }, { "name": "Python", "bytes": "8261413" }, { "name": "Shell", "bytes": "482077" }, { "name": "Standard ML", "bytes": "5034" }, { "name": "XSLT", "bytes": "418" }, { "name": "nesC", "bytes": "18347" } ], "symlink_target": "" }
def city_country(name, country): return f"{name}, {country}." print(city_country("Buenos Aires" , "Argentina") ) print(city_country("Bogota", "Colombia")) print(city_country("Caracas" , "Venezuela"))
{ "content_hash": "ebb81a359acce7a8116a148d0ede1f48", "timestamp": "", "source": "github", "line_count": 6, "max_line_length": 53, "avg_line_length": 34.5, "alnum_prop": 0.6811594202898551, "repo_name": "AnhellO/DAS_Sistemas", "id": "00eec6f8328088812f211211bb8707508fc159fc", "size": "207", "binary": false, "copies": "1", "ref": "refs/heads/ene-jun-2022", "path": "Ene-Jun-2022/juan-alejandro-calzoncit-rodriguez/práctica-2/capitulo-8/8-6.py", "mode": "33188", "license": "mit", "language": [ { "name": "Dockerfile", "bytes": "8515" }, { "name": "Go", "bytes": "25845" }, { "name": "HTML", "bytes": "36671" }, { "name": "Python", "bytes": "716604" } ], "symlink_target": "" }
"""Test parsing an append_env action.""" import io import os import textwrap from launch.actions import AppendEnvironmentVariable from launch.frontend import Parser def test_append_env(): xml_file = \ """\ <launch> <append_env name="my_env_var" value="asd"/> <append_env name="my_env_var" value="zxc" separator="|"/> <append_env name="my_other_env_var" value="fgh"/> <append_env name="my_other_env_var" value="jkl" prepend="false"/> <append_env name="my_other_env_var" value="qwe" prepend="yes"/> <append_env name="my_other_env_var" value="rty" prepend="true" separator="|"/> </launch> """ xml_file = textwrap.dedent(xml_file) root_entity, parser = Parser.load(io.StringIO(xml_file)) ld = parser.parse_description(root_entity) assert len(ld.entities) == 6 assert isinstance(ld.entities[0], AppendEnvironmentVariable) assert isinstance(ld.entities[1], AppendEnvironmentVariable) assert isinstance(ld.entities[2], AppendEnvironmentVariable) assert isinstance(ld.entities[3], AppendEnvironmentVariable) assert isinstance(ld.entities[4], AppendEnvironmentVariable) assert isinstance(ld.entities[5], AppendEnvironmentVariable) assert 'my_env_var' == ''.join([x.perform(None) for x in ld.entities[0].name]) assert 'my_env_var' == ''.join([x.perform(None) for x in ld.entities[0].name]) assert 'my_other_env_var' == ''.join([x.perform(None) for x in ld.entities[2].name]) assert 'my_other_env_var' == ''.join([x.perform(None) for x in ld.entities[3].name]) assert 'my_other_env_var' == ''.join([x.perform(None) for x in ld.entities[4].name]) assert 'my_other_env_var' == ''.join([x.perform(None) for x in ld.entities[5].name]) assert 'asd' == ''.join([x.perform(None) for x in ld.entities[0].value]) assert 'zxc' == ''.join([x.perform(None) for x in ld.entities[1].value]) assert 'fgh' == ''.join([x.perform(None) for x in ld.entities[2].value]) assert 'jkl' == ''.join([x.perform(None) for x in ld.entities[3].value]) assert 'qwe' == ''.join([x.perform(None) for x in ld.entities[4].value]) assert 'rty' 
== ''.join([x.perform(None) for x in ld.entities[5].value]) assert not ld.entities[0].prepend assert not ld.entities[1].prepend assert not ld.entities[2].prepend assert not ld.entities[3].prepend assert ld.entities[4].prepend assert ld.entities[5].prepend assert os.pathsep == ''.join([x.perform(None) for x in ld.entities[0].separator]) assert '|' == ''.join([x.perform(None) for x in ld.entities[1].separator]) assert os.pathsep == ''.join([x.perform(None) for x in ld.entities[2].separator]) assert os.pathsep == ''.join([x.perform(None) for x in ld.entities[3].separator]) assert os.pathsep == ''.join([x.perform(None) for x in ld.entities[4].separator]) assert '|' == ''.join([x.perform(None) for x in ld.entities[5].separator])
{ "content_hash": "a525f78ac10c359462e6e0b18d4376b6", "timestamp": "", "source": "github", "line_count": 56, "max_line_length": 90, "avg_line_length": 53.214285714285715, "alnum_prop": 0.6513422818791946, "repo_name": "ros2/launch", "id": "5b651f3450d016a584d897a4d6b063f837478f84", "size": "3582", "binary": false, "copies": "1", "ref": "refs/heads/rolling", "path": "launch_xml/test/launch_xml/test_append_env.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "857" }, { "name": "C++", "bytes": "1468" }, { "name": "CMake", "bytes": "8807" }, { "name": "Makefile", "bytes": "607" }, { "name": "Python", "bytes": "1063971" }, { "name": "Shell", "bytes": "85" } ], "symlink_target": "" }
from factory import ( SubFactory, ) from factory.django import DjangoModelFactory from accelerator.tests.factories.startup_factory import StartupFactory from accelerator.models import BusinessProposition class BusinessPropositionFactory(DjangoModelFactory): class Meta: model = BusinessProposition startup = SubFactory(StartupFactory)
{ "content_hash": "a4e49571e0b465e7007a98b2f0746641", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 70, "avg_line_length": 25.642857142857142, "alnum_prop": 0.807799442896936, "repo_name": "masschallenge/django-accelerator", "id": "72d8dcef43c680f921e91efb8094ead8143d5d3d", "size": "359", "binary": false, "copies": "1", "ref": "refs/heads/development", "path": "accelerator/tests/factories/business_proposition_factory.py", "mode": "33188", "license": "mit", "language": [ { "name": "Dockerfile", "bytes": "1848" }, { "name": "Makefile", "bytes": "6817" }, { "name": "Python", "bytes": "996767" }, { "name": "Shell", "bytes": "2453" } ], "symlink_target": "" }
from . import FairseqEncoder class CompositeEncoder(FairseqEncoder): """ Encoder class that forwards on multiple encoders, for example for a fusion model or question-answering Accepts a dictionary of encoder, the first encoder's dictionary is used for initialization """ def __init__(self, encoders): super().__init__(next(iter(encoders.values())).dictionary) self.encoders = encoders for key in self.encoders: self.add_module(key, self.encoders[key]) def forward(self, src_tokens, src_lengths): encoder_out = {} for key in self.encoders: encoder_out[key] = self.encoders[key](src_tokens, src_lengths) return encoder_out def reorder_encoder_out(self, encoder_out, new_order): """Reorder encoder output according to new_order.""" for key in self.encoders: encoder_out[key] = self.encoders[key].reorder_encoder_out(encoder_out[key], new_order) return encoder_out def max_positions(self): return min([self.encoders[key].max_positions() for key in self.encoders]) def upgrade_state_dict(self, state_dict): for key in self.encoders: self.encoders[key].upgrade_state_dict(state_dict) return state_dict
{ "content_hash": "a2f4c99059e4e8a2297b858b31d61645", "timestamp": "", "source": "github", "line_count": 34, "max_line_length": 106, "avg_line_length": 37.73529411764706, "alnum_prop": 0.65705378020265, "repo_name": "mlperf/training_results_v0.5", "id": "d2e3d048661413bee7192a2d75cfa9490e36ed66", "size": "1569", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "v0.5.0/nvidia/submission/code/translation/pytorch/fairseq/models/composite_encoder.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "5720" }, { "name": "C++", "bytes": "1288180" }, { "name": "CMake", "bytes": "40880" }, { "name": "CSS", "bytes": "32420" }, { "name": "Cuda", "bytes": "1362093" }, { "name": "Dockerfile", "bytes": "19488" }, { "name": "Go", "bytes": "1088660" }, { "name": "HTML", "bytes": "19756888" }, { "name": "Java", "bytes": "45405" }, { "name": "JavaScript", "bytes": "302838" }, { "name": "Jupyter Notebook", "bytes": "9104667" }, { "name": "Lua", "bytes": "4430" }, { "name": "Makefile", "bytes": "3652" }, { "name": "Python", "bytes": "31508548" }, { "name": "Scala", "bytes": "106211" }, { "name": "Shell", "bytes": "409745" } ], "symlink_target": "" }
# Django settings for the marimo-comments example project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    ('admin', 'admin@example.com'),
)

MANAGERS = ADMINS

# Local SQLite database; suitable for the example/demo only.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'db.sqlite3',
    }
}

SHORT_CACHE_TIMEOUT = 60  # One Minute

LOGIN_URL = '/auth/signin/'

# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True

# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''

# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)

# Make this unique, and don't share it with anybody.
# SECURITY NOTE(review): this key is committed to source control; fine for
# a demo, but any real deployment must load it from the environment.
SECRET_KEY = 's=le&3g5menbn0%hz6xx4j0b1km&02g%5^5w3bj^&va=y7*tw3'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'example.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'example.wsgi.application'

TEMPLATE_DIRS = (
    'example/templates',
)

# Extra template dirs consumed by the marimo_comments app (not by Django's
# TEMPLATE_DIRS machinery directly).
MARIMO_TEMPLATE_DIRS = (
    'marimo_comments/templates',
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.flatpages',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'example',
    'marimo_comments',
)

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
{ "content_hash": "34ad967daab20c8b331888cb5c7317f8", "timestamp": "", "source": "github", "line_count": 150, "max_line_length": 79, "avg_line_length": 30.133333333333333, "alnum_prop": 0.6949115044247788, "repo_name": "brandonivey/marimo-comments", "id": "3486f50f1c7377b525395551692d51f4d026c89e", "size": "4560", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "example/settings.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "31938" } ], "symlink_target": "" }
from distutils.core import setup from setuptools import find_packages packages = [ 'tsheets', 'tsheets.repos', 'tsheets.models', ] setup( name='tsheets', version='0.3', description='API library helper for TSheets.com', long_description='Allows to use the TSheets.com API to manage the timesheets and all other related data', author='Kannan Ponnusamy', author_email ='kannan@endpoint.com', license='MIT', packages=find_packages(exclude=['tests']), url='https://github.com/tsheets/api_python', download_url='https://github.com/tsheets/api_python/tarball/0.3', keywords=['api', 'rest', 'tsheets'], install_requires=[ 'requests>=2.7.0', 'python-dateutil==2.4.2', 'pytz==2015.7' ] )
{ "content_hash": "a50952a71bdc72310d9170ab95f10178", "timestamp": "", "source": "github", "line_count": 28, "max_line_length": 109, "avg_line_length": 27.607142857142858, "alnum_prop": 0.6442432082794308, "repo_name": "tsheets/api_python", "id": "884f5f2af5f4a49679145b95e38e70dc0d85d744", "size": "773", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "41953" } ], "symlink_target": "" }
__author__ = 'frank' class BaseObject(object): def __init__(self): self.netki_client = None def set_netki_client(self, netki_client): """ After instantiating the client in the appropriate mode, adding the client to the object will allow for API operations. :param netki_client: NetkiClient.Netki instance to be associated with the object. """ self.netki_client = netki_client
{ "content_hash": "2da62f1ac2ec0c754d9c2a527b281163", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 114, "avg_line_length": 24.833333333333332, "alnum_prop": 0.6375838926174496, "repo_name": "netkicorp/python-partner-client", "id": "4b04b68f4fb56c9e8d4c60d978ec089ceb88b792", "size": "447", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "netki/BaseObject.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "46323" } ], "symlink_target": "" }
from .. import wma from .util import TestData, eq_ def test1(): # test1.wma is a normal, valid wma file w = wma.WMADecoder(TestData.filepath('wma/test1.wma')) eq_(w.artist, 'Modest Mouse') eq_(w.album, 'The Moon & Antarctica') eq_(w.title, '3rd Planet') eq_(w.genre, 'Rock') eq_(w.comment, '') eq_(w.year, '2000') eq_(w.track, 1) eq_(w.bitrate, 192) eq_(w.size, 77051) eq_(w.duration, 239) eq_(w.audio_offset, 0x15a0) eq_(w.audio_size, 0x582682 - 0x15a0) assert w.valid def test2(): # test2.wma is a mpeg file, thus invalid w = wma.WMADecoder(TestData.filepath('wma/test2.wma')) assert not w.valid eq_(w.audio_offset, 0) eq_(w.audio_size, 0) def testZeroFile(): w = wma.WMADecoder(TestData.filepath('zerofile')) assert not w.valid def test1_non_ascii(): # The album is Unicode w = wma.WMADecoder(TestData.filepath('wma/test1_non_ascii.wma')) assert isinstance(w.album, str) eq_(w.album, 'The Moon \u00c8 Antarctica') def test1_no_track(): # This is a file with no WM/TRACK field w = wma.WMADecoder(TestData.filepath('wma/test1_no_track.wma')) eq_(0, w.track) def test3(): # This is the file that made a customer's musicGuru copy bug. It was because it has no track. w = wma.WMADecoder(TestData.filepath('wma/test3.wma')) eq_(w.artist, 'Giovanni Marradi') eq_(w.album, 'Always') eq_(w.title, 'Gideon') eq_(w.genre, 'Easy Listening') eq_(w.comment, '') eq_(w.year, '') eq_(w.track, 0) eq_(w.bitrate, 48) eq_(w.size, 80767) eq_(w.duration, 238) assert w.valid def test3_truncated_unicode(): # This is the file has its WM/GENRE field last char truncated. Its value, 'Easy Listening' # also has one char truncated. 'Gideon' in the unnamed fields part also has one truncated char. w = wma.WMADecoder(TestData.filepath('wma/test3_truncated_unicode.wma')) eq_(w.genre, 'Easy Listening') eq_(w.title, 'Gideon') def test3_invalid_unicode_surregate(): # This is the file has an invalid char (0xffff) in its WM/GENRE field. 
'Gideon' in the # unnamed fields part also has an invalid surregate (0xdbff and another 0xdbff). w = wma.WMADecoder(TestData.filepath('wma/test3_invalid_unicode_surregate.wma')) eq_(w.genre, '') eq_(w.title, '') def test3_incomplete(): # This file is truncated right in the middle of a field header. The error that it made was an # unpack error. w = wma.WMADecoder(TestData.filepath('wma/test3_incomplete.wma')) eq_(w.genre, '') eq_(w.title, '') def test4(): # VBR w = wma.WMADecoder(TestData.filepath('wma/test4.wma')) eq_(w.artist, 'Red Hot Chilly Peppers') eq_(w.album, '') eq_(w.title, 'Scar Tissue') eq_(w.genre, '') eq_(w.comment, '') eq_(w.year, '') eq_(w.track, 2) eq_(w.bitrate, 370) eq_(w.size, 673675) eq_(w.duration, 217) assert w.valid def test5(): # Another VBR w = wma.WMADecoder(TestData.filepath('wma/test5.wma')) eq_(w.bitrate, 303) eq_(w.duration, 295) def test6(): # Another VBR. This one had a huge, 30 seconds, duration gap w = wma.WMADecoder(TestData.filepath('wma/test6.wma')) eq_(w.bitrate, 422) eq_(w.duration, 298) def test7(): # Yet another VBR wma with buggy duration. w = wma.WMADecoder(TestData.filepath('wma/test7.wma')) eq_(w.bitrate, 327) eq_(w.duration, 539)
{ "content_hash": "2025f0eecc94bb5c0082da95afc4e96e", "timestamp": "", "source": "github", "line_count": 111, "max_line_length": 99, "avg_line_length": 31.2972972972973, "alnum_prop": 0.6292458261370178, "repo_name": "jmtchllrx/pyMuse", "id": "e07bb2ad52c23257497c03facacad5b5c8c9dba4", "size": "3808", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/hsaudiotag/tests/wma_test.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "123941" } ], "symlink_target": "" }
import functools import os import tempfile import mock from oslo_concurrency import processutils from oslo_config import cfg from oslo_utils import fileutils import six from nova.compute import arch from nova import context from nova import exception from nova import objects from nova import test from nova.tests.unit import fake_instance from nova import utils from nova.virt.disk import api as disk from nova.virt import images from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import utils as libvirt_utils CONF = cfg.CONF class LibvirtUtilsTestCase(test.NoDBTestCase): @mock.patch('nova.utils.execute') def test_copy_image_local(self, mock_execute): libvirt_utils.copy_image('src', 'dest') mock_execute.assert_called_once_with('cp', 'src', 'dest') @mock.patch('nova.virt.libvirt.volume.remotefs.SshDriver.copy_file') def test_copy_image_remote_ssh(self, mock_rem_fs_remove): self.flags(remote_filesystem_transport='ssh', group='libvirt') libvirt_utils.copy_image('src', 'dest', host='host') mock_rem_fs_remove.assert_called_once_with('src', 'host:dest', on_completion=None, on_execute=None, compression=True) @mock.patch('nova.virt.libvirt.volume.remotefs.RsyncDriver.copy_file') def test_copy_image_remote_rsync(self, mock_rem_fs_remove): self.flags(remote_filesystem_transport='rsync', group='libvirt') libvirt_utils.copy_image('src', 'dest', host='host') mock_rem_fs_remove.assert_called_once_with('src', 'host:dest', on_completion=None, on_execute=None, compression=True) @mock.patch('os.path.exists', return_value=True) def test_disk_type_from_path(self, mock_exists): # Seems like lvm detection # if its in /dev ?? 
for p in ['/dev/b', '/dev/blah/blah']: d_type = libvirt_utils.get_disk_type_from_path(p) self.assertEqual('lvm', d_type) # Try rbd detection d_type = libvirt_utils.get_disk_type_from_path('rbd:pool/instance') self.assertEqual('rbd', d_type) # Try the other types path = '/myhome/disk.config' d_type = libvirt_utils.get_disk_type_from_path(path) self.assertIsNone(d_type) @mock.patch('os.path.exists', return_value=True) @mock.patch('os.path.isdir', return_value=True) def test_disk_type_ploop(self, mock_isdir, mock_exists): path = '/some/path' d_type = libvirt_utils.get_disk_type_from_path(path) mock_isdir.assert_called_once_with(path) mock_exists.assert_called_once_with("%s/DiskDescriptor.xml" % path) self.assertEqual('ploop', d_type) @mock.patch('os.path.exists', return_value=True) @mock.patch('nova.utils.execute') def test_disk_backing(self, mock_execute, mock_exists): path = '/myhome/disk.config' template_output = """image: %(path)s file format: raw virtual size: 2K (2048 bytes) cluster_size: 65536 disk size: 96K """ output = template_output % ({ 'path': path, }) mock_execute.return_value = (output, '') d_backing = libvirt_utils.get_disk_backing_file(path) mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path, prlimit=images.QEMU_IMG_LIMITS) mock_exists.assert_called_once_with(path) self.assertIsNone(d_backing) def _test_disk_size(self, mock_execute, path, expected_size): d_size = libvirt_utils.get_disk_size(path) self.assertEqual(expected_size, d_size) mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path, prlimit=images.QEMU_IMG_LIMITS) @mock.patch('os.path.exists', return_value=True) def test_disk_size(self, mock_exists): path = '/myhome/disk.config' template_output = """image: %(path)s file format: raw virtual size: %(v_size)s (%(vsize_b)s bytes) cluster_size: 65536 disk size: 96K """ for i in range(0, 128): bytes = i * 65336 kbytes = bytes / 1024 mbytes = kbytes / 1024 output = 
template_output % ({ 'v_size': "%sM" % (mbytes), 'vsize_b': i, 'path': path, }) with mock.patch('nova.utils.execute', return_value=(output, '')) as mock_execute: self._test_disk_size(mock_execute, path, i) output = template_output % ({ 'v_size': "%sK" % (kbytes), 'vsize_b': i, 'path': path, }) with mock.patch('nova.utils.execute', return_value=(output, '')) as mock_execute: self._test_disk_size(mock_execute, path, i) @mock.patch('os.path.exists', return_value=True) @mock.patch('nova.utils.execute') def test_qemu_info_canon(self, mock_execute, mock_exists): path = "disk.config" example_output = """image: disk.config file format: raw virtual size: 64M (67108864 bytes) cluster_size: 65536 disk size: 96K blah BLAH: bb """ mock_execute.return_value = (example_output, '') image_info = images.qemu_img_info(path) mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path, prlimit=images.QEMU_IMG_LIMITS) mock_exists.assert_called_once_with(path) self.assertEqual('disk.config', image_info.image) self.assertEqual('raw', image_info.file_format) self.assertEqual(67108864, image_info.virtual_size) self.assertEqual(98304, image_info.disk_size) self.assertEqual(65536, image_info.cluster_size) @mock.patch('os.path.exists', return_value=True) @mock.patch('nova.utils.execute') def test_qemu_info_canon2(self, mock_execute, mock_exists): path = "disk.config" example_output = """image: disk.config file format: QCOW2 virtual size: 67108844 cluster_size: 65536 disk size: 963434 backing file: /var/lib/nova/a328c7998805951a_2 """ mock_execute.return_value = (example_output, '') image_info = images.qemu_img_info(path) mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path, prlimit=images.QEMU_IMG_LIMITS) mock_exists.assert_called_once_with(path) self.assertEqual('disk.config', image_info.image) self.assertEqual('qcow2', image_info.file_format) self.assertEqual(67108844, image_info.virtual_size) self.assertEqual(963434, 
image_info.disk_size) self.assertEqual(65536, image_info.cluster_size) self.assertEqual('/var/lib/nova/a328c7998805951a_2', image_info.backing_file) @mock.patch('os.path.exists', return_value=True) @mock.patch('nova.utils.execute') def test_qemu_backing_file_actual(self, mock_execute, mock_exists): path = "disk.config" example_output = """image: disk.config file format: raw virtual size: 64M (67108864 bytes) cluster_size: 65536 disk size: 96K Snapshot list: ID TAG VM SIZE DATE VM CLOCK 1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000 backing file: /var/lib/nova/a328c7998805951a_2 (actual path: /b/3a988059e51a_2) """ mock_execute.return_value = (example_output, '') image_info = images.qemu_img_info(path) mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path, prlimit=images.QEMU_IMG_LIMITS) mock_exists.assert_called_once_with(path) self.assertEqual('disk.config', image_info.image) self.assertEqual('raw', image_info.file_format) self.assertEqual(67108864, image_info.virtual_size) self.assertEqual(98304, image_info.disk_size) self.assertEqual(1, len(image_info.snapshots)) self.assertEqual('/b/3a988059e51a_2', image_info.backing_file) @mock.patch('os.path.exists', return_value=True) @mock.patch('nova.utils.execute') def test_qemu_info_convert(self, mock_execute, mock_exists): path = "disk.config" example_output = """image: disk.config file format: raw virtual size: 64M disk size: 96K Snapshot list: ID TAG VM SIZE DATE VM CLOCK 1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000 3 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000 4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000 junk stuff: bbb """ mock_execute.return_value = (example_output, '') image_info = images.qemu_img_info(path) mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path, prlimit=images.QEMU_IMG_LIMITS) mock_exists.assert_called_once_with(path) 
self.assertEqual('disk.config', image_info.image) self.assertEqual('raw', image_info.file_format) self.assertEqual(67108864, image_info.virtual_size) self.assertEqual(98304, image_info.disk_size) @mock.patch('os.path.exists', return_value=True) @mock.patch('nova.utils.execute') def test_qemu_info_snaps(self, mock_execute, mock_exists): path = "disk.config" example_output = """image: disk.config file format: raw virtual size: 64M (67108864 bytes) disk size: 96K Snapshot list: ID TAG VM SIZE DATE VM CLOCK 1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000 3 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000 4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000 """ mock_execute.return_value = (example_output, '') image_info = images.qemu_img_info(path) mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path, prlimit=images.QEMU_IMG_LIMITS) mock_exists.assert_called_once_with(path) self.assertEqual('disk.config', image_info.image) self.assertEqual('raw', image_info.file_format) self.assertEqual(67108864, image_info.virtual_size) self.assertEqual(98304, image_info.disk_size) self.assertEqual(3, len(image_info.snapshots)) def test_valid_hostname_normal(self): self.assertTrue(libvirt_utils.is_valid_hostname("hello.world.com")) def test_valid_hostname_ipv4addr(self): self.assertTrue(libvirt_utils.is_valid_hostname("10.0.2.1")) def test_valid_hostname_ipv6addr(self): self.assertTrue(libvirt_utils.is_valid_hostname("240:2ac3::2")) def test_valid_hostname_bad(self): self.assertFalse(libvirt_utils.is_valid_hostname("foo/?com=/bin/sh")) @mock.patch('nova.utils.execute') def test_create_image(self, mock_execute): libvirt_utils.create_image('raw', '/some/path', '10G') libvirt_utils.create_image('qcow2', '/some/stuff', '1234567891234') expected_args = [(('qemu-img', 'create', '-f', 'raw', '/some/path', '10G'),), (('qemu-img', 'create', '-f', 'qcow2', '/some/stuff', '1234567891234'),)] 
self.assertEqual(expected_args, mock_execute.call_args_list) @mock.patch('os.path.exists', return_value=True) @mock.patch('nova.utils.execute') def test_create_cow_image(self, mock_execute, mock_exists): mock_execute.return_value = ('stdout', None) libvirt_utils.create_cow_image('/some/path', '/the/new/cow') expected_args = [(('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', '/some/path'), {'prlimit': images.QEMU_IMG_LIMITS}), (('qemu-img', 'create', '-f', 'qcow2', '-o', 'backing_file=/some/path', '/the/new/cow'),)] self.assertEqual(expected_args, mock_execute.call_args_list) def test_pick_disk_driver_name(self): type_map = {'kvm': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']), 'qemu': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']), 'uml': ([True, None], [False, None], [None, None]), 'lxc': ([True, None], [False, None], [None, None])} # NOTE(aloga): Xen is tested in test_pick_disk_driver_name_xen version = 1005001 for (virt_type, checks) in six.iteritems(type_map): self.flags(virt_type=virt_type, group='libvirt') for (is_block_dev, expected_result) in checks: result = libvirt_utils.pick_disk_driver_name(version, is_block_dev) self.assertEqual(result, expected_result) @mock.patch('nova.utils.execute') def test_pick_disk_driver_name_xen(self, mock_execute): def side_effect(*args, **kwargs): if args == ('tap-ctl', 'check'): if mock_execute.blktap is True: return ('ok\n', '') elif mock_execute.blktap is False: return ('some error\n', '') else: raise OSError(2, "No such file or directory") elif args == ('xend', 'status'): if mock_execute.xend is True: return ('', '') elif mock_execute.xend is False: raise processutils.ProcessExecutionError("error") else: raise OSError(2, "No such file or directory") raise Exception('Unexpected call') mock_execute.side_effect = side_effect self.flags(virt_type="xen", group='libvirt') versions = [4000000, 4001000, 4002000, 4003000, 4005000] for version in versions: # block dev result = libvirt_utils.pick_disk_driver_name(version, 
True) self.assertEqual(result, "phy") self.assertFalse(mock_execute.called) mock_execute.reset_mock() # file dev for blktap in True, False, None: mock_execute.blktap = blktap for xend in True, False, None: mock_execute.xend = xend result = libvirt_utils.pick_disk_driver_name(version, False) # qemu backend supported only by libxl which is # production since xen 4.2. libvirt use libxl if # xend service not started. if version >= 4002000 and xend is not True: self.assertEqual(result, 'qemu') elif blktap: if version == 4000000: self.assertEqual(result, 'tap') else: self.assertEqual(result, 'tap2') else: self.assertEqual(result, 'file') # default is_block_dev False self.assertEqual(result, libvirt_utils.pick_disk_driver_name(version)) mock_execute.reset_mock() @mock.patch('os.path.exists', return_value=True) @mock.patch('nova.utils.execute') def test_get_disk_size(self, mock_execute, mock_exists): path = '/some/path' example_output = """image: 00000001 file format: raw virtual size: 4.4M (4592640 bytes) disk size: 4.4M """ mock_execute.return_value = (example_output, '') self.assertEqual(4592640, disk.get_disk_size('/some/path')) mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path, prlimit=images.QEMU_IMG_LIMITS) mock_exists.assert_called_once_with(path) def test_copy_image(self): dst_fd, dst_path = tempfile.mkstemp() try: os.close(dst_fd) src_fd, src_path = tempfile.mkstemp() try: with os.fdopen(src_fd, 'w') as fp: fp.write('canary') libvirt_utils.copy_image(src_path, dst_path) with open(dst_path, 'r') as fp: self.assertEqual(fp.read(), 'canary') finally: os.unlink(src_path) finally: os.unlink(dst_path) def test_write_to_file(self): dst_fd, dst_path = tempfile.mkstemp() try: os.close(dst_fd) libvirt_utils.write_to_file(dst_path, 'hello') with open(dst_path, 'r') as fp: self.assertEqual(fp.read(), 'hello') finally: os.unlink(dst_path) def test_write_to_file_with_umask(self): dst_fd, dst_path = tempfile.mkstemp() try: 
os.close(dst_fd) os.unlink(dst_path) libvirt_utils.write_to_file(dst_path, 'hello', umask=0o277) with open(dst_path, 'r') as fp: self.assertEqual(fp.read(), 'hello') mode = os.stat(dst_path).st_mode self.assertEqual(mode & 0o277, 0) finally: os.unlink(dst_path) @mock.patch.object(utils, 'execute') def test_chown(self, mock_execute): libvirt_utils.chown('/some/path', 'soren') mock_execute.assert_called_once_with('chown', 'soren', '/some/path', run_as_root=True) @mock.patch.object(utils, 'execute') def test_chown_for_id_maps(self, mock_execute): id_maps = [vconfig.LibvirtConfigGuestUIDMap(), vconfig.LibvirtConfigGuestUIDMap(), vconfig.LibvirtConfigGuestGIDMap(), vconfig.LibvirtConfigGuestGIDMap()] id_maps[0].target = 10000 id_maps[0].count = 2000 id_maps[1].start = 2000 id_maps[1].target = 40000 id_maps[1].count = 2000 id_maps[2].target = 10000 id_maps[2].count = 2000 id_maps[3].start = 2000 id_maps[3].target = 40000 id_maps[3].count = 2000 libvirt_utils.chown_for_id_maps('/some/path', id_maps) execute_args = ('nova-idmapshift', '-i', '-u', '0:10000:2000,2000:40000:2000', '-g', '0:10000:2000,2000:40000:2000', '/some/path') mock_execute.assert_called_once_with(*execute_args, run_as_root=True) def _do_test_extract_snapshot(self, mock_execute, src_format='qcow2', dest_format='raw', out_format='raw'): libvirt_utils.extract_snapshot('/path/to/disk/image', src_format, '/extracted/snap', dest_format) mock_execute.assert_called_once_with( 'qemu-img', 'convert', '-f', src_format, '-O', out_format, '/path/to/disk/image', '/extracted/snap') @mock.patch.object(utils, 'execute') def test_extract_snapshot_raw(self, mock_execute): self._do_test_extract_snapshot(mock_execute) @mock.patch.object(utils, 'execute') def test_extract_snapshot_iso(self, mock_execute): self._do_test_extract_snapshot(mock_execute, dest_format='iso') @mock.patch.object(utils, 'execute') def test_extract_snapshot_qcow2(self, mock_execute): self._do_test_extract_snapshot(mock_execute, dest_format='qcow2', 
out_format='qcow2') @mock.patch.object(utils, 'execute') def test_extract_snapshot_parallels(self, mock_execute): self._do_test_extract_snapshot(mock_execute, src_format='raw', dest_format='ploop', out_format='parallels') def test_load_file(self): dst_fd, dst_path = tempfile.mkstemp() try: os.close(dst_fd) # We have a test for write_to_file. If that is sound, this suffices libvirt_utils.write_to_file(dst_path, 'hello') self.assertEqual(libvirt_utils.load_file(dst_path), 'hello') finally: os.unlink(dst_path) def test_file_open(self): dst_fd, dst_path = tempfile.mkstemp() try: os.close(dst_fd) # We have a test for write_to_file. If that is sound, this suffices libvirt_utils.write_to_file(dst_path, 'hello') with libvirt_utils.file_open(dst_path, 'r') as fp: self.assertEqual(fp.read(), 'hello') finally: os.unlink(dst_path) def test_get_fs_info(self): class FakeStatResult(object): def __init__(self): self.f_bsize = 4096 self.f_frsize = 4096 self.f_blocks = 2000 self.f_bfree = 1000 self.f_bavail = 900 self.f_files = 2000 self.f_ffree = 1000 self.f_favail = 900 self.f_flag = 4096 self.f_namemax = 255 self.path = None def fake_statvfs(path): self.path = path return FakeStatResult() self.stub_out('os.statvfs', fake_statvfs) fs_info = libvirt_utils.get_fs_info('/some/file/path') self.assertEqual('/some/file/path', self.path) self.assertEqual(8192000, fs_info['total']) self.assertEqual(3686400, fs_info['free']) self.assertEqual(4096000, fs_info['used']) @mock.patch('nova.virt.images.fetch_to_raw') def test_fetch_image(self, mock_images): context = 'opaque context' target = '/tmp/targetfile' image_id = '4' user_id = 'fake' project_id = 'fake' libvirt_utils.fetch_image(context, target, image_id, user_id, project_id) mock_images.assert_called_once_with( context, image_id, target, user_id, project_id, max_size=0) @mock.patch('nova.virt.images.fetch') def test_fetch_initrd_image(self, mock_images): _context = context.RequestContext(project_id=123, project_name="aubergine", 
user_id=456, user_name="pie") target = '/tmp/targetfile' image_id = '4' user_id = 'fake' project_id = 'fake' libvirt_utils.fetch_raw_image(_context, target, image_id, user_id, project_id) mock_images.assert_called_once_with( _context, image_id, target, user_id, project_id, max_size=0) def test_fetch_raw_image(self): def fake_execute(*cmd, **kwargs): self.executes.append(cmd) return None, None def fake_rename(old, new): self.executes.append(('mv', old, new)) def fake_unlink(path): self.executes.append(('rm', path)) def fake_rm_on_error(path, remove=None): self.executes.append(('rm', '-f', path)) def fake_qemu_img_info(path): class FakeImgInfo(object): pass file_format = path.split('.')[-1] if file_format == 'part': file_format = path.split('.')[-2] elif file_format == 'converted': file_format = 'raw' if 'backing' in path: backing_file = 'backing' else: backing_file = None if 'big' in path: virtual_size = 2 else: virtual_size = 1 FakeImgInfo.file_format = file_format FakeImgInfo.backing_file = backing_file FakeImgInfo.virtual_size = virtual_size return FakeImgInfo() self.stubs.Set(utils, 'execute', fake_execute) self.stub_out('os.rename', fake_rename) self.stub_out('os.unlink', fake_unlink) self.stubs.Set(images, 'fetch', lambda *_, **__: None) self.stubs.Set(images, 'qemu_img_info', fake_qemu_img_info) self.stubs.Set(fileutils, 'delete_if_exists', fake_rm_on_error) # Since the remove param of fileutils.remove_path_on_error() # is initialized at load time, we must provide a wrapper # that explicitly resets it to our fake delete_if_exists() old_rm_path_on_error = fileutils.remove_path_on_error f = functools.partial(old_rm_path_on_error, remove=fake_rm_on_error) self.stubs.Set(fileutils, 'remove_path_on_error', f) context = 'opaque context' image_id = '4' user_id = 'fake' project_id = 'fake' target = 't.qcow2' self.executes = [] expected_commands = [('qemu-img', 'convert', '-O', 'raw', 't.qcow2.part', 't.qcow2.converted', '-f', 'qcow2'), ('rm', 't.qcow2.part'), ('mv', 
't.qcow2.converted', 't.qcow2')] images.fetch_to_raw(context, image_id, target, user_id, project_id, max_size=1) self.assertEqual(self.executes, expected_commands) target = 't.raw' self.executes = [] expected_commands = [('mv', 't.raw.part', 't.raw')] images.fetch_to_raw(context, image_id, target, user_id, project_id) self.assertEqual(self.executes, expected_commands) target = 'backing.qcow2' self.executes = [] expected_commands = [('rm', '-f', 'backing.qcow2.part')] self.assertRaises(exception.ImageUnacceptable, images.fetch_to_raw, context, image_id, target, user_id, project_id) self.assertEqual(self.executes, expected_commands) target = 'big.qcow2' self.executes = [] expected_commands = [('rm', '-f', 'big.qcow2.part')] self.assertRaises(exception.FlavorDiskSmallerThanImage, images.fetch_to_raw, context, image_id, target, user_id, project_id, max_size=1) self.assertEqual(self.executes, expected_commands) del self.executes def test_get_disk_backing_file(self): with_actual_path = False def fake_execute(*args, **kwargs): if with_actual_path: return ("some: output\n" "backing file: /foo/bar/baz (actual path: /a/b/c)\n" "...: ...\n"), '' else: return ("some: output\n" "backing file: /foo/bar/baz\n" "...: ...\n"), '' def return_true(*args, **kwargs): return True self.stubs.Set(utils, 'execute', fake_execute) self.stub_out('os.path.exists', return_true) out = libvirt_utils.get_disk_backing_file('') self.assertEqual(out, 'baz') with_actual_path = True out = libvirt_utils.get_disk_backing_file('') self.assertEqual(out, 'c') def test_get_instance_path_at_destination(self): instance = fake_instance.fake_instance_obj(None, name='fake_inst', uuid='fake_uuid') migrate_data = None inst_path_at_dest = libvirt_utils.get_instance_path_at_destination( instance, migrate_data) expected_path = os.path.join(CONF.instances_path, instance['uuid']) self.assertEqual(expected_path, inst_path_at_dest) migrate_data = {} inst_path_at_dest = libvirt_utils.get_instance_path_at_destination( 
instance, migrate_data) expected_path = os.path.join(CONF.instances_path, instance['uuid']) self.assertEqual(expected_path, inst_path_at_dest) migrate_data = objects.LibvirtLiveMigrateData( instance_relative_path='fake_relative_path') inst_path_at_dest = libvirt_utils.get_instance_path_at_destination( instance, migrate_data) expected_path = os.path.join(CONF.instances_path, 'fake_relative_path') self.assertEqual(expected_path, inst_path_at_dest) def test_get_arch(self): image_meta = objects.ImageMeta.from_dict( {'properties': {'architecture': "X86_64"}}) image_arch = libvirt_utils.get_arch(image_meta) self.assertEqual(arch.X86_64, image_arch)
{ "content_hash": "40ab2fb1aa29cb8bfe45b45be9ad7e63", "timestamp": "", "source": "github", "line_count": 709, "max_line_length": 79, "avg_line_length": 41.400564174894214, "alnum_prop": 0.555513916805778, "repo_name": "HybridF5/nova", "id": "599d51538b9942bee2bddebfd17db2a9339a8e12", "size": "30026", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "nova/tests/unit/virt/libvirt/test_utils.py", "mode": "33188", "license": "apache-2.0", "language": [], "symlink_target": "" }
import os import psycopg2 from petl import * import argparse # get the subdirectory containing the .csv files from the command line and check that it exists. # parser = argparse.ArgumentParser(description='Import .csv files for the Freshwater Species Database.') parser.add_argument('subdir', help="name of the database subdirectory (expected to be in 'wkg/')") args = parser.parse_args() if not os.path.exists(os.path.join('wkg', args.subdir)): print('Error: ' + args.subdir + " does not exist as a subdirectory of 'wkg/'.") exit() # Set up the database connection # connection = psycopg2.connect( database=os.environ['DB'], user=os.environ['DB_USER'], password=os.environ['DB_PASSWORD'], host=os.environ['DB_HOST'], ) cursor = connection.cursor() # Origin: create, map, and load # cursor.execute(""" DROP TABLE IF EXISTS origins CASCADE; CREATE TABLE origins ( id BIGSERIAL NOT NULL UNIQUE PRIMARY KEY, org_id INTEGER NOT NULL UNIQUE, org_name VARCHAR(32) NOT NULL ); """) f = fromcsv(os.path.join('wkg', args.subdir, 'Origin.csv')) f = cutout(f, 'OBJECTID') f = rename(f, { 'Org_ID': 'org_id', 'Org_Name': 'org_name', }) f = convertnumbers(f) todb(f, connection, 'origins') # ObservationType: create, map, and load # cursor.execute(""" DROP TABLE IF EXISTS observation_types CASCADE; CREATE TABLE observation_types ( id BIGSERIAL NOT NULL UNIQUE PRIMARY KEY, obs_typ_id INTEGER UNIQUE, obs_typ_name VARCHAR(64) NOT NULL UNIQUE, range_obs VARCHAR(32), current_other VARCHAR(32), group_ VARCHAR(32) ); """) f = fromcsv(os.path.join('wkg', args.subdir, 'ObservationType.csv')) f = cutout(f, 'OBJECTID') f = rename(f, { 'ObsTyp_ID': 'obs_typ_id', 'ObsTyp_Name': 'obs_typ_name', 'Range_Obs': 'range_obs', 'Current_Other': 'current_other', 'Group_': 'group_', }) f = convertnumbers(f) todb(f, connection, 'observation_types') # Source: create, map, and load # # @todo source_name should be UNIQUE. 
cursor.execute(""" DROP TABLE IF EXISTS sources CASCADE; CREATE TABLE sources ( id BIGSERIAL NOT NULL UNIQUE PRIMARY KEY, source_id INTEGER NOT NULL UNIQUE, source_name VARCHAR(256), sourcegrp_name VARCHAR(64), use_agree TEXT, permission_request_needed VARCHAR(64), permission_contact_name VARCHAR(32), permission_contact_email VARCHAR(64), permission_status TEXT, permission VARCHAR(32), comment_id INTEGER, citation TEXT, weblink VARCHAR(128), pre_release_review VARCHAR(8), aggregator VARCHAR(32), count_huc12s INTEGER, count_elm_ids INTEGER ); CREATE INDEX ON sources (source_id); """) f = fromcsv(os.path.join('wkg', args.subdir, 'Source.csv')) f = cutout(f, 'OBJECTID') f = rename(f, { 'Source_ID': 'source_id', 'Source_Name': 'source_name', 'SourceGrp_Name': 'sourcegrp_name', 'Use_agree': 'use_agree', 'Permission_request_needed': 'permission_request_needed', 'Permission_contact_name': 'permission_contact_name', 'Permission_contact_email': 'permission_contact_email', 'Permission_status': 'permission_status', 'Permission': 'permission', 'Comment_ID': 'comment_id', 'Citation': 'citation', 'Weblink': 'weblink', 'Pre_release_review': 'pre_release_review', 'Aggregator': 'aggregator', 'Count_HUC12s': 'count_huc12s', 'Count_Elm_IDs': 'count_elm_ids' }) f = convertnumbers(f) f = convert(f, ( 'source_id', 'comment_id', 'count_huc12s', 'count_elm_ids', ), lambda v: int(v)) todb(f, connection, 'sources') # HabitatUsage: create, map, and load # cursor.execute(""" DROP TABLE IF EXISTS habitat_usages CASCADE; CREATE TABLE habitat_usages ( id BIGSERIAL NOT NULL UNIQUE PRIMARY KEY, hab_usage_id INTEGER UNIQUE, hab_usage_name VARCHAR(32) UNIQUE ); """) f = fromcsv(os.path.join('wkg', args.subdir, 'HabitatUsage.csv')) f = cutout(f, 'OBJECTID') f = rename(f, { 'HabU_ID': 'hab_usage_id', 'HabU_Name': 'hab_usage_name', }) f = convertnumbers(f) todb(f, connection, 'habitat_usages') # Element: create, map, and load # cursor.execute(""" DROP TABLE IF EXISTS elements CASCADE; CREATE TABLE 
elements ( id BIGSERIAL NOT NULL UNIQUE PRIMARY KEY, elm_scinam VARCHAR(64), elm_comnam VARCHAR(64), group_ VARCHAR(32) NOT NULL, fwa_v1 INTEGER, tax_list VARCHAR(32), g_rank VARCHAR(16), s_rank VARCHAR(32), elm_scin_1 VARCHAR(64), elm_scin_2 VARCHAR(64), elm_scin_3 VARCHAR(64), elm_scin_4 VARCHAR(64), kingdom VARCHAR(32), phylum VARCHAR(32), tax_class VARCHAR(32), tax_order VARCHAR(32), family VARCHAR(32), genus VARCHAR(32), species VARCHAR(32), subsp_var VARCHAR(32), kingdom_id VARCHAR(5), phylum_id VARCHAR(5), tax_class_i VARCHAR(5), tax_order_i VARCHAR(5), family_id VARCHAR(5), genus_id VARCHAR(5), species_id VARCHAR(5), elm_id INTEGER NOT NULL UNIQUE, other_id VARCHAR(5), sensitive_fam VARCHAR(32), ns_endemic INTEGER, safit_endemic INTEGER, other_endemic INTEGER, endemism_comment TEXT, fed_list VARCHAR(64), state_list VARCHAR(64), other_list VARCHAR(64), mgtag_list VARCHAR(32), listed BOOLEAN, vulnerable BOOLEAN, endemic BOOLEAN, common BOOLEAN, not_evaluated BOOLEAN, extinct BOOLEAN, status VARCHAR(32) ); CREATE INDEX ON elements (elm_id); CREATE INDEX ON elements (group_); """) f = fromcsv(os.path.join('wkg', args.subdir, 'Elements.csv')) f = cutout(f, 'OBJECTID') f = rename(f, { 'ELM_SCINAM': 'elm_scinam', 'ELM_COMNAM': 'elm_comnam', 'GROUP_': 'group_', 'FWA_v1': 'fwa_v1', 'TAX_LIST': 'tax_list', 'G_Rank': 'g_rank', 'S_Rank': 's_rank', 'ELM_SCIN_1': 'elm_scin_1', 'ELM_SCIN_2': 'elm_scin_2', 'ELM_SCIN_3': 'elm_scin_3', 'ELM_SCIN_4': 'elm_scin_4', 'Kingdom': 'kingdom', 'Phylum': 'phylum', 'TaxClass': 'tax_class', 'TaxOrder': 'tax_order', 'Family': 'family', 'Genus': 'genus', 'Species': 'species', 'Subsp_Var': 'subsp_var', 'Kingdom_ID': 'kingdom_id', 'Phylum_ID': 'phylum_id', 'TaxClass_I': 'tax_class_i', 'TaxOrder_I': 'tax_order_i', 'Family_ID': 'family_id', 'Genus_ID': 'genus_id', 'Species_ID': 'species_id', 'ELM_ID': 'elm_id', 'Other_ID': 'other_id', 'Sensitive_Fam': 'sensitive_fam', 'NS_endemic': 'ns_endemic', 'SAFIT_endemic': 'safit_endemic', 
'Other_endemic': 'other_endemic', 'Endemism_comment': 'endemism_comment', 'Fed_list': 'fed_list', 'State_list': 'state_list', 'Other_list': 'other_list', 'MgtAg_list': 'mgtag_list', 'Listed': 'listed', 'Vulnerable': 'vulnerable', 'Endemic': 'endemic', 'Common': 'common', 'Not_evaluated': 'not_evaluated', 'Extinct': 'extinct', 'Status': 'status', }) # @todo resolve this hack: Deal with the mussels (multiple rows having elm_id = 81077) f = selectnotin(f, 'elm_scinam', [ 'Anodonta californiensis', 'Anodonta dejecta', 'Anodonta oregonensis', ]) # Attempt to pull the comma-as-thousands separator out. f = sub(f, ( 'fwa_v1', 'kingdom_id', 'phylum_id', 'tax_class_i', 'tax_order_i', 'family_id', 'genus_id', 'species_id', 'elm_id', 'other_id', ), ',', '') # Convert the new values to integers; this can handle nulls. f = convert(f, ( 'fwa_v1', 'elm_id', ), lambda v: int(v)) # Don't really have to do this, but seems cleaner. f = convert(f, ( 'listed', 'vulnerable', 'endemic', 'common', 'not_evaluated', 'extinct', ), { 0: False, 1 : True }) todb(f, connection, 'elements') # AU_v_elm: create, map, and load # cursor.execute(""" DROP TABLE IF EXISTS au_v_elms CASCADE; CREATE TABLE au_v_elms ( id BIGSERIAL NOT NULL UNIQUE PRIMARY KEY, elm_id INTEGER REFERENCES elements (elm_id), huc_12 VARCHAR(12), obs_typ_id INTEGER REFERENCES observation_types (obs_typ_id), source_id INTEGER REFERENCES sources (source_id), frequency DOUBLE PRECISION, sum_amount DOUBLE PRECISION ); """) f = fromcsv(os.path.join('wkg', args.subdir, 'AU_v_Elm_sum.csv')) f = cutout(f, 'OID_') f = rename(f, { 'Elm_ID': 'elm_id', 'HUC_12': 'huc_12', 'ObsTyp_ID': 'obs_typ_id', 'Source_ID': 'source_id', 'FREQUENCY': 'frequency', 'SUM_Amount': 'sum_amount', }) f = sub(f, ( 'elm_id', 'source_id', 'sum_amount', ), ',', '') f = convert(f, ('elm_id', 'source_id'), lambda v: int(float(v))) f = convert(f, ('sum_amount'), lambda v: float(v)) todb(f, connection, 'au_v_elms') # Persist and be tidy connection.commit() cursor.close() 
connection.close()
{ "content_hash": "3275f9ffcba957aa2839e45b5b82c1d5", "timestamp": "", "source": "github", "line_count": 316, "max_line_length": 102, "avg_line_length": 35.075949367088604, "alnum_prop": 0.4957596535546734, "repo_name": "NewCaliforniaWaterAtlas/ca-freshwater-species-backend", "id": "42075f5df65f833f75cb166467f368ec4878e739", "size": "11204", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "wkg/data_loader.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "23765" } ], "symlink_target": "" }
from sqlalchemy import orm from sqlalchemy.event import listens_for from sqlalchemy.orm import column_property from sqlalchemy.sql import select from indico.core.db import db from indico.core.db.sqlalchemy import PyIntEnum from indico.util.enum import RichIntEnum from indico.util.i18n import _ from indico.util.locators import locator_property from indico.util.string import format_repr from indico.web.flask.util import url_for class EditableType(RichIntEnum): __titles__ = [None, _('Paper'), _('Slides'), _('Poster')] __editor_permissions__ = [None, 'paper_editing', 'slides_editing', 'poster_editing'] paper = 1 slides = 2 poster = 3 @property def editor_permission(self): return self.__editor_permissions__[self] class EditableState(RichIntEnum): __titles__ = [None, _('New'), _('Ready for Review'), _('Needs Confirmation'), _('Needs Changes'), _('Accepted'), _('Rejected')] __css_classes__ = [None, 'highlight', 'ready', 'warning', 'warning', 'success', 'error'] new = 1 ready_for_review = 2 needs_submitter_confirmation = 3 needs_submitter_changes = 4 accepted = 5 rejected = 6 class Editable(db.Model): __tablename__ = 'editables' __table_args__ = (db.UniqueConstraint('contribution_id', 'type'), {'schema': 'event_editing'}) id = db.Column( db.Integer, primary_key=True ) contribution_id = db.Column( db.ForeignKey('events.contributions.id'), index=True, nullable=False ) type = db.Column( PyIntEnum(EditableType), nullable=False ) editor_id = db.Column( db.ForeignKey('users.users.id'), index=True, nullable=True ) published_revision_id = db.Column( db.ForeignKey('event_editing.revisions.id'), index=True, nullable=True ) contribution = db.relationship( 'Contribution', lazy=True, backref=db.backref( 'editables', lazy=True, ) ) editor = db.relationship( 'User', lazy=True, backref=db.backref( 'editor_for_editables', lazy='dynamic' ) ) published_revision = db.relationship( 'EditingRevision', foreign_keys=published_revision_id, lazy=True, ) # relationship backrefs: # - revisions 
(EditingRevision.editable) def __repr__(self): return format_repr(self, 'id', 'contribution_id', 'type') @locator_property def locator(self): return dict(self.contribution.locator, type=self.type.name) @property def event(self): return self.contribution.event def _has_general_editor_permissions(self, user): """Whether the user has general editor permissions on the Editable. This means that the user has editor permissions for the editable's type, but does not need to be the assigned editor. """ # Editing (and event) managers always have editor-like access return ( self.event.can_manage(user, permission='editing_manager') or self.event.can_manage(user, permission=self.type.editor_permission) ) def can_see_timeline(self, user): """Whether the user can see the editable's timeline. This is pure read access, without any ability to make changes or leave comments. """ # Anyone with editor access to the editable's type can see the timeline. # Users associated with the editable's contribution can do so as well. return ( self._has_general_editor_permissions(user) or self.contribution.can_submit_proceedings(user) or self.contribution.is_user_associated(user, check_abstract=True) ) def can_perform_submitter_actions(self, user): """Whether the user can perform any submitter actions. These are actions such as uploading a new revision after having been asked to make changes or approving/rejecting changes made by an editor. """ # If the user can't even see the timeline, we never allow any modifications if not self.can_see_timeline(user): return False # Anyone who can submit new proceedings can also perform submitter actions, # i.e. the abstract submitter and anyone with submission access to the contribution. return self.contribution.can_submit_proceedings(user) def can_perform_editor_actions(self, user): """Whether the user can perform any Editing actions. 
These are actions usually made by the assigned Editor of the editable, such as making changes, asking the user to make changes, or approving/rejecting the editable. """ from indico.modules.events.editing.settings import editable_type_settings # If the user can't even see the timeline, we never allow any modifications if not self.can_see_timeline(user): return False # Editing/event managers can perform actions when they are the assigned editor # even when editing is disabled in the settings if self.editor == user and self.event.can_manage(user, permission='editing_manager'): return True # Editing needs to be enabled in the settings otherwise if not editable_type_settings[self.type].get(self.event, 'editing_enabled'): return False # Editors need the permission on the editable type and also be the assigned editor if self.editor == user and self.event.can_manage(user, permission=self.type.editor_permission): return True return False def can_use_internal_comments(self, user): """Whether the user can create/see internal comments.""" return self._has_general_editor_permissions(user) def can_see_editor_names(self, user, actor=None): """Whether the user can see the names of editing team members. This is always true if team anonymity is not enabled; otherwise only users who are member of the editing team will see names. If an `actor` is set, the check applies to whether the name of this particular user can be seen. """ from indico.modules.events.editing.settings import editable_type_settings return ( not editable_type_settings[self.type].get(self.event, 'anonymous_team') or (actor and not self.can_see_editor_names(actor)) or self._has_general_editor_permissions(user) ) def can_comment(self, user): """Whether the user can comment on the editable.""" # We allow any user associated with the contribution to comment, even if they are # not authorized to actually perform submitter actions. 
return (self.event.can_manage(user, permission=self.type.editor_permission) or self.event.can_manage(user, permission='editing_manager') or self.contribution.is_user_associated(user, check_abstract=True)) def can_assign_self(self, user): """Whether the user can assign themselves on the editable.""" from indico.modules.events.editing.settings import editable_type_settings type_settings = editable_type_settings[self.type] if self.editor and (self.editor == user or not self.can_unassign(user)): return False return ((self.event.can_manage(user, permission=self.type.editor_permission) and type_settings.get(self.event, 'editing_enabled') and type_settings.get(self.event, 'self_assign_allowed')) or self.event.can_manage(user, permission='editing_manager')) def can_unassign(self, user): """Whether the user can unassign the editor of the editable.""" from indico.modules.events.editing.settings import editable_type_settings type_settings = editable_type_settings[self.type] return (self.event.can_manage(user, permission='editing_manager') or (self.editor == user and self.event.can_manage(user, permission=self.type.editor_permission) and type_settings.get(self.event, 'editing_enabled') and type_settings.get(self.event, 'self_assign_allowed'))) @property def review_conditions_valid(self): from indico.modules.events.editing.models.review_conditions import EditingReviewCondition query = EditingReviewCondition.query.with_parent(self.event).filter_by(type=self.type) review_conditions = [{ft.id for ft in cond.file_types} for cond in query] file_types = {file.file_type_id for file in self.revisions[-1].files} if not review_conditions: return True return any(file_types >= cond for cond in review_conditions) @property def editing_enabled(self): from indico.modules.events.editing.settings import editable_type_settings return editable_type_settings[self.type].get(self.event, 'editing_enabled') @property def external_timeline_url(self): return url_for('event_editing.editable', self, 
_external=True) @property def timeline_url(self): return url_for('event_editing.editable', self) def log(self, *args, **kwargs): """Log with prefilled metadata for the editable.""" self.event.log(*args, meta={'editable_id': self.id}, **kwargs) @listens_for(orm.mapper, 'after_configured', once=True) def _mappers_configured(): from .revisions import EditingRevision, FinalRevisionState, InitialRevisionState # Editable.state -- the state of the editable itself cases = db.cast(db.case({ FinalRevisionState.none: db.case({ InitialRevisionState.new: EditableState.new, InitialRevisionState.ready_for_review: EditableState.ready_for_review, InitialRevisionState.needs_submitter_confirmation: EditableState.needs_submitter_confirmation }, value=EditingRevision.initial_state), # the states resulting in None are always followed by another revision, so we don't ever # expect the latest revision of an editable to have such a state FinalRevisionState.replaced: None, FinalRevisionState.needs_submitter_confirmation: None, FinalRevisionState.needs_submitter_changes: EditableState.needs_submitter_changes, FinalRevisionState.accepted: EditableState.accepted, FinalRevisionState.rejected: EditableState.rejected, }, value=EditingRevision.final_state), PyIntEnum(EditableState)) query = (select([cases]) .where(EditingRevision.editable_id == Editable.id) .order_by(EditingRevision.created_dt.desc()) .limit(1) .correlate_except(EditingRevision) .scalar_subquery()) Editable.state = column_property(query) # Editable.revision_count -- the number of revisions the editable has query = (select([db.func.count(EditingRevision.id)]) .where(EditingRevision.editable_id == Editable.id) .correlate_except(EditingRevision) .scalar_subquery()) Editable.revision_count = column_property(query)
{ "content_hash": "a389596de583e8e401f19163aa7d3ac2", "timestamp": "", "source": "github", "line_count": 279, "max_line_length": 105, "avg_line_length": 40.60215053763441, "alnum_prop": 0.6558968926553672, "repo_name": "DirkHoffmann/indico", "id": "d666e4d69af850aef96b7cb2baa34ca8b1567bac", "size": "11542", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "indico/modules/events/editing/models/editable.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "33249" }, { "name": "HTML", "bytes": "1398354" }, { "name": "JavaScript", "bytes": "2295843" }, { "name": "Mako", "bytes": "1527" }, { "name": "Python", "bytes": "5426206" }, { "name": "SCSS", "bytes": "496904" }, { "name": "Shell", "bytes": "3877" }, { "name": "TeX", "bytes": "23435" }, { "name": "XSLT", "bytes": "1504" } ], "symlink_target": "" }
from test_framework import BitcoinTestFramework from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException from util import * import os import shutil # Create one-input, one-output, no-fee transaction: class MempoolCoinbaseTest(BitcoinTestFramework): def setup_network(self): # Just need one node for this test args = ["-checkmempool", "-debug=mempool"] self.nodes = [] self.nodes.append(start_node(0, self.options.tmpdir, args)) self.is_network_split = False def create_tx(self, from_txid, to_address, amount): inputs = [{ "txid" : from_txid, "vout" : 0}] outputs = { to_address : amount } rawtx = self.nodes[0].createrawtransaction(inputs, outputs) signresult = self.nodes[0].signrawtransaction(rawtx) assert_equal(signresult["complete"], True) return signresult["hex"] def run_test(self): node0_address = self.nodes[0].getnewaddress() # Spend block 1/2/3's coinbase transactions # Mine a block. # Create three more transactions, spending the spends # Mine another block. # ... make sure all the transactions are confirmed # Invalidate both blocks # ... make sure all the transactions are put back in the mempool # Mine a new block # ... make sure all the transactions are confirmed again. 
b = [ self.nodes[0].getblockhash(n) for n in range(1, 4) ] coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ] spends1_raw = [ self.create_tx(txid, node0_address, 50) for txid in coinbase_txids ] spends1_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends1_raw ] blocks = [] blocks.extend(self.nodes[0].generate(1)) spends2_raw = [ self.create_tx(txid, node0_address, 49.99) for txid in spends1_id ] spends2_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends2_raw ] blocks.extend(self.nodes[0].generate(1)) # mempool should be empty, all txns confirmed assert_equal(set(self.nodes[0].getrawmempool()), set()) for txid in spends1_id+spends2_id: tx = self.nodes[0].gettransaction(txid) assert(tx["confirmations"] > 0) # Use invalidateblock to re-org back; all transactions should # end up unconfirmed and back in the mempool for node in self.nodes: node.invalidateblock(blocks[0]) # mempool should be empty, all txns confirmed assert_equal(set(self.nodes[0].getrawmempool()), set(spends1_id+spends2_id)) for txid in spends1_id+spends2_id: tx = self.nodes[0].gettransaction(txid) assert(tx["confirmations"] == 0) # Generate another block, they should all get mined self.nodes[0].generate(1) # mempool should be empty, all txns confirmed assert_equal(set(self.nodes[0].getrawmempool()), set()) for txid in spends1_id+spends2_id: tx = self.nodes[0].gettransaction(txid) assert(tx["confirmations"] > 0) if __name__ == '__main__': MempoolCoinbaseTest().main()
{ "content_hash": "82159598c7fb530dbee3a676c790f635", "timestamp": "", "source": "github", "line_count": 78, "max_line_length": 92, "avg_line_length": 40.44871794871795, "alnum_prop": 0.6335974643423138, "repo_name": "denverl/bitcoin", "id": "6f7f577e36cfe50f1e1ed461f6f8a2fc4f900f51", "size": "3451", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "qa/rpc-tests/mempool_resurrect_test.py", "mode": "33261", "license": "mit", "language": [ { "name": "C", "bytes": "478282" }, { "name": "C++", "bytes": "3572155" }, { "name": "CSS", "bytes": "1127" }, { "name": "Groff", "bytes": "19797" }, { "name": "HTML", "bytes": "50621" }, { "name": "Java", "bytes": "2100" }, { "name": "Makefile", "bytes": "63714" }, { "name": "Objective-C", "bytes": "2023" }, { "name": "Objective-C++", "bytes": "7238" }, { "name": "Protocol Buffer", "bytes": "2308" }, { "name": "Python", "bytes": "223303" }, { "name": "QMake", "bytes": "2019" }, { "name": "Shell", "bytes": "39487" } ], "symlink_target": "" }
from tempest.api.compute import base from tempest.test import attr import time class AttachInterfacesV3TestJSON(base.BaseV3ComputeTest): _interface = 'json' @classmethod def setUpClass(cls): if not cls.config.service_available.neutron: raise cls.skipException("Neutron is required") super(AttachInterfacesV3TestJSON, cls).setUpClass() cls.client = cls.interfaces_client def _check_interface(self, iface, port_id=None, network_id=None, fixed_ip=None): self.assertIn('port_state', iface) if port_id: self.assertEqual(iface['port_id'], port_id) if network_id: self.assertEqual(iface['net_id'], network_id) if fixed_ip: self.assertEqual(iface['fixed_ips'][0]['ip_address'], fixed_ip) def _create_server_get_interfaces(self): resp, server = self.create_test_server(wait_until='ACTIVE') resp, ifs = self.client.list_interfaces(server['id']) resp, body = self.client.wait_for_interface_status( server['id'], ifs[0]['port_id'], 'ACTIVE') ifs[0]['port_state'] = body['port_state'] return server, ifs def _test_create_interface(self, server): resp, iface = self.client.create_interface(server['id']) resp, iface = self.client.wait_for_interface_status( server['id'], iface['port_id'], 'ACTIVE') self._check_interface(iface) return iface def _test_create_interface_by_network_id(self, server, ifs): network_id = ifs[0]['net_id'] resp, iface = self.client.create_interface(server['id'], network_id=network_id) resp, iface = self.client.wait_for_interface_status( server['id'], iface['port_id'], 'ACTIVE') self._check_interface(iface, network_id=network_id) return iface def _test_show_interface(self, server, ifs): iface = ifs[0] resp, _iface = self.client.show_interface(server['id'], iface['port_id']) self.assertEqual(iface, _iface) def _test_delete_interface(self, server, ifs): # NOTE(danms): delete not the first or last, but one in the middle iface = ifs[1] self.client.delete_interface(server['id'], iface['port_id']) for i in range(0, 5): _r, _ifs = self.client.list_interfaces(server['id']) if 
len(ifs) != len(_ifs): break time.sleep(1) self.assertEqual(len(_ifs), len(ifs) - 1) for _iface in _ifs: self.assertNotEqual(iface['port_id'], _iface['port_id']) return _ifs def _compare_iface_list(self, list1, list2): # NOTE(danms): port_state will likely have changed, so just # confirm the port_ids are the same at least list1 = [x['port_id'] for x in list1] list2 = [x['port_id'] for x in list2] self.assertEqual(sorted(list1), sorted(list2)) @attr(type='gate') def test_create_list_show_delete_interfaces(self): server, ifs = self._create_server_get_interfaces() interface_count = len(ifs) self.assertTrue(interface_count > 0) self._check_interface(ifs[0]) iface = self._test_create_interface(server) ifs.append(iface) iface = self._test_create_interface_by_network_id(server, ifs) ifs.append(iface) resp, _ifs = self.client.list_interfaces(server['id']) self._compare_iface_list(ifs, _ifs) self._test_show_interface(server, ifs) _ifs = self._test_delete_interface(server, ifs) self.assertEqual(len(ifs) - 1, len(_ifs)) class AttachInterfacesV3TestXML(AttachInterfacesV3TestJSON): _interface = 'xml'
{ "content_hash": "9e135b6fedac3a30b1759e2739413ff6", "timestamp": "", "source": "github", "line_count": 103, "max_line_length": 75, "avg_line_length": 37.24271844660194, "alnum_prop": 0.5951511991657977, "repo_name": "armando-migliaccio/tempest", "id": "f208a4b514dcf6d73418c9527f4fe663c7ba9f9c", "size": "4461", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "tempest/api/compute/v3/servers/test_attach_interfaces.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "1966096" }, { "name": "Shell", "bytes": "5228" } ], "symlink_target": "" }
"""Train only on the labelled images in the semi-supervised setting.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf import datasets import semi_supervised.utils as ss_utils import trainer import utils def model_fn(data, mode): """Produces a loss for the rotation task with semi-supervision. Args: data: Dict of inputs containing, among others, "image" and "label." mode: model's mode: training, eval or prediction Returns: EstimatorSpec """ # In this mode (called once at the end of training), we create the tf.Hub # module in order to export the model, and use that to do one last prediction. if mode == tf.estimator.ModeKeys.PREDICT: # This defines a function called by the hub module to create the model. def model_building_fn(img, is_training): # This is an example of calling `apply_model_semi` with only one of the # inputs provided. The outputs will simply use the given names: end_points = ss_utils.apply_model_semi(img, None, is_training, outputs={ 'classes': datasets.get_auxiliary_num_classes(), }) return end_points, end_points['classes'] return trainer.make_estimator( mode, predict_fn=model_building_fn, predict_input=data['image']) # In all other cases, we are in train/eval mode. # Note that here we only use data[1], i.e. the part with labels. # Forward them both through the model. The scope is needed for tf.Hub export. with tf.variable_scope('module'): # Here, we pass both inputs to `apply_model_semi`, and so we now get # outputs corresponding to each in `end_points` as "rotations_unsup" and # similar, which we will use below. end_points = ss_utils.apply_model_semi( None, data[1]['image'], is_training=mode == tf.estimator.ModeKeys.TRAIN, outputs={'classes': datasets.get_auxiliary_num_classes()}) # Compute the classification loss on supervised images. 
logits_class = end_points['classes'] labels_class = data[1]['label'] loss_class = tf.nn.sparse_softmax_cross_entropy_with_logits( labels=labels_class, logits=logits_class) loss = tf.reduce_mean(loss_class) # Define metrics. eval_metrics = ( lambda labels_class, logits_class: { # pylint: disable=g-long-lambda 'top1 accuracy': utils.top_k_accuracy(1, labels_class, logits_class), 'top5 accuracy': utils.top_k_accuracy(5, labels_class, logits_class), }, [labels_class, logits_class]) return trainer.make_estimator(mode, loss, eval_metrics)
{ "content_hash": "5eafe97bd6cd399d7a88c1891a99c4b3", "timestamp": "", "source": "github", "line_count": 68, "max_line_length": 80, "avg_line_length": 38.529411764705884, "alnum_prop": 0.6950381679389313, "repo_name": "google-research/s4l", "id": "4b35250100c3017b232098f49c2269dc50a72a8d", "size": "3196", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "semi_supervised/supervised.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "211462" }, { "name": "Shell", "bytes": "13700" } ], "symlink_target": "" }
from django.db import models import multilingual class Category(models.Model): created = models.DateTimeField(auto_now_add=True) class Translation(multilingual.Translation): name = models.CharField(max_length=250) class OtherModel(models.Model): name = models.CharField(max_length=250)
{ "content_hash": "9633b323df0fd5df44e9081e777d828d", "timestamp": "", "source": "github", "line_count": 11, "max_line_length": 53, "avg_line_length": 28.09090909090909, "alnum_prop": 0.7540453074433657, "repo_name": "fabiocorneti/django-multilingual", "id": "877fef4b001229a6c417c6b8bb29a99e1398206d", "size": "309", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "testproject/issue_61/models.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "109028" } ], "symlink_target": "" }
import copy from tempest.lib.services.compute import quota_classes_client from tempest.tests.lib import fake_auth_provider from tempest.tests.lib.services import base class TestQuotaClassesClient(base.BaseServiceTest): FAKE_QUOTA_CLASS_SET = { "injected_file_content_bytes": 10240, "metadata_items": 128, "server_group_members": 10, "server_groups": 10, "ram": 51200, "floating_ips": 10, "key_pairs": 100, "id": u'\u2740(*\xb4\u25e1`*)\u2740', "instances": 10, "security_group_rules": 20, "security_groups": 10, "injected_files": 5, "cores": 20, "fixed_ips": -1, "injected_file_path_bytes": 255, } def setUp(self): super(TestQuotaClassesClient, self).setUp() fake_auth = fake_auth_provider.FakeAuthProvider() self.client = quota_classes_client.QuotaClassesClient( fake_auth, 'compute', 'regionOne') def _test_show_quota_class_set(self, bytes_body=False): fake_body = {'quota_class_set': self.FAKE_QUOTA_CLASS_SET} self.check_service_client_function( self.client.show_quota_class_set, 'tempest.lib.common.rest_client.RestClient.get', fake_body, bytes_body, quota_class_id="test") def test_show_quota_class_set_with_str_body(self): self._test_show_quota_class_set() def test_show_quota_class_set_with_bytes_body(self): self._test_show_quota_class_set(bytes_body=True) def test_update_quota_class_set(self): fake_quota_class_set = copy.deepcopy(self.FAKE_QUOTA_CLASS_SET) fake_quota_class_set.pop("id") fake_body = {'quota_class_set': fake_quota_class_set} self.check_service_client_function( self.client.update_quota_class_set, 'tempest.lib.common.rest_client.RestClient.put', fake_body, quota_class_id="test")
{ "content_hash": "b5e80a255dcf2d0e277075436ffcb565", "timestamp": "", "source": "github", "line_count": 57, "max_line_length": 71, "avg_line_length": 34.70175438596491, "alnum_prop": 0.6132457027300303, "repo_name": "Juniper/tempest", "id": "22d8b91a3d4ecafcc766a3c6c9ed884f0130911d", "size": "2609", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "tempest/tests/lib/services/compute/test_quota_classes_client.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "4194970" }, { "name": "Shell", "bytes": "19343" } ], "symlink_target": "" }
from __future__ import division

import numpy as np

from .mesh import MeshVisual
from ..geometry import MeshData


class SurfacePlotVisual(MeshVisual):
    """Displays a surface plot on a regular x,y grid

    Parameters
    ----------
    x : ndarray | None
        1D/2D array of values specifying the x positions of vertices in
        the grid. In case 1D array given as input, the values will be
        replicated to fill the 2D array of size(z). If None, values will be
        assumed to be integers.
    y : ndarray | None
        1D/2D array of values specifying the y positions of vertices in
        the grid. In case 1D array given as input, the values will be
        replicated to fill the 2D array of size(z). If None, values will be
        assumed to be integers.
    z : ndarray
        2D array of height values for each grid vertex.
    colors : ndarray
        (width, height, 4) array of vertex colors.

    Notes
    -----
    All arguments are optional.

    Note that if vertex positions are updated, the normal vectors for each
    triangle must be recomputed. This is somewhat expensive if the surface
    was initialized with smooth=False and very expensive if smooth=True.
    For faster performance, initialize with compute_normals=False and use
    per-vertex colors or a material that does not require normals.
    """
    def __init__(self, x=None, y=None, z=None, colors=None, **kwargs):
        # The x, y, z, and colors arguments are passed to set_data().
        # All other keyword arguments are passed to MeshVisual.__init__().
        self._x = None
        self._y = None
        self._z = None
        # __vertices caches the (rows, cols, 3) float32 vertex grid;
        # setting it to None forces a rebuild on the next set_data().
        self.__vertices = None
        self.__faces = None
        self.__meshdata = MeshData()
        kwargs.setdefault('shading', 'smooth')
        MeshVisual.__init__(self, **kwargs)
        self.set_data(x, y, z, colors)

    def set_data(self, x=None, y=None, z=None, colors=None):
        """Update the data in this surface plot.

        Parameters
        ----------
        x : ndarray | None
            1D/2D array of values specifying the x positions of vertices in
            the grid. In case 1D array given as input, the values will be
            replicated to fill the 2D array of size(z). If None, values will
            be assumed to be integers.
        y : ndarray | None
            1D/2D array of values specifying the y positions of vertices in
            the grid. In case 1D array given as input, the values will be
            replicated to fill the 2D array of size(z). If None, values will
            be assumed to be integers.
        z : ndarray
            2D array of height values for each grid vertex.
        colors : ndarray
            (width, height, 4) array of vertex colors.
        """
        # A change in the length of x or y invalidates the cached
        # vertex array so it gets reallocated below.
        if x is not None:
            if self._x is None or len(x) != len(self._x):
                self.__vertices = None
            self._x = x

        if y is not None:
            if self._y is None or len(y) != len(self._y):
                self.__vertices = None
            self._y = y

        if z is not None:
            # z must be consistent with any previously-set x/y lengths.
            if self._x is not None and z.shape[0] != len(self._x):
                raise TypeError('Z values must have shape (len(x), len(y))')
            if self._y is not None and z.shape[1] != len(self._y):
                raise TypeError('Z values must have shape (len(x), len(y))')
            self._z = z
            if (self.__vertices is not None and
                    self._z.shape != self.__vertices.shape[:2]):
                self.__vertices = None

        # Nothing to draw until a height field has been provided.
        if self._z is None:
            return

        update_mesh = False
        new_vertices = False

        # Generate vertex and face array
        if self.__vertices is None:
            new_vertices = True
            self.__vertices = np.empty((self._z.shape[0], self._z.shape[1], 3),
                                       dtype=np.float32)
            self.generate_faces()
            self.__meshdata.set_faces(self.__faces)
            update_mesh = True

        # Copy x, y, z data into vertex array
        if new_vertices or x is not None:
            if x is None:
                if self._x is None:
                    # Fall back to integer grid coordinates.
                    x = np.arange(self._z.shape[0])
                else:
                    x = self._x

            if x.ndim == 1:
                # Reshape to a column so it broadcasts across all columns.
                x = x.reshape(len(x), 1)
            # Copy the 2D data into the appropriate slice
            self.__vertices[:, :, 0] = x
            update_mesh = True

        if new_vertices or y is not None:
            if y is None:
                if self._y is None:
                    # Fall back to integer grid coordinates.
                    y = np.arange(self._z.shape[1])
                else:
                    y = self._y

            if y.ndim == 1:
                # Reshape to a row so it broadcasts across all rows.
                y = y.reshape(1, len(y))
            # Copy the 2D data into the appropriate slice
            self.__vertices[:, :, 1] = y
            update_mesh = True

        if new_vertices or z is not None:
            self.__vertices[..., 2] = self._z
            update_mesh = True

        if colors is not None:
            self.__meshdata.set_vertex_colors(colors)
            update_mesh = True

        # Update MeshData
        if update_mesh:
            # Flatten the (rows, cols, 3) grid to an (N, 3) vertex list
            # as expected by MeshData.
            self.__meshdata.set_vertices(
                self.__vertices.reshape(self.__vertices.shape[0] *
                                        self.__vertices.shape[1], 3))
            MeshVisual.set_data(self, meshdata=self.__meshdata)

    def generate_faces(self):
        # Build two triangles per grid cell; each row of cells is filled
        # from a pair of index templates offset by the row stride (cols+1).
        cols = self._z.shape[1] - 1
        rows = self._z.shape[0] - 1
        faces = np.empty((cols * rows * 2, 3), dtype=np.uint)
        rowtemplate1 = (np.arange(cols).reshape(cols, 1) +
                        np.array([[0, 1, cols + 1]]))
        rowtemplate2 = (np.arange(cols).reshape(cols, 1) +
                        np.array([[cols + 1, 1, cols + 2]]))
        for row in range(rows):
            start = row * cols * 2
            faces[start:start + cols] = rowtemplate1 + row * (cols + 1)
            faces[start + cols:start + (cols * 2)] =\
                rowtemplate2 + row * (cols + 1)
        self.__faces = faces
{ "content_hash": "f8f48c183f75d6c150a66cb71a2fbc31", "timestamp": "", "source": "github", "line_count": 163, "max_line_length": 79, "avg_line_length": 37.39877300613497, "alnum_prop": 0.5360892388451444, "repo_name": "Eric89GXL/vispy", "id": "cf8c3bf91d2c73aa735dbaa8d754d542fd61f956", "size": "6256", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "vispy/visuals/surface_plot.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "143081" }, { "name": "GLSL", "bytes": "195460" }, { "name": "JavaScript", "bytes": "5007" }, { "name": "Makefile", "bytes": "1638" }, { "name": "PowerShell", "bytes": "4078" }, { "name": "Python", "bytes": "2461885" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion
import jsonfield.fields


class Migration(migrations.Migration):
    """Create the Receipt model and narrow Order.status to fixed choices."""

    dependencies = [
        ('ecommerce', '0003_order_user'),
    ]

    operations = [
        # Receipt stores a raw JSON payload plus create/update timestamps.
        # NOTE(review): presumably the payment-processor response data —
        # confirm against the ecommerce app code.
        migrations.CreateModel(
            name='Receipt',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('data', jsonfield.fields.JSONField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        # Constrain status to created/fulfilled/failed, defaulting to
        # 'created'.
        migrations.AlterField(
            model_name='order',
            name='status',
            field=models.CharField(choices=[('created', 'created'), ('fulfilled', 'fulfilled'), ('failed', 'failed')], default='created', max_length=30),
        ),
        # Receipt.order is nullable (a receipt may exist without a matching
        # order); deleting the order cascades to its receipts.
        migrations.AddField(
            model_name='receipt',
            name='order',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='ecommerce.Order'),
        ),
    ]
{ "content_hash": "c87f8c3c042d59b383feffb8b724b9b4", "timestamp": "", "source": "github", "line_count": 34, "max_line_length": 153, "avg_line_length": 34.3235294117647, "alnum_prop": 0.5801199657240789, "repo_name": "mitodl/micromasters", "id": "25a62a62fdf0decb43648d3a55cb5ddacdf9aae2", "size": "1239", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ecommerce/migrations/0004_create_receipt.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "9764" }, { "name": "Dockerfile", "bytes": "958" }, { "name": "HTML", "bytes": "84519" }, { "name": "JavaScript", "bytes": "1462849" }, { "name": "Procfile", "bytes": "407" }, { "name": "Python", "bytes": "2098424" }, { "name": "SCSS", "bytes": "135082" }, { "name": "Shell", "bytes": "10764" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Initial migration: create the JournalEntry model.

    A JournalEntry is authored by a members.Member and attached to a
    boards.Board; both foreign keys cascade on delete.
    """

    initial = True

    dependencies = [
        ('members', '0008_auto_20160923_2056'),
        ('boards', '0030_auto_20160925_1843'),
    ]

    operations = [
        migrations.CreateModel(
            name='JournalEntry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=128, verbose_name='Title')),
                # slug is unique across all entries; uuid backs short URLs.
                ('slug', models.SlugField(max_length=64, unique=True, verbose_name='Slug for this journal entry')),
                ('uuid', models.CharField(max_length=16, unique=True, verbose_name='Unique uuid for short urls')),
                ('content', models.TextField(help_text='Content of this journal entry', verbose_name='Content')),
                ('creation_datetime', models.DateTimeField(verbose_name='Creation datetime')),
                ('last_update_datetime', models.DateTimeField(verbose_name='Last update datetime')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='journal_entries', to='members.Member', verbose_name='Member')),
                ('board', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='journal_entries', to='boards.Board', verbose_name='Board')),
            ],
        ),
    ]
{ "content_hash": "1c0d67ea6f918eb0a4e73f7377aa9849", "timestamp": "", "source": "github", "line_count": 31, "max_line_length": 167, "avg_line_length": 48.645161290322584, "alnum_prop": 0.6312997347480106, "repo_name": "diegojromerolopez/djanban", "id": "024578711f8c59a99c1ec8c5208d580a656c2f46", "size": "1579", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/djanban/apps/journal/migrations/0001_initial.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "79709" }, { "name": "HTML", "bytes": "660275" }, { "name": "JavaScript", "bytes": "634320" }, { "name": "Python", "bytes": "993818" }, { "name": "Shell", "bytes": "1732" }, { "name": "TypeScript", "bytes": "71578" } ], "symlink_target": "" }
import __builtin__ import gc import sys import types import unittest import popen2 # trigger early the warning from popen2.py import weakref from copy import deepcopy from test import test_support class OperatorsTest(unittest.TestCase): def __init__(self, *args, **kwargs): unittest.TestCase.__init__(self, *args, **kwargs) self.binops = { 'add': '+', 'sub': '-', 'mul': '*', 'div': '/', 'divmod': 'divmod', 'pow': '**', 'lshift': '<<', 'rshift': '>>', 'and': '&', 'xor': '^', 'or': '|', 'cmp': 'cmp', 'lt': '<', 'le': '<=', 'eq': '==', 'ne': '!=', 'gt': '>', 'ge': '>=', } for name, expr in self.binops.items(): if expr.islower(): expr = expr + "(a, b)" else: expr = 'a %s b' % expr self.binops[name] = expr self.unops = { 'pos': '+', 'neg': '-', 'abs': 'abs', 'invert': '~', 'int': 'int', 'long': 'long', 'float': 'float', 'oct': 'oct', 'hex': 'hex', } for name, expr in self.unops.items(): if expr.islower(): expr = expr + "(a)" else: expr = '%s a' % expr self.unops[name] = expr def unop_test(self, a, res, expr="len(a)", meth="__len__"): d = {'a': a} self.assertEqual(eval(expr, d), res) t = type(a) m = getattr(t, meth) # Find method in parent class while meth not in t.__dict__: t = t.__bases__[0] # in some implementations (e.g. PyPy), 'm' can be a regular unbound # method object; the getattr() below obtains its underlying function. self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth]) self.assertEqual(m(a), res) bm = getattr(a, meth) self.assertEqual(bm(), res) def binop_test(self, a, b, res, expr="a+b", meth="__add__"): d = {'a': a, 'b': b} # XXX Hack so this passes before 2.3 when -Qnew is specified. if meth == "__div__" and 1/2 == 0.5: meth = "__truediv__" if meth == '__divmod__': pass self.assertEqual(eval(expr, d), res) t = type(a) m = getattr(t, meth) while meth not in t.__dict__: t = t.__bases__[0] # in some implementations (e.g. PyPy), 'm' can be a regular unbound # method object; the getattr() below obtains its underlying function. 
self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth]) self.assertEqual(m(a, b), res) bm = getattr(a, meth) self.assertEqual(bm(b), res) def ternop_test(self, a, b, c, res, expr="a[b:c]", meth="__getslice__"): d = {'a': a, 'b': b, 'c': c} self.assertEqual(eval(expr, d), res) t = type(a) m = getattr(t, meth) while meth not in t.__dict__: t = t.__bases__[0] # in some implementations (e.g. PyPy), 'm' can be a regular unbound # method object; the getattr() below obtains its underlying function. self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth]) self.assertEqual(m(a, b, c), res) bm = getattr(a, meth) self.assertEqual(bm(b, c), res) def setop_test(self, a, b, res, stmt="a+=b", meth="__iadd__"): d = {'a': deepcopy(a), 'b': b} exec stmt in d self.assertEqual(d['a'], res) t = type(a) m = getattr(t, meth) while meth not in t.__dict__: t = t.__bases__[0] # in some implementations (e.g. PyPy), 'm' can be a regular unbound # method object; the getattr() below obtains its underlying function. self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth]) d['a'] = deepcopy(a) m(d['a'], b) self.assertEqual(d['a'], res) d['a'] = deepcopy(a) bm = getattr(d['a'], meth) bm(b) self.assertEqual(d['a'], res) def set2op_test(self, a, b, c, res, stmt="a[b]=c", meth="__setitem__"): d = {'a': deepcopy(a), 'b': b, 'c': c} exec stmt in d self.assertEqual(d['a'], res) t = type(a) m = getattr(t, meth) while meth not in t.__dict__: t = t.__bases__[0] # in some implementations (e.g. PyPy), 'm' can be a regular unbound # method object; the getattr() below obtains its underlying function. 
self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth]) d['a'] = deepcopy(a) m(d['a'], b, c) self.assertEqual(d['a'], res) d['a'] = deepcopy(a) bm = getattr(d['a'], meth) bm(b, c) self.assertEqual(d['a'], res) def set3op_test(self, a, b, c, d, res, stmt="a[b:c]=d", meth="__setslice__"): dictionary = {'a': deepcopy(a), 'b': b, 'c': c, 'd': d} exec stmt in dictionary self.assertEqual(dictionary['a'], res) t = type(a) while meth not in t.__dict__: t = t.__bases__[0] m = getattr(t, meth) # in some implementations (e.g. PyPy), 'm' can be a regular unbound # method object; the getattr() below obtains its underlying function. self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth]) dictionary['a'] = deepcopy(a) m(dictionary['a'], b, c, d) self.assertEqual(dictionary['a'], res) dictionary['a'] = deepcopy(a) bm = getattr(dictionary['a'], meth) bm(b, c, d) self.assertEqual(dictionary['a'], res) def test_lists(self): # Testing list operations... # Asserts are within individual test methods self.binop_test([1], [2], [1,2], "a+b", "__add__") self.binop_test([1,2,3], 2, 1, "b in a", "__contains__") self.binop_test([1,2,3], 4, 0, "b in a", "__contains__") self.binop_test([1,2,3], 1, 2, "a[b]", "__getitem__") self.ternop_test([1,2,3], 0, 2, [1,2], "a[b:c]", "__getslice__") self.setop_test([1], [2], [1,2], "a+=b", "__iadd__") self.setop_test([1,2], 3, [1,2,1,2,1,2], "a*=b", "__imul__") self.unop_test([1,2,3], 3, "len(a)", "__len__") self.binop_test([1,2], 3, [1,2,1,2,1,2], "a*b", "__mul__") self.binop_test([1,2], 3, [1,2,1,2,1,2], "b*a", "__rmul__") self.set2op_test([1,2], 1, 3, [1,3], "a[b]=c", "__setitem__") self.set3op_test([1,2,3,4], 1, 3, [5,6], [1,5,6,4], "a[b:c]=d", "__setslice__") def test_dicts(self): # Testing dict operations... 
if hasattr(dict, '__cmp__'): # PyPy has only rich comparison on dicts self.binop_test({1:2}, {2:1}, -1, "cmp(a,b)", "__cmp__") else: self.binop_test({1:2}, {2:1}, True, "a < b", "__lt__") self.binop_test({1:2,3:4}, 1, 1, "b in a", "__contains__") self.binop_test({1:2,3:4}, 2, 0, "b in a", "__contains__") self.binop_test({1:2,3:4}, 1, 2, "a[b]", "__getitem__") d = {1:2, 3:4} l1 = [] for i in d.keys(): l1.append(i) l = [] for i in iter(d): l.append(i) self.assertEqual(l, l1) l = [] for i in d.__iter__(): l.append(i) self.assertEqual(l, l1) l = [] for i in dict.__iter__(d): l.append(i) self.assertEqual(l, l1) d = {1:2, 3:4} self.unop_test(d, 2, "len(a)", "__len__") self.assertEqual(eval(repr(d), {}), d) self.assertEqual(eval(d.__repr__(), {}), d) self.set2op_test({1:2,3:4}, 2, 3, {1:2,2:3,3:4}, "a[b]=c", "__setitem__") # Tests for unary and binary operators def number_operators(self, a, b, skip=[]): dict = {'a': a, 'b': b} for name, expr in self.binops.items(): if name not in skip: name = "__%s__" % name if hasattr(a, name): res = eval(expr, dict) self.binop_test(a, b, res, expr, name) for name, expr in self.unops.items(): if name not in skip: name = "__%s__" % name if hasattr(a, name): res = eval(expr, dict) self.unop_test(a, res, expr, name) def test_ints(self): # Testing int operations... self.number_operators(100, 3) # The following crashes in Python 2.2 self.assertEqual((1).__nonzero__(), 1) self.assertEqual((0).__nonzero__(), 0) # This returns 'NotImplemented' in Python 2.2 class C(int): def __add__(self, other): return NotImplemented self.assertEqual(C(5L), 5) try: C() + "" except TypeError: pass else: self.fail("NotImplemented should have caused TypeError") try: C(sys.maxint+1) except OverflowError: pass else: self.fail("should have raised OverflowError") def test_longs(self): # Testing long operations... self.number_operators(100L, 3L) def test_floats(self): # Testing float operations... 
self.number_operators(100.0, 3.0) def test_complexes(self): # Testing complex operations... self.number_operators(100.0j, 3.0j, skip=['lt', 'le', 'gt', 'ge', 'int', 'long', 'float']) class Number(complex): __slots__ = ['prec'] def __new__(cls, *args, **kwds): result = complex.__new__(cls, *args) result.prec = kwds.get('prec', 12) return result def __repr__(self): prec = self.prec if self.imag == 0.0: return "%.*g" % (prec, self.real) if self.real == 0.0: return "%.*gj" % (prec, self.imag) return "(%.*g+%.*gj)" % (prec, self.real, prec, self.imag) __str__ = __repr__ a = Number(3.14, prec=6) self.assertEqual(repr(a), "3.14") self.assertEqual(a.prec, 6) a = Number(a, prec=2) self.assertEqual(repr(a), "3.1") self.assertEqual(a.prec, 2) a = Number(234.5) self.assertEqual(repr(a), "234.5") self.assertEqual(a.prec, 12) @test_support.impl_detail("the module 'xxsubtype' is internal") def test_spam_lists(self): # Testing spamlist operations... import copy, xxsubtype as spam def spamlist(l, memo=None): import xxsubtype as spam return spam.spamlist(l) # This is an ugly hack: copy._deepcopy_dispatch[spam.spamlist] = spamlist self.binop_test(spamlist([1]), spamlist([2]), spamlist([1,2]), "a+b", "__add__") self.binop_test(spamlist([1,2,3]), 2, 1, "b in a", "__contains__") self.binop_test(spamlist([1,2,3]), 4, 0, "b in a", "__contains__") self.binop_test(spamlist([1,2,3]), 1, 2, "a[b]", "__getitem__") self.ternop_test(spamlist([1,2,3]), 0, 2, spamlist([1,2]), "a[b:c]", "__getslice__") self.setop_test(spamlist([1]), spamlist([2]), spamlist([1,2]), "a+=b", "__iadd__") self.setop_test(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "a*=b", "__imul__") self.unop_test(spamlist([1,2,3]), 3, "len(a)", "__len__") self.binop_test(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "a*b", "__mul__") self.binop_test(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "b*a", "__rmul__") self.set2op_test(spamlist([1,2]), 1, 3, spamlist([1,3]), "a[b]=c", "__setitem__") self.set3op_test(spamlist([1,2,3,4]), 1, 
3, spamlist([5,6]), spamlist([1,5,6,4]), "a[b:c]=d", "__setslice__") # Test subclassing class C(spam.spamlist): def foo(self): return 1 a = C() self.assertEqual(a, []) self.assertEqual(a.foo(), 1) a.append(100) self.assertEqual(a, [100]) self.assertEqual(a.getstate(), 0) a.setstate(42) self.assertEqual(a.getstate(), 42) @test_support.impl_detail("the module 'xxsubtype' is internal") def test_spam_dicts(self): # Testing spamdict operations... import copy, xxsubtype as spam def spamdict(d, memo=None): import xxsubtype as spam sd = spam.spamdict() for k, v in d.items(): sd[k] = v return sd # This is an ugly hack: copy._deepcopy_dispatch[spam.spamdict] = spamdict self.binop_test(spamdict({1:2}), spamdict({2:1}), -1, "cmp(a,b)", "__cmp__") self.binop_test(spamdict({1:2,3:4}), 1, 1, "b in a", "__contains__") self.binop_test(spamdict({1:2,3:4}), 2, 0, "b in a", "__contains__") self.binop_test(spamdict({1:2,3:4}), 1, 2, "a[b]", "__getitem__") d = spamdict({1:2,3:4}) l1 = [] for i in d.keys(): l1.append(i) l = [] for i in iter(d): l.append(i) self.assertEqual(l, l1) l = [] for i in d.__iter__(): l.append(i) self.assertEqual(l, l1) l = [] for i in type(spamdict({})).__iter__(d): l.append(i) self.assertEqual(l, l1) straightd = {1:2, 3:4} spamd = spamdict(straightd) self.unop_test(spamd, 2, "len(a)", "__len__") self.unop_test(spamd, repr(straightd), "repr(a)", "__repr__") self.set2op_test(spamdict({1:2,3:4}), 2, 3, spamdict({1:2,2:3,3:4}), "a[b]=c", "__setitem__") # Test subclassing class C(spam.spamdict): def foo(self): return 1 a = C() self.assertEqual(a.items(), []) self.assertEqual(a.foo(), 1) a['foo'] = 'bar' self.assertEqual(a.items(), [('foo', 'bar')]) self.assertEqual(a.getstate(), 0) a.setstate(100) self.assertEqual(a.getstate(), 100) class ClassPropertiesAndMethods(unittest.TestCase): def assertHasAttr(self, obj, name): self.assertTrue(hasattr(obj, name), '%r has no attribute %r' % (obj, name)) def assertNotHasAttr(self, obj, name): self.assertFalse(hasattr(obj, 
name), '%r has unexpected attribute %r' % (obj, name)) def test_python_dicts(self): # Testing Python subclass of dict... self.assertTrue(issubclass(dict, dict)) self.assertIsInstance({}, dict) d = dict() self.assertEqual(d, {}) self.assertIs(d.__class__, dict) self.assertIsInstance(d, dict) class C(dict): state = -1 def __init__(self_local, *a, **kw): if a: self.assertEqual(len(a), 1) self_local.state = a[0] if kw: for k, v in kw.items(): self_local[v] = k def __getitem__(self, key): return self.get(key, 0) def __setitem__(self_local, key, value): self.assertIsInstance(key, type(0)) dict.__setitem__(self_local, key, value) def setstate(self, state): self.state = state def getstate(self): return self.state self.assertTrue(issubclass(C, dict)) a1 = C(12) self.assertEqual(a1.state, 12) a2 = C(foo=1, bar=2) self.assertEqual(a2[1] == 'foo' and a2[2], 'bar') a = C() self.assertEqual(a.state, -1) self.assertEqual(a.getstate(), -1) a.setstate(0) self.assertEqual(a.state, 0) self.assertEqual(a.getstate(), 0) a.setstate(10) self.assertEqual(a.state, 10) self.assertEqual(a.getstate(), 10) self.assertEqual(a[42], 0) a[42] = 24 self.assertEqual(a[42], 24) N = 50 for i in range(N): a[i] = C() for j in range(N): a[i][j] = i*j for i in range(N): for j in range(N): self.assertEqual(a[i][j], i*j) def test_python_lists(self): # Testing Python subclass of list... class C(list): def __getitem__(self, i): return list.__getitem__(self, i) + 100 def __getslice__(self, i, j): return (i, j) a = C() a.extend([0,1,2]) self.assertEqual(a[0], 100) self.assertEqual(a[1], 101) self.assertEqual(a[2], 102) self.assertEqual(a[100:200], (100,200)) def test_metaclass(self): # Testing __metaclass__... 
class C: __metaclass__ = type def __init__(self): self.__state = 0 def getstate(self): return self.__state def setstate(self, state): self.__state = state a = C() self.assertEqual(a.getstate(), 0) a.setstate(10) self.assertEqual(a.getstate(), 10) class D: class __metaclass__(type): def myself(cls): return cls self.assertEqual(D.myself(), D) d = D() self.assertEqual(d.__class__, D) class M1(type): def __new__(cls, name, bases, dict): dict['__spam__'] = 1 return type.__new__(cls, name, bases, dict) class C: __metaclass__ = M1 self.assertEqual(C.__spam__, 1) c = C() self.assertEqual(c.__spam__, 1) class _instance(object): pass class M2(object): @staticmethod def __new__(cls, name, bases, dict): self = object.__new__(cls) self.name = name self.bases = bases self.dict = dict return self def __call__(self): it = _instance() # Early binding of methods for key in self.dict: if key.startswith("__"): continue setattr(it, key, self.dict[key].__get__(it, self)) return it class C: __metaclass__ = M2 def spam(self): return 42 self.assertEqual(C.name, 'C') self.assertEqual(C.bases, ()) self.assertIn('spam', C.dict) c = C() self.assertEqual(c.spam(), 42) # More metaclass examples class autosuper(type): # Automatically add __super to the class # This trick only works for dynamic classes def __new__(metaclass, name, bases, dict): cls = super(autosuper, metaclass).__new__(metaclass, name, bases, dict) # Name mangling for __super removes leading underscores while name[:1] == "_": name = name[1:] if name: name = "_%s__super" % name else: name = "__super" setattr(cls, name, super(cls)) return cls class A: __metaclass__ = autosuper def meth(self): return "A" class B(A): def meth(self): return "B" + self.__super.meth() class C(A): def meth(self): return "C" + self.__super.meth() class D(C, B): def meth(self): return "D" + self.__super.meth() self.assertEqual(D().meth(), "DCBA") class E(B, C): def meth(self): return "E" + self.__super.meth() self.assertEqual(E().meth(), "EBCA") class 
autoproperty(type): # Automatically create property attributes when methods # named _get_x and/or _set_x are found def __new__(metaclass, name, bases, dict): hits = {} for key, val in dict.iteritems(): if key.startswith("_get_"): key = key[5:] get, set = hits.get(key, (None, None)) get = val hits[key] = get, set elif key.startswith("_set_"): key = key[5:] get, set = hits.get(key, (None, None)) set = val hits[key] = get, set for key, (get, set) in hits.iteritems(): dict[key] = property(get, set) return super(autoproperty, metaclass).__new__(metaclass, name, bases, dict) class A: __metaclass__ = autoproperty def _get_x(self): return -self.__x def _set_x(self, x): self.__x = -x a = A() self.assertNotHasAttr(a, "x") a.x = 12 self.assertEqual(a.x, 12) self.assertEqual(a._A__x, -12) class multimetaclass(autoproperty, autosuper): # Merge of multiple cooperating metaclasses pass class A: __metaclass__ = multimetaclass def _get_x(self): return "A" class B(A): def _get_x(self): return "B" + self.__super._get_x() class C(A): def _get_x(self): return "C" + self.__super._get_x() class D(C, B): def _get_x(self): return "D" + self.__super._get_x() self.assertEqual(D().x, "DCBA") # Make sure type(x) doesn't call x.__class__.__init__ class T(type): counter = 0 def __init__(self, *args): T.counter += 1 class C: __metaclass__ = T self.assertEqual(T.counter, 1) a = C() self.assertEqual(type(a), C) self.assertEqual(T.counter, 1) class C(object): pass c = C() try: c() except TypeError: pass else: self.fail("calling object w/o call method should raise " "TypeError") # Testing code to find most derived baseclass class A(type): def __new__(*args, **kwargs): return type.__new__(*args, **kwargs) class B(object): pass class C(object): __metaclass__ = A # The most derived metaclass of D is A rather than type. class D(B, C): pass def test_module_subclasses(self): # Testing Python subclass of module... 
log = [] MT = type(sys) class MM(MT): def __init__(self, name): MT.__init__(self, name) def __getattribute__(self, name): log.append(("getattr", name)) return MT.__getattribute__(self, name) def __setattr__(self, name, value): log.append(("setattr", name, value)) MT.__setattr__(self, name, value) def __delattr__(self, name): log.append(("delattr", name)) MT.__delattr__(self, name) a = MM("a") a.foo = 12 x = a.foo del a.foo self.assertEqual(log, [("setattr", "foo", 12), ("getattr", "foo"), ("delattr", "foo")]) # http://python.org/sf/1174712 try: class Module(types.ModuleType, str): pass except TypeError: pass else: self.fail("inheriting from ModuleType and str at the same time " "should fail") def test_multiple_inheritence(self): # Testing multiple inheritance... class C(object): def __init__(self): self.__state = 0 def getstate(self): return self.__state def setstate(self, state): self.__state = state a = C() self.assertEqual(a.getstate(), 0) a.setstate(10) self.assertEqual(a.getstate(), 10) class D(dict, C): def __init__(self): type({}).__init__(self) C.__init__(self) d = D() self.assertEqual(d.keys(), []) d["hello"] = "world" self.assertEqual(d.items(), [("hello", "world")]) self.assertEqual(d["hello"], "world") self.assertEqual(d.getstate(), 0) d.setstate(10) self.assertEqual(d.getstate(), 10) self.assertEqual(D.__mro__, (D, dict, C, object)) # SF bug #442833 class Node(object): def __int__(self): return int(self.foo()) def foo(self): return "23" class Frag(Node, list): def foo(self): return "42" self.assertEqual(Node().__int__(), 23) self.assertEqual(int(Node()), 23) self.assertEqual(Frag().__int__(), 42) self.assertEqual(int(Frag()), 42) # MI mixing classic and new-style classes. class A: x = 1 class B(A): pass class C(A): x = 2 class D(B, C): pass self.assertEqual(D.x, 1) # Classic MRO is preserved for a classic base class. 
class E(D, object): pass self.assertEqual(E.__mro__, (E, D, B, A, C, object)) self.assertEqual(E.x, 1) # But with a mix of classic bases, their MROs are combined using # new-style MRO. class F(B, C, object): pass self.assertEqual(F.__mro__, (F, B, C, A, object)) self.assertEqual(F.x, 2) # Try something else. class C: def cmethod(self): return "C a" def all_method(self): return "C b" class M1(C, object): def m1method(self): return "M1 a" def all_method(self): return "M1 b" self.assertEqual(M1.__mro__, (M1, C, object)) m = M1() self.assertEqual(m.cmethod(), "C a") self.assertEqual(m.m1method(), "M1 a") self.assertEqual(m.all_method(), "M1 b") class D(C): def dmethod(self): return "D a" def all_method(self): return "D b" class M2(D, object): def m2method(self): return "M2 a" def all_method(self): return "M2 b" self.assertEqual(M2.__mro__, (M2, D, C, object)) m = M2() self.assertEqual(m.cmethod(), "C a") self.assertEqual(m.dmethod(), "D a") self.assertEqual(m.m2method(), "M2 a") self.assertEqual(m.all_method(), "M2 b") class M3(M1, M2, object): def m3method(self): return "M3 a" def all_method(self): return "M3 b" self.assertEqual(M3.__mro__, (M3, M1, M2, D, C, object)) m = M3() self.assertEqual(m.cmethod(), "C a") self.assertEqual(m.dmethod(), "D a") self.assertEqual(m.m1method(), "M1 a") self.assertEqual(m.m2method(), "M2 a") self.assertEqual(m.m3method(), "M3 a") self.assertEqual(m.all_method(), "M3 b") class Classic: pass try: class New(Classic): __metaclass__ = type except TypeError: pass else: self.fail("new class with only classic bases - shouldn't be") def test_diamond_inheritence(self): # Testing multiple inheritance special cases... 
class A(object): def spam(self): return "A" self.assertEqual(A().spam(), "A") class B(A): def boo(self): return "B" def spam(self): return "B" self.assertEqual(B().spam(), "B") self.assertEqual(B().boo(), "B") class C(A): def boo(self): return "C" self.assertEqual(C().spam(), "A") self.assertEqual(C().boo(), "C") class D(B, C): pass self.assertEqual(D().spam(), "B") self.assertEqual(D().boo(), "B") self.assertEqual(D.__mro__, (D, B, C, A, object)) class E(C, B): pass self.assertEqual(E().spam(), "B") self.assertEqual(E().boo(), "C") self.assertEqual(E.__mro__, (E, C, B, A, object)) # MRO order disagreement try: class F(D, E): pass except TypeError: pass else: self.fail("expected MRO order disagreement (F)") try: class G(E, D): pass except TypeError: pass else: self.fail("expected MRO order disagreement (G)") # see thread python-dev/2002-October/029035.html def test_ex5_from_c3_switch(self): # Testing ex5 from C3 switch discussion... class A(object): pass class B(object): pass class C(object): pass class X(A): pass class Y(A): pass class Z(X,B,Y,C): pass self.assertEqual(Z.__mro__, (Z, X, B, Y, A, C, object)) # see "A Monotonic Superclass Linearization for Dylan", # by Kim Barrett et al. (OOPSLA 1996) def test_monotonicity(self): # Testing MRO monotonicity... class Boat(object): pass class DayBoat(Boat): pass class WheelBoat(Boat): pass class EngineLess(DayBoat): pass class SmallMultihull(DayBoat): pass class PedalWheelBoat(EngineLess,WheelBoat): pass class SmallCatamaran(SmallMultihull): pass class Pedalo(PedalWheelBoat,SmallCatamaran): pass self.assertEqual(PedalWheelBoat.__mro__, (PedalWheelBoat, EngineLess, DayBoat, WheelBoat, Boat, object)) self.assertEqual(SmallCatamaran.__mro__, (SmallCatamaran, SmallMultihull, DayBoat, Boat, object)) self.assertEqual(Pedalo.__mro__, (Pedalo, PedalWheelBoat, EngineLess, SmallCatamaran, SmallMultihull, DayBoat, WheelBoat, Boat, object)) # see "A Monotonic Superclass Linearization for Dylan", # by Kim Barrett et al. 
(OOPSLA 1996) def test_consistency_with_epg(self): # Testing consistency with EPG... class Pane(object): pass class ScrollingMixin(object): pass class EditingMixin(object): pass class ScrollablePane(Pane,ScrollingMixin): pass class EditablePane(Pane,EditingMixin): pass class EditableScrollablePane(ScrollablePane,EditablePane): pass self.assertEqual(EditableScrollablePane.__mro__, (EditableScrollablePane, ScrollablePane, EditablePane, Pane, ScrollingMixin, EditingMixin, object)) def test_mro_disagreement(self): # Testing error messages for MRO disagreement... mro_err_msg = """Cannot create a consistent method resolution order (MRO) for bases """ def raises(exc, expected, callable, *args): try: callable(*args) except exc, msg: # the exact msg is generally considered an impl detail if test_support.check_impl_detail(): if not str(msg).startswith(expected): self.fail("Message %r, expected %r" % (str(msg), expected)) else: self.fail("Expected %s" % exc) class A(object): pass class B(A): pass class C(object): pass # Test some very simple errors raises(TypeError, "duplicate base class A", type, "X", (A, A), {}) raises(TypeError, mro_err_msg, type, "X", (A, B), {}) raises(TypeError, mro_err_msg, type, "X", (A, C, B), {}) # Test a slightly more complex error class GridLayout(object): pass class HorizontalGrid(GridLayout): pass class VerticalGrid(GridLayout): pass class HVGrid(HorizontalGrid, VerticalGrid): pass class VHGrid(VerticalGrid, HorizontalGrid): pass raises(TypeError, mro_err_msg, type, "ConfusedGrid", (HVGrid, VHGrid), {}) def test_object_class(self): # Testing object class... 
a = object() self.assertEqual(a.__class__, object) self.assertEqual(type(a), object) b = object() self.assertNotEqual(a, b) self.assertNotHasAttr(a, "foo") try: a.foo = 12 except (AttributeError, TypeError): pass else: self.fail("object() should not allow setting a foo attribute") self.assertNotHasAttr(object(), "__dict__") class Cdict(object): pass x = Cdict() self.assertEqual(x.__dict__, {}) x.foo = 1 self.assertEqual(x.foo, 1) self.assertEqual(x.__dict__, {'foo': 1}) def test_slots(self): # Testing __slots__... class C0(object): __slots__ = [] x = C0() self.assertNotHasAttr(x, "__dict__") self.assertNotHasAttr(x, "foo") class C1(object): __slots__ = ['a'] x = C1() self.assertNotHasAttr(x, "__dict__") self.assertNotHasAttr(x, "a") x.a = 1 self.assertEqual(x.a, 1) x.a = None self.assertEqual(x.a, None) del x.a self.assertNotHasAttr(x, "a") class C3(object): __slots__ = ['a', 'b', 'c'] x = C3() self.assertNotHasAttr(x, "__dict__") self.assertNotHasAttr(x, 'a') self.assertNotHasAttr(x, 'b') self.assertNotHasAttr(x, 'c') x.a = 1 x.b = 2 x.c = 3 self.assertEqual(x.a, 1) self.assertEqual(x.b, 2) self.assertEqual(x.c, 3) class C4(object): """Validate name mangling""" __slots__ = ['__a'] def __init__(self, value): self.__a = value def get(self): return self.__a x = C4(5) self.assertNotHasAttr(x, '__dict__') self.assertNotHasAttr(x, '__a') self.assertEqual(x.get(), 5) try: x.__a = 6 except AttributeError: pass else: self.fail("Double underscored names not mangled") # Make sure slot names are proper identifiers try: class C(object): __slots__ = [None] except TypeError: pass else: self.fail("[None] slots not caught") try: class C(object): __slots__ = ["foo bar"] except TypeError: pass else: self.fail("['foo bar'] slots not caught") try: class C(object): __slots__ = ["foo\0bar"] except TypeError: pass else: self.fail("['foo\\0bar'] slots not caught") try: class C(object): __slots__ = ["1"] except TypeError: pass else: self.fail("['1'] slots not caught") try: class C(object): 
__slots__ = [""] except TypeError: pass else: self.fail("[''] slots not caught") class C(object): __slots__ = ["a", "a_b", "_a", "A0123456789Z"] # XXX(nnorwitz): was there supposed to be something tested # from the class above? # Test a single string is not expanded as a sequence. class C(object): __slots__ = "abc" c = C() c.abc = 5 self.assertEqual(c.abc, 5) def test_unicode_slots(self): # Test unicode slot names try: unicode except NameError: self.skipTest('no unicode support') else: # Test a single unicode string is not expanded as a sequence. class C(object): __slots__ = unicode("abc") c = C() c.abc = 5 self.assertEqual(c.abc, 5) # _unicode_to_string used to modify slots in certain circumstances slots = (unicode("foo"), unicode("bar")) class C(object): __slots__ = slots x = C() x.foo = 5 self.assertEqual(x.foo, 5) self.assertEqual(type(slots[0]), unicode) # this used to leak references try: class C(object): __slots__ = [unichr(128)] except (TypeError, UnicodeEncodeError): pass else: self.fail("[unichr(128)] slots not caught") # Test leaks class Counted(object): counter = 0 # counts the number of instances alive def __init__(self): Counted.counter += 1 def __del__(self): Counted.counter -= 1 class C(object): __slots__ = ['a', 'b', 'c'] x = C() x.a = Counted() x.b = Counted() x.c = Counted() self.assertEqual(Counted.counter, 3) del x test_support.gc_collect() self.assertEqual(Counted.counter, 0) class D(C): pass x = D() x.a = Counted() x.z = Counted() self.assertEqual(Counted.counter, 2) del x test_support.gc_collect() self.assertEqual(Counted.counter, 0) class E(D): __slots__ = ['e'] x = E() x.a = Counted() x.z = Counted() x.e = Counted() self.assertEqual(Counted.counter, 3) del x test_support.gc_collect() self.assertEqual(Counted.counter, 0) # Test cyclical leaks [SF bug 519621] class F(object): __slots__ = ['a', 'b'] s = F() s.a = [Counted(), s] self.assertEqual(Counted.counter, 1) s = None test_support.gc_collect() self.assertEqual(Counted.counter, 0) # Test 
lookup leaks [SF bug 572567] if test_support.check_impl_detail(): class G(object): def __cmp__(self, other): return 0 __hash__ = None # Silence Py3k warning g = G() orig_objects = len(gc.get_objects()) for i in xrange(10): g==g new_objects = len(gc.get_objects()) self.assertEqual(orig_objects, new_objects) class H(object): __slots__ = ['a', 'b'] def __init__(self): self.a = 1 self.b = 2 def __del__(self_): self.assertEqual(self_.a, 1) self.assertEqual(self_.b, 2) with test_support.captured_output('stderr') as s: h = H() del h self.assertEqual(s.getvalue(), '') class X(object): __slots__ = "a" with self.assertRaises(AttributeError): del X().a def test_slots_special(self): # Testing __dict__ and __weakref__ in __slots__... class D(object): __slots__ = ["__dict__"] a = D() self.assertHasAttr(a, "__dict__") self.assertNotHasAttr(a, "__weakref__") a.foo = 42 self.assertEqual(a.__dict__, {"foo": 42}) class W(object): __slots__ = ["__weakref__"] a = W() self.assertHasAttr(a, "__weakref__") self.assertNotHasAttr(a, "__dict__") try: a.foo = 42 except AttributeError: pass else: self.fail("shouldn't be allowed to set a.foo") class C1(W, D): __slots__ = [] a = C1() self.assertHasAttr(a, "__dict__") self.assertHasAttr(a, "__weakref__") a.foo = 42 self.assertEqual(a.__dict__, {"foo": 42}) class C2(D, W): __slots__ = [] a = C2() self.assertHasAttr(a, "__dict__") self.assertHasAttr(a, "__weakref__") a.foo = 42 self.assertEqual(a.__dict__, {"foo": 42}) def test_slots_descriptor(self): # Issue2115: slot descriptors did not correctly check # the type of the given object import abc class MyABC: __metaclass__ = abc.ABCMeta __slots__ = "a" class Unrelated(object): pass MyABC.register(Unrelated) u = Unrelated() self.assertIsInstance(u, MyABC) # This used to crash self.assertRaises(TypeError, MyABC.a.__set__, u, 3) def test_metaclass_cmp(self): # See bug 7491. 
class M(type): def __cmp__(self, other): return -1 class X(object): __metaclass__ = M self.assertTrue(X < M) def test_dynamics(self): # Testing class attribute propagation... class D(object): pass class E(D): pass class F(D): pass D.foo = 1 self.assertEqual(D.foo, 1) # Test that dynamic attributes are inherited self.assertEqual(E.foo, 1) self.assertEqual(F.foo, 1) # Test dynamic instances class C(object): pass a = C() self.assertNotHasAttr(a, "foobar") C.foobar = 2 self.assertEqual(a.foobar, 2) C.method = lambda self: 42 self.assertEqual(a.method(), 42) C.__repr__ = lambda self: "C()" self.assertEqual(repr(a), "C()") C.__int__ = lambda self: 100 self.assertEqual(int(a), 100) self.assertEqual(a.foobar, 2) self.assertNotHasAttr(a, "spam") def mygetattr(self, name): if name == "spam": return "spam" raise AttributeError C.__getattr__ = mygetattr self.assertEqual(a.spam, "spam") a.new = 12 self.assertEqual(a.new, 12) def mysetattr(self, name, value): if name == "spam": raise AttributeError return object.__setattr__(self, name, value) C.__setattr__ = mysetattr try: a.spam = "not spam" except AttributeError: pass else: self.fail("expected AttributeError") self.assertEqual(a.spam, "spam") class D(C): pass d = D() d.foo = 1 self.assertEqual(d.foo, 1) # Test handling of int*seq and seq*int class I(int): pass self.assertEqual("a"*I(2), "aa") self.assertEqual(I(2)*"a", "aa") self.assertEqual(2*I(3), 6) self.assertEqual(I(3)*2, 6) self.assertEqual(I(3)*I(2), 6) # Test handling of long*seq and seq*long class L(long): pass self.assertEqual("a"*L(2L), "aa") self.assertEqual(L(2L)*"a", "aa") self.assertEqual(2*L(3), 6) self.assertEqual(L(3)*2, 6) self.assertEqual(L(3)*L(2), 6) # Test comparison of classes with dynamic metaclasses class dynamicmetaclass(type): pass class someclass: __metaclass__ = dynamicmetaclass self.assertNotEqual(someclass, object) def test_errors(self): # Testing errors... 
try: class C(list, dict): pass except TypeError: pass else: self.fail("inheritance from both list and dict should be illegal") try: class C(object, None): pass except TypeError: pass else: self.fail("inheritance from non-type should be illegal") class Classic: pass try: class C(type(len)): pass except TypeError: pass else: self.fail("inheritance from CFunction should be illegal") try: class C(object): __slots__ = 1 except TypeError: pass else: self.fail("__slots__ = 1 should be illegal") try: class C(object): __slots__ = [1] except TypeError: pass else: self.fail("__slots__ = [1] should be illegal") class M1(type): pass class M2(type): pass class A1(object): __metaclass__ = M1 class A2(object): __metaclass__ = M2 try: class B(A1, A2): pass except TypeError: pass else: self.fail("finding the most derived metaclass should have failed") def test_classmethods(self): # Testing class methods... class C(object): def foo(*a): return a goo = classmethod(foo) c = C() self.assertEqual(C.goo(1), (C, 1)) self.assertEqual(c.goo(1), (C, 1)) self.assertEqual(c.foo(1), (c, 1)) class D(C): pass d = D() self.assertEqual(D.goo(1), (D, 1)) self.assertEqual(d.goo(1), (D, 1)) self.assertEqual(d.foo(1), (d, 1)) self.assertEqual(D.foo(d, 1), (d, 1)) # Test for a specific crash (SF bug 528132) def f(cls, arg): return (cls, arg) ff = classmethod(f) self.assertEqual(ff.__get__(0, int)(42), (int, 42)) self.assertEqual(ff.__get__(0)(42), (int, 42)) # Test super() with classmethods (SF bug 535444) self.assertEqual(C.goo.im_self, C) self.assertEqual(D.goo.im_self, D) self.assertEqual(super(D,D).goo.im_self, D) self.assertEqual(super(D,d).goo.im_self, D) self.assertEqual(super(D,D).goo(), (D,)) self.assertEqual(super(D,d).goo(), (D,)) # Verify that a non-callable will raise meth = classmethod(1).__get__(1) self.assertRaises(TypeError, meth) # Verify that classmethod() doesn't allow keyword args try: classmethod(f, kw=1) except TypeError: pass else: self.fail("classmethod shouldn't accept keyword 
args") @test_support.impl_detail("the module 'xxsubtype' is internal") def test_classmethods_in_c(self): # Testing C-based class methods... import xxsubtype as spam a = (1, 2, 3) d = {'abc': 123} x, a1, d1 = spam.spamlist.classmeth(*a, **d) self.assertEqual(x, spam.spamlist) self.assertEqual(a, a1) self.assertEqual(d, d1) x, a1, d1 = spam.spamlist().classmeth(*a, **d) self.assertEqual(x, spam.spamlist) self.assertEqual(a, a1) self.assertEqual(d, d1) spam_cm = spam.spamlist.__dict__['classmeth'] x2, a2, d2 = spam_cm(spam.spamlist, *a, **d) self.assertEqual(x2, spam.spamlist) self.assertEqual(a2, a1) self.assertEqual(d2, d1) class SubSpam(spam.spamlist): pass x2, a2, d2 = spam_cm(SubSpam, *a, **d) self.assertEqual(x2, SubSpam) self.assertEqual(a2, a1) self.assertEqual(d2, d1) with self.assertRaises(TypeError): spam_cm() with self.assertRaises(TypeError): spam_cm(spam.spamlist()) with self.assertRaises(TypeError): spam_cm(list) def test_staticmethods(self): # Testing static methods... class C(object): def foo(*a): return a goo = staticmethod(foo) c = C() self.assertEqual(C.goo(1), (1,)) self.assertEqual(c.goo(1), (1,)) self.assertEqual(c.foo(1), (c, 1,)) class D(C): pass d = D() self.assertEqual(D.goo(1), (1,)) self.assertEqual(d.goo(1), (1,)) self.assertEqual(d.foo(1), (d, 1)) self.assertEqual(D.foo(d, 1), (d, 1)) @test_support.impl_detail("the module 'xxsubtype' is internal") def test_staticmethods_in_c(self): # Testing C-based static methods... import xxsubtype as spam a = (1, 2, 3) d = {"abc": 123} x, a1, d1 = spam.spamlist.staticmeth(*a, **d) self.assertEqual(x, None) self.assertEqual(a, a1) self.assertEqual(d, d1) x, a1, d2 = spam.spamlist().staticmeth(*a, **d) self.assertEqual(x, None) self.assertEqual(a, a1) self.assertEqual(d, d1) def test_classic(self): # Testing classic classes... 
class C: def foo(*a): return a goo = classmethod(foo) c = C() self.assertEqual(C.goo(1), (C, 1)) self.assertEqual(c.goo(1), (C, 1)) self.assertEqual(c.foo(1), (c, 1)) class D(C): pass d = D() self.assertEqual(D.goo(1), (D, 1)) self.assertEqual(d.goo(1), (D, 1)) self.assertEqual(d.foo(1), (d, 1)) self.assertEqual(D.foo(d, 1), (d, 1)) class E: # *not* subclassing from C foo = C.foo self.assertEqual(E().foo, C.foo) # i.e., unbound self.assertTrue(repr(C.foo.__get__(C())).startswith("<bound method ")) def test_compattr(self): # Testing computed attributes... class C(object): class computed_attribute(object): def __init__(self, get, set=None, delete=None): self.__get = get self.__set = set self.__delete = delete def __get__(self, obj, type=None): return self.__get(obj) def __set__(self, obj, value): return self.__set(obj, value) def __delete__(self, obj): return self.__delete(obj) def __init__(self): self.__x = 0 def __get_x(self): x = self.__x self.__x = x+1 return x def __set_x(self, x): self.__x = x def __delete_x(self): del self.__x x = computed_attribute(__get_x, __set_x, __delete_x) a = C() self.assertEqual(a.x, 0) self.assertEqual(a.x, 1) a.x = 10 self.assertEqual(a.x, 10) self.assertEqual(a.x, 11) del a.x self.assertNotHasAttr(a, 'x') def test_newslots(self): # Testing __new__ slot override... class C(list): def __new__(cls): self = list.__new__(cls) self.foo = 1 return self def __init__(self): self.foo = self.foo + 2 a = C() self.assertEqual(a.foo, 3) self.assertEqual(a.__class__, C) class D(C): pass b = D() self.assertEqual(b.foo, 3) self.assertEqual(b.__class__, D) def test_altmro(self): # Testing mro() and overriding it... 
class A(object): def f(self): return "A" class B(A): pass class C(A): def f(self): return "C" class D(B, C): pass self.assertEqual(D.mro(), [D, B, C, A, object]) self.assertEqual(D.__mro__, (D, B, C, A, object)) self.assertEqual(D().f(), "C") class PerverseMetaType(type): def mro(cls): L = type.mro(cls) L.reverse() return L class X(D,B,C,A): __metaclass__ = PerverseMetaType self.assertEqual(X.__mro__, (object, A, C, B, D, X)) self.assertEqual(X().f(), "A") try: class X(object): class __metaclass__(type): def mro(self): return [self, dict, object] # In CPython, the class creation above already raises # TypeError, as a protection against the fact that # instances of X would segfault it. In other Python # implementations it would be ok to let the class X # be created, but instead get a clean TypeError on the # __setitem__ below. x = object.__new__(X) x[5] = 6 except TypeError: pass else: self.fail("devious mro() return not caught") try: class X(object): class __metaclass__(type): def mro(self): return [1] except TypeError: pass else: self.fail("non-class mro() return not caught") try: class X(object): class __metaclass__(type): def mro(self): return 1 except TypeError: pass else: self.fail("non-sequence mro() return not caught") def test_overloading(self): # Testing operator overloading... 
class B(object): "Intermediate class because object doesn't have a __setattr__" class C(B): def __getattr__(self, name): if name == "foo": return ("getattr", name) else: raise AttributeError def __setattr__(self, name, value): if name == "foo": self.setattr = (name, value) else: return B.__setattr__(self, name, value) def __delattr__(self, name): if name == "foo": self.delattr = name else: return B.__delattr__(self, name) def __getitem__(self, key): return ("getitem", key) def __setitem__(self, key, value): self.setitem = (key, value) def __delitem__(self, key): self.delitem = key def __getslice__(self, i, j): return ("getslice", i, j) def __setslice__(self, i, j, value): self.setslice = (i, j, value) def __delslice__(self, i, j): self.delslice = (i, j) a = C() self.assertEqual(a.foo, ("getattr", "foo")) a.foo = 12 self.assertEqual(a.setattr, ("foo", 12)) del a.foo self.assertEqual(a.delattr, "foo") self.assertEqual(a[12], ("getitem", 12)) a[12] = 21 self.assertEqual(a.setitem, (12, 21)) del a[12] self.assertEqual(a.delitem, 12) self.assertEqual(a[0:10], ("getslice", 0, 10)) a[0:10] = "foo" self.assertEqual(a.setslice, (0, 10, "foo")) del a[0:10] self.assertEqual(a.delslice, (0, 10)) def test_methods(self): # Testing methods... class C(object): def __init__(self, x): self.x = x def foo(self): return self.x c1 = C(1) self.assertEqual(c1.foo(), 1) class D(C): boo = C.foo goo = c1.foo d2 = D(2) self.assertEqual(d2.foo(), 2) self.assertEqual(d2.boo(), 2) self.assertEqual(d2.goo(), 1) class E(object): foo = C.foo self.assertEqual(E().foo, C.foo) # i.e., unbound self.assertTrue(repr(C.foo.__get__(C(1))).startswith("<bound method ")) def test_special_method_lookup(self): # The lookup of special methods bypasses __getattr__ and # __getattribute__, but they still can be descriptors. 
def run_context(manager): with manager: pass def iden(self): return self def hello(self): return "hello" def empty_seq(self): return [] def zero(self): return 0 def complex_num(self): return 1j def stop(self): raise StopIteration def return_true(self, thing=None): return True def do_isinstance(obj): return isinstance(int, obj) def do_issubclass(obj): return issubclass(int, obj) def swallow(*args): pass def do_dict_missing(checker): class DictSub(checker.__class__, dict): pass self.assertEqual(DictSub()["hi"], 4) def some_number(self_, key): self.assertEqual(key, "hi") return 4 def format_impl(self, spec): return "hello" # It would be nice to have every special method tested here, but I'm # only listing the ones I can remember outside of typeobject.c, since it # does it right. specials = [ ("__unicode__", unicode, hello, set(), {}), ("__reversed__", reversed, empty_seq, set(), {}), ("__length_hint__", list, zero, set(), {"__iter__" : iden, "next" : stop}), ("__sizeof__", sys.getsizeof, zero, set(), {}), ("__instancecheck__", do_isinstance, return_true, set(), {}), ("__missing__", do_dict_missing, some_number, set(("__class__",)), {}), ("__subclasscheck__", do_issubclass, return_true, set(("__bases__",)), {}), ("__enter__", run_context, iden, set(), {"__exit__" : swallow}), ("__exit__", run_context, swallow, set(), {"__enter__" : iden}), ("__complex__", complex, complex_num, set(), {}), ("__format__", format, format_impl, set(), {}), ("__dir__", dir, empty_seq, set(), {}), ] class Checker(object): def __getattr__(self, attr, test=self): test.fail("__getattr__ called with {0}".format(attr)) def __getattribute__(self, attr, test=self): if attr not in ok: test.fail("__getattribute__ called with {0}".format(attr)) return object.__getattribute__(self, attr) class SpecialDescr(object): def __init__(self, impl): self.impl = impl def __get__(self, obj, owner): record.append(1) return self.impl.__get__(obj, owner) class MyException(Exception): pass class ErrDescr(object): def 
__get__(self, obj, owner): raise MyException for name, runner, meth_impl, ok, env in specials: if name == '__length_hint__' or name == '__sizeof__': if not test_support.check_impl_detail(): continue class X(Checker): pass for attr, obj in env.iteritems(): setattr(X, attr, obj) setattr(X, name, meth_impl) runner(X()) record = [] class X(Checker): pass for attr, obj in env.iteritems(): setattr(X, attr, obj) setattr(X, name, SpecialDescr(meth_impl)) runner(X()) self.assertEqual(record, [1], name) class X(Checker): pass for attr, obj in env.iteritems(): setattr(X, attr, obj) setattr(X, name, ErrDescr()) try: runner(X()) except MyException: pass else: self.fail("{0!r} didn't raise".format(name)) def test_specials(self): # Testing special operators... # Test operators like __hash__ for which a built-in default exists # Test the default behavior for static classes class C(object): def __getitem__(self, i): if 0 <= i < 10: return i raise IndexError c1 = C() c2 = C() self.assertFalse(not c1) self.assertNotEqual(id(c1), id(c2)) hash(c1) hash(c2) self.assertEqual(cmp(c1, c2), cmp(id(c1), id(c2))) self.assertEqual(c1, c1) self.assertTrue(c1 != c2) self.assertFalse(c1 != c1) self.assertFalse(c1 == c2) # Note that the module name appears in str/repr, and that varies # depending on whether this test is run standalone or from a framework. 
self.assertGreaterEqual(str(c1).find('C object at '), 0) self.assertEqual(str(c1), repr(c1)) self.assertNotIn(-1, c1) for i in range(10): self.assertIn(i, c1) self.assertNotIn(10, c1) # Test the default behavior for dynamic classes class D(object): def __getitem__(self, i): if 0 <= i < 10: return i raise IndexError d1 = D() d2 = D() self.assertFalse(not d1) self.assertNotEqual(id(d1), id(d2)) hash(d1) hash(d2) self.assertEqual(cmp(d1, d2), cmp(id(d1), id(d2))) self.assertEqual(d1, d1) self.assertNotEqual(d1, d2) self.assertFalse(d1 != d1) self.assertFalse(d1 == d2) # Note that the module name appears in str/repr, and that varies # depending on whether this test is run standalone or from a framework. self.assertGreaterEqual(str(d1).find('D object at '), 0) self.assertEqual(str(d1), repr(d1)) self.assertNotIn(-1, d1) for i in range(10): self.assertIn(i, d1) self.assertNotIn(10, d1) # Test overridden behavior for static classes class Proxy(object): def __init__(self, x): self.x = x def __nonzero__(self): return not not self.x def __hash__(self): return hash(self.x) def __eq__(self, other): return self.x == other def __ne__(self, other): return self.x != other def __cmp__(self, other): return cmp(self.x, other.x) def __str__(self): return "Proxy:%s" % self.x def __repr__(self): return "Proxy(%r)" % self.x def __contains__(self, value): return value in self.x p0 = Proxy(0) p1 = Proxy(1) p_1 = Proxy(-1) self.assertFalse(p0) self.assertFalse(not p1) self.assertEqual(hash(p0), hash(0)) self.assertEqual(p0, p0) self.assertNotEqual(p0, p1) self.assertFalse(p0 != p0) self.assertEqual(not p0, p1) self.assertEqual(cmp(p0, p1), -1) self.assertEqual(cmp(p0, p0), 0) self.assertEqual(cmp(p0, p_1), 1) self.assertEqual(str(p0), "Proxy:0") self.assertEqual(repr(p0), "Proxy(0)") p10 = Proxy(range(10)) self.assertNotIn(-1, p10) for i in range(10): self.assertIn(i, p10) self.assertNotIn(10, p10) # Test overridden behavior for dynamic classes class DProxy(object): def __init__(self, x): 
self.x = x def __nonzero__(self): return not not self.x def __hash__(self): return hash(self.x) def __eq__(self, other): return self.x == other def __ne__(self, other): return self.x != other def __cmp__(self, other): return cmp(self.x, other.x) def __str__(self): return "DProxy:%s" % self.x def __repr__(self): return "DProxy(%r)" % self.x def __contains__(self, value): return value in self.x p0 = DProxy(0) p1 = DProxy(1) p_1 = DProxy(-1) self.assertFalse(p0) self.assertFalse(not p1) self.assertEqual(hash(p0), hash(0)) self.assertEqual(p0, p0) self.assertNotEqual(p0, p1) self.assertNotEqual(not p0, p0) self.assertEqual(not p0, p1) self.assertEqual(cmp(p0, p1), -1) self.assertEqual(cmp(p0, p0), 0) self.assertEqual(cmp(p0, p_1), 1) self.assertEqual(str(p0), "DProxy:0") self.assertEqual(repr(p0), "DProxy(0)") p10 = DProxy(range(10)) self.assertNotIn(-1, p10) for i in range(10): self.assertIn(i, p10) self.assertNotIn(10, p10) # Safety test for __cmp__ def unsafecmp(a, b): if not hasattr(a, '__cmp__'): return # some types don't have a __cmp__ any more (so the # test doesn't make sense any more), or maybe they # never had a __cmp__ at all, e.g. in PyPy try: a.__class__.__cmp__(a, b) except TypeError: pass else: self.fail("shouldn't allow %s.__cmp__(%r, %r)" % ( a.__class__, a, b)) unsafecmp(u"123", "123") unsafecmp("123", u"123") unsafecmp(1, 1.0) unsafecmp(1.0, 1) unsafecmp(1, 1L) unsafecmp(1L, 1) @test_support.impl_detail("custom logic for printing to real file objects") def test_recursions_1(self): # Testing recursion checks ... 
class Letter(str): def __new__(cls, letter): if letter == 'EPS': return str.__new__(cls) return str.__new__(cls, letter) def __str__(self): if not self: return 'EPS' return self # sys.stdout needs to be the original to trigger the recursion bug test_stdout = sys.stdout sys.stdout = test_support.get_original_stdout() try: # nothing should actually be printed, this should raise an exception print Letter('w') except RuntimeError: pass else: self.fail("expected a RuntimeError for print recursion") finally: sys.stdout = test_stdout def test_recursions_2(self): # Bug #1202533. class A(object): pass A.__mul__ = types.MethodType(lambda self, x: self * x, None, A) try: A()*2 except RuntimeError: pass else: self.fail("expected a RuntimeError") def test_weakrefs(self): # Testing weak references... import weakref class C(object): pass c = C() r = weakref.ref(c) self.assertEqual(r(), c) del c test_support.gc_collect() self.assertEqual(r(), None) del r class NoWeak(object): __slots__ = ['foo'] no = NoWeak() try: weakref.ref(no) except TypeError, msg: self.assertIn("weak reference", str(msg)) else: if test_support.check_impl_detail(pypy=False): self.fail("weakref.ref(no) should be illegal") #else: pypy supports taking weakrefs to some more objects class Weak(object): __slots__ = ['foo', '__weakref__'] yes = Weak() r = weakref.ref(yes) self.assertEqual(r(), yes) del yes test_support.gc_collect() self.assertEqual(r(), None) del r def test_properties(self): # Testing property... 
class C(object): def getx(self): return self.__x def setx(self, value): self.__x = value def delx(self): del self.__x x = property(getx, setx, delx, doc="I'm the x property.") a = C() self.assertNotHasAttr(a, "x") a.x = 42 self.assertEqual(a._C__x, 42) self.assertEqual(a.x, 42) del a.x self.assertNotHasAttr(a, "x") self.assertNotHasAttr(a, "_C__x") C.x.__set__(a, 100) self.assertEqual(C.x.__get__(a), 100) C.x.__delete__(a) self.assertNotHasAttr(a, "x") raw = C.__dict__['x'] self.assertIsInstance(raw, property) attrs = dir(raw) self.assertIn("__doc__", attrs) self.assertIn("fget", attrs) self.assertIn("fset", attrs) self.assertIn("fdel", attrs) self.assertEqual(raw.__doc__, "I'm the x property.") self.assertIs(raw.fget, C.__dict__['getx']) self.assertIs(raw.fset, C.__dict__['setx']) self.assertIs(raw.fdel, C.__dict__['delx']) for attr in "__doc__", "fget", "fset", "fdel": try: setattr(raw, attr, 42) except TypeError, msg: if str(msg).find('readonly') < 0: self.fail("when setting readonly attr %r on a property, " "got unexpected TypeError msg %r" % (attr, str(msg))) else: self.fail("expected TypeError from trying to set readonly %r " "attr on a property" % attr) class D(object): __getitem__ = property(lambda s: 1/0) d = D() try: for i in d: str(i) except ZeroDivisionError: pass else: self.fail("expected ZeroDivisionError from bad property") @unittest.skipIf(sys.flags.optimize >= 2, "Docstrings are omitted with -O2 and above") def test_properties_doc_attrib(self): class E(object): def getter(self): "getter method" return 0 def setter(self_, value): "setter method" pass prop = property(getter) self.assertEqual(prop.__doc__, "getter method") prop2 = property(fset=setter) self.assertEqual(prop2.__doc__, None) @test_support.cpython_only def test_testcapi_no_segfault(self): # this segfaulted in 2.5b2 try: import _testcapi except ImportError: pass else: class X(object): p = property(_testcapi.test_with_docstring) def test_properties_plus(self): class C(object): foo = 
property(doc="hello") @foo.getter def foo(self): return self._foo @foo.setter def foo(self, value): self._foo = abs(value) @foo.deleter def foo(self): del self._foo c = C() self.assertEqual(C.foo.__doc__, "hello") self.assertNotHasAttr(c, "foo") c.foo = -42 self.assertHasAttr(c, '_foo') self.assertEqual(c._foo, 42) self.assertEqual(c.foo, 42) del c.foo self.assertNotHasAttr(c, '_foo') self.assertNotHasAttr(c, "foo") class D(C): @C.foo.deleter def foo(self): try: del self._foo except AttributeError: pass d = D() d.foo = 24 self.assertEqual(d.foo, 24) del d.foo del d.foo class E(object): @property def foo(self): return self._foo @foo.setter def foo(self, value): raise RuntimeError @foo.setter def foo(self, value): self._foo = abs(value) @foo.deleter def foo(self, value=None): del self._foo e = E() e.foo = -42 self.assertEqual(e.foo, 42) del e.foo class F(E): @E.foo.deleter def foo(self): del self._foo @foo.setter def foo(self, value): self._foo = max(0, value) f = F() f.foo = -10 self.assertEqual(f.foo, 0) del f.foo def test_dict_constructors(self): # Testing dict constructor ... d = dict() self.assertEqual(d, {}) d = dict({}) self.assertEqual(d, {}) d = dict({1: 2, 'a': 'b'}) self.assertEqual(d, {1: 2, 'a': 'b'}) self.assertEqual(d, dict(d.items())) self.assertEqual(d, dict(d.iteritems())) d = dict({'one':1, 'two':2}) self.assertEqual(d, dict(one=1, two=2)) self.assertEqual(d, dict(**d)) self.assertEqual(d, dict({"one": 1}, two=2)) self.assertEqual(d, dict([("two", 2)], one=1)) self.assertEqual(d, dict([("one", 100), ("two", 200)], **d)) self.assertEqual(d, dict(**d)) for badarg in 0, 0L, 0j, "0", [0], (0,): try: dict(badarg) except TypeError: pass except ValueError: if badarg == "0": # It's a sequence, and its elements are also sequences (gotta # love strings <wink>), but they aren't of length 2, so this # one seemed better as a ValueError than a TypeError. 
pass else: self.fail("no TypeError from dict(%r)" % badarg) else: self.fail("no TypeError from dict(%r)" % badarg) try: dict({}, {}) except TypeError: pass else: self.fail("no TypeError from dict({}, {})") class Mapping: # Lacks a .keys() method; will be added later. dict = {1:2, 3:4, 'a':1j} try: dict(Mapping()) except TypeError: pass else: self.fail("no TypeError from dict(incomplete mapping)") Mapping.keys = lambda self: self.dict.keys() Mapping.__getitem__ = lambda self, i: self.dict[i] d = dict(Mapping()) self.assertEqual(d, Mapping.dict) # Init from sequence of iterable objects, each producing a 2-sequence. class AddressBookEntry: def __init__(self, first, last): self.first = first self.last = last def __iter__(self): return iter([self.first, self.last]) d = dict([AddressBookEntry('Tim', 'Warsaw'), AddressBookEntry('Barry', 'Peters'), AddressBookEntry('Tim', 'Peters'), AddressBookEntry('Barry', 'Warsaw')]) self.assertEqual(d, {'Barry': 'Warsaw', 'Tim': 'Peters'}) d = dict(zip(range(4), range(1, 5))) self.assertEqual(d, dict([(i, i+1) for i in range(4)])) # Bad sequence lengths. for bad in [('tooshort',)], [('too', 'long', 'by 1')]: try: dict(bad) except ValueError: pass else: self.fail("no ValueError from dict(%r)" % bad) def test_dir(self): # Testing dir() ... junk = 12 self.assertEqual(dir(), ['junk', 'self']) del junk # Just make sure these don't blow up! for arg in 2, 2L, 2j, 2e0, [2], "2", u"2", (2,), {2:2}, type, self.test_dir: dir(arg) # Try classic classes. class C: Cdata = 1 def Cmethod(self): pass cstuff = ['Cdata', 'Cmethod', '__doc__', '__module__'] self.assertEqual(dir(C), cstuff) self.assertIn('im_self', dir(C.Cmethod)) c = C() # c.__doc__ is an odd thing to see here; ditto c.__module__. 
self.assertEqual(dir(c), cstuff) c.cdata = 2 c.cmethod = lambda self: 0 self.assertEqual(dir(c), cstuff + ['cdata', 'cmethod']) self.assertIn('im_self', dir(c.Cmethod)) class A(C): Adata = 1 def Amethod(self): pass astuff = ['Adata', 'Amethod'] + cstuff self.assertEqual(dir(A), astuff) self.assertIn('im_self', dir(A.Amethod)) a = A() self.assertEqual(dir(a), astuff) self.assertIn('im_self', dir(a.Amethod)) a.adata = 42 a.amethod = lambda self: 3 self.assertEqual(dir(a), astuff + ['adata', 'amethod']) # The same, but with new-style classes. Since these have object as a # base class, a lot more gets sucked in. def interesting(strings): return [s for s in strings if not s.startswith('_')] class C(object): Cdata = 1 def Cmethod(self): pass cstuff = ['Cdata', 'Cmethod'] self.assertEqual(interesting(dir(C)), cstuff) c = C() self.assertEqual(interesting(dir(c)), cstuff) self.assertIn('im_self', dir(C.Cmethod)) c.cdata = 2 c.cmethod = lambda self: 0 self.assertEqual(interesting(dir(c)), cstuff + ['cdata', 'cmethod']) self.assertIn('im_self', dir(c.Cmethod)) class A(C): Adata = 1 def Amethod(self): pass astuff = ['Adata', 'Amethod'] + cstuff self.assertEqual(interesting(dir(A)), astuff) self.assertIn('im_self', dir(A.Amethod)) a = A() self.assertEqual(interesting(dir(a)), astuff) a.adata = 42 a.amethod = lambda self: 3 self.assertEqual(interesting(dir(a)), astuff + ['adata', 'amethod']) self.assertIn('im_self', dir(a.Amethod)) # Try a module subclass. class M(type(sys)): pass minstance = M("m") minstance.b = 2 minstance.a = 1 names = [x for x in dir(minstance) if x not in ["__name__", "__doc__"]] self.assertEqual(names, ['a', 'b']) class M2(M): def getdict(self): return "Not a dict!" __dict__ = property(getdict) m2instance = M2("m2") m2instance.b = 2 m2instance.a = 1 self.assertEqual(m2instance.__dict__, "Not a dict!") try: dir(m2instance) except TypeError: pass # Two essentially featureless objects, just inheriting stuff from # object. 
self.assertEqual(dir(NotImplemented), dir(Ellipsis)) if test_support.check_impl_detail(): # None differs in PyPy: it has a __nonzero__ self.assertEqual(dir(None), dir(Ellipsis)) # Nasty test case for proxied objects class Wrapper(object): def __init__(self, obj): self.__obj = obj def __repr__(self): return "Wrapper(%s)" % repr(self.__obj) def __getitem__(self, key): return Wrapper(self.__obj[key]) def __len__(self): return len(self.__obj) def __getattr__(self, name): return Wrapper(getattr(self.__obj, name)) class C(object): def __getclass(self): return Wrapper(type(self)) __class__ = property(__getclass) dir(C()) # This used to segfault def test_supers(self): # Testing super... class A(object): def meth(self, a): return "A(%r)" % a self.assertEqual(A().meth(1), "A(1)") class B(A): def __init__(self): self.__super = super(B, self) def meth(self, a): return "B(%r)" % a + self.__super.meth(a) self.assertEqual(B().meth(2), "B(2)A(2)") class C(A): def meth(self, a): return "C(%r)" % a + self.__super.meth(a) C._C__super = super(C) self.assertEqual(C().meth(3), "C(3)A(3)") class D(C, B): def meth(self, a): return "D(%r)" % a + super(D, self).meth(a) self.assertEqual(D().meth(4), "D(4)C(4)B(4)A(4)") # Test for subclassing super class mysuper(super): def __init__(self, *args): return super(mysuper, self).__init__(*args) class E(D): def meth(self, a): return "E(%r)" % a + mysuper(E, self).meth(a) self.assertEqual(E().meth(5), "E(5)D(5)C(5)B(5)A(5)") class F(E): def meth(self, a): s = self.__super # == mysuper(F, self) return "F(%r)[%s]" % (a, s.__class__.__name__) + s.meth(a) F._F__super = mysuper(F) self.assertEqual(F().meth(6), "F(6)[mysuper]E(6)D(6)C(6)B(6)A(6)") # Make sure certain errors are raised try: super(D, 42) except TypeError: pass else: self.fail("shouldn't allow super(D, 42)") try: super(D, C()) except TypeError: pass else: self.fail("shouldn't allow super(D, C())") try: super(D).__get__(12) except TypeError: pass else: self.fail("shouldn't allow 
super(D).__get__(12)") try: super(D).__get__(C()) except TypeError: pass else: self.fail("shouldn't allow super(D).__get__(C())") # Make sure data descriptors can be overridden and accessed via super # (new feature in Python 2.3) class DDbase(object): def getx(self): return 42 x = property(getx) class DDsub(DDbase): def getx(self): return "hello" x = property(getx) dd = DDsub() self.assertEqual(dd.x, "hello") self.assertEqual(super(DDsub, dd).x, 42) # Ensure that super() lookup of descriptor from classmethod # works (SF ID# 743627) class Base(object): aProp = property(lambda self: "foo") class Sub(Base): @classmethod def test(klass): return super(Sub,klass).aProp self.assertEqual(Sub.test(), Base.aProp) # Verify that super() doesn't allow keyword args try: super(Base, kw=1) except TypeError: pass else: self.assertEqual("super shouldn't accept keyword args") def test_basic_inheritance(self): # Testing inheritance from basic types... class hexint(int): def __repr__(self): return hex(self) def __add__(self, other): return hexint(int.__add__(self, other)) # (Note that overriding __radd__ doesn't work, # because the int type gets first dibs.) self.assertEqual(repr(hexint(7) + 9), "0x10") self.assertEqual(repr(hexint(1000) + 7), "0x3ef") a = hexint(12345) self.assertEqual(a, 12345) self.assertEqual(int(a), 12345) self.assertIs(int(a).__class__, int) self.assertEqual(hash(a), hash(12345)) self.assertIs((+a).__class__, int) self.assertIs((a >> 0).__class__, int) self.assertIs((a << 0).__class__, int) self.assertIs((hexint(0) << 12).__class__, int) self.assertIs((hexint(0) >> 12).__class__, int) class octlong(long): __slots__ = [] def __str__(self): s = oct(self) if s[-1] == 'L': s = s[:-1] return s def __add__(self, other): return self.__class__(super(octlong, self).__add__(other)) __radd__ = __add__ self.assertEqual(str(octlong(3) + 5), "010") # (Note that overriding __radd__ here only seems to work # because the example uses a short int left argument.) 
self.assertEqual(str(5 + octlong(3000)), "05675") a = octlong(12345) self.assertEqual(a, 12345L) self.assertEqual(long(a), 12345L) self.assertEqual(hash(a), hash(12345L)) self.assertIs(long(a).__class__, long) self.assertIs((+a).__class__, long) self.assertIs((-a).__class__, long) self.assertIs((-octlong(0)).__class__, long) self.assertIs((a >> 0).__class__, long) self.assertIs((a << 0).__class__, long) self.assertIs((a - 0).__class__, long) self.assertIs((a * 1).__class__, long) self.assertIs((a ** 1).__class__, long) self.assertIs((a // 1).__class__, long) self.assertIs((1 * a).__class__, long) self.assertIs((a | 0).__class__, long) self.assertIs((a ^ 0).__class__, long) self.assertIs((a & -1L).__class__, long) self.assertIs((octlong(0) << 12).__class__, long) self.assertIs((octlong(0) >> 12).__class__, long) self.assertIs(abs(octlong(0)).__class__, long) # Because octlong overrides __add__, we can't check the absence of +0 # optimizations using octlong. class longclone(long): pass a = longclone(1) self.assertIs((a + 0).__class__, long) self.assertIs((0 + a).__class__, long) # Check that negative clones don't segfault a = longclone(-1) self.assertEqual(a.__dict__, {}) self.assertEqual(long(a), -1) # self.assertTrue PyNumber_Long() copies the sign bit class precfloat(float): __slots__ = ['prec'] def __init__(self, value=0.0, prec=12): self.prec = int(prec) def __repr__(self): return "%.*g" % (self.prec, self) self.assertEqual(repr(precfloat(1.1)), "1.1") a = precfloat(12345) self.assertEqual(a, 12345.0) self.assertEqual(float(a), 12345.0) self.assertIs(float(a).__class__, float) self.assertEqual(hash(a), hash(12345.0)) self.assertIs((+a).__class__, float) class madcomplex(complex): def __repr__(self): return "%.17gj%+.17g" % (self.imag, self.real) a = madcomplex(-3, 4) self.assertEqual(repr(a), "4j-3") base = complex(-3, 4) self.assertEqual(base.__class__, complex) self.assertEqual(a, base) self.assertEqual(complex(a), base) self.assertEqual(complex(a).__class__, 
complex) a = madcomplex(a) # just trying another form of the constructor self.assertEqual(repr(a), "4j-3") self.assertEqual(a, base) self.assertEqual(complex(a), base) self.assertEqual(complex(a).__class__, complex) self.assertEqual(hash(a), hash(base)) self.assertEqual((+a).__class__, complex) self.assertEqual((a + 0).__class__, complex) self.assertEqual(a + 0, base) self.assertEqual((a - 0).__class__, complex) self.assertEqual(a - 0, base) self.assertEqual((a * 1).__class__, complex) self.assertEqual(a * 1, base) self.assertEqual((a / 1).__class__, complex) self.assertEqual(a / 1, base) class madtuple(tuple): _rev = None def rev(self): if self._rev is not None: return self._rev L = list(self) L.reverse() self._rev = self.__class__(L) return self._rev a = madtuple((1,2,3,4,5,6,7,8,9,0)) self.assertEqual(a, (1,2,3,4,5,6,7,8,9,0)) self.assertEqual(a.rev(), madtuple((0,9,8,7,6,5,4,3,2,1))) self.assertEqual(a.rev().rev(), madtuple((1,2,3,4,5,6,7,8,9,0))) for i in range(512): t = madtuple(range(i)) u = t.rev() v = u.rev() self.assertEqual(v, t) a = madtuple((1,2,3,4,5)) self.assertEqual(tuple(a), (1,2,3,4,5)) self.assertIs(tuple(a).__class__, tuple) self.assertEqual(hash(a), hash((1,2,3,4,5))) self.assertIs(a[:].__class__, tuple) self.assertIs((a * 1).__class__, tuple) self.assertIs((a * 0).__class__, tuple) self.assertIs((a + ()).__class__, tuple) a = madtuple(()) self.assertEqual(tuple(a), ()) self.assertIs(tuple(a).__class__, tuple) self.assertIs((a + a).__class__, tuple) self.assertIs((a * 0).__class__, tuple) self.assertIs((a * 1).__class__, tuple) self.assertIs((a * 2).__class__, tuple) self.assertIs(a[:].__class__, tuple) class madstring(str): _rev = None def rev(self): if self._rev is not None: return self._rev L = list(self) L.reverse() self._rev = self.__class__("".join(L)) return self._rev s = madstring("abcdefghijklmnopqrstuvwxyz") self.assertEqual(s, "abcdefghijklmnopqrstuvwxyz") self.assertEqual(s.rev(), madstring("zyxwvutsrqponmlkjihgfedcba")) 
self.assertEqual(s.rev().rev(), madstring("abcdefghijklmnopqrstuvwxyz")) for i in range(256): s = madstring("".join(map(chr, range(i)))) t = s.rev() u = t.rev() self.assertEqual(u, s) s = madstring("12345") self.assertEqual(str(s), "12345") self.assertIs(str(s).__class__, str) base = "\x00" * 5 s = madstring(base) self.assertEqual(s, base) self.assertEqual(str(s), base) self.assertIs(str(s).__class__, str) self.assertEqual(hash(s), hash(base)) self.assertEqual({s: 1}[base], 1) self.assertEqual({base: 1}[s], 1) self.assertIs((s + "").__class__, str) self.assertEqual(s + "", base) self.assertIs(("" + s).__class__, str) self.assertEqual("" + s, base) self.assertIs((s * 0).__class__, str) self.assertEqual(s * 0, "") self.assertIs((s * 1).__class__, str) self.assertEqual(s * 1, base) self.assertIs((s * 2).__class__, str) self.assertEqual(s * 2, base + base) self.assertIs(s[:].__class__, str) self.assertEqual(s[:], base) self.assertIs(s[0:0].__class__, str) self.assertEqual(s[0:0], "") self.assertIs(s.strip().__class__, str) self.assertEqual(s.strip(), base) self.assertIs(s.lstrip().__class__, str) self.assertEqual(s.lstrip(), base) self.assertIs(s.rstrip().__class__, str) self.assertEqual(s.rstrip(), base) identitytab = ''.join([chr(i) for i in range(256)]) self.assertIs(s.translate(identitytab).__class__, str) self.assertEqual(s.translate(identitytab), base) self.assertIs(s.translate(identitytab, "x").__class__, str) self.assertEqual(s.translate(identitytab, "x"), base) self.assertEqual(s.translate(identitytab, "\x00"), "") self.assertIs(s.replace("x", "x").__class__, str) self.assertEqual(s.replace("x", "x"), base) self.assertIs(s.ljust(len(s)).__class__, str) self.assertEqual(s.ljust(len(s)), base) self.assertIs(s.rjust(len(s)).__class__, str) self.assertEqual(s.rjust(len(s)), base) self.assertIs(s.center(len(s)).__class__, str) self.assertEqual(s.center(len(s)), base) self.assertIs(s.lower().__class__, str) self.assertEqual(s.lower(), base) class 
madunicode(unicode): _rev = None def rev(self): if self._rev is not None: return self._rev L = list(self) L.reverse() self._rev = self.__class__(u"".join(L)) return self._rev u = madunicode("ABCDEF") self.assertEqual(u, u"ABCDEF") self.assertEqual(u.rev(), madunicode(u"FEDCBA")) self.assertEqual(u.rev().rev(), madunicode(u"ABCDEF")) base = u"12345" u = madunicode(base) self.assertEqual(unicode(u), base) self.assertIs(unicode(u).__class__, unicode) self.assertEqual(hash(u), hash(base)) self.assertEqual({u: 1}[base], 1) self.assertEqual({base: 1}[u], 1) self.assertIs(u.strip().__class__, unicode) self.assertEqual(u.strip(), base) self.assertIs(u.lstrip().__class__, unicode) self.assertEqual(u.lstrip(), base) self.assertIs(u.rstrip().__class__, unicode) self.assertEqual(u.rstrip(), base) self.assertIs(u.replace(u"x", u"x").__class__, unicode) self.assertEqual(u.replace(u"x", u"x"), base) self.assertIs(u.replace(u"xy", u"xy").__class__, unicode) self.assertEqual(u.replace(u"xy", u"xy"), base) self.assertIs(u.center(len(u)).__class__, unicode) self.assertEqual(u.center(len(u)), base) self.assertIs(u.ljust(len(u)).__class__, unicode) self.assertEqual(u.ljust(len(u)), base) self.assertIs(u.rjust(len(u)).__class__, unicode) self.assertEqual(u.rjust(len(u)), base) self.assertIs(u.lower().__class__, unicode) self.assertEqual(u.lower(), base) self.assertIs(u.upper().__class__, unicode) self.assertEqual(u.upper(), base) self.assertIs(u.capitalize().__class__, unicode) self.assertEqual(u.capitalize(), base) self.assertIs(u.title().__class__, unicode) self.assertEqual(u.title(), base) self.assertIs((u + u"").__class__, unicode) self.assertEqual(u + u"", base) self.assertIs((u"" + u).__class__, unicode) self.assertEqual(u"" + u, base) self.assertIs((u * 0).__class__, unicode) self.assertEqual(u * 0, u"") self.assertIs((u * 1).__class__, unicode) self.assertEqual(u * 1, base) self.assertIs((u * 2).__class__, unicode) self.assertEqual(u * 2, base + base) 
self.assertIs(u[:].__class__, unicode) self.assertEqual(u[:], base) self.assertIs(u[0:0].__class__, unicode) self.assertEqual(u[0:0], u"") class sublist(list): pass a = sublist(range(5)) self.assertEqual(a, range(5)) a.append("hello") self.assertEqual(a, range(5) + ["hello"]) a[5] = 5 self.assertEqual(a, range(6)) a.extend(range(6, 20)) self.assertEqual(a, range(20)) a[-5:] = [] self.assertEqual(a, range(15)) del a[10:15] self.assertEqual(len(a), 10) self.assertEqual(a, range(10)) self.assertEqual(list(a), range(10)) self.assertEqual(a[0], 0) self.assertEqual(a[9], 9) self.assertEqual(a[-10], 0) self.assertEqual(a[-1], 9) self.assertEqual(a[:5], range(5)) class CountedInput(file): """Counts lines read by self.readline(). self.lineno is the 0-based ordinal of the last line read, up to a maximum of one greater than the number of lines in the file. self.ateof is true if and only if the final "" line has been read, at which point self.lineno stops incrementing, and further calls to readline() continue to return "". """ lineno = 0 ateof = 0 def readline(self): if self.ateof: return "" s = file.readline(self) # Next line works too. # s = super(CountedInput, self).readline() self.lineno += 1 if s == "": self.ateof = 1 return s f = file(name=test_support.TESTFN, mode='w') lines = ['a\n', 'b\n', 'c\n'] try: f.writelines(lines) f.close() f = CountedInput(test_support.TESTFN) for (i, expected) in zip(range(1, 5) + [4], lines + 2 * [""]): got = f.readline() self.assertEqual(expected, got) self.assertEqual(f.lineno, i) self.assertEqual(f.ateof, (i > len(lines))) f.close() finally: try: f.close() except: pass test_support.unlink(test_support.TESTFN) def test_keywords(self): # Testing keyword args to basic type constructors ... 
self.assertEqual(int(x=1), 1) self.assertEqual(float(x=2), 2.0) self.assertEqual(long(x=3), 3L) self.assertEqual(complex(imag=42, real=666), complex(666, 42)) self.assertEqual(str(object=500), '500') self.assertEqual(unicode(string='abc', errors='strict'), u'abc') self.assertEqual(tuple(sequence=range(3)), (0, 1, 2)) self.assertEqual(list(sequence=(0, 1, 2)), range(3)) # note: as of Python 2.3, dict() no longer has an "items" keyword arg for constructor in (int, float, long, complex, str, unicode, tuple, list, file): try: constructor(bogus_keyword_arg=1) except TypeError: pass else: self.fail("expected TypeError from bogus keyword argument to %r" % constructor) def test_str_subclass_as_dict_key(self): # Testing a str subclass used as dict key .. class cistr(str): """Sublcass of str that computes __eq__ case-insensitively. Also computes a hash code of the string in canonical form. """ def __init__(self, value): self.canonical = value.lower() self.hashcode = hash(self.canonical) def __eq__(self, other): if not isinstance(other, cistr): other = cistr(other) return self.canonical == other.canonical def __hash__(self): return self.hashcode self.assertEqual(cistr('ABC'), 'abc') self.assertEqual('aBc', cistr('ABC')) self.assertEqual(str(cistr('ABC')), 'ABC') d = {cistr('one'): 1, cistr('two'): 2, cistr('tHree'): 3} self.assertEqual(d[cistr('one')], 1) self.assertEqual(d[cistr('tWo')], 2) self.assertEqual(d[cistr('THrEE')], 3) self.assertIn(cistr('ONe'), d) self.assertEqual(d.get(cistr('thrEE')), 3) def test_classic_comparisons(self): # Testing classic comparisons... 
class classic: pass for base in (classic, int, object): class C(base): def __init__(self, value): self.value = int(value) def __cmp__(self, other): if isinstance(other, C): return cmp(self.value, other.value) if isinstance(other, int) or isinstance(other, long): return cmp(self.value, other) return NotImplemented __hash__ = None # Silence Py3k warning c1 = C(1) c2 = C(2) c3 = C(3) self.assertEqual(c1, 1) c = {1: c1, 2: c2, 3: c3} for x in 1, 2, 3: for y in 1, 2, 3: self.assertEqual(cmp(c[x], c[y]), cmp(x, y), "x=%d, y=%d" % (x, y)) for op in "<", "<=", "==", "!=", ">", ">=": self.assertEqual(eval("c[x] %s c[y]" % op), eval("x %s y" % op), "x=%d, y=%d" % (x, y)) self.assertEqual(cmp(c[x], y), cmp(x, y), "x=%d, y=%d" % (x, y)) self.assertEqual(cmp(x, c[y]), cmp(x, y), "x=%d, y=%d" % (x, y)) def test_rich_comparisons(self): # Testing rich comparisons... class Z(complex): pass z = Z(1) self.assertEqual(z, 1+0j) self.assertEqual(1+0j, z) class ZZ(complex): def __eq__(self, other): try: return abs(self - other) <= 1e-6 except: return NotImplemented __hash__ = None # Silence Py3k warning zz = ZZ(1.0000003) self.assertEqual(zz, 1+0j) self.assertEqual(1+0j, zz) class classic: pass for base in (classic, int, object, list): class C(base): def __init__(self, value): self.value = int(value) def __cmp__(self_, other): self.fail("shouldn't call __cmp__") __hash__ = None # Silence Py3k warning def __eq__(self, other): if isinstance(other, C): return self.value == other.value if isinstance(other, int) or isinstance(other, long): return self.value == other return NotImplemented def __ne__(self, other): if isinstance(other, C): return self.value != other.value if isinstance(other, int) or isinstance(other, long): return self.value != other return NotImplemented def __lt__(self, other): if isinstance(other, C): return self.value < other.value if isinstance(other, int) or isinstance(other, long): return self.value < other return NotImplemented def __le__(self, other): if 
isinstance(other, C): return self.value <= other.value if isinstance(other, int) or isinstance(other, long): return self.value <= other return NotImplemented def __gt__(self, other): if isinstance(other, C): return self.value > other.value if isinstance(other, int) or isinstance(other, long): return self.value > other return NotImplemented def __ge__(self, other): if isinstance(other, C): return self.value >= other.value if isinstance(other, int) or isinstance(other, long): return self.value >= other return NotImplemented c1 = C(1) c2 = C(2) c3 = C(3) self.assertEqual(c1, 1) c = {1: c1, 2: c2, 3: c3} for x in 1, 2, 3: for y in 1, 2, 3: for op in "<", "<=", "==", "!=", ">", ">=": self.assertEqual(eval("c[x] %s c[y]" % op), eval("x %s y" % op), "x=%d, y=%d" % (x, y)) self.assertEqual(eval("c[x] %s y" % op), eval("x %s y" % op), "x=%d, y=%d" % (x, y)) self.assertEqual(eval("x %s c[y]" % op), eval("x %s y" % op), "x=%d, y=%d" % (x, y)) def test_coercions(self): # Testing coercions... class I(int): pass coerce(I(0), 0) coerce(0, I(0)) class L(long): pass coerce(L(0), 0) coerce(L(0), 0L) coerce(0, L(0)) coerce(0L, L(0)) class F(float): pass coerce(F(0), 0) coerce(F(0), 0L) coerce(F(0), 0.) coerce(0, F(0)) coerce(0L, F(0)) coerce(0., F(0)) class C(complex): pass coerce(C(0), 0) coerce(C(0), 0L) coerce(C(0), 0.) coerce(C(0), 0j) coerce(0, C(0)) coerce(0L, C(0)) coerce(0., C(0)) coerce(0j, C(0)) def test_descrdoc(self): # Testing descriptor doc strings... def check(descr, what): self.assertEqual(descr.__doc__, what) check(file.closed, "True if the file is closed") # getset descriptor check(file.name, "file name") # member descriptor def test_doc_descriptor(self): # Testing __doc__ descriptor... 
# SF bug 542984 class DocDescr(object): def __get__(self, object, otype): if object: object = object.__class__.__name__ + ' instance' if otype: otype = otype.__name__ return 'object=%s; type=%s' % (object, otype) class OldClass: __doc__ = DocDescr() class NewClass(object): __doc__ = DocDescr() self.assertEqual(OldClass.__doc__, 'object=None; type=OldClass') self.assertEqual(OldClass().__doc__, 'object=OldClass instance; type=OldClass') self.assertEqual(NewClass.__doc__, 'object=None; type=NewClass') self.assertEqual(NewClass().__doc__, 'object=NewClass instance; type=NewClass') def test_set_class(self): # Testing __class__ assignment... class C(object): pass class D(object): pass class E(object): pass class F(D, E): pass for cls in C, D, E, F: for cls2 in C, D, E, F: x = cls() x.__class__ = cls2 self.assertIs(x.__class__, cls2) x.__class__ = cls self.assertIs(x.__class__, cls) def cant(x, C): try: x.__class__ = C except TypeError: pass else: self.fail("shouldn't allow %r.__class__ = %r" % (x, C)) try: delattr(x, "__class__") except (TypeError, AttributeError): pass else: self.fail("shouldn't allow del %r.__class__" % x) cant(C(), list) cant(list(), C) cant(C(), 1) cant(C(), object) cant(object(), list) cant(list(), object) class Int(int): __slots__ = [] cant(2, Int) cant(Int(), int) cant(True, int) cant(2, bool) o = object() cant(o, type(1)) cant(o, type(None)) del o class G(object): __slots__ = ["a", "b"] class H(object): __slots__ = ["b", "a"] try: unicode except NameError: class I(object): __slots__ = ["a", "b"] else: class I(object): __slots__ = [unicode("a"), unicode("b")] class J(object): __slots__ = ["c", "b"] class K(object): __slots__ = ["a", "b", "d"] class L(H): __slots__ = ["e"] class M(I): __slots__ = ["e"] class N(J): __slots__ = ["__weakref__"] class P(J): __slots__ = ["__dict__"] class Q(J): pass class R(J): __slots__ = ["__dict__", "__weakref__"] if test_support.check_impl_detail(pypy=False): lst = ((G, H), (G, I), (I, H), (Q, R), (R, Q)) else: # 
Not supported in pypy: changing the __class__ of an object # to another __class__ that just happens to have the same slots. # If needed, we can add the feature, but what we'll likely do # then is to allow mostly any __class__ assignment, even if the # classes have different __slots__, because we it's easier. lst = ((Q, R), (R, Q)) for cls, cls2 in lst: x = cls() x.a = 1 x.__class__ = cls2 self.assertIs(x.__class__, cls2, "assigning %r as __class__ for %r silently failed" % (cls2, x)) self.assertEqual(x.a, 1) x.__class__ = cls self.assertIs(x.__class__, cls, "assigning %r as __class__ for %r silently failed" % (cls, x)) self.assertEqual(x.a, 1) for cls in G, J, K, L, M, N, P, R, list, Int: for cls2 in G, J, K, L, M, N, P, R, list, Int: if cls is cls2: continue cant(cls(), cls2) # Issue5283: when __class__ changes in __del__, the wrong # type gets DECREF'd. class O(object): def __del__(self): pass class A(object): def __del__(self): self.__class__ = O l = [A() for x in range(100)] del l def test_set_dict(self): # Testing __dict__ assignment... class C(object): pass a = C() a.__dict__ = {'b': 1} self.assertEqual(a.b, 1) def cant(x, dict): try: x.__dict__ = dict except (AttributeError, TypeError): pass else: self.fail("shouldn't allow %r.__dict__ = %r" % (x, dict)) cant(a, None) cant(a, []) cant(a, 1) del a.__dict__ # Deleting __dict__ is allowed class Base(object): pass def verify_dict_readonly(x): """ x has to be an instance of a class inheriting from Base. 
""" cant(x, {}) try: del x.__dict__ except (AttributeError, TypeError): pass else: self.fail("shouldn't allow del %r.__dict__" % x) dict_descr = Base.__dict__["__dict__"] try: dict_descr.__set__(x, {}) except (AttributeError, TypeError): pass else: self.fail("dict_descr allowed access to %r's dict" % x) # Classes don't allow __dict__ assignment and have readonly dicts class Meta1(type, Base): pass class Meta2(Base, type): pass class D(object): __metaclass__ = Meta1 class E(object): __metaclass__ = Meta2 for cls in C, D, E: verify_dict_readonly(cls) class_dict = cls.__dict__ try: class_dict["spam"] = "eggs" except TypeError: pass else: if test_support.check_impl_detail(pypy=False): self.fail("%r's __dict__ can be modified" % cls) # Modules also disallow __dict__ assignment class Module1(types.ModuleType, Base): pass class Module2(Base, types.ModuleType): pass for ModuleType in Module1, Module2: mod = ModuleType("spam") verify_dict_readonly(mod) mod.__dict__["spam"] = "eggs" # Exception's __dict__ can be replaced, but not deleted # (at least not any more than regular exception's __dict__ can # be deleted; on CPython it is not the case, whereas on PyPy they # can, just like any other new-style instance's __dict__.) def can_delete_dict(e): try: del e.__dict__ except (TypeError, AttributeError): return False else: return True class Exception1(Exception, Base): pass class Exception2(Base, Exception): pass for ExceptionType in Exception, Exception1, Exception2: e = ExceptionType() e.__dict__ = {"a": 1} self.assertEqual(e.a, 1) self.assertEqual(can_delete_dict(e), can_delete_dict(ValueError())) def test_pickles(self): # Testing pickling and copying new-style classes and objects... 
import pickle, cPickle def sorteditems(d): L = d.items() L.sort() return L global C class C(object): def __init__(self, a, b): super(C, self).__init__() self.a = a self.b = b def __repr__(self): return "C(%r, %r)" % (self.a, self.b) global C1 class C1(list): def __new__(cls, a, b): return super(C1, cls).__new__(cls) def __getnewargs__(self): return (self.a, self.b) def __init__(self, a, b): self.a = a self.b = b def __repr__(self): return "C1(%r, %r)<%r>" % (self.a, self.b, list(self)) global C2 class C2(int): def __new__(cls, a, b, val=0): return super(C2, cls).__new__(cls, val) def __getnewargs__(self): return (self.a, self.b, int(self)) def __init__(self, a, b, val=0): self.a = a self.b = b def __repr__(self): return "C2(%r, %r)<%r>" % (self.a, self.b, int(self)) global C3 class C3(object): def __init__(self, foo): self.foo = foo def __getstate__(self): return self.foo def __setstate__(self, foo): self.foo = foo global C4classic, C4 class C4classic: # classic pass class C4(C4classic, object): # mixed inheritance pass for p in pickle, cPickle: for bin in 0, 1: for cls in C, C1, C2: s = p.dumps(cls, bin) cls2 = p.loads(s) self.assertIs(cls2, cls) a = C1(1, 2); a.append(42); a.append(24) b = C2("hello", "world", 42) s = p.dumps((a, b), bin) x, y = p.loads(s) self.assertEqual(x.__class__, a.__class__) self.assertEqual(sorteditems(x.__dict__), sorteditems(a.__dict__)) self.assertEqual(y.__class__, b.__class__) self.assertEqual(sorteditems(y.__dict__), sorteditems(b.__dict__)) self.assertEqual(repr(x), repr(a)) self.assertEqual(repr(y), repr(b)) # Test for __getstate__ and __setstate__ on new style class u = C3(42) s = p.dumps(u, bin) v = p.loads(s) self.assertEqual(u.__class__, v.__class__) self.assertEqual(u.foo, v.foo) # Test for picklability of hybrid class u = C4() u.foo = 42 s = p.dumps(u, bin) v = p.loads(s) self.assertEqual(u.__class__, v.__class__) self.assertEqual(u.foo, v.foo) # Testing copy.deepcopy() import copy for cls in C, C1, C2: cls2 = 
copy.deepcopy(cls) self.assertIs(cls2, cls) a = C1(1, 2); a.append(42); a.append(24) b = C2("hello", "world", 42) x, y = copy.deepcopy((a, b)) self.assertEqual(x.__class__, a.__class__) self.assertEqual(sorteditems(x.__dict__), sorteditems(a.__dict__)) self.assertEqual(y.__class__, b.__class__) self.assertEqual(sorteditems(y.__dict__), sorteditems(b.__dict__)) self.assertEqual(repr(x), repr(a)) self.assertEqual(repr(y), repr(b)) def test_pickle_slots(self): # Testing pickling of classes with __slots__ ... import pickle, cPickle # Pickling of classes with __slots__ but without __getstate__ should fail global B, C, D, E class B(object): pass for base in [object, B]: class C(base): __slots__ = ['a'] class D(C): pass try: pickle.dumps(C()) except TypeError: pass else: self.fail("should fail: pickle C instance - %s" % base) try: cPickle.dumps(C()) except TypeError: pass else: self.fail("should fail: cPickle C instance - %s" % base) try: pickle.dumps(C()) except TypeError: pass else: self.fail("should fail: pickle D instance - %s" % base) try: cPickle.dumps(D()) except TypeError: pass else: self.fail("should fail: cPickle D instance - %s" % base) # Give C a nice generic __getstate__ and __setstate__ class C(base): __slots__ = ['a'] def __getstate__(self): try: d = self.__dict__.copy() except AttributeError: d = {} for cls in self.__class__.__mro__: for sn in cls.__dict__.get('__slots__', ()): try: d[sn] = getattr(self, sn) except AttributeError: pass return d def __setstate__(self, d): for k, v in d.items(): setattr(self, k, v) class D(C): pass # Now it should work x = C() y = pickle.loads(pickle.dumps(x)) self.assertNotHasAttr(y, 'a') y = cPickle.loads(cPickle.dumps(x)) self.assertNotHasAttr(y, 'a') x.a = 42 y = pickle.loads(pickle.dumps(x)) self.assertEqual(y.a, 42) y = cPickle.loads(cPickle.dumps(x)) self.assertEqual(y.a, 42) x = D() x.a = 42 x.b = 100 y = pickle.loads(pickle.dumps(x)) self.assertEqual(y.a + y.b, 142) y = cPickle.loads(cPickle.dumps(x)) 
self.assertEqual(y.a + y.b, 142) # A subclass that adds a slot should also work class E(C): __slots__ = ['b'] x = E() x.a = 42 x.b = "foo" y = pickle.loads(pickle.dumps(x)) self.assertEqual(y.a, x.a) self.assertEqual(y.b, x.b) y = cPickle.loads(cPickle.dumps(x)) self.assertEqual(y.a, x.a) self.assertEqual(y.b, x.b) def test_binary_operator_override(self): # Testing overrides of binary operations... class I(int): def __repr__(self): return "I(%r)" % int(self) def __add__(self, other): return I(int(self) + int(other)) __radd__ = __add__ def __pow__(self, other, mod=None): if mod is None: return I(pow(int(self), int(other))) else: return I(pow(int(self), int(other), int(mod))) def __rpow__(self, other, mod=None): if mod is None: return I(pow(int(other), int(self), mod)) else: return I(pow(int(other), int(self), int(mod))) self.assertEqual(repr(I(1) + I(2)), "I(3)") self.assertEqual(repr(I(1) + 2), "I(3)") self.assertEqual(repr(1 + I(2)), "I(3)") self.assertEqual(repr(I(2) ** I(3)), "I(8)") self.assertEqual(repr(2 ** I(3)), "I(8)") self.assertEqual(repr(I(2) ** 3), "I(8)") self.assertEqual(repr(pow(I(2), I(3), I(5))), "I(3)") class S(str): def __eq__(self, other): return self.lower() == other.lower() __hash__ = None # Silence Py3k warning def test_subclass_propagation(self): # Testing propagation of slot functions to subclasses... 
class A(object): pass class B(A): pass class C(A): pass class D(B, C): pass d = D() orig_hash = hash(d) # related to id(d) in platform-dependent ways A.__hash__ = lambda self: 42 self.assertEqual(hash(d), 42) C.__hash__ = lambda self: 314 self.assertEqual(hash(d), 314) B.__hash__ = lambda self: 144 self.assertEqual(hash(d), 144) D.__hash__ = lambda self: 100 self.assertEqual(hash(d), 100) D.__hash__ = None self.assertRaises(TypeError, hash, d) del D.__hash__ self.assertEqual(hash(d), 144) B.__hash__ = None self.assertRaises(TypeError, hash, d) del B.__hash__ self.assertEqual(hash(d), 314) C.__hash__ = None self.assertRaises(TypeError, hash, d) del C.__hash__ self.assertEqual(hash(d), 42) A.__hash__ = None self.assertRaises(TypeError, hash, d) del A.__hash__ self.assertEqual(hash(d), orig_hash) d.foo = 42 d.bar = 42 self.assertEqual(d.foo, 42) self.assertEqual(d.bar, 42) def __getattribute__(self, name): if name == "foo": return 24 return object.__getattribute__(self, name) A.__getattribute__ = __getattribute__ self.assertEqual(d.foo, 24) self.assertEqual(d.bar, 42) def __getattr__(self, name): if name in ("spam", "foo", "bar"): return "hello" raise AttributeError, name B.__getattr__ = __getattr__ self.assertEqual(d.spam, "hello") self.assertEqual(d.foo, 24) self.assertEqual(d.bar, 42) del A.__getattribute__ self.assertEqual(d.foo, 42) del d.foo self.assertEqual(d.foo, "hello") self.assertEqual(d.bar, 42) del B.__getattr__ try: d.foo except AttributeError: pass else: self.fail("d.foo should be undefined now") # Test a nasty bug in recurse_down_subclasses() class A(object): pass class B(A): pass del B test_support.gc_collect() A.__setitem__ = lambda *a: None # crash def test_buffer_inheritance(self): # Testing that buffer interface is inherited ... import binascii # SF bug [#470040] ParseTuple t# vs subclasses. class MyStr(str): pass base = 'abc' m = MyStr(base) # b2a_hex uses the buffer interface to get its argument's value, via # PyArg_ParseTuple 't#' code. 
self.assertEqual(binascii.b2a_hex(m), binascii.b2a_hex(base)) # It's not clear that unicode will continue to support the character # buffer interface, and this test will fail if that's taken away. class MyUni(unicode): pass base = u'abc' m = MyUni(base) self.assertEqual(binascii.b2a_hex(m), binascii.b2a_hex(base)) class MyInt(int): pass m = MyInt(42) try: binascii.b2a_hex(m) self.fail('subclass of int should not have a buffer interface') except TypeError: pass def test_str_of_str_subclass(self): # Testing __str__ defined in subclass of str ... import binascii import cStringIO class octetstring(str): def __str__(self): return binascii.b2a_hex(self) def __repr__(self): return self + " repr" o = octetstring('A') self.assertEqual(type(o), octetstring) self.assertEqual(type(str(o)), str) self.assertEqual(type(repr(o)), str) self.assertEqual(ord(o), 0x41) self.assertEqual(str(o), '41') self.assertEqual(repr(o), 'A repr') self.assertEqual(o.__str__(), '41') self.assertEqual(o.__repr__(), 'A repr') capture = cStringIO.StringIO() # Calling str() or not exercises different internal paths. print >> capture, o print >> capture, str(o) self.assertEqual(capture.getvalue(), '41\n41\n') capture.close() def test_keyword_arguments(self): # Testing keyword arguments to __init__, __call__... def f(a): return a self.assertEqual(f.__call__(a=42), 42) a = [] list.__init__(a, sequence=[0, 1, 2]) self.assertEqual(a, [0, 1, 2]) @unittest.skipIf(test_support.check_impl_detail(pypy=True) and sys.platform == 'win32', "XXX: https://bugs.pypy.org/issue1461") def test_recursive_call(self): # Testing recursive __call__() by setting to instance of class... class A(object): pass A.__call__ = A() try: A()() except RuntimeError: pass else: self.fail("Recursion limit should have been reached for __call__()") def test_delete_hook(self): # Testing __del__ hook... 
log = [] class C(object): def __del__(self): log.append(1) c = C() self.assertEqual(log, []) del c test_support.gc_collect() self.assertEqual(log, [1]) class D(object): pass d = D() try: del d[0] except TypeError: pass else: self.fail("invalid del() didn't raise TypeError") def test_hash_inheritance(self): # Testing hash of mutable subclasses... class mydict(dict): pass d = mydict() try: hash(d) except TypeError: pass else: self.fail("hash() of dict subclass should fail") class mylist(list): pass d = mylist() try: hash(d) except TypeError: pass else: self.fail("hash() of list subclass should fail") def test_str_operations(self): try: 'a' + 5 except TypeError: pass else: self.fail("'' + 5 doesn't raise TypeError") try: ''.split('') except ValueError: pass else: self.fail("''.split('') doesn't raise ValueError") try: ''.join([0]) except TypeError: pass else: self.fail("''.join([0]) doesn't raise TypeError") try: ''.rindex('5') except ValueError: pass else: self.fail("''.rindex('5') doesn't raise ValueError") try: '%(n)s' % None except TypeError: pass else: self.fail("'%(n)s' % None doesn't raise TypeError") try: '%(n' % {} except ValueError: pass else: self.fail("'%(n' % {} '' doesn't raise ValueError") try: '%*s' % ('abc') except TypeError: pass else: self.fail("'%*s' % ('abc') doesn't raise TypeError") try: '%*.*s' % ('abc', 5) except TypeError: pass else: self.fail("'%*.*s' % ('abc', 5) doesn't raise TypeError") try: '%s' % (1, 2) except TypeError: pass else: self.fail("'%s' % (1, 2) doesn't raise TypeError") try: '%' % None except ValueError: pass else: self.fail("'%' % None doesn't raise ValueError") self.assertEqual('534253'.isdigit(), 1) self.assertEqual('534253x'.isdigit(), 0) self.assertEqual('%c' % 5, '\x05') self.assertEqual('%c' % '5', '5') def test_deepcopy_recursive(self): # Testing deepcopy of recursive objects... 
class Node: pass a = Node() b = Node() a.b = b b.a = a z = deepcopy(a) # This blew up before def test_unintialized_modules(self): # Testing uninitialized module objects... from types import ModuleType as M m = M.__new__(M) str(m) self.assertNotHasAttr(m, "__name__") self.assertNotHasAttr(m, "__file__") self.assertNotHasAttr(m, "foo") self.assertFalse(m.__dict__) # None or {} are both reasonable answers m.foo = 1 self.assertEqual(m.__dict__, {"foo": 1}) def test_funny_new(self): # Testing __new__ returning something unexpected... class C(object): def __new__(cls, arg): if isinstance(arg, str): return [1, 2, 3] elif isinstance(arg, int): return object.__new__(D) else: return object.__new__(cls) class D(C): def __init__(self, arg): self.foo = arg self.assertEqual(C("1"), [1, 2, 3]) self.assertEqual(D("1"), [1, 2, 3]) d = D(None) self.assertEqual(d.foo, None) d = C(1) self.assertEqual(isinstance(d, D), True) self.assertEqual(d.foo, 1) d = D(1) self.assertEqual(isinstance(d, D), True) self.assertEqual(d.foo, 1) def test_imul_bug(self): # Testing for __imul__ problems... # SF bug 544647 class C(object): def __imul__(self, other): return (self, other) x = C() y = x y *= 1.0 self.assertEqual(y, (x, 1.0)) y = x y *= 2 self.assertEqual(y, (x, 2)) y = x y *= 3L self.assertEqual(y, (x, 3L)) y = x y *= 1L<<100 self.assertEqual(y, (x, 1L<<100)) y = x y *= None self.assertEqual(y, (x, None)) y = x y *= "foo" self.assertEqual(y, (x, "foo")) def test_copy_setstate(self): # Testing that copy.*copy() correctly uses __setstate__... 
import copy class C(object): def __init__(self, foo=None): self.foo = foo self.__foo = foo def setfoo(self, foo=None): self.foo = foo def getfoo(self): return self.__foo def __getstate__(self): return [self.foo] def __setstate__(self_, lst): self.assertEqual(len(lst), 1) self_.__foo = self_.foo = lst[0] a = C(42) a.setfoo(24) self.assertEqual(a.foo, 24) self.assertEqual(a.getfoo(), 42) b = copy.copy(a) self.assertEqual(b.foo, 24) self.assertEqual(b.getfoo(), 24) b = copy.deepcopy(a) self.assertEqual(b.foo, 24) self.assertEqual(b.getfoo(), 24) def test_slices(self): # Testing cases with slices and overridden __getitem__ ... # Strings self.assertEqual("hello"[:4], "hell") self.assertEqual("hello"[slice(4)], "hell") self.assertEqual(str.__getitem__("hello", slice(4)), "hell") class S(str): def __getitem__(self, x): return str.__getitem__(self, x) self.assertEqual(S("hello")[:4], "hell") self.assertEqual(S("hello")[slice(4)], "hell") self.assertEqual(S("hello").__getitem__(slice(4)), "hell") # Tuples self.assertEqual((1,2,3)[:2], (1,2)) self.assertEqual((1,2,3)[slice(2)], (1,2)) self.assertEqual(tuple.__getitem__((1,2,3), slice(2)), (1,2)) class T(tuple): def __getitem__(self, x): return tuple.__getitem__(self, x) self.assertEqual(T((1,2,3))[:2], (1,2)) self.assertEqual(T((1,2,3))[slice(2)], (1,2)) self.assertEqual(T((1,2,3)).__getitem__(slice(2)), (1,2)) # Lists self.assertEqual([1,2,3][:2], [1,2]) self.assertEqual([1,2,3][slice(2)], [1,2]) self.assertEqual(list.__getitem__([1,2,3], slice(2)), [1,2]) class L(list): def __getitem__(self, x): return list.__getitem__(self, x) self.assertEqual(L([1,2,3])[:2], [1,2]) self.assertEqual(L([1,2,3])[slice(2)], [1,2]) self.assertEqual(L([1,2,3]).__getitem__(slice(2)), [1,2]) # Now do lists and __setitem__ a = L([1,2,3]) a[slice(1, 3)] = [3,2] self.assertEqual(a, [1,3,2]) a[slice(0, 2, 1)] = [3,1] self.assertEqual(a, [3,1,2]) a.__setitem__(slice(1, 3), [2,1]) self.assertEqual(a, [3,2,1]) a.__setitem__(slice(0, 2, 1), [2,3]) 
self.assertEqual(a, [2,3,1]) def test_subtype_resurrection(self): # Testing resurrection of new-style instance... class C(object): container = [] def __del__(self): # resurrect the instance C.container.append(self) c = C() c.attr = 42 # The most interesting thing here is whether this blows up, due to # flawed GC tracking logic in typeobject.c's call_finalizer() (a 2.2.1 # bug). del c # If that didn't blow up, it's also interesting to see whether clearing # the last container slot works: that will attempt to delete c again, # which will cause c to get appended back to the container again # "during" the del. (On non-CPython implementations, however, __del__ # is typically not called again.) test_support.gc_collect() self.assertEqual(len(C.container), 1) del C.container[-1] if test_support.check_impl_detail(): test_support.gc_collect() self.assertEqual(len(C.container), 1) self.assertEqual(C.container[-1].attr, 42) # Make c mortal again, so that the test framework with -l doesn't report # it as a leak. del C.__del__ def test_slots_trash(self): # Testing slot trash... # Deallocating deeply nested slotted trash caused stack overflows class trash(object): __slots__ = ['x'] def __init__(self, x): self.x = x o = None for i in xrange(50000): o = trash(o) del o def test_slots_multiple_inheritance(self): # SF bug 575229, multiple inheritance w/ slots dumps core class A(object): __slots__=() class B(object): pass class C(A,B) : __slots__=() if test_support.check_impl_detail(): self.assertEqual(C.__basicsize__, B.__basicsize__) self.assertHasAttr(C, '__dict__') self.assertHasAttr(C, '__weakref__') C().x = 2 def test_rmul(self): # Testing correct invocation of __rmul__... # SF patch 592646 class C(object): def __mul__(self, other): return "mul" def __rmul__(self, other): return "rmul" a = C() self.assertEqual(a*2, "mul") self.assertEqual(a*2.2, "mul") self.assertEqual(2*a, "rmul") self.assertEqual(2.2*a, "rmul") def test_ipow(self): # Testing correct invocation of __ipow__... 
# [SF bug 620179] class C(object): def __ipow__(self, other): pass a = C() a **= 2 def test_mutable_bases(self): # Testing mutable bases... # stuff that should work: class C(object): pass class C2(object): def __getattribute__(self, attr): if attr == 'a': return 2 else: return super(C2, self).__getattribute__(attr) def meth(self): return 1 class D(C): pass class E(D): pass d = D() e = E() D.__bases__ = (C,) D.__bases__ = (C2,) self.assertEqual(d.meth(), 1) self.assertEqual(e.meth(), 1) self.assertEqual(d.a, 2) self.assertEqual(e.a, 2) self.assertEqual(C2.__subclasses__(), [D]) try: del D.__bases__ except (TypeError, AttributeError): pass else: self.fail("shouldn't be able to delete .__bases__") try: D.__bases__ = () except TypeError, msg: if str(msg) == "a new-style class can't have only classic bases": self.fail("wrong error message for .__bases__ = ()") else: self.fail("shouldn't be able to set .__bases__ to ()") try: D.__bases__ = (D,) except TypeError: pass else: # actually, we'll have crashed by here... self.fail("shouldn't be able to create inheritance cycles") try: D.__bases__ = (C, C) except TypeError: pass else: self.fail("didn't detect repeated base classes") try: D.__bases__ = (E,) except TypeError: pass else: self.fail("shouldn't be able to create inheritance cycles") # let's throw a classic class into the mix: class Classic: def meth2(self): return 3 D.__bases__ = (C, Classic) self.assertEqual(d.meth2(), 3) self.assertEqual(e.meth2(), 3) try: d.a except AttributeError: pass else: self.fail("attribute should have vanished") try: D.__bases__ = (Classic,) except TypeError: pass else: self.fail("new-style class must have a new-style base") def test_builtin_bases(self): # Make sure all the builtin types can have their base queried without # segfaulting. See issue #5787. 
builtin_types = [tp for tp in __builtin__.__dict__.itervalues() if isinstance(tp, type)] for tp in builtin_types: object.__getattribute__(tp, "__bases__") if tp is not object: self.assertEqual(len(tp.__bases__), 1, tp) class L(list): pass class C(object): pass class D(C): pass try: L.__bases__ = (dict,) except TypeError: pass else: self.fail("shouldn't turn list subclass into dict subclass") try: list.__bases__ = (dict,) except TypeError: pass else: self.fail("shouldn't be able to assign to list.__bases__") try: D.__bases__ = (C, list) except TypeError: pass else: assert 0, "best_base calculation found wanting" def test_mutable_bases_with_failing_mro(self): # Testing mutable bases with failing mro... class WorkOnce(type): def __new__(self, name, bases, ns): self.flag = 0 return super(WorkOnce, self).__new__(WorkOnce, name, bases, ns) def mro(self): if self.flag > 0: raise RuntimeError, "bozo" else: self.flag += 1 return type.mro(self) class WorkAlways(type): def mro(self): # this is here to make sure that .mro()s aren't called # with an exception set (which was possible at one point). # An error message will be printed in a debug build. # What's a good way to test for this? return type.mro(self) class C(object): pass class C2(object): pass class D(C): pass class E(D): pass class F(D): __metaclass__ = WorkOnce class G(D): __metaclass__ = WorkAlways # Immediate subclasses have their mro's adjusted in alphabetical # order, so E's will get adjusted before adjusting F's fails. We # check here that E's gets restored. E_mro_before = E.__mro__ D_mro_before = D.__mro__ try: D.__bases__ = (C2,) except RuntimeError: self.assertEqual(E.__mro__, E_mro_before) self.assertEqual(D.__mro__, D_mro_before) else: self.fail("exception not propagated") def test_mutable_bases_catch_mro_conflict(self): # Testing mutable bases catch mro conflict... 
class A(object): pass class B(object): pass class C(A, B): pass class D(A, B): pass class E(C, D): pass try: C.__bases__ = (B, A) except TypeError: pass else: self.fail("didn't catch MRO conflict") def test_mutable_names(self): # Testing mutable names... class C(object): pass # C.__module__ could be 'test_descr' or '__main__' mod = C.__module__ C.__name__ = 'D' self.assertEqual((C.__module__, C.__name__), (mod, 'D')) C.__name__ = 'D.E' self.assertEqual((C.__module__, C.__name__), (mod, 'D.E')) def test_evil_type_name(self): # A badly placed Py_DECREF in type_set_name led to arbitrary code # execution while the type structure was not in a sane state, and a # possible segmentation fault as a result. See bug #16447. class Nasty(str): def __del__(self): C.__name__ = "other" class C(object): pass C.__name__ = Nasty("abc") C.__name__ = "normal" def test_subclass_right_op(self): # Testing correct dispatch of subclass overloading __r<op>__... # This code tests various cases where right-dispatch of a subclass # should be preferred over left-dispatch of a base class. 
# Case 1: subclass of int; this tests code in abstract.c::binary_op1() class B(int): def __floordiv__(self, other): return "B.__floordiv__" def __rfloordiv__(self, other): return "B.__rfloordiv__" self.assertEqual(B(1) // 1, "B.__floordiv__") self.assertEqual(1 // B(1), "B.__rfloordiv__") # Case 2: subclass of object; this is just the baseline for case 3 class C(object): def __floordiv__(self, other): return "C.__floordiv__" def __rfloordiv__(self, other): return "C.__rfloordiv__" self.assertEqual(C() // 1, "C.__floordiv__") self.assertEqual(1 // C(), "C.__rfloordiv__") # Case 3: subclass of new-style class; here it gets interesting class D(C): def __floordiv__(self, other): return "D.__floordiv__" def __rfloordiv__(self, other): return "D.__rfloordiv__" self.assertEqual(D() // C(), "D.__floordiv__") self.assertEqual(C() // D(), "D.__rfloordiv__") # Case 4: this didn't work right in 2.2.2 and 2.3a1 class E(C): pass self.assertEqual(E.__rfloordiv__, C.__rfloordiv__) self.assertEqual(E() // 1, "C.__floordiv__") self.assertEqual(1 // E(), "C.__rfloordiv__") self.assertEqual(E() // C(), "C.__floordiv__") self.assertEqual(C() // E(), "C.__floordiv__") # This one would fail @test_support.impl_detail("testing an internal kind of method object") def test_meth_class_get(self): # Testing __get__ method of METH_CLASS C methods... 
# Full coverage of descrobject.c::classmethod_get() # Baseline arg = [1, 2, 3] res = {1: None, 2: None, 3: None} self.assertEqual(dict.fromkeys(arg), res) self.assertEqual({}.fromkeys(arg), res) # Now get the descriptor descr = dict.__dict__["fromkeys"] # More baseline using the descriptor directly self.assertEqual(descr.__get__(None, dict)(arg), res) self.assertEqual(descr.__get__({})(arg), res) # Now check various error cases try: descr.__get__(None, None) except TypeError: pass else: self.fail("shouldn't have allowed descr.__get__(None, None)") try: descr.__get__(42) except TypeError: pass else: self.fail("shouldn't have allowed descr.__get__(42)") try: descr.__get__(None, 42) except TypeError: pass else: self.fail("shouldn't have allowed descr.__get__(None, 42)") try: descr.__get__(None, int) except TypeError: pass else: self.fail("shouldn't have allowed descr.__get__(None, int)") def test_isinst_isclass(self): # Testing proxy isinstance() and isclass()... class Proxy(object): def __init__(self, obj): self.__obj = obj def __getattribute__(self, name): if name.startswith("_Proxy__"): return object.__getattribute__(self, name) else: return getattr(self.__obj, name) # Test with a classic class class C: pass a = C() pa = Proxy(a) self.assertIsInstance(a, C) # Baseline self.assertIsInstance(pa, C) # Test # Test with a classic subclass class D(C): pass a = D() pa = Proxy(a) self.assertIsInstance(a, C) # Baseline self.assertIsInstance(pa, C) # Test # Test with a new-style class class C(object): pass a = C() pa = Proxy(a) self.assertIsInstance(a, C) # Baseline self.assertIsInstance(pa, C) # Test # Test with a new-style subclass class D(C): pass a = D() pa = Proxy(a) self.assertIsInstance(a, C) # Baseline self.assertIsInstance(pa, C) # Test def test_proxy_super(self): # Testing super() for a proxy object... 
class Proxy(object): def __init__(self, obj): self.__obj = obj def __getattribute__(self, name): if name.startswith("_Proxy__"): return object.__getattribute__(self, name) else: return getattr(self.__obj, name) class B(object): def f(self): return "B.f" class C(B): def f(self): return super(C, self).f() + "->C.f" obj = C() p = Proxy(obj) self.assertEqual(C.__dict__["f"](p), "B.f->C.f") def test_carloverre(self): # Testing prohibition of Carlo Verre's hack... try: object.__setattr__(str, "foo", 42) except TypeError: pass else: self.fail("Carlo Verre __setattr__ succeeded!") try: object.__delattr__(str, "lower") except TypeError: pass else: self.fail("Carlo Verre __delattr__ succeeded!") def test_weakref_segfault(self): # Testing weakref segfault... # SF 742911 import weakref class Provoker: def __init__(self, referrent): self.ref = weakref.ref(referrent) def __del__(self): x = self.ref() class Oops(object): pass o = Oops() o.whatever = Provoker(o) del o def test_wrapper_segfault(self): # SF 927248: deeply nested wrappers could cause stack overflow f = lambda:None for i in xrange(1000000): f = f.__call__ f = None def test_file_fault(self): # Testing sys.stdout is changed in getattr... test_stdout = sys.stdout class StdoutGuard: def __getattr__(self, attr): sys.stdout = sys.__stdout__ raise RuntimeError("Premature access to sys.stdout.%s" % attr) sys.stdout = StdoutGuard() try: print "Oops!" except RuntimeError: pass finally: sys.stdout = test_stdout def test_vicious_descriptor_nonsense(self): # Testing vicious_descriptor_nonsense... # A potential segfault spotted by Thomas Wouters in mail to # python-dev 2003-04-17, turned into an example & fixed by Michael # Hudson just less than four months later... 
class Evil(object): def __hash__(self): return hash('attr') def __eq__(self, other): del C.attr return 0 class Descr(object): def __get__(self, ob, type=None): return 1 class C(object): attr = Descr() c = C() c.__dict__[Evil()] = 0 self.assertEqual(c.attr, 1) # this makes a crash more likely: test_support.gc_collect() self.assertNotHasAttr(c, 'attr') def test_init(self): # SF 1155938 class Foo(object): def __init__(self): return 10 try: Foo() except TypeError: pass else: self.fail("did not test __init__() for None return") def test_method_wrapper(self): # Testing method-wrapper objects... # <type 'method-wrapper'> did not support any reflection before 2.5 l = [] self.assertEqual(l.__add__, l.__add__) self.assertEqual(l.__add__, [].__add__) self.assertNotEqual(l.__add__, [5].__add__) self.assertNotEqual(l.__add__, l.__mul__) self.assertEqual(l.__add__.__name__, '__add__') if hasattr(l.__add__, '__objclass__'): # CPython self.assertIs(l.__add__.__self__, l) self.assertIs(l.__add__.__objclass__, list) else: # Python implementations where [].__add__ is a normal bound method self.assertIs(l.__add__.im_self, l) self.assertIs(l.__add__.im_class, list) self.assertEqual(l.__add__.__doc__, list.__add__.__doc__) try: hash(l.__add__) except TypeError: pass else: self.fail("no TypeError from hash([].__add__)") t = () t += (7,) self.assertEqual(t.__add__, (7,).__add__) self.assertEqual(hash(t.__add__), hash((7,).__add__)) def test_not_implemented(self): # Testing NotImplemented... 
# all binary methods should be able to return a NotImplemented import operator def specialmethod(self, other): return NotImplemented def check(expr, x, y): try: exec expr in {'x': x, 'y': y, 'operator': operator} except TypeError: pass else: self.fail("no TypeError from %r" % (expr,)) N1 = sys.maxint + 1L # might trigger OverflowErrors instead of # TypeErrors N2 = sys.maxint # if sizeof(int) < sizeof(long), might trigger # ValueErrors instead of TypeErrors for metaclass in [type, types.ClassType]: for name, expr, iexpr in [ ('__add__', 'x + y', 'x += y'), ('__sub__', 'x - y', 'x -= y'), ('__mul__', 'x * y', 'x *= y'), ('__truediv__', 'operator.truediv(x, y)', None), ('__floordiv__', 'operator.floordiv(x, y)', None), ('__div__', 'x / y', 'x /= y'), ('__mod__', 'x % y', 'x %= y'), ('__divmod__', 'divmod(x, y)', None), ('__pow__', 'x ** y', 'x **= y'), ('__lshift__', 'x << y', 'x <<= y'), ('__rshift__', 'x >> y', 'x >>= y'), ('__and__', 'x & y', 'x &= y'), ('__or__', 'x | y', 'x |= y'), ('__xor__', 'x ^ y', 'x ^= y'), ('__coerce__', 'coerce(x, y)', None)]: if name == '__coerce__': rname = name else: rname = '__r' + name[2:] A = metaclass('A', (), {name: specialmethod}) B = metaclass('B', (), {rname: specialmethod}) a = A() b = B() check(expr, a, a) check(expr, a, b) check(expr, b, a) check(expr, b, b) check(expr, a, N1) check(expr, a, N2) check(expr, N1, b) check(expr, N2, b) if iexpr: check(iexpr, a, a) check(iexpr, a, b) check(iexpr, b, a) check(iexpr, b, b) check(iexpr, a, N1) check(iexpr, a, N2) iname = '__i' + name[2:] C = metaclass('C', (), {iname: specialmethod}) c = C() check(iexpr, c, a) check(iexpr, c, b) check(iexpr, c, N1) check(iexpr, c, N2) def test_assign_slice(self): # ceval.c's assign_slice used to check for # tp->tp_as_sequence->sq_slice instead of # tp->tp_as_sequence->sq_ass_slice class C(object): def __setslice__(self, start, stop, value): self.value = value c = C() c[1:2] = 3 self.assertEqual(c.value, 3) def test_set_and_no_get(self): # See # 
http://mail.python.org/pipermail/python-dev/2010-January/095637.html class Descr(object): def __init__(self, name): self.name = name def __set__(self, obj, value): obj.__dict__[self.name] = value descr = Descr("a") class X(object): a = descr x = X() self.assertIs(x.a, descr) x.a = 42 self.assertEqual(x.a, 42) # Also check type_getattro for correctness. class Meta(type): pass class X(object): __metaclass__ = Meta X.a = 42 Meta.a = Descr("a") self.assertEqual(X.a, 42) def test_getattr_hooks(self): # issue 4230 class Descriptor(object): counter = 0 def __get__(self, obj, objtype=None): def getter(name): self.counter += 1 raise AttributeError(name) return getter descr = Descriptor() class A(object): __getattribute__ = descr class B(object): __getattr__ = descr class C(object): __getattribute__ = descr __getattr__ = descr self.assertRaises(AttributeError, getattr, A(), "attr") self.assertEqual(descr.counter, 1) self.assertRaises(AttributeError, getattr, B(), "attr") self.assertEqual(descr.counter, 2) self.assertRaises(AttributeError, getattr, C(), "attr") self.assertEqual(descr.counter, 4) class EvilGetattribute(object): # This used to segfault def __getattr__(self, name): raise AttributeError(name) def __getattribute__(self, name): del EvilGetattribute.__getattr__ for i in range(5): gc.collect() raise AttributeError(name) self.assertRaises(AttributeError, getattr, EvilGetattribute(), "attr") def test_type___getattribute__(self): self.assertRaises(TypeError, type.__getattribute__, list, type) def test_abstractmethods(self): # type pretends not to have __abstractmethods__. 
self.assertRaises(AttributeError, getattr, type, "__abstractmethods__") class meta(type): pass self.assertRaises(AttributeError, getattr, meta, "__abstractmethods__") class X(object): pass with self.assertRaises(AttributeError): del X.__abstractmethods__ def test_proxy_call(self): class FakeStr(object): __class__ = str fake_str = FakeStr() # isinstance() reads __class__ on new style classes self.assertIsInstance(fake_str, str) # call a method descriptor with self.assertRaises(TypeError): str.split(fake_str) # call a slot wrapper descriptor try: r = str.__add__(fake_str, "abc") except TypeError: pass else: self.assertEqual(r, NotImplemented) def test_repr_as_str(self): # Issue #11603: crash or infinite loop when rebinding __str__ as # __repr__. class Foo(object): pass Foo.__repr__ = Foo.__str__ foo = Foo() self.assertRaises(RuntimeError, str, foo) self.assertRaises(RuntimeError, repr, foo) def test_mixing_slot_wrappers(self): class X(dict): __setattr__ = dict.__setitem__ x = X() x.y = 42 self.assertEqual(x["y"], 42) def test_cycle_through_dict(self): # See bug #1469629 class X(dict): def __init__(self): dict.__init__(self) self.__dict__ = self x = X() x.attr = 42 wr = weakref.ref(x) del x test_support.gc_collect() self.assertIsNone(wr()) for o in gc.get_objects(): self.assertIsNot(type(o), X) class DictProxyTests(unittest.TestCase): def setUp(self): class C(object): def meth(self): pass self.C = C def test_repr(self): if test_support.check_impl_detail(): self.assertIn('dict_proxy({', repr(vars(self.C))) self.assertIn("'meth':", repr(vars(self.C))) def test_iter_keys(self): # Testing dict-proxy iterkeys... keys = [ key for key in self.C.__dict__.iterkeys() ] keys.sort() self.assertEqual(keys, ['__dict__', '__doc__', '__module__', '__weakref__', 'meth']) def test_iter_values(self): # Testing dict-proxy itervalues... 
values = [ values for values in self.C.__dict__.itervalues() ] self.assertEqual(len(values), 5) def test_iter_items(self): # Testing dict-proxy iteritems... keys = [ key for (key, value) in self.C.__dict__.iteritems() ] keys.sort() self.assertEqual(keys, ['__dict__', '__doc__', '__module__', '__weakref__', 'meth']) def test_dict_type_with_metaclass(self): # Testing type of __dict__ when __metaclass__ set... class B(object): pass class M(type): pass class C: # In 2.3a1, C.__dict__ was a real dict rather than a dict proxy __metaclass__ = M self.assertEqual(type(C.__dict__), type(B.__dict__)) class PTypesLongInitTest(unittest.TestCase): # This is in its own TestCase so that it can be run before any other tests. def test_pytype_long_ready(self): # Testing SF bug 551412 ... # This dumps core when SF bug 551412 isn't fixed -- # but only when test_descr.py is run separately. # (That can't be helped -- as soon as PyType_Ready() # is called for PyLong_Type, the bug is gone.) class UserLong(object): def __pow__(self, *args): pass try: pow(0L, UserLong(), 0L) except: pass # Another segfault only when run early # (before PyType_Ready(tuple) is called) type.mro(tuple) def test_main(): deprecations = [(r'complex divmod\(\), // and % are deprecated$', DeprecationWarning)] if sys.py3kwarning: deprecations += [ ("classic (int|long) division", DeprecationWarning), ("coerce.. not supported", DeprecationWarning), (".+__(get|set|del)slice__ has been removed", DeprecationWarning)] with test_support.check_warnings(*deprecations): # Run all local test cases, with PTypesLongInitTest first. test_support.run_unittest(PTypesLongInitTest, OperatorsTest, ClassPropertiesAndMethods, DictProxyTests) if __name__ == "__main__": test_main()
{ "content_hash": "edc11bece937eba5f431176e62aec829", "timestamp": "", "source": "github", "line_count": 4770, "max_line_length": 92, "avg_line_length": 33.89412997903564, "alnum_prop": 0.47834853873511674, "repo_name": "shiblon/pytour", "id": "a6bb3197853a7fd0ebf202aa6370bb5dd6399fa0", "size": "161675", "binary": false, "copies": "8", "ref": "refs/heads/master", "path": "static/js/pypyjs/pypy-nojit.js-0.3.1/lib/modules/test/test_descr.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "195977" }, { "name": "HTML", "bytes": "2110262" }, { "name": "JavaScript", "bytes": "5106892" }, { "name": "Python", "bytes": "15081380" }, { "name": "Shell", "bytes": "1018" } ], "symlink_target": "" }
"""Handle the interface to GGRC models for all login methods. """ from ggrc import db, settings from ggrc.models.context import Context from ggrc.models.person import Person from ggrc.fulltext import get_indexer from ggrc.fulltext.recordbuilder import fts_record_for from ggrc.services.common import log_event from ggrc_basic_permissions import basic_roles from ggrc_basic_permissions.models import UserRole def _base_user_query(): from sqlalchemy import orm return Person.query.options( orm.undefer_group('Person_complete')) def find_user_by_id(id): """Find Person object by some ``id``. Note that ``id`` need not be Person().id, but should match the value returned by ``Person().get_id()``. """ return _base_user_query().filter(Person.id==int(id)).first() def find_user_by_email(email): return _base_user_query().filter(Person.email==email).first() def add_creator_role(user): user_creator_role = UserRole( person=user, role=basic_roles.creator(), ) db.session.add(user_creator_role) db.session.commit() log_event(db.session, user_creator_role, user_creator_role.id) def create_user(email, **kwargs): user = Person(email=email, **kwargs) db.session.add(user) db.session.flush() log_event(db.session, user, user.id) user_context = Context( name='Personal Context for {0}'.format(email), description='', related_object=user, context_id=1, ) db.session.add(user_context) db.session.commit() get_indexer().create_record(fts_record_for(user)) return user def find_or_create_user_by_email(email, **kwargs): user = find_user_by_email(email) if not user: user = create_user(email, **kwargs) authorized_domains = getattr(settings, "AUTHORIZED_DOMAINS", set()) # Email can have multiple @, but last one separates local and domain part user_domain = user.email.split("@")[-1] if user_domain in authorized_domains: add_creator_role(user) return user def get_next_url(request, default_url): if 'next' in request.args: next_url = request.args['next'] return next_url else: return default_url
{ "content_hash": "42ec6fe12c9b3f228e526e49291f5cf8", "timestamp": "", "source": "github", "line_count": 70, "max_line_length": 77, "avg_line_length": 30.357142857142858, "alnum_prop": 0.7025882352941176, "repo_name": "prasannav7/ggrc-core", "id": "082155335249ec6b65296e5600a607acd879959c", "size": "2363", "binary": false, "copies": "5", "ref": "refs/heads/develop", "path": "src/ggrc/login/common.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "167445" }, { "name": "Cucumber", "bytes": "139629" }, { "name": "HTML", "bytes": "1098331" }, { "name": "JavaScript", "bytes": "1447363" }, { "name": "Makefile", "bytes": "6225" }, { "name": "Mako", "bytes": "2559" }, { "name": "Python", "bytes": "2370461" }, { "name": "Shell", "bytes": "33089" } ], "symlink_target": "" }
import random def main(): rl = RandList(1, 2000) rl.gen_random_list() rl.print_block_list() class RandList(object): def __init__(self, first, second): self.first = first self.second = second self.populacao = [] def gen_random_list(self): self.populacao = range(self.first, self.second) del self.populacao[28] random.shuffle(self.populacao) def print_block_list(self): print "[", for i in range(25): for j in range(25): print "%3d," % self.populacao[i*15+j], print "]" if __name__ == "__main__": main()
{ "content_hash": "e927848115e260bfc837a2b2f960256e", "timestamp": "", "source": "github", "line_count": 29, "max_line_length": 55, "avg_line_length": 21.96551724137931, "alnum_prop": 0.5416012558869702, "repo_name": "tonussi/freezing-dubstep", "id": "0135525025d5a5fc9bcd2233aa01f32838a11315", "size": "655", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pratica-04/randlist.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "5403" }, { "name": "Haskell", "bytes": "21598" }, { "name": "Python", "bytes": "655" }, { "name": "Shell", "bytes": "648" } ], "symlink_target": "" }
from urlparse import urlparse from nose.tools import * # flake8: noqa from framework.auth.core import Auth from website.models import NodeLog from website.views import find_bookmark_collection from website.util import permissions from website.util.sanitize import strip_html from api.base.settings.defaults import API_BASE from tests.base import ApiTestCase, fake from tests.factories import ( NodeFactory, ProjectFactory, RegistrationFactory, AuthUserFactory, CollectionFactory, CommentFactory, ) from tests.utils import assert_logs, assert_not_logs class TestNodeDetail(ApiTestCase): def setUp(self): super(TestNodeDetail, self).setUp() self.user = AuthUserFactory() self.user_two = AuthUserFactory() self.public_project = ProjectFactory(title="Project One", is_public=True, creator=self.user) self.private_project = ProjectFactory(title="Project Two", is_public=False, creator=self.user) self.public_url = '/{}nodes/{}/'.format(API_BASE, self.public_project._id) self.private_url = '/{}nodes/{}/'.format(API_BASE, self.private_project._id) self.public_component = NodeFactory(parent=self.public_project, creator=self.user, is_public=True) self.public_component_url = '/{}nodes/{}/'.format(API_BASE, self.public_component._id) self.read_permissions = ['read'] self.write_permissions = ['read', 'write'] self.admin_permissions = ['read', 'admin', 'write'] def test_return_public_project_details_logged_out(self): res = self.app.get(self.public_url) assert_equal(res.status_code, 200) assert_equal(res.content_type, 'application/vnd.api+json') assert_equal(res.json['data']['attributes']['title'], self.public_project.title) assert_equal(res.json['data']['attributes']['description'], self.public_project.description) assert_equal(res.json['data']['attributes']['category'], self.public_project.category) assert_items_equal(res.json['data']['attributes']['current_user_permissions'], self.read_permissions) def test_return_public_project_details_contributor_logged_in(self): res = 
self.app.get(self.public_url, auth=self.user.auth) assert_equal(res.status_code, 200) assert_equal(res.content_type, 'application/vnd.api+json') assert_equal(res.json['data']['attributes']['title'], self.public_project.title) assert_equal(res.json['data']['attributes']['description'], self.public_project.description) assert_equal(res.json['data']['attributes']['category'], self.public_project.category) assert_items_equal(res.json['data']['attributes']['current_user_permissions'], self.admin_permissions) def test_return_public_project_details_non_contributor_logged_in(self): res = self.app.get(self.public_url, auth=self.user_two.auth) assert_equal(res.status_code, 200) assert_equal(res.content_type, 'application/vnd.api+json') assert_equal(res.json['data']['attributes']['title'], self.public_project.title) assert_equal(res.json['data']['attributes']['description'], self.public_project.description) assert_equal(res.json['data']['attributes']['category'], self.public_project.category) assert_items_equal(res.json['data']['attributes']['current_user_permissions'], self.read_permissions) def test_return_private_project_details_logged_out(self): res = self.app.get(self.private_url, expect_errors=True) assert_equal(res.status_code, 401) assert_in('detail', res.json['errors'][0]) def test_return_private_project_details_logged_in_admin_contributor(self): res = self.app.get(self.private_url, auth=self.user.auth) assert_equal(res.status_code, 200) assert_equal(res.content_type, 'application/vnd.api+json') assert_equal(res.json['data']['attributes']['title'], self.private_project.title) assert_equal(res.json['data']['attributes']['description'], self.private_project.description) assert_equal(res.json['data']['attributes']['category'], self.private_project.category) assert_items_equal(res.json['data']['attributes']['current_user_permissions'], self.admin_permissions) def test_return_private_project_details_logged_in_write_contributor(self): 
self.private_project.add_contributor(contributor=self.user_two, auth=Auth(self.user), save=True) res = self.app.get(self.private_url, auth=self.user_two.auth) assert_equal(res.status_code, 200) assert_equal(res.content_type, 'application/vnd.api+json') assert_equal(res.json['data']['attributes']['title'], self.private_project.title) assert_equal(res.json['data']['attributes']['description'], self.private_project.description) assert_equal(res.json['data']['attributes']['category'], self.private_project.category) assert_items_equal(res.json['data']['attributes']['current_user_permissions'], self.write_permissions) def test_return_private_project_details_logged_in_non_contributor(self): res = self.app.get(self.private_url, auth=self.user_two.auth, expect_errors=True) assert_equal(res.status_code, 403) assert_in('detail', res.json['errors'][0]) def test_top_level_project_has_no_parent(self): res = self.app.get(self.public_url) assert_equal(res.status_code, 200) assert_not_in('parent', res.json['data']['relationships'].keys()) assert_equal(res.content_type, 'application/vnd.api+json') def test_child_project_has_parent(self): public_component = NodeFactory(parent=self.public_project, creator=self.user, is_public=True) public_component_url = '/{}nodes/{}/'.format(API_BASE, public_component._id) res = self.app.get(public_component_url) assert_equal(res.status_code, 200) url = res.json['data']['relationships']['parent']['links']['related']['href'] assert_equal(urlparse(url).path, self.public_url) def test_node_has_children_link(self): res = self.app.get(self.public_url) url = res.json['data']['relationships']['children']['links']['related']['href'] expected_url = self.public_url + 'children/' assert_equal(urlparse(url).path, expected_url) def test_node_has_contributors_link(self): res = self.app.get(self.public_url) url = res.json['data']['relationships']['contributors']['links']['related']['href'] expected_url = self.public_url + 'contributors/' 
assert_equal(urlparse(url).path, expected_url) def test_node_has_node_links_link(self): res = self.app.get(self.public_url) url = res.json['data']['relationships']['node_links']['links']['related']['href'] expected_url = self.public_url + 'node_links/' assert_equal(urlparse(url).path, expected_url) def test_node_has_registrations_link(self): res = self.app.get(self.public_url) url = res.json['data']['relationships']['registrations']['links']['related']['href'] expected_url = self.public_url + 'registrations/' assert_equal(urlparse(url).path, expected_url) def test_node_has_files_link(self): res = self.app.get(self.public_url) url = res.json['data']['relationships']['files']['links']['related']['href'] expected_url = self.public_url + 'files/' assert_equal(urlparse(url).path, expected_url) def test_node_has_comments_link(self): res = self.app.get(self.public_url) assert_equal(res.status_code, 200) assert_in('comments', res.json['data']['relationships'].keys()) def test_node_has_correct_unread_comments_count(self): contributor = AuthUserFactory() self.public_project.add_contributor(contributor=contributor, auth=Auth(self.user), save=True) comment = CommentFactory(node=self.public_project, user=contributor, page='node') res = self.app.get(self.public_url + '?related_counts=True', auth=self.user.auth) unread = res.json['data']['relationships']['comments']['links']['related']['meta']['unread'] unread_comments_node = unread['node'] assert_equal(unread_comments_node, 1) def test_node_properties(self): res = self.app.get(self.public_url) assert_equal(res.json['data']['attributes']['public'], True) assert_equal(res.json['data']['attributes']['registration'], False) assert_equal(res.json['data']['attributes']['collection'], False) assert_equal(res.json['data']['attributes']['tags'], []) def test_requesting_folder_returns_error(self): folder = NodeFactory(is_collection=True, creator=self.user) res = self.app.get( '/{}nodes/{}/'.format(API_BASE, folder._id), 
auth=self.user.auth, expect_errors=True ) assert_equal(res.status_code, 404) def test_cannot_return_registrations_at_node_detail_endpoint(self): registration = RegistrationFactory(project=self.public_project, creator=self.user) res = self.app.get('/{}nodes/{}/'.format(API_BASE, registration._id), auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 404) def test_cannot_return_folder_at_node_detail_endpoint(self): folder = CollectionFactory(creator=self.user) res = self.app.get('/{}nodes/{}/'.format(API_BASE, folder._id), auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 404) class NodeCRUDTestCase(ApiTestCase): def setUp(self): super(NodeCRUDTestCase, self).setUp() self.user = AuthUserFactory() self.user_two = AuthUserFactory() self.title = 'Cool Project' self.new_title = 'Super Cool Project' self.description = 'A Properly Cool Project' self.new_description = 'An even cooler project' self.category = 'data' self.new_category = 'project' self.public_project = ProjectFactory(title=self.title, description=self.description, category=self.category, is_public=True, creator=self.user) self.public_url = '/{}nodes/{}/'.format(API_BASE, self.public_project._id) self.private_project = ProjectFactory(title=self.title, description=self.description, category=self.category, is_public=False, creator=self.user) self.private_url = '/{}nodes/{}/'.format(API_BASE, self.private_project._id) self.fake_url = '/{}nodes/{}/'.format(API_BASE, '12345') def make_node_payload(node, attributes): return { 'data': { 'id': node._id, 'type': 'nodes', 'attributes': attributes, } } class TestNodeUpdate(NodeCRUDTestCase): def test_node_update_invalid_data(self): res = self.app.put_json_api(self.public_url, "Incorrect data", auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_equal(res.json['errors'][0]['detail'], "Malformed request.") res = self.app.put_json_api(self.public_url, ["Incorrect data"], auth=self.user.auth, 
expect_errors=True) assert_equal(res.status_code, 400) assert_equal(res.json['errors'][0]['detail'], "Malformed request.") @assert_not_logs(NodeLog.MADE_PUBLIC, 'private_project') def test_cannot_make_project_public_if_non_contributor(self): non_contrib = AuthUserFactory() res = self.app.patch_json( self.private_url, make_node_payload(self.private_project, {'public': True}), auth=non_contrib.auth, expect_errors=True ) assert_equal(res.status_code, 403) def test_cannot_make_project_public_if_non_admin_contributor(self): non_admin = AuthUserFactory() self.private_project.add_contributor( non_admin, permissions=(permissions.READ, permissions.WRITE), auth=Auth(self.private_project.creator) ) self.private_project.save() res = self.app.patch_json( self.private_url, make_node_payload(self.private_project, {'public': True}), auth=non_admin.auth, expect_errors=True ) assert_equal(res.status_code, 403) self.private_project.reload() assert_false(self.private_project.is_public) @assert_logs(NodeLog.MADE_PUBLIC, 'private_project') def test_can_make_project_public_if_admin_contributor(self): admin_user = AuthUserFactory() self.private_project.add_contributor( admin_user, permissions=(permissions.READ, permissions.WRITE, permissions.ADMIN), auth=Auth(self.private_project.creator) ) self.private_project.save() res = self.app.patch_json_api( self.private_url, make_node_payload(self.private_project, {'public': True}), auth=admin_user.auth # self.user is creator/admin ) assert_equal(res.status_code, 200) self.private_project.reload() assert_true(self.private_project.is_public) def test_update_project_properties_not_nested(self): res = self.app.put_json_api(self.public_url, { 'id': self.public_project._id, 'type': 'nodes', 'title': self.new_title, 'description': self.new_description, 'category': self.new_category, 'public': True, }, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_equal(res.json['errors'][0]['detail'], 'Request must include /data.') 
assert_equal(res.json['errors'][0]['source']['pointer'], '/data') def test_update_invalid_id(self): res = self.app.put_json_api(self.public_url, { 'data': { 'id': '12345', 'type': 'nodes', 'attributes': { 'title': self.new_title, 'description': self.new_description, 'category': self.new_category, 'public': True } } }, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 409) def test_update_invalid_type(self): res = self.app.put_json_api(self.public_url, { 'data': { 'id': self.public_project._id, 'type': 'node', 'attributes': { 'title': self.new_title, 'description': self.new_description, 'category': self.new_category, 'public': True } } }, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 409) def test_update_no_id(self): res = self.app.put_json_api(self.public_url, { 'data': { 'type': 'nodes', 'attributes': { 'title': self.new_title, 'description': self.new_description, 'category': self.new_category, 'public': True } } }, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_equal(res.json['errors'][0]['detail'], 'This field may not be null.') assert_equal(res.json['errors'][0]['source']['pointer'], '/data/id') def test_update_no_type(self): res = self.app.put_json_api(self.public_url, { 'data': { 'id': self.public_project._id, 'attributes': { 'title': self.new_title, 'description': self.new_description, 'category': self.new_category, 'public': True } } }, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_equal(res.json['errors'][0]['detail'], 'This field may not be null.') assert_equal(res.json['errors'][0]['source']['pointer'], '/data/type') def test_update_public_project_logged_out(self): res = self.app.put_json_api(self.public_url, { 'data': { 'id': self.public_project._id, 'type': 'nodes', 'attributes': { 'title': self.new_title, 'description': self.new_description, 'category': self.new_category, 'public': True } } }, expect_errors=True) 
assert_equal(res.status_code, 401) assert_in('detail', res.json['errors'][0]) @assert_logs(NodeLog.UPDATED_FIELDS, 'public_project') def test_update_public_project_logged_in(self): res = self.app.put_json_api(self.public_url, { 'data': { 'id': self.public_project._id, 'type': 'nodes', 'attributes': { 'title': self.new_title, 'description': self.new_description, 'category': self.new_category, 'public': True } } }, auth=self.user.auth) assert_equal(res.status_code, 200) assert_equal(res.content_type, 'application/vnd.api+json') assert_equal(res.json['data']['attributes']['title'], self.new_title) assert_equal(res.json['data']['attributes']['description'], self.new_description) assert_equal(res.json['data']['attributes']['category'], self.new_category) def test_update_public_project_logged_in_but_unauthorized(self): res = self.app.put_json_api(self.public_url, { 'data': { 'id': self.private_project._id, 'type': 'nodes', 'attributes': { 'title': self.new_title, 'description': self.new_description, 'category': self.new_category, 'public': True } } }, auth=self.user_two.auth, expect_errors=True) assert_equal(res.status_code, 403) assert_in('detail', res.json['errors'][0]) def test_cannot_update_a_registration(self): registration = RegistrationFactory(project=self.public_project, creator=self.user) original_title = registration.title original_description = registration.description url = '/{}nodes/{}/'.format(API_BASE, registration._id) res = self.app.put_json_api(url, { 'data': { 'id': registration._id, 'type': 'nodes', 'attributes': { 'title': fake.catch_phrase(), 'description': fake.bs(), 'category': 'hypothesis', 'public': True } } }, auth=self.user.auth, expect_errors=True) registration.reload() assert_equal(res.status_code, 404) assert_equal(registration.title, original_title) assert_equal(registration.description, original_description) def test_update_private_project_logged_out(self): res = self.app.put_json_api(self.private_url, { 'data': { 'id': 
self.private_project._id, 'type': 'nodes', 'attributes': { 'title': self.new_title, 'description': self.new_description, 'category': self.new_category, 'public': False } } }, expect_errors=True) assert_equal(res.status_code, 401) assert_in('detail', res.json['errors'][0]) @assert_logs(NodeLog.UPDATED_FIELDS, 'private_project') def test_update_private_project_logged_in_contributor(self): res = self.app.put_json_api(self.private_url, { 'data': { 'id': self.private_project._id, 'type': 'nodes', 'attributes': { 'title': self.new_title, 'description': self.new_description, 'category': self.new_category, 'public': False } } }, auth=self.user.auth) assert_equal(res.status_code, 200) assert_equal(res.content_type, 'application/vnd.api+json') assert_equal(res.json['data']['attributes']['title'], self.new_title) assert_equal(res.json['data']['attributes']['description'], self.new_description) assert_equal(res.json['data']['attributes']['category'], self.new_category) def test_update_private_project_logged_in_non_contributor(self): res = self.app.put_json_api(self.private_url, { 'data': { 'id': self.private_project._id, 'type': 'nodes', 'attributes': { 'title': self.new_title, 'description': self.new_description, 'category': self.new_category, 'public': False } } }, auth=self.user_two.auth, expect_errors=True) assert_equal(res.status_code, 403) assert_in('detail', res.json['errors'][0]) @assert_logs(NodeLog.UPDATED_FIELDS, 'public_project') def test_update_project_sanitizes_html_properly(self): """Post request should update resource, and any HTML in fields should be stripped""" new_title = '<strong>Super</strong> Cool Project' new_description = 'An <script>alert("even cooler")</script> project' res = self.app.put_json_api(self.public_url, { 'data': { 'id': self.public_project._id, 'type': 'nodes', 'attributes': { 'title': new_title, 'description': new_description, 'category': self.new_category, 'public': True, } } }, auth=self.user.auth) assert_equal(res.status_code, 200) 
assert_equal(res.content_type, 'application/vnd.api+json') assert_equal(res.json['data']['attributes']['title'], strip_html(new_title)) assert_equal(res.json['data']['attributes']['description'], strip_html(new_description)) @assert_logs(NodeLog.EDITED_TITLE, 'public_project') def test_partial_update_project_updates_project_correctly_and_sanitizes_html(self): new_title = 'An <script>alert("even cooler")</script> project' res = self.app.patch_json_api(self.public_url, { 'data': { 'id': self.public_project._id, 'type': 'nodes', 'attributes': { 'title': new_title } } }, auth=self.user.auth) assert_equal(res.status_code, 200) assert_equal(res.content_type, 'application/vnd.api+json') res = self.app.get(self.public_url) assert_equal(res.status_code, 200) assert_equal(res.content_type, 'application/vnd.api+json') assert_equal(res.json['data']['attributes']['title'], strip_html(new_title)) assert_equal(res.json['data']['attributes']['description'], self.description) assert_equal(res.json['data']['attributes']['category'], self.category) def test_write_to_public_field_non_contrib_forbidden(self): # Test non-contrib writing to public field res = self.app.patch_json_api(self.public_url, { 'data': { 'attributes': { 'public': False}, 'id': self.public_project._id, 'type': 'nodes' } }, auth=self.user_two.auth, expect_errors=True) assert_equal(res.status_code, 403) assert_in('detail', res.json['errors'][0]) def test_partial_update_public_project_logged_out(self): res = self.app.patch_json_api(self.public_url, { 'data': { 'id': self.public_project._id, 'type': 'nodes', 'attributes': { 'title': self.new_title } } }, expect_errors=True) assert_equal(res.status_code, 401) assert_in('detail', res.json['errors'][0]) @assert_logs(NodeLog.EDITED_TITLE, 'public_project') def test_partial_update_public_project_logged_in(self): res = self.app.patch_json_api(self.public_url, { 'data': { 'id': self.public_project._id, 'type': 'nodes', 'attributes': { 'title': self.new_title, } } }, 
auth=self.user.auth) assert_equal(res.status_code, 200) assert_equal(res.content_type, 'application/vnd.api+json') assert_equal(res.json['data']['attributes']['title'], self.new_title) assert_equal(res.json['data']['attributes']['description'], self.description) assert_equal(res.json['data']['attributes']['category'], self.category) def test_partial_update_public_project_logged_in_but_unauthorized(self): # Public resource, logged in, unauthorized res = self.app.patch_json_api(self.public_url, { 'data': { 'attributes': { 'title': self.new_title}, 'id': self.public_project._id, 'type': 'nodes', } }, auth=self.user_two.auth, expect_errors=True) assert_equal(res.status_code, 403) assert_in('detail', res.json['errors'][0]) def test_partial_update_private_project_logged_out(self): res = self.app.patch_json_api(self.private_url, { 'data': { 'id': self.private_project._id, 'type': 'nodes', 'attributes': { 'title': self.new_title } } }, expect_errors=True) assert_equal(res.status_code, 401) assert_in('detail', res.json['errors'][0]) @assert_logs(NodeLog.EDITED_TITLE, 'private_project') def test_partial_update_private_project_logged_in_contributor(self): res = self.app.patch_json_api(self.private_url, { 'data': { 'attributes': { 'title': self.new_title}, 'id': self.private_project._id, 'type': 'nodes', } }, auth=self.user.auth) assert_equal(res.status_code, 200) assert_equal(res.content_type, 'application/vnd.api+json') assert_equal(res.json['data']['attributes']['title'], self.new_title) assert_equal(res.json['data']['attributes']['description'], self.description) assert_equal(res.json['data']['attributes']['category'], self.category) def test_partial_update_private_project_logged_in_non_contributor(self): res = self.app.patch_json_api(self.private_url, { 'data': { 'attributes': { 'title': self.new_title}, 'id': self.private_project._id, 'type': 'nodes', } }, auth=self.user_two.auth,expect_errors=True) assert_equal(res.status_code, 403) assert_in('detail', 
res.json['errors'][0]) def test_multiple_patch_requests_with_same_category_generates_one_log(self): self.private_project.category = 'project' self.private_project.save() new_category = 'data' payload = make_node_payload(self.private_project, attributes={'category': new_category}) original_n_logs = len(self.private_project.logs) res = self.app.patch_json_api(self.private_url, payload, auth=self.user.auth) assert_equal(res.status_code, 200) self.private_project.reload() assert_equal(self.private_project.category, new_category) assert_equal(len(self.private_project.logs), original_n_logs + 1) # sanity check res = self.app.patch_json_api(self.private_url, payload, auth=self.user.auth) self.private_project.reload() assert_equal(self.private_project.category, new_category) assert_equal(len(self.private_project.logs), original_n_logs + 1) def test_partial_update_invalid_id(self): res = self.app.patch_json_api(self.public_url, { 'data': { 'id': '12345', 'type': 'nodes', 'attributes': { 'title': self.new_title, } } }, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 409) def test_partial_update_invalid_type(self): res = self.app.patch_json_api(self.public_url, { 'data': { 'id': self.public_project._id, 'type': 'node', 'attributes': { 'title': self.new_title, } } }, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 409) def test_partial_update_no_id(self): res = self.app.patch_json_api(self.public_url, { 'data': { 'type': 'nodes', 'attributes': { 'title': self.new_title, } } }, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_equal(res.json['errors'][0]['detail'], 'This field may not be null.') assert_equal(res.json['errors'][0]['source']['pointer'], '/data/id') def test_partial_update_no_type(self): res = self.app.patch_json_api(self.public_url, { 'data': { 'id': self.public_project._id, 'attributes': { 'title': self.new_title, } } }, auth=self.user.auth, expect_errors=True) 
assert_equal(res.status_code, 400) assert_equal(res.json['errors'][0]['detail'], 'This field may not be null.') assert_equal(res.json['errors'][0]['source']['pointer'], '/data/type') # Nothing will be updated here def test_partial_update_project_properties_not_nested(self): res = self.app.patch_json_api(self.public_url, { 'data': { 'id': self.public_project._id, 'type': 'nodes', 'title': self.new_title, } }, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) def test_update_project_invalid_title(self): project = { 'data': { 'type': 'nodes', 'id': self.public_project._id, 'attributes': { 'title': 'A' * 201, 'category': 'project', } } } res = self.app.put_json_api(self.public_url, project, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_equal(res.json['errors'][0]['detail'], 'Title cannot exceed 200 characters.') def test_public_project_with_publicly_editable_wiki_turns_private(self): wiki = self.public_project.get_addon('wiki') wiki.set_editing(permissions=True, auth=Auth(user=self.user), log=True) res = self.app.patch_json_api( self.public_url, make_node_payload(self.public_project, {'public': False}), auth=self.user.auth # self.user is creator/admin ) assert_equal(res.status_code, 200) class TestNodeDelete(NodeCRUDTestCase): def test_deletes_public_node_logged_out(self): res = self.app.delete(self.public_url, expect_errors=True) assert_equal(res.status_code, 401) assert 'detail' in res.json['errors'][0] def test_requesting_deleted_returns_410(self): self.public_project.is_deleted = True self.public_project.save() res = self.app.get(self.public_url, expect_errors=True) assert_equal(res.status_code, 410) assert 'detail' in res.json['errors'][0] def test_deletes_public_node_fails_if_unauthorized(self): res = self.app.delete_json_api(self.public_url, auth=self.user_two.auth, expect_errors=True) self.public_project.reload() assert_equal(res.status_code, 403) assert_equal(self.public_project.is_deleted, False) 
assert 'detail' in res.json['errors'][0] @assert_logs(NodeLog.PROJECT_DELETED, 'public_project') def test_deletes_public_node_succeeds_as_owner(self): res = self.app.delete_json_api(self.public_url, auth=self.user.auth, expect_errors=True) self.public_project.reload() assert_equal(res.status_code, 204) assert_equal(self.public_project.is_deleted, True) def test_deletes_private_node_logged_out(self): res = self.app.delete(self.private_url, expect_errors=True) assert_equal(res.status_code, 401) assert 'detail' in res.json['errors'][0] @assert_logs(NodeLog.PROJECT_DELETED, 'private_project') def test_deletes_private_node_logged_in_contributor(self): res = self.app.delete(self.private_url, auth=self.user.auth, expect_errors=True) self.private_project.reload() assert_equal(res.status_code, 204) assert_equal(self.private_project.is_deleted, True) def test_deletes_private_node_logged_in_non_contributor(self): res = self.app.delete(self.private_url, auth=self.user_two.auth, expect_errors=True) self.private_project.reload() assert_equal(res.status_code, 403) assert_equal(self.private_project.is_deleted, False) assert 'detail' in res.json['errors'][0] def test_deletes_private_node_logged_in_read_only_contributor(self): self.private_project.add_contributor(self.user_two, permissions=[permissions.READ]) self.private_project.save() res = self.app.delete(self.private_url, auth=self.user_two.auth, expect_errors=True) self.private_project.reload() assert_equal(res.status_code, 403) assert_equal(self.private_project.is_deleted, False) assert 'detail' in res.json['errors'][0] def test_deletes_invalid_node(self): res = self.app.delete(self.fake_url, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 404) assert 'detail' in res.json['errors'][0] def test_delete_project_with_component_returns_error(self): project = ProjectFactory(creator=self.user) component = NodeFactory(parent=project, creator=self.user) # Return a 400 because component must be deleted before 
deleting the parent res = self.app.delete_json_api( '/{}nodes/{}/'.format(API_BASE, project._id), auth=self.user.auth, expect_errors=True ) assert_equal(res.status_code, 400) errors = res.json['errors'] assert_equal(len(errors), 1) assert_equal( errors[0]['detail'], 'Any child components must be deleted prior to deleting this project.' ) def test_delete_bookmark_collection_returns_error(self): bookmark_collection = find_bookmark_collection(self.user) res = self.app.delete_json_api( '/{}nodes/{}/'.format(API_BASE, bookmark_collection._id), auth=self.user.auth, expect_errors=True ) # Bookmark collections are collections, so a 404 is returned assert_equal(res.status_code, 404) class TestReturnDeletedNode(ApiTestCase): def setUp(self): super(TestReturnDeletedNode, self).setUp() self.user = AuthUserFactory() self.non_contrib = AuthUserFactory() self.public_deleted = ProjectFactory(is_deleted=True, creator=self.user, title='This public project has been deleted', category='project', is_public=True) self.private_deleted = ProjectFactory(is_deleted=True, creator=self.user, title='This private project has been deleted', category='project', is_public=False) self.private = ProjectFactory(is_public=False, creator=self.user, title='A boring project', category='project') self.public = ProjectFactory(is_public=True, creator=self.user, title='A fun project', category='project') self.new_title = 'This deleted node has been edited' self.public_url = '/{}nodes/{}/'.format(API_BASE, self.public_deleted._id) self.private_url = '/{}nodes/{}/'.format(API_BASE, self.private_deleted._id) def test_return_deleted_public_node(self): res = self.app.get(self.public_url, expect_errors=True) assert_equal(res.status_code, 410) def test_return_deleted_private_node(self): res = self.app.get(self.private_url, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 410) def test_edit_deleted_public_node(self): res = self.app.put_json_api(self.public_url, params={'title': self.new_title, 
'node_id': self.public_deleted._id, 'category': self.public_deleted.category}, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 410) def test_edit_deleted_private_node(self): res = self.app.put_json_api(self.private_url, params={'title': self.new_title, 'node_id': self.private_deleted._id, 'category': self.private_deleted.category}, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 410) def test_delete_deleted_public_node(self): res = self.app.delete(self.public_url, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 410) def test_delete_deleted_private_node(self): res = self.app.delete(self.private_url, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 410) class TestNodeTags(ApiTestCase): def setUp(self): super(TestNodeTags, self).setUp() self.user = AuthUserFactory() self.admin = AuthUserFactory() self.user_two = AuthUserFactory() self.read_only_contributor = AuthUserFactory() self.public_project = ProjectFactory(title="Project One", is_public=True, creator=self.user) self.public_project.add_contributor(self.user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True) self.private_project = ProjectFactory(title="Project Two", is_public=False, creator=self.user) self.private_project.add_contributor(self.user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True) self.private_project.add_contributor(self.admin, permissions=permissions.CREATOR_PERMISSIONS, save=True) self.public_url = '/{}nodes/{}/'.format(API_BASE, self.public_project._id) self.private_url = '/{}nodes/{}/'.format(API_BASE, self.private_project._id) self.one_new_tag_json = { 'data': { 'id': self.public_project._id, 'type': 'nodes', 'attributes': { 'tags': ['new-tag'] } } } self.private_payload = { 'data': { 'id': self.private_project._id, 'type': 'nodes', 'attributes': { 'tags': ['new-tag'] } } } def test_public_project_starts_with_no_tags(self): res = self.app.get(self.public_url) 
assert_equal(res.status_code, 200) assert_equal(len(res.json['data']['attributes']['tags']), 0) @assert_logs(NodeLog.TAG_ADDED, 'public_project') def test_contributor_can_add_tag_to_public_project(self): res = self.app.patch_json_api(self.public_url, self.one_new_tag_json, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 200) # Ensure data is correct from the PATCH response assert_equal(len(res.json['data']['attributes']['tags']), 1) assert_equal(res.json['data']['attributes']['tags'][0], 'new-tag') # Ensure data is correct in the database self.public_project.reload() assert_equal(len(self.public_project.tags), 1) assert_equal(self.public_project.tags[0]._id, 'new-tag') # Ensure data is correct when GETting the resource again reload_res = self.app.get(self.public_url) assert_equal(len(reload_res.json['data']['attributes']['tags']), 1) assert_equal(reload_res.json['data']['attributes']['tags'][0], 'new-tag') @assert_logs(NodeLog.TAG_ADDED, 'private_project') def test_contributor_can_add_tag_to_private_project(self): res = self.app.patch_json_api(self.private_url, self.private_payload, auth=self.user.auth) assert_equal(res.status_code, 200) # Ensure data is correct from the PATCH response assert_equal(len(res.json['data']['attributes']['tags']), 1) assert_equal(res.json['data']['attributes']['tags'][0], 'new-tag') # Ensure data is correct in the database self.private_project.reload() assert_equal(len(self.private_project.tags), 1) assert_equal(self.private_project.tags[0]._id, 'new-tag') # Ensure data is correct when GETting the resource again reload_res = self.app.get(self.private_url, auth=self.user.auth) assert_equal(len(reload_res.json['data']['attributes']['tags']), 1) assert_equal(reload_res.json['data']['attributes']['tags'][0], 'new-tag') def test_partial_update_project_does_not_clear_tags(self): res = self.app.patch_json_api(self.private_url, self.private_payload, auth=self.admin.auth) assert_equal(res.status_code, 200) 
assert_equal(len(res.json['data']['attributes']['tags']), 1) new_payload = { 'data': { 'id': self.private_project._id, 'type': 'nodes', 'attributes': { 'public': True } } } res = self.app.patch_json_api(self.private_url, new_payload, auth=self.admin.auth) assert_equal(res.status_code, 200) assert_equal(len(res.json['data']['attributes']['tags']), 1) new_payload['data']['attributes']['public'] = False res = self.app.patch_json_api(self.private_url, new_payload, auth=self.admin.auth) assert_equal(res.status_code, 200) assert_equal(len(res.json['data']['attributes']['tags']), 1) def test_non_authenticated_user_cannot_add_tag_to_public_project(self): res = self.app.patch_json_api(self.public_url, self.one_new_tag_json, expect_errors=True, auth=None) assert_equal(res.status_code, 401) def test_non_authenticated_user_cannot_add_tag_to_private_project(self): res = self.app.patch_json_api(self.private_url, self.private_payload, expect_errors=True, auth=None) assert_equal(res.status_code, 401) def test_non_contributor_cannot_add_tag_to_public_project(self): res = self.app.patch_json_api(self.public_url, self.one_new_tag_json, expect_errors=True, auth=self.user_two.auth) assert_equal(res.status_code, 403) def test_non_contributor_cannot_add_tag_to_private_project(self): res = self.app.patch_json_api(self.private_url, self.private_payload, expect_errors=True, auth=self.user_two.auth) assert_equal(res.status_code, 403) def test_read_only_contributor_cannot_add_tag_to_public_project(self): res = self.app.patch_json_api(self.public_url, self.one_new_tag_json, expect_errors=True, auth=self.read_only_contributor.auth) assert_equal(res.status_code, 403) def test_read_only_contributor_cannot_add_tag_to_private_project(self): res = self.app.patch_json_api(self.private_url, self.private_payload, expect_errors=True, auth=self.read_only_contributor.auth) assert_equal(res.status_code, 403)\ @assert_logs(NodeLog.TAG_ADDED, 'private_project', -4) @assert_logs(NodeLog.TAG_ADDED, 
'private_project', -3) @assert_logs(NodeLog.TAG_REMOVED, 'private_project', -2) @assert_logs(NodeLog.TAG_REMOVED, 'private_project') def test_tags_add_and_remove_properly(self): res = self.app.patch_json_api(self.private_url, self.private_payload, auth=self.user.auth) assert_equal(res.status_code, 200) # Ensure adding tag data is correct from the PATCH response assert_equal(len(res.json['data']['attributes']['tags']), 1) assert_equal(res.json['data']['attributes']['tags'][0], 'new-tag') # Ensure removing and adding tag data is correct from the PATCH response res = self.app.patch_json_api(self.private_url, {'data': {'id': self.private_project._id, 'type':'nodes', 'attributes': {'tags':['newer-tag']}}}, auth=self.user.auth) assert_equal(res.status_code, 200) assert_equal(len(res.json['data']['attributes']['tags']), 1) assert_equal(res.json['data']['attributes']['tags'][0], 'newer-tag') # Ensure removing tag data is correct from the PATCH response res = self.app.patch_json_api(self.private_url, {'data': {'id': self.private_project._id, 'type':'nodes', 'attributes': {'tags': []}}}, auth=self.user.auth) assert_equal(res.status_code, 200) assert_equal(len(res.json['data']['attributes']['tags']), 0) def test_tags_post_object_instead_of_list(self): url = '/{}nodes/'.format(API_BASE) payload = {'data': { 'type': 'nodes', 'attributes': { 'title': 'new title', 'category': 'project', 'tags': {'foo': 'bar'} } }} res = self.app.post_json_api(url, payload, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_equal(res.json['errors'][0]['detail'], 'Expected a list of items but got type "dict".') def test_tags_patch_object_instead_of_list(self): self.one_new_tag_json['data']['attributes']['tags'] = {'foo': 'bar'} res = self.app.patch_json_api(self.public_url, self.one_new_tag_json, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_equal(res.json['errors'][0]['detail'], 'Expected a list of items but got type "dict".')
{ "content_hash": "b6883cf2a01441192ae734efb6fb2df1", "timestamp": "", "source": "github", "line_count": 1027, "max_line_length": 174, "avg_line_length": 45.996105160662125, "alnum_prop": 0.5894830433125873, "repo_name": "RomanZWang/osf.io", "id": "f6871dbcd4c62ebe02e9aa54774a726695a34848", "size": "47262", "binary": false, "copies": "2", "ref": "refs/heads/develop", "path": "api_tests/nodes/views/test_node_detail.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "142342" }, { "name": "HTML", "bytes": "103807" }, { "name": "JavaScript", "bytes": "1579348" }, { "name": "Mako", "bytes": "660201" }, { "name": "Perl", "bytes": "13885" }, { "name": "Python", "bytes": "5261796" }, { "name": "Shell", "bytes": "106" } ], "symlink_target": "" }
from rest_framework.views import APIView
from rest_framework import generics
from rest_framework.mixins import DestroyModelMixin
from rest_framework.response import Response
from rest_framework import authentication, permissions

from comments.models import Comment

from .permissions import IsOwnerOrReadOnly
from .serializers import CommentSerializer, CommentUpdateSerializer

# NOTE(review): is_authenticated is *called* throughout this module, which
# matches the pre-Django-1.10 API where it was a method — confirm against the
# project's Django version before modernizing.


class CommentListAPIView(generics.ListAPIView):
    """List comments for a single page, selected via the ``url`` query param."""
    serializer_class = CommentSerializer
    # authentication_classes = []
    permission_classes = []

    def get_queryset(self, *args, **kwargs):
        # Without a ?url= filter we expose nothing rather than every comment.
        target = self.request.GET.get("url")
        if not target:
            return Comment.objects.none()
        return Comment.objects.filter(url=target)

    def list(self, request, *args, **kwargs):
        comments = self.filter_queryset(self.get_queryset())

        page = self.paginate_queryset(comments)
        if page is not None:
            page_data = self.get_serializer(page, many=True).data
            return self.get_paginated_response(page_data)

        response = Response(self.get_serializer(comments, many=True).data)
        # Cookies tell the javascript client whether (and as whom) the
        # viewer is logged in. Only set on the non-paginated path, exactly
        # as before.
        response.set_cookie('isUser', 'false')
        if request.user.is_authenticated():
            response.set_cookie('isUser', 'true')
            response.set_cookie('authUsername', str(request.user.username))
        return response


class CommentCreateAPIView(generics.CreateAPIView):
    """Create a comment, attributing it to the logged-in user."""
    queryset = Comment.objects.all()
    serializer_class = CommentSerializer

    def perform_create(self, serializer):
        # Anonymous submissions are silently dropped (no save occurs),
        # matching the original behavior.
        user = self.request.user
        if user.is_authenticated():
            serializer.save(user=user)


class CommentUpdateAPIView(DestroyModelMixin, generics.RetrieveUpdateAPIView):
    """Retrieve, edit or delete a single comment; only its owner may write."""
    queryset = Comment.objects.all()
    serializer_class = CommentUpdateSerializer
    permission_classes = [IsOwnerOrReadOnly]

    def perform_update(self, serializer):
        user = self.request.user
        if user.is_authenticated():
            serializer.save(user=user)

    def delete(self, request, *args, **kwargs):
        # Route DELETE through DestroyModelMixin so RetrieveUpdateAPIView
        # also supports deletion.
        return self.destroy(request, *args, **kwargs)
{ "content_hash": "5db2909f963fd2813e91866351e8b3b7", "timestamp": "", "source": "github", "line_count": 66, "max_line_length": 78, "avg_line_length": 32.18181818181818, "alnum_prop": 0.7052730696798494, "repo_name": "srvup/srvup-comments", "id": "86156bc800c91fbd7480aae47ecdc64557456bc7", "size": "2124", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "comments/api/views.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "167" }, { "name": "HTML", "bytes": "98" }, { "name": "JavaScript", "bytes": "8118" }, { "name": "Python", "bytes": "8197" } ], "symlink_target": "" }
"""
Created on Thu Apr 11 14:50:17 2013

@author: bovy

Examples using the diagnostics module of pygchem.

The script reads a GEOS-Chem CTM output file (binary punch format),
inspects its diagnostics metadata and datablocks, and finally writes a
new punch file containing a freshly created datablock.
"""

import os
import datetime

import numpy as np

import temp.diagnostics as gdiag


# Location of the GEOS-Chem run directory and the CTM output to read.
ws = "/Volumes/data/03_geo/geoschem/GEOS-Chem-rundirs/4x5/geos5/standard"
bpch_fname = "ctm.bpch"

# Import a CTM file (only the binary punch file format is supported;
# netCDF will follow).
ctm_f = gdiag.CTMFile.fromfile(os.path.join(ws, bpch_fname))

# Diagnostics metadata (contents of diaginfo.dat and tracerinfo.dat) are
# automatically imported. Access to metadata via an instance of the
# 'Diagnostics' class connected to the CTMFile instance.
ctm_f.diagnostics                # returns a 'Diagnostics' instance
ctm_f.diagnostics.categories     # returns a dict with categories (diaginfo)
ctm_f.diagnostics.diagnostics    # returns a dict with diagnostics (tracerinfo)

# Possible to add/remove/modify diagnostics metadata at runtime...
new_diags = gdiag.Diagnostics()
new_diags.add_diagnostic_field('newall', 'newall_val', True)

# Access to datablocks in the CTM file is provided by:
ctm_f.datablocks                 # returns a list of datablocks
                                 # (as 'Datablock' instances)

# Datablock simple filtering (by name, number, category and/or times).
# NOTE: datetime literals use (2005, 7, 1) — the original zero-padded
# form `07` is a Python-2-only literal (PEP 3127) with the same value.
ijavg = ctm_f.filter(category="IJ-AVG-$")     # select all IJ-AVG-$ datablocks
d_050701 = ctm_f.filter(time=datetime.datetime(2005, 7, 1))   # time selection
Ox_avg_050701 = ctm_f.filter(name="Ox",       # Ox tracer...
                             category="IJ-AVG-$",
                             time=datetime.datetime(2005, 7, 1))[0]

# Datablock advanced filtering (TODO)

# Datablock header (examples) and values
Ox_avg_050701.index
Ox_avg_050701.number
Ox_avg_050701.name
Ox_avg_050701.full_name
Ox_avg_050701.molecular_weight
Ox_avg_050701.unit
Ox_avg_050701.values             # returns a numpy array

# Create new datablocks
default_diags = gdiag.Diagnostics()       # a new Diagnostics instance (here
                                          # based on 'default' tracerinfo
                                          # and diaginfo)
d_start = datetime.datetime(2000, 1, 1)   # start and end times assigned to
d_end = datetime.datetime(2002, 1, 1)     # the datablock
new_db = gdiag.DataBlock(1, 'EW-FLX-$', (d_start, d_end),
                         diagnostics=default_diags,
                         values=np.zeros_like(Ox_avg_050701.values))

# Append the new datablock to a new 'CTMFile' instance and write it to a
# binary punch file (useful for creating restart files).
new_ctm_f = gdiag.CTMFile(diagnostics=default_diags)
new_ctm_f.append_datablock(new_db)
new_ctm_f.save(os.path.join(ws, "new_ctm.bpch"), overwrite=True)
{ "content_hash": "16fc1906fe3dc861b2545ae0c4c4fe7f", "timestamp": "", "source": "github", "line_count": 75, "max_line_length": 76, "avg_line_length": 35.96, "alnum_prop": 0.6755654430849092, "repo_name": "benbovy/PyGChem_examples", "id": "0a5f603c5aeec8f8a7ef8d1d37204b0533ffe8f0", "size": "2721", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "outdated/example_diagnostics.py", "mode": "33188", "license": "mit", "language": [ { "name": "Jupyter Notebook", "bytes": "917401" }, { "name": "Python", "bytes": "15500" } ], "symlink_target": "" }
from datetime import datetime

from sqlalchemy import Column, DateTime, ForeignKey, String
from sqlalchemy.orm import relationship
from sqlalchemy.schema import UniqueConstraint, ForeignKeyConstraint
from uuid import uuid4

from changes.config import db
from changes.db.types.guid import GUID
from changes.db.types.json import JSONEncodedDict


class Source(db.Model):
    """
    This is the object that actually represents the code we run builds
    against. Essentially it's a revision, with a UUID, and a possible
    patch_id. Rows with null patch_ids are just revisions, and rows with
    patch_ids apply the linked patch on top of the revision and run builds
    against the resulting code.

    Why the indirection? This is how we handle phabricator diffs: when we
    want to create a build for a new diff, we add a row here with the diff's
    parent revision sha (NOT the sha of the commit phabricator is trying to
    land, since that will change every time we update the diff) and a row to
    the patch table that contains the contents of the diff.

    Side note: Whenever we create a source row from a phabricator diff, we
    log json text to the data field with information like the diff id.
    """
    # Primary key; default is generated client-side (uuid4), so the id is
    # available before the row is flushed.
    id = Column(GUID, primary_key=True, default=uuid4)
    # Repository this source belongs to; required.
    repository_id = Column(GUID, ForeignKey('repository.id'), nullable=False)
    # Optional patch applied on top of revision_sha; unique, so a patch
    # belongs to at most one source.
    patch_id = Column(GUID, ForeignKey('patch.id'), unique=True)
    # Sha of the base revision (40 chars, i.e. a full git/hg hash).
    revision_sha = Column(String(40))
    date_created = Column(DateTime, default=datetime.utcnow)
    # Free-form JSON blob; per the class docstring it holds e.g. the
    # phabricator diff id when the source was created from a diff.
    data = Column(JSONEncodedDict)

    repository = relationship('Repository', innerjoin=False)
    patch = relationship('Patch')
    # Joins on the composite (repository_id, revision_sha) pair declared in
    # the ForeignKeyConstraint below; foreign_keys must reference the
    # Column objects defined above.
    revision = relationship('Revision',
                            foreign_keys=[repository_id, revision_sha])

    __tablename__ = 'source'
    __table_args__ = (
        # Composite FK backing the `revision` relationship.
        ForeignKeyConstraint(
            ('repository_id', 'revision_sha'),
            ('revision.repository_id', 'revision.sha')
        ),
        # At most one source per (repository, revision, patch) triple.
        UniqueConstraint(
            'repository_id', 'revision_sha', 'patch_id',
            name='unq_source_revision',
        ),
    )

    def __init__(self, **kwargs):
        """Fill in id/date_created eagerly so they exist pre-flush."""
        super(Source, self).__init__(**kwargs)
        if self.id is None:
            self.id = uuid4()
        if self.date_created is None:
            self.date_created = datetime.utcnow()

    def generate_diff(self):
        """Return a textual diff for this source, or None.

        Prefers the attached patch's diff; otherwise asks the repository's
        VCS to export the revision. Any VCS failure is deliberately
        swallowed (best-effort) and None is returned.
        """
        if self.patch:
            return self.patch.diff

        vcs = self.repository.get_vcs()
        if vcs:
            try:
                return vcs.export(self.revision_sha)
            except Exception:
                # Best-effort: a missing/broken checkout should not
                # propagate; caller handles None.
                pass

        return None

    def is_commit(self):
        # True when this source is a plain revision: it has a sha and no
        # patch applied on top.
        return bool(self.patch_id is None and self.revision_sha)
{ "content_hash": "f904b67d037d6b9af5c5ff72433db9df", "timestamp": "", "source": "github", "line_count": 75, "max_line_length": 84, "avg_line_length": 35.84, "alnum_prop": 0.6607142857142857, "repo_name": "bowlofstew/changes", "id": "77f7981a3746e4b3e6a463b2b68ad1ced736c94e", "size": "2689", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "changes/models/source.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "87142" }, { "name": "HTML", "bytes": "137437" }, { "name": "JavaScript", "bytes": "385108" }, { "name": "Makefile", "bytes": "6212" }, { "name": "Mako", "bytes": "412" }, { "name": "Python", "bytes": "1546048" }, { "name": "Shell", "bytes": "868" } ], "symlink_target": "" }
''' FreeType high-level python API This the bindings for the high-level API of FreeType (that must be installed somewhere on your system). Note: ----- C Library will be searched using the ctypes.util.find_library. However, this search might fail. In such a case (or for other reasons), you can specify the FT_library_filename before importing the freetype library and freetype will use the specified one. ''' from ctypes import * from ft_types import * from ft_enums import * from ft_errors import * from ft_structs import * import ctypes.util import os PACKAGE_FOLDER = os.path.abspath(os.path.dirname(__file__)) __dll__ = None __handle__ = None FT_Library_filename = ctypes.util.find_library('freetype') if not FT_Library_filename: paths_to_try = [ os.path.join(PACKAGE_FOLDER, 'libfreetype.dll'), # Windows 'libfreetype.so.6', # Linux '/usr/X11/lib/libfreetype.dylib', # MacOS X ] for p in paths_to_try: try: __dll__ = CDLL(p) except OSError: pass if __dll__ is not None: break if not FT_Library_filename and not __dll__: raise RuntimeError, 'Freetype library not found' if not __dll__: __dll__ = CDLL(FT_Library_filename) # ----------------------------------------------------------------------------- # High-level API of FreeType 2 # ----------------------------------------------------------------------------- FT_Init_FreeType = __dll__.FT_Init_FreeType FT_Done_FreeType = __dll__.FT_Done_FreeType FT_Library_Version = __dll__.FT_Library_Version def __del_library__(self): global __handle__ if __handle__: try: FT_Done_FreeType(byref(self)) __handle__ = None except: pass FT_Library.__del__ = __del_library__ def get_handle(): ''' Get unique FT_Library handle ''' global __handle__ if not __handle__: __handle__ = FT_Library( ) error = FT_Init_FreeType( byref(__handle__) ) if error: raise FT_Exception(error) try: set_lcd_filter( FT_LCD_FILTER_DEFAULT ) except: pass if error: raise FT_Exception(error) return __handle__ def version(): ''' Return the version of the FreeType library being 
used as a tuple of ( major version number, minor version number, patch version number ) ''' amajor = FT_Int() aminor = FT_Int() apatch = FT_Int() library = get_handle() FT_Library_Version(library, byref(amajor), byref(aminor), byref(apatch)) return (amajor.value, aminor.value, apatch.value) FT_Get_X11_Font_Format = __dll__.FT_Get_X11_Font_Format FT_Get_X11_Font_Format.restype = c_char_p FT_Library_SetLcdFilter= __dll__.FT_Library_SetLcdFilter if version()>=(2,4,0): FT_Library_SetLcdFilterWeights = __dll__.FT_Library_SetLcdFilterWeights FT_New_Face = __dll__.FT_New_Face FT_New_Memory_Face = __dll__.FT_New_Memory_Face FT_Open_Face = __dll__.FT_Open_Face FT_Attach_File = __dll__.FT_Attach_File FT_Attach_Stream = __dll__.FT_Attach_Stream if version()>=(2,4,2): FT_Reference_Face = __dll__.FT_Reference_Face FT_Done_Face = __dll__.FT_Done_Face FT_Done_Glyph = __dll__.FT_Done_Glyph FT_Select_Size = __dll__.FT_Select_Size FT_Request_Size = __dll__.FT_Request_Size FT_Set_Char_Size = __dll__.FT_Set_Char_Size FT_Set_Pixel_Sizes = __dll__.FT_Set_Pixel_Sizes FT_Load_Glyph = __dll__.FT_Load_Glyph FT_Load_Char = __dll__.FT_Load_Char FT_Set_Transform = __dll__.FT_Set_Transform FT_Render_Glyph = __dll__.FT_Render_Glyph FT_Get_Kerning = __dll__.FT_Get_Kerning FT_Get_Track_Kerning = __dll__.FT_Get_Track_Kerning FT_Get_Glyph_Name = __dll__.FT_Get_Glyph_Name FT_Get_Glyph = __dll__.FT_Get_Glyph FT_Glyph_Get_CBox = __dll__.FT_Glyph_Get_CBox FT_Get_Postscript_Name = __dll__.FT_Get_Postscript_Name FT_Get_Postscript_Name.restype = c_char_p FT_Select_Charmap = __dll__.FT_Select_Charmap FT_Set_Charmap = __dll__.FT_Set_Charmap FT_Get_Charmap_Index = __dll__.FT_Get_Charmap_Index FT_Get_CMap_Language_ID= __dll__.FT_Get_CMap_Language_ID FT_Get_CMap_Format = __dll__.FT_Get_CMap_Format FT_Get_Char_Index = __dll__.FT_Get_Char_Index FT_Get_First_Char = __dll__.FT_Get_First_Char FT_Get_Next_Char = __dll__.FT_Get_Next_Char FT_Get_Name_Index = __dll__.FT_Get_Name_Index FT_Get_SubGlyph_Info = 
__dll__.FT_Get_SubGlyph_Info if version()>=(2,3,8): FT_Get_FSType_Flags = __dll__.FT_Get_FSType_Flags FT_Get_FSType_Flags.restype = c_ushort FT_Get_Sfnt_Name_Count = __dll__.FT_Get_Sfnt_Name_Count FT_Get_Sfnt_Name = __dll__.FT_Get_Sfnt_Name FT_Outline_GetInsideBorder = __dll__.FT_Outline_GetInsideBorder FT_Outline_GetOutsideBorder = __dll__.FT_Outline_GetOutsideBorder FT_Outline_Get_BBox = __dll__.FT_Outline_Get_BBox FT_Outline_Get_CBox = __dll__.FT_Outline_Get_CBox FT_Stroker_New = __dll__.FT_Stroker_New FT_Stroker_Set = __dll__.FT_Stroker_Set FT_Stroker_Rewind = __dll__.FT_Stroker_Rewind FT_Stroker_ParseOutline = __dll__.FT_Stroker_ParseOutline FT_Stroker_BeginSubPath = __dll__.FT_Stroker_BeginSubPath FT_Stroker_EndSubPath = __dll__.FT_Stroker_EndSubPath FT_Stroker_LineTo = __dll__.FT_Stroker_LineTo FT_Stroker_ConicTo = __dll__.FT_Stroker_ConicTo FT_Stroker_CubicTo = __dll__.FT_Stroker_CubicTo FT_Stroker_GetBorderCounts = __dll__.FT_Stroker_GetBorderCounts FT_Stroker_ExportBorder = __dll__.FT_Stroker_ExportBorder FT_Stroker_GetCounts = __dll__.FT_Stroker_GetCounts FT_Stroker_Export = __dll__.FT_Stroker_Export FT_Stroker_Done = __dll__.FT_Stroker_Done FT_Glyph_Stroke = __dll__.FT_Glyph_Stroke FT_Glyph_StrokeBorder = __dll__.FT_Glyph_StrokeBorder FT_Glyph_To_Bitmap = __dll__.FT_Glyph_To_Bitmap # ----------------------------------------------------------------------------- # Stand alone functions # ----------------------------------------------------------------------------- def set_lcd_filter(filt): ''' This function is used to apply color filtering to LCD decimated bitmaps, like the ones used when calling FT_Render_Glyph with FT_RENDER_MODE_LCD or FT_RENDER_MODE_LCD_V. Note: ----- This feature is always disabled by default. Clients must make an explicit call to this function with a 'filter' value other than FT_LCD_FILTER_NONE in order to enable it. 
Due to PATENTS covering subpixel rendering, this function doesn't do anything except returning 'FT_Err_Unimplemented_Feature' if the configuration macro FT_CONFIG_OPTION_SUBPIXEL_RENDERING is not defined in your build of the library, which should correspond to all default builds of FreeType. The filter affects glyph bitmaps rendered through FT_Render_Glyph, FT_Outline_Get_Bitmap, FT_Load_Glyph, and FT_Load_Char. It does not affect the output of FT_Outline_Render and FT_Outline_Get_Bitmap. If this feature is activated, the dimensions of LCD glyph bitmaps are either larger or taller than the dimensions of the corresponding outline with regards to the pixel grid. For example, for FT_RENDER_MODE_LCD, the filter adds up to 3 pixels to the left, and up to 3 pixels to the right. The bitmap offset values are adjusted correctly, so clients shouldn't need to modify their layout and glyph positioning code when enabling the filter. ''' library = get_handle() error = FT_Library_SetLcdFilter(library, filt) if error: raise FT_Exception(error) def set_lcd_filter_weights(a,b,c,d,e): ''' Use this function to override the filter weights selected by FT_Library_SetLcdFilter. By default, FreeType uses the quintuple (0x00, 0x55, 0x56, 0x55, 0x00) for FT_LCD_FILTER_LIGHT, and (0x10, 0x40, 0x70, 0x40, 0x10) for FT_LCD_FILTER_DEFAULT and FT_LCD_FILTER_LEGACY. 
Note: ----- Only available if version > 2.4.0 ''' if version()>=(2,4,0): library = get_handle() weights = FT_Char(5)(a,b,c,d,e) error = FT_Library_SetLcdFilterWeights(library, weights) if error: raise FT_Exception(error) else: raise RuntimeError, \ 'set_lcd_filter_weights require freetype > 2.4.0' # ----------------------------------------------------------------------------- # Direct wrapper (simple renaming) # ----------------------------------------------------------------------------- Vector = FT_Vector Matrix = FT_Matrix # ----------------------------------------------------------------------------- class BBox( object ): ''' FT_BBox wrapper. A structure used to hold an outline's bounding box, i.e., the coordinates of its extrema in the horizontal and vertical directions. Note: ----- The bounding box is specified with the coordinates of the lower left and the upper right corner. In PostScript, those values are often called (llx,lly) and (urx,ury), respectively. If 'yMin' is negative, this value gives the glyph's descender. Otherwise, the glyph doesn't descend below the baseline. Similarly, if 'ymax' is positive, this value gives the glyph's ascender. 'xMin' gives the horizontal distance from the glyph's origin to the left edge of the glyph's bounding box. If 'xMin' is negative, the glyph extends to the left of the origin. ''' def __init__(self, bbox): ''' Create a new BBox object. 
Parameters: ----------- bbox : a FT_BBox or a tuple of 4 values ''' if type(bbox) is FT_BBox: self._FT_BBox = bbox else: self._FT_BBox = FT_BBox(*bbox) xMin = property(lambda self: self._FT_BBox.xMin, doc = 'The horizontal minimum (left-most).') yMin = property(lambda self: self._FT_BBox.yMin, doc = 'The vertical minimum (bottom-most).') xMax = property(lambda self: self._FT_BBox.xMax, doc = 'The horizontal maximum (right-most).') yMax = property(lambda self: self._FT_BBox.yMax, doc = 'The vertical maximum (top-most).') # ----------------------------------------------------------------------------- class SizeMetrics( object ): ''' The size metrics structure gives the metrics of a size object. Note: ----- The scaling values, if relevant, are determined first during a size changing operation. The remaining fields are then set by the driver. For scalable formats, they are usually set to scaled values of the corresponding fields in Face. Note that due to glyph hinting, these values might not be exact for certain fonts. Thus they must be treated as unreliable with an error margin of at least one pixel! Indeed, the only way to get the exact metrics is to render all glyphs. As this would be a definite performance hit, it is up to client applications to perform such computations. The SizeMetrics structure is valid for bitmap fonts also. ''' def __init__(self, metrics ): ''' Create a new SizeMetrics object. Parameters: ----------- metrics : a FT_SizeMetrics ''' self._FT_Size_Metrics = metrics x_ppem = property( lambda self: self._FT_Size_Metrics.x_ppem, doc = '''The width of the scaled EM square in pixels, hence the term 'ppem' (pixels per EM). It is also referred to as 'nominal width'.''' ) y_ppem = property( lambda self: self._FT_Size_Metrics.y_ppem, doc = '''The height of the scaled EM square in pixels, hence the term 'ppem' (pixels per EM). 
It is also referred to as 'nominal height'.''' ) x_scale = property( lambda self: self._FT_Size_Metrics.x_scale, doc = '''A 16.16 fractional scaling value used to convert horizontal metrics from font units to 26.6 fractional pixels. Only relevant for scalable font formats.''' ) y_scale = property( lambda self: self._FT_Size_Metrics.y_scale, doc = '''A 16.16 fractional scaling value used to convert vertical metrics from font units to 26.6 fractional pixels. Only relevant for scalable font formats.''' ) ascender = property( lambda self: self._FT_Size_Metrics.ascender, doc = '''The ascender in 26.6 fractional pixels. See Face for the details.''' ) descender = property( lambda self: self._FT_Size_Metrics.descender, doc = '''The descender in 26.6 fractional pixels. See Face for the details.''' ) height = property( lambda self: self._FT_Size_Metrics.height, doc = '''The height in 26.6 fractional pixels. See Face for the details.''' ) max_advance = property(lambda self: self._FT_Size_Metrics.max_advance, doc = '''The maximal advance width in 26.6 fractional pixels. See Face for the details.''' ) # ----------------------------------------------------------------------------- class BitmapSize( object ): ''' FT_Bitmap_Size wrapper This structure models the metrics of a bitmap strike (i.e., a set of glyphs for a given point size and resolution) in a bitmap font. It is used for the 'available_sizes' field of Face. Note: ----- Windows FNT: The nominal size given in a FNT font is not reliable. Thus when the driver finds it incorrect, it sets 'size' to some calculated values and sets 'x_ppem' and 'y_ppem' to the pixel width and height given in the font, respectively. TrueType embedded bitmaps: 'size', 'width', and 'height' values are not contained in the bitmap strike itself. They are computed from the global font parameters. ''' def __init__(self, size ): ''' Create a new SizeMetrics object. 
Parameters: ----------- size : a FT_Bitmap_Size ''' self._FT_Bitmap_Size = size height = property( lambda self: self._FT_Bitmap_Size.height, doc = '''The vertical distance, in pixels, between two consecutive baselines. It is always positive.''') width = property( lambda self: self._FT_Bitmap_Size.width, doc = '''The average width, in pixels, of all glyphs in the strike.''') size = property( lambda self: self._FT_Bitmap_Size.size, doc = '''The nominal size of the strike in 26.6 fractional points. This field is not very useful.''') x_ppem = property( lambda self: self._FT_Bitmap_Size.x_ppem, doc = '''The horizontal ppem (nominal width) in 26.6 fractional pixels.''') y_ppem = property( lambda self: self._FT_Bitmap_Size.y_ppem, doc = '''The vertical ppem (nominal width) in 26.6 fractional pixels.''') # ----------------------------------------------------------------------------- class Bitmap(object): ''' FT_Bitmap wrapper A structure used to describe a bitmap or pixmap to the raster. Note that we now manage pixmaps of various depths through the 'pixel_mode' field. Note: ----- For now, the only pixel modes supported by FreeType are mono and grays. However, drivers might be added in the future to support more 'colorful' options. ''' def __init__(self, bitmap): ''' Create a new Bitmap object. Parameters: ----------- bitmap : a FT_Bitmap ''' self._FT_Bitmap = bitmap rows = property(lambda self: self._FT_Bitmap.rows, doc = '''The number of bitmap rows.''') width = property(lambda self: self._FT_Bitmap.width, doc = '''The number of pixels in bitmap row.''') pitch = property(lambda self: self._FT_Bitmap.pitch, doc = '''The pitch's absolute value is the number of bytes taken by one bitmap row, including padding. However, the pitch is positive when the bitmap has a 'down' flow, and negative when it has an 'up' flow. In all cases, the pitch is an offset to add to a bitmap pointer in order to go down one row. 
Note that 'padding' means the alignment of a bitmap to a byte border, and FreeType functions normally align to the smallest possible integer value. For the B/W rasterizer, 'pitch' is always an even number. To change the pitch of a bitmap (say, to make it a multiple of 4), use FT_Bitmap_Convert. Alternatively, you might use callback functions to directly render to the application's surface; see the file 'example2.py' in the tutorial for a demonstration.''') def _get_buffer(self): data = [self._FT_Bitmap.buffer[i] for i in range(self.rows*self.pitch)] return data buffer = property(_get_buffer, doc = '''A typeless pointer to the bitmap buffer. This value should be aligned on 32-bit boundaries in most cases.''') num_grays = property(lambda self: self._FT_Bitmap.num_grays, doc = '''This field is only used with FT_PIXEL_MODE_GRAY; it gives the number of gray levels used in the bitmap.''') pixel_mode = property(lambda self: self._FT_Bitmap.pixel_mode, doc = '''The pixel mode, i.e., how pixel bits are stored. See FT_Pixel_Mode for possible values.''') palette_mode = property(lambda self: self._FT_Bitmap.palette_mode, doc ='''This field is intended for paletted pixel modes; it indicates how the palette is stored. Not used currently.''') palette = property(lambda self: self._FT_Bitmap.palette, doc = '''A typeless pointer to the bitmap palette; this field is intended for paletted pixel modes. Not used currently.''') # ----------------------------------------------------------------------------- class Charmap( object ): ''' FT_Charmap wrapper. A handle to a given character map. A charmap is used to translate character codes in a given encoding into glyph indexes for its parent's face. Some font formats may provide several charmaps per font. Each face object owns zero or more charmaps, but only one of them can be 'active' and used by FT_Get_Char_Index or FT_Load_Char. 
The list of available charmaps in a face is available through the 'face.num_charmaps' and 'face.charmaps' fields of FT_FaceRec. The currently active charmap is available as 'face.charmap'. You should call FT_Set_Charmap to change it. Note: ----- When a new face is created (either through FT_New_Face or FT_Open_Face), the library looks for a Unicode charmap within the list and automatically activates it. See also: --------- See FT_CharMapRec for the publicly accessible fields of a given character map. ''' def __init__( self, charmap ): ''' Create a new Charmap object. Parameters: ----------- charmap : a FT_Charmap ''' self._FT_Charmap = charmap encoding = property( lambda self: self._FT_Charmap.contents.encoding, doc = '''An FT_Encoding tag identifying the charmap. Use this with FT_Select_Charmap.''') platform_id = property( lambda self: self._FT_Charmap.contents.platform_id, doc = '''An ID number describing the platform for the following encoding ID. This comes directly from the TrueType specification and should be emulated for other formats.''') encoding_id = property( lambda self: self._FT_Charmap.contents.encoding_id, doc = '''A platform specific encoding number. This also comes from the TrueType specification and should be emulated similarly.''') def _get_encoding_name(self): encoding = self.encoding for key,value in FT_ENCODINGS.items(): if encoding == value: return key return 'Unknown encoding' encoding_name = property( _get_encoding_name, doc = '''A platform specific encoding name. This also comes from the TrueType specification and should be emulated similarly.''') def _get_index( self ): return FT_Get_Charmap_Index( self._FT_Charmap ) index = property( _get_index, doc = '''The index into the array of character maps within the face to which 'charmap' belongs. 
If an error occurs, -1 is returned.''') def _get_cmap_language_id( self ): return FT_Get_CMap_Language_ID( self._FT_Charmap ) cmap_language_id = property( _get_cmap_language_id, doc = '''The language ID of 'charmap'. If 'charmap' doesn't belong to a TrueType/sfnt face, just return 0 as the default value.''') def _get_cmap_format( self ): return FT_Get_CMap_Format( self._FT_Charmap ) cmap_format = property( _get_cmap_format, doc = '''The format of 'charmap'. If 'charmap' doesn't belong to a TrueType/sfnt face, return -1.''') # ----------------------------------------------------------------------------- class Outline( object ): ''' FT_Outline wrapper. This structure is used to describe an outline to the scan-line converter. ''' def __init__( self, outline ): ''' Create a new Outline object. Parameters: ----------- charmap : a FT_Outline ''' self._FT_Outline = outline n_contours = property(lambda self: self._FT_Outline.n_contours) def _get_contours(self): n = self._FT_Outline.n_contours data = [self._FT_Outline.contours[i] for i in range(n)] return data contours = property(_get_contours, doc = '''The number of contours in the outline.''') n_points = property(lambda self: self._FT_Outline.n_points) def _get_points(self): n = self._FT_Outline.n_points data = [] for i in range(n): v = self._FT_Outline.points[i] data.append( (v.x,v.y) ) return data points = property( _get_points, doc = '''The number of points in the outline.''') def _get_tags(self): n = self._FT_Outline.n_points data = [self._FT_Outline.tags[i] for i in range(n)] return data tags = property(_get_tags, doc = '''A list of 'n_points' chars, giving each outline point's type. If bit 0 is unset, the point is 'off' the curve, i.e., a Bezier control point, while it is 'on' if set. Bit 1 is meaningful for 'off' points only. If set, it indicates a third-order Bezier arc control point; and a second-order control point if unset. 
If bit 2 is set, bits 5-7 contain the drop-out mode (as defined in the OpenType specification; the value is the same as the argument to the SCANMODE instruction). Bits 3 and 4 are reserved for internal purposes.''') flags = property(lambda self: self._FT_Outline.flags, doc = '''A set of bit flags used to characterize the outline and give hints to the scan-converter and hinter on how to convert/grid-fit it. See FT_OUTLINE_FLAGS.''') def get_inside_border( self ): ''' Retrieve the FT_StrokerBorder value corresponding to the 'inside' borders of a given outline. Return: The border index. FT_STROKER_BORDER_RIGHT for empty or invalid outlines. ''' return FT_Outline_GetInsideBorder( self._FT_Outline ) def get_outside_border( self ): ''' Retrieve the FT_StrokerBorder value corresponding to the 'outside' borders of a given outline. Return: The border index. FT_STROKER_BORDER_RIGHT for empty or invalid outlines. ''' return FT_Outline_GetInsideBorder( self._FT_Outline ) def get_bbox(self): ''' Compute the exact bounding box of an outline. This is slower than computing the control box. However, it uses an advanced algorithm which returns very quickly when the two boxes coincide. Otherwise, the outline Bezier arcs are traversed to extract their extrema. ''' bbox = FT_BBox() error = FT_Outline_Get_BBox(byref(self._FT_Outline), byref(bbox)) if error: raise FT_Exception(error) return bbox def get_cbox(self): ''' Return an outline's 'control box'. The control box encloses all the outline's points, including Bezier control points. Though it coincides with the exact bounding box for most glyphs, it can be slightly larger in some situations (like when rotating an outline which contains Bezier outside arcs). Computing the control box is very fast, while getting the bounding box can take much more time as it needs to walk over all segments and arcs in the outline. To get the latter, you can use the 'ftbbox' component which is dedicated to this single task. 
''' bbox = FT_BBox() error = FT_Outline_Get_CBox(byref(self._FT_Outline), byref(bbox)) if error: raise FT_Exception(error) return BBox(bbox) # ----------------------------------------------------------------------------- class Glyph( object ): ''' FT_Glyph wrapper. The root glyph structure contains a given glyph image plus its advance width in 16.16 fixed float format. ''' def __init__( self, glyph ): ''' Create Glyph object from an FT glyph. Parameters: ----------- glyph: valid FT_Glyph object ''' self._FT_Glyph = glyph def __del__( self ): ''' Destroy glyph. ''' FT_Done_Glyph( self._FT_Glyph ) def _get_format( self ): return self._FT_Glyph.contents.format format = property( _get_format, doc = '''The format of the glyph's image.''') def stroke( self, stroker, destroy=False ): ''' Stroke a given outline glyph object with a given stroker. Parameters: ----------- stroker: A stroker handle. destroy: A Boolean. If 1, the source glyph object is destroyed on success. Note: ----- The source glyph is untouched in case of error. ''' error = FT_Glyph_Stroke( byref(self._FT_Glyph), stroker._FT_Stroker, destroy ) if error: raise FT_Exception( error ) def to_bitmap( self, mode, origin, destroy=False ): ''' Convert a given glyph object to a bitmap glyph object. Parameters: ----------- mode: An enumeration that describes how the data is rendered. origin: A pointer to a vector used to translate the glyph image before rendering. Can be 0 (if no translation). The origin is expressed in 26.6 pixels. destroy: A boolean that indicates that the original glyph image should be destroyed by this function. It is never destroyed in case of error. Note: ----- This function does nothing if the glyph format isn't scalable. The glyph image is translated with the 'origin' vector before rendering. The first parameter is a pointer to an FT_Glyph handle, that will be replaced by this function (with newly allocated data). 
Typically, you would use (omitting error handling): ''' error = FT_Glyph_To_Bitmap( byref(self._FT_Glyph), mode, origin, destroy) if error: raise FT_Exception( error ) return BitmapGlyph( self._FT_Glyph ) def get_cbox(self, bbox_mode): ''' Return an outline's 'control box'. The control box encloses all the outline's points, including Bezier control points. Though it coincides with the exact bounding box for most glyphs, it can be slightly larger in some situations (like when rotating an outline which contains Bezier outside arcs). Computing the control box is very fast, while getting the bounding box can take much more time as it needs to walk over all segments and arcs in the outline. To get the latter, you can use the 'ftbbox' component which is dedicated to this single task. Parameters: ----------- mode : The mode which indicates how to interpret the returned bounding box values. Note: ----- Coordinates are relative to the glyph origin, using the y upwards convention. If the glyph has been loaded with FT_LOAD_NO_SCALE, 'bbox_mode' must be set to FT_GLYPH_BBOX_UNSCALED to get unscaled font units in 26.6 pixel format. The value FT_GLYPH_BBOX_SUBPIXELS is another name for this constant. Note that the maximum coordinates are exclusive, which means that one can compute the width and height of the glyph image (be it in integer or 26.6 pixels) as: width = bbox.xMax - bbox.xMin; height = bbox.yMax - bbox.yMin; Note also that for 26.6 coordinates, if 'bbox_mode' is set to FT_GLYPH_BBOX_GRIDFIT, the coordinates will also be grid-fitted, which corresponds to: bbox.xMin = FLOOR(bbox.xMin); bbox.yMin = FLOOR(bbox.yMin); bbox.xMax = CEILING(bbox.xMax); bbox.yMax = CEILING(bbox.yMax); To get the bbox in pixel coordinates, set 'bbox_mode' to FT_GLYPH_BBOX_TRUNCATE. To get the bbox in grid-fitted pixel coordinates, set 'bbox_mode' to FT_GLYPH_BBOX_PIXELS. 
''' bbox = FT_BBox() error = FT_Glyph_Get_CBox(byref(self._FT_Glyph), bbox_mode,byref(bbox)) if error: raise FT_Exception(error) return BBox(bbox) # ----------------------------------------------------------------------------- class BitmapGlyph( object ): ''' FT_BitmapGlyph wrapper. A structure used for bitmap glyph images. This really is a 'sub-class' of FT_GlyphRec. ''' def __init__( self, glyph ): ''' Create Glyph object from an FT glyph. Parameters: ----------- glyph: valid FT_Glyph object ''' self._FT_BitmapGlyph = cast(glyph, FT_BitmapGlyph) # def __del__( self ): # ''' # Destroy glyph. # ''' # FT_Done_Glyph( cast(self._FT_BitmapGlyph, FT_Glyph) ) def _get_format( self ): return self._FT_BitmapGlyph.contents.format format = property( _get_format, doc = '''The format of the glyph's image.''') def _get_bitmap( self ): return Bitmap( self._FT_BitmapGlyph.contents.bitmap ) bitmap = property( _get_bitmap, doc = '''A descriptor for the bitmap.''') def _get_left( self ): return self._FT_BitmapGlyph.contents.left left = property( _get_left, doc = '''The left-side bearing, i.e., the horizontal distance from the current pen position to the left border of the glyph bitmap.''') def _get_top( self ): return self._FT_BitmapGlyph.contents.top top = property( _get_top, doc = '''The top-side bearing, i.e., the vertical distance from the current pen position to the top border of the glyph bitmap. This distance is positive for upwards y!''') # ----------------------------------------------------------------------------- class GlyphSlot( object ): ''' FT_GlyphSlot wrapper. FreeType root glyph slot class structure. A glyph slot is a container where individual glyphs can be loaded, be they in outline or bitmap format. ''' def __init__( self, slot ): ''' Create GlyphSlot object from an FT glyph slot. Parameters: ----------- glyph: valid FT_GlyphSlot object ''' self._FT_GlyphSlot = slot def get_glyph( self ): ''' A function used to extract a glyph image from a slot. 
Note that the created FT_Glyph object must be released with FT_Done_Glyph. ''' aglyph = FT_Glyph() error = FT_Get_Glyph( self._FT_GlyphSlot, byref(aglyph) ) if error: raise FT_Exception( error ) return Glyph( aglyph ) def _get_bitmap( self ): return Bitmap( self._FT_GlyphSlot.contents.bitmap ) bitmap = property( _get_bitmap, doc = '''This field is used as a bitmap descriptor when the slot format is FT_GLYPH_FORMAT_BITMAP. Note that the address and content of the bitmap buffer can change between calls of FT_Load_Glyph and a few other functions.''') def _get_next( self ): return GlyphSlot( self._FT_GlyphSlot.contents.next ) next = property( _get_next, doc = '''In some cases (like some font tools), several glyph slots per face object can be a good thing. As this is rare, the glyph slots are listed through a direct, single-linked list using its 'next' field.''') advance = property( lambda self: self._FT_GlyphSlot.contents.advance, doc = '''This shorthand is, depending on FT_LOAD_IGNORE_TRANSFORM, the transformed advance width for the glyph (in 26.6 fractional pixel format). As specified with FT_LOAD_VERTICAL_LAYOUT, it uses either the 'horiAdvance' or the 'vertAdvance' value of 'metrics' field.''') def _get_outline( self ): return Outline( self._FT_GlyphSlot.contents.outline ) outline = property( _get_outline, doc = '''The outline descriptor for the current glyph image if its format is FT_GLYPH_FORMAT_OUTLINE. Once a glyph is loaded, 'outline' can be transformed, distorted, embolded, etc. However, it must not be freed.''') format = property( lambda self: self._FT_GlyphSlot.contents.format, doc = '''This field indicates the format of the image contained in the glyph slot. Typically FT_GLYPH_FORMAT_BITMAP, FT_GLYPH_FORMAT_OUTLINE, or FT_GLYPH_FORMAT_COMPOSITE, but others are possible.''') bitmap_top = property( lambda self: self._FT_GlyphSlot.contents.bitmap_top, doc = '''This is the bitmap's top bearing expressed in integer pixels. 
Remember that this is the distance from the baseline to the top-most glyph scanline, upwards y coordinates being positive.''') bitmap_left = property( lambda self: self._FT_GlyphSlot.contents.bitmap_left, doc = '''This is the bitmap's left bearing expressed in integer pixels. Of course, this is only valid if the format is FT_GLYPH_FORMAT_BITMAP.''') linearHoriAdvance = property( lambda self: self._FT_GlyphSlot.contents.linearHoriAdvance, doc = '''The advance width of the unhinted glyph. Its value is expressed in 16.16 fractional pixels, unless FT_LOAD_LINEAR_DESIGN is set when loading the glyph. This field can be important to perform correct WYSIWYG layout. Only relevant for outline glyphs.''') linearVertAdvance = property( lambda self: self._FT_GlyphSlot.contents.linearVertAdvance, doc = '''The advance height of the unhinted glyph. Its value is expressed in 16.16 fractional pixels, unless FT_LOAD_LINEAR_DESIGN is set when loading the glyph. This field can be important to perform correct WYSIWYG layout. Only relevant for outline glyphs.''') # ----------------------------------------------------------------------------- # Face wrapper # ----------------------------------------------------------------------------- class Face( object ): ''' FT_Face wrapper FreeType root face class structure. A face object models a typeface in a font file. ''' def __init__( self, filename, index = 0 ): ''' Build a new Face Parameters: ----------- filename: A path to the font file. index: The index of the face within the font. The first face has index 0. ''' library = get_handle( ) face = FT_Face( ) self._FT_Face = None error = FT_New_Face( library, filename, 0, byref(face) ) if error: raise FT_Exception( error ) self._filename = filename self._index = index self._FT_Face = face def __del__( self ): ''' Discard face object, as well as all of its child slots and sizes. 
''' if self._FT_Face is not None: FT_Done_Face( self._FT_Face ) def get_format(self): return FT_Get_X11_Font_Format(self._FT_Face) def get_fstype(self): flags = {0: "INSTALLABLE_EMBEDDING", 2: "RESTRICTED_LICENSE_EMBEDDING", 4: "PREVIEW_AND_PRINT_EMBEDDING", 8: "EDITABLE_EMBEDDING", 100: "NO_SUBSETTING", 200: "BITMAP_EMBEDDING_ONLY" } flag = FT_Get_FSType_Flags(self._FT_Face) return flags.get(flag, "ERROR") def set_char_size( self, width=0, height=0, hres=72, vres=72 ): ''' This function calls FT_Request_Size to request the nominal size (in points). Parameters: ----------- width: The nominal width, in 26.6 fractional points. height: The nominal height, in 26.6 fractional points. hres: The horizontal resolution in dpi. vres: The vertical resolution in dpi. Note: ----- If either the character width or height is zero, it is set equal to the other value. If either the horizontal or vertical resolution is zero, it is set equal to the other value. A character width or height smaller than 1pt is set to 1pt; if both resolution values are zero, they are set to 72dpi. Don't use this function if you are using the FreeType cache API. ''' error = FT_Set_Char_Size( self._FT_Face, width, height, hres, vres ) if error: raise FT_Exception( error) def set_pixel_sizes( self, width, height ): ''' This function calls FT_Request_Size to request the nominal size (in pixels). Parameters: ----------- width: The nominal width, in pixels. height: The nominal height, in pixels. ''' error = FT_Set_Pixel_Sizes( self._FT_Face, width, height ) if error: raise FT_Exception(error) def select_charmap( self, encoding ): ''' Select a given charmap by its encoding tag (as listed in 'freetype.h'). Note: ----- This function returns an error if no charmap in the face corresponds to the encoding queried here. 
Because many fonts contain more than a single cmap for Unicode encoding, this function has some special code to select the one which covers Unicode best ('best' in the sense that a UCS-4 cmap is preferred to a UCS-2 cmap). It is thus preferable to FT_Set_Charmap in this case. ''' error = FT_Select_Charmap( self._FT_Face, encoding ) if error: raise FT_Exception(error) def set_charmap( self, charmap ): ''' Select a given charmap for character code to glyph index mapping. Parameters: ----------- charmap: A handle to the selected charmap. ''' error = FT_Set_Charmap( self._FT_Face, charmap._FT_Charmap ) if error : raise FT_Exception(error) def get_char_index( self, charcode ): ''' Return the glyph index of a given character code. This function uses a charmap object to do the mapping. Parameters: ----------- charcode: The character code. Note: ----- If you use FreeType to manipulate the contents of font files directly, be aware that the glyph index returned by this function doesn't always correspond to the internal indices used within the file. This is done to ensure that value 0 always corresponds to the 'missing glyph'. ''' if type( charcode ) in (str,unicode): charcode = ord( charcode ) return FT_Get_Char_Index( self._FT_Face, charcode ) def get_first_char( self ): ''' This function is used to return the first character code in the current charmap of a given face. It also returns the corresponding glyph index. Return: ------- Glyph index of first character code. 0 if charmap is empty. Note: ----- You should use this function with get_next_char to be able to parse all character codes available in a given charmap. The code should look like this: Note that 'agindex' is set to 0 if the charmap is empty. The result itself can be 0 in two cases: if the charmap is empty or if the value 0 is the first valid character code. 
''' agindex = FT_UInt() charcode = FT_Get_First_Char( self._FT_Face, byref(agindex) ) return charcode, agindex.value def get_next_char( self, charcode, agindex ): ''' This function is used to return the next character code in the current charmap of a given face following the value 'char_code', as well as the corresponding glyph index. Parameters: ----------- charcode: The starting character code. agindex: Glyph index of next character code. 0 if charmap is empty. Note: ----- You should use this function with FT_Get_First_Char to walk over all character codes available in a given charmap. See the note for this function for a simple code example. Note that 'agindex' is set to 0 when there are no more codes in the charmap. ''' agindex = FT_UInt( agindex ) charcode = FT_Get_Next_Char( self._FT_Face, charcode, byref(agindex) ) return charcode, agindex.value def get_name_index( self, name ): ''' Return the glyph index of a given glyph name. This function uses driver specific objects to do the translation. Parameters: ----------- glyph_name: The glyph name. ''' return FT_Get_Name_Index( self._FT_Face, name ) def set_transform( self, matrix, delta ): ''' A function used to set the transformation that is applied to glyph images when they are loaded into a glyph slot through FT_Load_Glyph. Parameters: ----------- matrix: A pointer to the transformation's 2x2 matrix. Use 0 for the identity matrix. delta: A pointer to the translation vector. Use 0 for the null vector. Note: ----- The transformation is only applied to scalable image formats after the glyph has been loaded. It means that hinting is unaltered by the transformation and is performed on the character size given in the last call to FT_Set_Char_Size or FT_Set_Pixel_Sizes. Note that this also transforms the 'face.glyph.advance' field, but not the values in 'face.glyph.metrics'. ''' FT_Set_Transform( self._FT_Face, byref(matrix), byref(delta) ) def select_size( self, strike_index ): ''' Select a bitmap strike. 
Parameters: ----------- strike_index: The index of the bitmap strike in the 'available_sizes' field of Face object. ''' error = FT_Select_Size( self._FT_Face, strike_index ) if error: raise FT_Exception( error ) def load_glyph( self, index, flags = FT_LOAD_RENDER ): ''' A function used to load a single glyph into the glyph slot of a face object. Parameters: ----------- index: The index of the glyph in the font file. For CID-keyed fonts (either in PS or in CFF format) this argument specifies the CID value. flags: A flag indicating what to load for this glyph. The FT_LOAD_XXX constants can be used to control the glyph loading process (e.g., whether the outline should be scaled, whether to load bitmaps or not, whether to hint the outline, etc). Note: ----- The loaded glyph may be transformed. See FT_Set_Transform for the details. For subsetted CID-keyed fonts, 'FT_Err_Invalid_Argument' is returned for invalid CID values (this is, for CID values which don't have a corresponding glyph in the font). See the discussion of the FT_FACE_FLAG_CID_KEYED flag for more details. ''' error = FT_Load_Glyph( self._FT_Face, index, flags ) if error: raise FT_Exception( error ) def load_char( self, char, flags = FT_LOAD_RENDER ): ''' A function used to load a single glyph into the glyph slot of a face object, according to its character code. Parameters: ----------- char: The glyph's character code, according to the current charmap used in the face. flags: A flag indicating what to load for this glyph. The FT_LOAD_XXX constants can be used to control the glyph loading process (e.g., whether the outline should be scaled, whether to load bitmaps or not, whether to hint the outline, etc). Note: ----- This function simply calls FT_Get_Char_Index and FT_Load_Glyph. ''' error = FT_Load_Char( self._FT_Face, ord(char), flags ) if error: raise FT_Exception( error ) def get_kerning( self, left, right, mode = FT_KERNING_DEFAULT ): ''' Return the kerning vector between two glyphs of a same face. 
Parameters: ----------- left: The index of the left glyph in the kern pair. right: The index of the right glyph in the kern pair. mode: See FT_Kerning_Mode for more information. Determines the scale and dimension of the returned kerning vector. Note: ----- Only horizontal layouts (left-to-right & right-to-left) are supported by this method. Other layouts, or more sophisticated kernings, are out of the scope of this API function -- they can be implemented through format-specific interfaces. ''' left_glyph = self.get_char_index( left ) right_glyph = self.get_char_index( right ) kerning = FT_Vector(0,0) error = FT_Get_Kerning( self._FT_Face, left_glyph, right_glyph, mode, byref(kerning) ) if error: raise FT_Exception( error ) return kerning def _get_sfnt_name_count(self): return FT_Get_Sfnt_Name_Count( self._FT_Face ) sfnt_name_count = property(_get_sfnt_name_count, doc = '''Number of name strings in the SFNT 'name' table.''') def get_sfnt_name( self, index ): ''' Retrieve a string of the SFNT 'name' table for a given index Parameters: ----------- index: The index of the 'name' string. Note: ----- The 'string' array returned in the 'aname' structure is not null-terminated. The application should deallocate it if it is no longer in use. Use FT_Get_Sfnt_Name_Count to get the total number of available 'name' table entries, then do a loop until you get the right platform, encoding, and name ID. ''' name = FT_SfntName( ) error = FT_Get_Sfnt_Name( self._FT_Face, index, byref(name) ) if error: raise FT_Exception( error ) return SfntName( name ) def _get_postscript_name( self ): return FT_Get_Postscript_Name( self._FT_Face ) postscript_name = property( _get_postscript_name, doc = '''ASCII PostScript name of face, if available. 
This only works with PostScript and TrueType fonts.''') def _has_horizontal( self ): return bool( self.face_flags & FT_FACE_FLAG_HORIZONTAL ) has_horizontal = property( _has_horizontal, doc = '''True whenever a face object contains horizontal metrics (this is true for all font formats though).''') def _has_vertical( self ): return bool( self.face_flags & FT_FACE_FLAG_VERTICAL ) has_vertical = property( _has_vertical, doc = '''True whenever a face object contains vertical metrics.''') def _has_kerning( self ): return bool( self.face_flags & FT_FACE_FLAG_KERNING ) has_kerning = property( _has_kerning, doc = '''True whenever a face object contains kerning data that can be accessed with FT_Get_Kerning.''') def _is_scalable( self ): return bool( self.face_flags & FT_FACE_FLAG_SCALABLE ) is_scalable = property( _is_scalable, doc = '''true whenever a face object contains a scalable font face (true for TrueType, Type 1, Type 42, CID, OpenType/CFF, and PFR font formats.''') def _is_sfnt( self ): return bool( self.face_flags & FT_FACE_FLAG_SFNT ) is_sfnt = property( _is_sfnt, doc = '''true whenever a face object contains a font whose format is based on the SFNT storage scheme. This usually means: TrueType fonts, OpenType fonts, as well as SFNT-based embedded bitmap fonts. If this macro is true, all functions defined in FT_SFNT_NAMES_H and FT_TRUETYPE_TABLES_H are available.''') def _is_fixed_width( self ): return bool( self.face_flags & FT_FACE_FLAG_FIXED_WIDTH ) is_fixed_width = property( _is_fixed_width, doc = '''True whenever a face object contains a font face that contains fixed-width (or 'monospace', 'fixed-pitch', etc.) glyphs.''') def _has_fixed_sizes( self ): return bool( self.face_flags & FT_FACE_FLAG_FIXED_SIZES ) has_fixed_sizes = property( _has_fixed_sizes, doc = '''True whenever a face object contains some embedded bitmaps. 
See the 'available_sizes' field of the FT_FaceRec structure.''') def _has_glyph_names( self ): return bool( self.face_flags & FT_FACE_FLAG_GLYPH_NAMES ) has_glyph_names = property( _has_glyph_names, doc = '''True whenever a face object contains some glyph names that can be accessed through FT_Get_Glyph_Name.''') def _has_multiple_masters( self ): return bool( self.face_flags & FT_FACE_FLAG_MULTIPLE_MASTERS ) has_multiple_masters = property( _has_multiple_masters, doc = '''True whenever a face object contains some multiple masters. The functions provided by FT_MULTIPLE_MASTERS_H are then available to choose the exact design you want.''') def _is_cid_keyed( self ): return bool( self.face_flags & FT_FACE_FLAG_CID_KEYED ) is_cid_keyed = property( _is_cid_keyed, doc = '''True whenever a face object contains a CID-keyed font. See the discussion of FT_FACE_FLAG_CID_KEYED for more details. If this macro is true, all functions defined in FT_CID_H are available.''') def _is_tricky( self ): return bool( self.face_flags & FT_FACE_FLAG_TRICKY ) is_tricky = property( _is_tricky, doc = '''True whenever a face represents a 'tricky' font. See the discussion of FT_FACE_FLAG_TRICKY for more details.''') num_faces = property(lambda self: self._FT_Face.contents.num_faces, doc = '''The number of faces in the font file. Some font formats can have multiple faces in a font file.''') face_index = property(lambda self: self._FT_Face.contents.face_index, doc = '''The index of the face in the font file. 
It is set to 0 if there is only one face in the font file.''') face_flags = property(lambda self: self._FT_Face.contents.face_flags, doc = '''A set of bit flags that give important information about the face; see FT_FACE_FLAG_XXX for the details.''') style_flags = property(lambda self: self._FT_Face.contents.style_flags, doc = '''A set of bit flags indicating the style of the face; see FT_STYLE_FLAG_XXX for the details.''') num_glyphs = property(lambda self: self._FT_Face.contents.num_glyphs, doc = '''The number of glyphs in the face. If the face is scalable and has sbits (see 'num_fixed_sizes'), it is set to the number of outline glyphs. For CID-keyed fonts, this value gives the highest CID used in the font.''') family_name = property(lambda self: self._FT_Face.contents.family_name, doc = '''The face's family name. This is an ASCII string, usually in English, which describes the typeface's family (like 'Times New Roman', 'Bodoni', 'Garamond', etc). This is a least common denominator used to list fonts. Some formats (TrueType & OpenType) provide localized and Unicode versions of this string. Applications should use the format specific interface to access them. Can be NULL (e.g., in fonts embedded in a PDF file).''') style_name = property(lambda self: self._FT_Face.contents.style_name, doc = '''The face's style name. This is an ASCII string, usually in English, which describes the typeface's style (like 'Italic', 'Bold', 'Condensed', etc). Not all font formats provide a style name, so this field is optional, and can be set to NULL. As for 'family_name', some formats provide localized and Unicode versions of this string. Applications should use the format specific interface to access them.''') num_fixed_sizes = property(lambda self: self._FT_Face.contents.num_fixed_sizes, doc = '''The number of bitmap strikes in the face. 
Even if the face is scalable, there might still be bitmap strikes, which are called 'sbits' in that case.''') def _get_available_sizes( self ): sizes = [] n = self.num_fixed_sizes FT_sizes = self._FT_Face.contents.available_sizes for i in range(n): sizes.append( BitmapSize(FT_sizes[i]) ) return sizes available_sizes = property(_get_available_sizes, doc = '''A list of FT_Bitmap_Size for all bitmap strikes in the face. It is set to NULL if there is no bitmap strike.''') num_charmaps = property(lambda self: self._FT_Face.contents.num_charmaps) def _get_charmaps( self ): charmaps = [] n = self._FT_Face.contents.num_charmaps FT_charmaps = self._FT_Face.contents.charmaps for i in range(n): charmaps.append( Charmap(FT_charmaps[i]) ) return charmaps charmaps = property(_get_charmaps, doc = '''A list of the charmaps of the face.''') # ('generic', FT_Generic), def _get_bbox( self ): return BBox( self._FT_Face.contents.bbox ) bbox = property( _get_bbox, doc = '''The font bounding box. Coordinates are expressed in font units (see 'units_per_EM'). The box is large enough to contain any glyph from the font. Thus, 'bbox.yMax' can be seen as the 'maximal ascender', and 'bbox.yMin' as the 'minimal descender'. Only relevant for scalable formats. Note that the bounding box might be off by (at least) one pixel for hinted fonts. See FT_Size_Metrics for further discussion.''') units_per_EM = property(lambda self: self._FT_Face.contents.units_per_EM, doc = '''The number of font units per EM square for this face. This is typically 2048 for TrueType fonts, and 1000 for Type 1 fonts. Only relevant for scalable formats.''') ascender = property(lambda self: self._FT_Face.contents.ascender, doc = '''The typographic ascender of the face, expressed in font units. For font formats not having this information, it is set to 'bbox.yMax'. 
Only relevant for scalable formats.''') descender = property(lambda self: self._FT_Face.contents.descender, doc = '''The typographic descender of the face, expressed in font units. For font formats not having this information, it is set to 'bbox.yMin'. Note that this field is usually negative. Only relevant for scalable formats.''') height = property(lambda self: self._FT_Face.contents.height, doc = '''The height is the vertical distance between two consecutive baselines, expressed in font units. It is always positive. Only relevant for scalable formats.''') max_advance_width = property(lambda self: self._FT_Face.contents.max_advance_width, doc = '''The maximal advance width, in font units, for all glyphs in this face. This can be used to make word wrapping computations faster. Only relevant for scalable formats.''') max_advance_height = property(lambda self: self._FT_Face.contents.max_advance_height, doc = '''The maximal advance height, in font units, for all glyphs in this face. This is only relevant for vertical layouts, and is set to 'height' for fonts that do not provide vertical metrics. Only relevant for scalable formats.''') underline_position = property(lambda self: self._FT_Face.contents.underline_position, doc = '''The position, in font units, of the underline line for this face. It is the center of the underlining stem. Only relevant for scalable formats.''') underline_thickness = property(lambda self: self._FT_Face.contents.underline_thickness, doc = '''The thickness, in font units, of the underline for this face. 
Only relevant for scalable formats.''') def _get_glyph( self ): return GlyphSlot( self._FT_Face.contents.glyph ) glyph = property( _get_glyph, doc = '''The face's associated glyph slot(s).''') def _get_size( self ): size = self._FT_Face.contents.size metrics = size.contents.metrics return SizeMetrics(metrics) size = property( _get_size, doc = '''The current active size for this face.''') def _get_charmap( self ): return Charmap( self._FT_Face.contents.charmap) charmap = property( _get_charmap, doc = '''The current active charmap for this face.''') # ----------------------------------------------------------------------------- # SfntName wrapper # ----------------------------------------------------------------------------- class SfntName( object ): ''' SfntName wrapper A structure used to model an SFNT 'name' table entry. ''' def __init__(self, name): ''' Create a new SfntName object. Parameters: ----------- name : SFNT 'name' table entry. ''' self._FT_SfntName = name platform_id = property(lambda self: self._FT_SfntName.platform_id, doc = '''The platform ID for 'string'.''') encoding_id = property(lambda self: self._FT_SfntName.encoding_id, doc = '''The encoding ID for 'string'.''') language_id = property(lambda self: self._FT_SfntName.language_id, doc = '''The language ID for 'string'.''') name_id = property(lambda self: self._FT_SfntName.name_id, doc = '''An identifier for 'string'.''') #string = property(lambda self: self._FT_SfntName.string) string_len = property(lambda self: self._FT_SfntName.string_len, doc = '''The length of 'string' in bytes.''') def _get_string(self): # #s = self._FT_SfntName s = string_at(self._FT_SfntName.string, self._FT_SfntName.string_len) return s # #return s.decode('utf-16be', 'ignore') # return s.decode('utf-8', 'ignore') # #n = s.string_len # #data = [s.string[i] for i in range(n)] # #return data string = property(_get_string, doc = '''The 'name' string. Note that its format differs depending on the (platform,encoding) pair. 
It can be a Pascal String, a UTF-16 one, etc. Generally speaking, the string is not zero-terminated. Please refer to the TrueType specification for details.''') # ----------------------------------------------------------------------------- class Stroker( object ): ''' FT_Stroker wrapper This component generates stroked outlines of a given vectorial glyph. It also allows you to retrieve the 'outside' and/or the 'inside' borders of the stroke. This can be useful to generate 'bordered' glyph, i.e., glyphs displayed with a coloured (and anti-aliased) border around their shape. ''' def __init__( self ): ''' Create a new Stroker object. ''' library = get_handle( ) stroker = FT_Stroker( ) error = FT_Stroker_New( library, byref(stroker) ) if error: raise FT_Exception( error ) self._FT_Stroker = stroker def __del__( self ): ''' Destroy object. ''' FT_Stroker_Done( self._FT_Stroker ) def set( self, radius, line_cap, line_join, miter_limit ): ''' Reset a stroker object's attributes. Parameters: ----------- radius : The border radius. line_cap : The line cap style. line_join : The line join style. miter_limit: The miter limit for the FT_STROKER_LINEJOIN_MITER style, expressed as 16.16 fixed point value. Note: ----- The radius is expressed in the same units as the outline coordinates. ''' FT_Stroker_Set( self._FT_Stroker, radius, line_cap, line_join, miter_limit ) def rewind( self ): ''' Reset a stroker object without changing its attributes. You should call this function before beginning a new series of calls to FT_Stroker_BeginSubPath or FT_Stroker_EndSubPath. ''' FT_Stroker_Rewind( self._FT_Stroker ) def parse_outline( self, outline, opened ): ''' A convenience function used to parse a whole outline with the stroker. The resulting outline(s) can be retrieved later by functions like FT_Stroker_GetCounts and FT_Stroker_Export. Parameters: ----------- outline: The source outline. opened : A boolean. If 1, the outline is treated as an open path instead of a closed one. 
Note: ----- If 'opened' is 0 (the default), the outline is treated as a closed path, and the stroker generates two distinct 'border' outlines. If 'opened' is 1, the outline is processed as an open path, and the stroker generates a single 'stroke' outline. This function calls 'rewind' automatically. ''' error = FT_Stroker_ParseOutline( self._FT_Stroker, outline, opened) if error: raise FT_Exception( error ) def begin_subpath( self, to, open_ ): ''' Start a new sub-path in the stroker. Parameters: ----------- to : A pointer to the start vector. open_: A boolean. If 1, the sub-path is treated as an open one. Note: ----- This function is useful when you need to stroke a path that is not stored as an 'Outline' object. ''' error = FT_Stroker_BeginSubPath( self._FT_Stroker, to, open_ ) if error: raise FT_Exception( error ) def end_subpath( self ): ''' Close the current sub-path in the stroker. Note: ----- You should call this function after 'begin_subpath'. If the subpath was not 'opened', this function 'draws' a single line segment to the start position when needed. ''' error = FT_Stroker_EndSubPath( self._FT_Stroker) if error: raise FT_Exception( error ) def line_to( self, to ): ''' 'Draw' a single line segment in the stroker's current sub-path, from the last position. Parameters: ----------- to: A pointer to the destination point. Note: ----- You should call this function between 'begin_subpath' and 'end_subpath'. ''' error = FT_Stroker_LineTo( self._FT_Stroker, to ) if error: raise FT_Exception( error ) def conic_to( self, control, to ): ''' 'Draw' a single quadratic Bezier in the stroker's current sub-path, from the last position. Parameters: ----------- control: A pointer to a Bezier control point. to : A pointer to the destination point. Note: ----- You should call this function between 'begin_subpath' and 'end_subpath'. 
''' error = FT_Stroker_ConicTo( self._FT_Stroker, control, to ) if error: raise FT_Exception( error ) def cubic_to( self, control1, control2, to ): ''' 'Draw' a single quadratic Bezier in the stroker's current sub-path, from the last position. Parameters: ----------- control1: A pointer to the first Bezier control point. control2: A pointer to second Bezier control point. to : A pointer to the destination point. Note: ----- You should call this function between 'begin_subpath' and 'end_subpath'. ''' error = FT_Stroker_CubicTo( self._FT_Stroker, control1, control2, to ) if error: raise FT_Exception( error ) def get_border_counts( self, border ): ''' Call this function once you have finished parsing your paths with the stroker. It returns the number of points and contours necessary to export one of the 'border' or 'stroke' outlines generated by the stroker. Parameters: ----------- border: The border index. Return: ------- number of points, number of contours ''' anum_points = FT_UInt() anum_contours = FT_UInt() error = FT_Stroker_GetBorderCounts( self._FT_Stroker, border, byref(anum_points), byref(anum_contours) ) if error: raise FT_Exception( error ) return anum_points.value, anum_contours.value def export_border( self , border, outline ): ''' Call this function after 'get_border_counts' to export the corresponding border to your own 'Outline' structure. Note that this function appends the border points and contours to your outline, but does not try to resize its arrays. Parameters: ----------- border: The border index. outline: The target outline. Note: ----- Always call this function after get_border_counts to get sure that there is enough room in your 'Outline' object to receive all new data. When an outline, or a sub-path, is 'closed', the stroker generates two independent 'border' outlines, named 'left' and 'right' When the outline, or a sub-path, is 'opened', the stroker merges the 'border' outlines with caps. 
The 'left' border receives all points, while the 'right' border becomes empty. Use the function export instead if you want to retrieve all borders at once. ''' FT_Stroker_ExportBorder( self._FT_Stroker, border, outline._FT_Outline ) def get_counts( self ): ''' Call this function once you have finished parsing your paths with the stroker. It returns the number of points and contours necessary to export all points/borders from the stroked outline/path. Return: ------- number of points, number of contours ''' anum_points = FT_UInt() anum_contours = FT_UInt() error = FT_Stroker_GetCounts( self._FT_Stroker, byref(anum_points), byref(anum_contours) ) if error: raise FT_Exception( error ) return anum_points.value, anum_contours.value def export( self, outline ): ''' Call this function after get_border_counts to export all borders to your own 'Outline' structure. Note that this function appends the border points and contours to your outline, but does not try to resize its arrays. Parameters: ----------- outline: The target outline. ''' FT_Stroker_Export( self._FT_Stroker, outline._FT_Outline )
{ "content_hash": "5b38fa196a628234402ab33dec887b37", "timestamp": "", "source": "github", "line_count": 1882, "max_line_length": 91, "avg_line_length": 38.07704569606801, "alnum_prop": 0.5840973472321067, "repo_name": "davidcox/freetype-py", "id": "b4ce21ec5f7991b4bfbe34faab112d25fa44b976", "size": "71996", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "freetype/__init__.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "254640" } ], "symlink_target": "" }
# Public batch-processing API of ``yass.batch``: re-export the main
# classes and helpers so callers can import them from the package root.
from yass.batch.batch import BatchProcessor
from yass.batch.reader import RecordingsReader, BinaryReader, MemoryMap
from yass.batch.generator import IndexGenerator
from yass.batch.pipeline import PipedTransformation, BatchPipeline
from yass.batch.vectorize import vectorize_parameter

# Names exported by ``from yass.batch import *``; keep in sync with the
# imports above.
__all__ = ['BatchProcessor', 'RecordingsReader', 'BinaryReader',
           'MemoryMap', 'IndexGenerator', 'PipedTransformation',
           'BatchPipeline', 'vectorize_parameter']
{ "content_hash": "305bfb275700e28c2044abc26a98e96d", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 71, "avg_line_length": 39.23076923076923, "alnum_prop": 0.7156862745098039, "repo_name": "paninski-lab/yass", "id": "252fa50901325b58b43c620017d2c60f3eafc3ea", "size": "510", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/yass/batch/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "8874" }, { "name": "C++", "bytes": "26804" }, { "name": "Cuda", "bytes": "33184" }, { "name": "Makefile", "bytes": "3129" }, { "name": "Python", "bytes": "1658933" }, { "name": "Shell", "bytes": "1770" } ], "symlink_target": "" }
from google.datacatalog_connectors.rdbms.scrape import MetadataNormalizer


class SQLObjectsMetadataNormalizer(MetadataNormalizer):
    """Turns a scraped SQL-objects dataframe into a normalized dict."""

    @classmethod
    def normalize(cls, metadata, metadata_definition):
        """Normalize a Pandas dataframe describing SQL objects.

        Rows containing NaN values are dropped, then the dataframe is
        converted into a dictionary shaped by ``metadata_definition``.

        :param metadata: the Pandas dataframe to normalize
        :param metadata_definition: the Metadata Definition; the
            normalized dictionary is created with the target keys it
            specifies.

            Example::

                {
                    "key": "functions",
                    "type": "function",
                    "name": "function_name",
                    "fields": [
                        {
                            "source": "schema_name",
                            "target": {
                                "field_name": "schema_name",
                                "model": "tag",
                                "type": "string"
                            }
                        },
                        {
                            "source": "definition",
                            "target": {
                                "field_name": "definition",
                                "model": "tag",
                                "type": "string"
                            }
                        }
                    ]
                }

        :return: a normalized dict object
        """
        cls._remove_nan_rows(metadata)
        return SQLObjectsMetadataNormalizer.__normalize_sql_objects(
            metadata, metadata_definition)

    @classmethod
    def __normalize_sql_objects(cls, metadata, metadata_definition):
        # Top-level result: the object type plus one normalized item per
        # scraped object, keyed by the configured name column.
        return {
            'type': metadata_definition['type'],
            'items': cls._normalize_objects(
                metadata=metadata,
                key_column_name=metadata_definition['name'],
                normalizer_method=cls.__normalize_sql_object,
                metadata_definition=metadata_definition),
        }

    @classmethod
    def __normalize_sql_object(cls, name, sql_objects_metadata,
                               metadata_definition):
        # A single item: its name plus every normalized field.
        item = {'name': name}
        item.update(
            cls._normalize_fields(metadata_definition['fields'],
                                  sql_objects_metadata))
        return item

    @classmethod
    def _normalize_fields(cls, fields, metadata):
        normalized = {}
        for field in fields:
            source, target = field['source'], field['target']
            # The 'source' field is optional and might not be present in
            # the scraped metadata; skip the field in that case.
            if source not in metadata:
                continue
            value = cls._extract_value_from_first_row(metadata, source)
            if cls._is_timestamp_field(target):
                value = cls._normalize_timestamp_field(value)
            normalized[target['field_name']] = value
        return normalized

    @classmethod
    def _is_timestamp_field(cls, target):
        return target['type'] == 'timestamp'
{ "content_hash": "3d59d919f078fef54161a9e75a6097cc", "timestamp": "", "source": "github", "line_count": 104, "max_line_length": 79, "avg_line_length": 36.02884615384615, "alnum_prop": 0.45102748865759273, "repo_name": "GoogleCloudPlatform/datacatalog-connectors-rdbms", "id": "86ea140f0b582634a649481d1e350e9979768363", "size": "4343", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "google-datacatalog-rdbms-connector/src/google/datacatalog_connectors/rdbms/scrape/sql_objects/sql_objects_metadata_normalizer.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "9250" }, { "name": "Python", "bytes": "457511" }, { "name": "Shell", "bytes": "19222" } ], "symlink_target": "" }
"""
Package init for the CalAccess processed-filings Django app; declares the
default AppConfig the app loads with.
"""
# NOTE(review): `default_app_config` is deprecated as of Django 3.2 —
# presumably kept to support older Django versions; confirm the project's
# minimum supported Django before removing.
default_app_config = 'calaccess_processed_filings.apps.CalAccessProcessedFilingsConfig'
{ "content_hash": "957ecbfe0b4d37bc3946954177522d13", "timestamp": "", "source": "github", "line_count": 4, "max_line_length": 87, "avg_line_length": 33.75, "alnum_prop": 0.8074074074074075, "repo_name": "california-civic-data-coalition/django-calaccess-processed-data", "id": "c173bc51156d4bcbdb68cad26c79e32bf22730f0", "size": "181", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "calaccess_processed_filings/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "26632" }, { "name": "Makefile", "bytes": "826" }, { "name": "Python", "bytes": "1291843" } ], "symlink_target": "" }
from kpm.formats.kub import Kub

DEFAULT_ENDPOINT = "http://localhost:5000"


def build(package,
          version_query=None,
          namespace="default",
          variables=None,
          shards=None,
          endpoint=DEFAULT_ENDPOINT):
    """
    A build is the construction/expansion of a package.
    The result are the expanded/ready to use resources, including every
    dependencies.

    Args:
      package (:obj:`str`): package name in the format "namespace/name" or
        "domain.com/name"
      version_query (:obj:`str`): a version query, eg: ">=1.5,<2.0"
      namespace (:obj:`str`): kubernetes namespace to deploy the resource
      variables (:obj:`dict`): override default package variables to resolve
        templated resources. Defaults to an empty dict; the caller's dict
        is never modified.
      shards (:obj:`json`, :obj:`int`): the shards (experimental)
      endpoint (:obj:`str`): the kpm-registry server

    Returns:
      :obj:`kpm.formats.kub.Kub`: the Kub object. To generate the build run
      one of the following on it:

       * ``build()``: create a `dict`
       * ``build_tar()``: create a `tar.gz`

    See Also:
     * :obj:`kpm.api.builder.build`
     * :obj:`kpm.api.builder.build_tar`
    """
    # Bug fix: the previous signature used a mutable default (`variables={}`)
    # and then mutated it below, so the namespace assignment leaked across
    # calls and mutated any dict passed by the caller. Copy instead.
    variables = dict(variables) if variables else {}
    variables['namespace'] = namespace
    return Kub(package, endpoint=endpoint, variables=variables,
               namespace=namespace, version=version_query, shards=shards)


def show_file(package, filepath, version_query=None, endpoint=DEFAULT_ENDPOINT):
    """
    Returns the content of any file inside a package.
    Useful to navigate and inspect a package from a web-browser.

    Args:
      package (:obj:`str`): package name in the format `namespace/name` or
        `domain.com/name`
      filepath (:obj:`str`): filepath relative to the package,
        eg: `templates/svc.yaml`
      version_query (:obj:`str`): a version query, eg: ">=1.5,<2.0"
      endpoint (:obj:`str`): the kpm-registry server

    Returns:
      :obj:`str`: the file content

    See Also:
     * :obj:`kpm.api.builder.show_file`
    """
    k = Kub(package, version=version_query, endpoint=endpoint)
    return k.package.file(filepath)


def tree(package, version_query=None, endpoint=DEFAULT_ENDPOINT):
    """
    List recursively the files inside a package.

    Args:
      package (:obj:`str`): package name in the format `namespace/name` or
        `domain.com/name`
      version_query (:obj:`str`): a version query, eg: ">=1.5,<2.0"
      endpoint (:obj:`str`): the kpm-registry server

    Returns:
      :obj:`list`: the package file list, e.g.::

        [
          "README.md",
          "manifest.jsonnet",
          "manifest.yaml",
          "templates/rocketchat-rc.yml",
          "templates/rocketchat-svc.yml"
        ]

    See Also:
     * :obj:`kpm.api.builder.tree`
    """
    k = Kub(package, version=version_query, endpoint=endpoint)
    return k.package.tree()
{ "content_hash": "f3381596403655c51df6cfe85020f087", "timestamp": "", "source": "github", "line_count": 112, "max_line_length": 96, "avg_line_length": 33.49107142857143, "alnum_prop": 0.5814449480138629, "repo_name": "ant31/kpm", "id": "8cf1010287c1d69ca15498d8da7698b821fc9d0d", "size": "3751", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "kpm/api/impl/builder.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "6288" }, { "name": "HTML", "bytes": "21178" }, { "name": "JavaScript", "bytes": "19999" }, { "name": "Makefile", "bytes": "3016" }, { "name": "Python", "bytes": "153218" }, { "name": "Shell", "bytes": "240" } ], "symlink_target": "" }
# Template script executed inside the user's Jupyter kernel to fetch the
# value of a single variable. All names carry a `_VSCODE_` prefix,
# presumably to avoid colliding with names in the user's namespace —
# keep the prefix if editing.
import json
# Maximum number of characters of the stringified value to return;
# longer values are truncated and flagged with 'truncated' below.
_VSCODE_max_len = 200
# In IJupyterVariables.getValue this '_VSCode_JupyterTestValue' will be replaced with the json stringified value of the target variable
# Indexes off of _VSCODE_targetVariable need to index types that are part of IJupyterVariable
_VSCODE_targetVariable = json.loads('_VSCode_JupyterTestValue')
# NOTE(review): eval of the variable name taken from the injected JSON —
# the payload appears to be produced by the extension itself rather than
# untrusted input; confirm this assumption holds for all callers.
_VSCODE_evalResult = eval(_VSCODE_targetVariable['name'])

# Find shape and count if available
if _VSCODE_targetVariable['type'] in ['ndarray','DataFrame','Series']:
    _VSCODE_targetVariable['shape'] = str(_VSCODE_evalResult.shape)

if _VSCODE_targetVariable['type'] in ['tuple', 'str', 'dict', 'list', 'set', 'ndarray','DataFrame','Series']:
    _VSCODE_targetVariable['count'] = len(_VSCODE_evalResult)

# Get the string of the eval result, truncate it as it could be far too long
_VSCODE_targetValue = str(_VSCODE_evalResult)
if len(_VSCODE_targetValue) > _VSCODE_max_len:
    # Record that truncation happened so the UI can offer the full value.
    _VSCODE_targetVariable['truncated'] = True
    _VSCODE_targetVariable['value'] = _VSCODE_targetValue[:_VSCODE_max_len]
else:
    _VSCODE_targetVariable['value'] = _VSCODE_targetValue

# Emit the enriched variable record as JSON on stdout for the caller.
print(json.dumps(_VSCODE_targetVariable))
{ "content_hash": "f1d1c241631509ab411407b2555199b8", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 135, "avg_line_length": 47.333333333333336, "alnum_prop": 0.7438380281690141, "repo_name": "jwatson/dotfiles", "id": "1b4e8b3e87d55203529be29b858553162aa12763", "size": "1187", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "config/coc/extensions/node_modules/coc-python/pythonFiles/datascience/getJupyterVariableValue.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "596" }, { "name": "Ruby", "bytes": "418" }, { "name": "Shell", "bytes": "34294" }, { "name": "Vim script", "bytes": "82082" } ], "symlink_target": "" }
from django.db import models from django.contrib.auth.models import User from datetime import datetime class Categorie(models.Model): id = models.AutoField(primary_key=True, db_column='id') name = models.CharField(max_length=45, unique=True, db_column='name') description = models.TextField(db_column='description') class Meta: db_table = 'Categories' def __str__(self): return self.name class ActivateUser(models.Model): activationCode = models.CharField(max_length=32, primary_key=True, db_column='activationCode') user = models.ForeignKey(User) timestamp = models.DateTimeField(default=lambda :datetime.now(), db_column='timestamp') class Meta: db_table = 'ActivateUser' class Challenge(models.Model): id = models.AutoField(primary_key=True, db_column='id') name = models.CharField(max_length=45, unique=True, db_column='name') img = models.CharField(max_length=255, unique=False, db_column='img',default='http://k6.afteam.fr/static/img/logo_small.png') description = models.TextField(db_column='description') points = models.IntegerField(null=False, blank=False, db_column='points') url = models.CharField(max_length=255, blank=True, null=True, unique=True, db_column='url') flag = models.CharField(max_length=255, unique=True, db_column='flag') categorie = models.ForeignKey('Categorie', to_field='id', blank=True, null=True, on_delete=models.SET_NULL, db_column='categorie') seuil = models.IntegerField(default=0, null=False, blank=False, db_column='seuil') private = models.BooleanField(default=True, db_column='blocked') class Meta: db_table = 'Challenges' def __str__(self): return self.name class Validation(models.Model): id = models.AutoField(primary_key=True, db_column='id') user = models.ForeignKey(User) chall = models.ForeignKey('Challenge', to_field='id', db_column='chall') value = models.IntegerField(null=False, blank=False, db_column='value') timestamp = models.DateTimeField(default=lambda :datetime.now(), db_column='timestamp') class Meta: db_table = 'Validations'
{ "content_hash": "ab5fa4a98f98ead0db66b5007fa4bdfe", "timestamp": "", "source": "github", "line_count": 45, "max_line_length": 134, "avg_line_length": 47.82222222222222, "alnum_prop": 0.7044609665427509, "repo_name": "Crypt0-M3lon/k6", "id": "b6cd4afd745689ba805a0805dd5445a96552f511", "size": "2176", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "site_ctf/models.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "5339" }, { "name": "JavaScript", "bytes": "74445" }, { "name": "Python", "bytes": "24862" } ], "symlink_target": "" }
import os import six import subprocess import sys import time from girder import config from girder.api import access from girder.api.describe import Description, describeRoute from girder.api.rest import Resource from girder.constants import registerAccessFlag, ROOT_DIR from girder.exceptions import RestException from girder.models.folder import Folder from girder.models.upload import Upload from girder.utility.progress import ProgressContext from . import base from six.moves import range os.environ['GIRDER_PORT'] = os.environ.get('GIRDER_PORT', '30001') config.loadConfig() # Reload config to pick up correct port testServer = None def setUpModule(): global testServer mockS3 = False if 's3' in os.environ['ASSETSTORE_TYPE']: mockS3 = True plugins = os.environ.get('ENABLED_PLUGINS', '') if plugins: base.enabledPlugins.extend(plugins.split()) testServer = base.startServer(False, mockS3=mockS3) def tearDownModule(): base.stopServer() class WebClientTestEndpoints(Resource): def __init__(self): super(WebClientTestEndpoints, self).__init__() self.route('GET', ('progress', ), self.testProgress) self.route('PUT', ('progress', 'stop'), self.testProgressStop) self.route('POST', ('file', ), self.uploadFile) self.route('POST', ('access_flag', ), self.registerAccessFlags) self.stop = False @access.token @describeRoute( Description('Test progress contexts from the web') .param('test', 'Name of test to run. 
These include "success" and ' '"failure".', required=False) .param('duration', 'Duration of the test in seconds', required=False, dataType='int') .param('resourceId', 'Resource ID associated with the progress notification.', required=False) .param('resourceName', 'Type of resource associated with the progress ' 'notification.', required=False) ) def testProgress(self, params): test = params.get('test', 'success') duration = int(params.get('duration', 10)) resourceId = params.get('resourceId', None) resourceName = params.get('resourceName', None) startTime = time.time() with ProgressContext(True, user=self.getCurrentUser(), title='Progress Test', message='Progress Message', total=duration, resource={'_id': resourceId}, resourceName=resourceName) as ctx: for current in range(duration): if self.stop: break ctx.update(current=current) wait = startTime + current + 1 - time.time() if wait > 0: time.sleep(wait) if test == 'error': raise RestException('Progress error test.') @access.token @describeRoute( Description('Halt all progress tests') ) def testProgressStop(self, params): self.stop = True @access.user @describeRoute(None) def uploadFile(self, params): """ Providing this works around a limitation in phantom that makes us unable to upload binary files, or at least ones that contain certain byte values. The path parameter should be provided relative to the root directory of the repository. """ self.requireParams(('folderId', 'path'), params) path = os.path.join(ROOT_DIR, params['path']) name = os.path.basename(path) folder = Folder().load(params['folderId'], force=True) upload = Upload().createUpload( user=self.getCurrentUser(), name=name, parentType='folder', parent=folder, size=os.path.getsize(path)) with open(path, 'rb') as fd: file = Upload().handleChunk(upload, fd) return file @access.public @describeRoute(None) def registerAccessFlags(self, params): """ Helper that can be used to register access flags in the system. 
This is used to test the access flags UI since the core does not expose any flags. """ flags = self.getBodyJson() for key, info in six.viewitems(flags): registerAccessFlag(key, info['name'], info['description'], info['admin']) class WebClientTestCase(base.TestCase): def setUp(self): self.specFile = os.environ['SPEC_FILE'] self.assetstoreType = os.environ['ASSETSTORE_TYPE'] self.webSecurity = os.environ.get('WEB_SECURITY', 'true') if self.webSecurity != 'false': self.webSecurity = 'true' base.TestCase.setUp(self, self.assetstoreType) # One of the web client tests uses this db, so make sure it is cleared # ahead of time. This still allows tests to be run in parallel, since # nothing should be stored in this db base.dropGridFSDatabase('girder_webclient_gridfs') testServer.root.api.v1.webclienttest = WebClientTestEndpoints() if 'SETUP_MODULES' in os.environ: import imp for i, script in enumerate(os.environ['SETUP_MODULES'].split(':')): imp.load_source('girder.web_test_setup%d' % i, script) def testWebClientSpec(self): baseUrl = '/static/built/testEnv.html' if os.environ.get('BASEURL', ''): baseUrl = os.environ['BASEURL'] cmd = ( 'npx', 'phantomjs', '--web-security=%s' % self.webSecurity, os.path.join(ROOT_DIR, 'girder', 'web_client', 'test', 'specRunner.js'), 'http://localhost:%s%s' % (os.environ['GIRDER_PORT'], baseUrl), self.specFile, os.environ.get('JASMINE_TIMEOUT', ''), # Disambiguate repeat tests run on the same spec file, by adding any non-default # assetstore types to the test output files self.assetstoreType if self.assetstoreType != 'filesystem' else '' ) # phantomjs occasionally fails to load javascript files. This appears # to be a known issue: https://github.com/ariya/phantomjs/issues/10652. # Retry several times if it looks like this has occurred. 
retry_count = os.environ.get('PHANTOMJS_RETRY', 3) for _ in range(int(retry_count)): retry = False task = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=ROOT_DIR, env=dict( # https://github.com/bazelbuild/rules_closure/pull/353 OPENSSL_CONF='/dev/null', **os.environ ) ) jasmineFinished = False for line in iter(task.stdout.readline, b''): if isinstance(line, six.binary_type): line = line.decode('utf8') if ('PHANTOM_TIMEOUT' in line or 'error loading source script' in line): task.kill() retry = True elif '__FETCHEMAIL__' in line: base.mockSmtp.waitForMail() msg = base.mockSmtp.getMail(parse=True) open('phantom_temp_%s.tmp' % os.environ['GIRDER_PORT'], 'wb').write(msg.get_payload(decode=True)) continue # we don't want to print this if 'Testing Finished' in line: jasmineFinished = True try: sys.stdout.write(line) except UnicodeEncodeError: sys.stdout.write(repr(line)) sys.stdout.flush() returncode = task.wait() if not retry and jasmineFinished: break sys.stderr.write('Retrying test\n') # If we are retrying, we need to reset the whole test, as the # databases and other resources are in an unknown state self.tearDown() self.setUp() self.assertEqual(returncode, 0)
{ "content_hash": "e119d3b58be198c1889c0c0dfbecd841", "timestamp": "", "source": "github", "line_count": 205, "max_line_length": 92, "avg_line_length": 39.4780487804878, "alnum_prop": 0.5931051526010133, "repo_name": "jbeezley/girder", "id": "c11a31446dfcb7cc82aee1aa0540b97db17a2e15", "size": "8117", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/web_client_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CMake", "bytes": "27843" }, { "name": "CSS", "bytes": "54063" }, { "name": "Dockerfile", "bytes": "2025" }, { "name": "HCL", "bytes": "1424" }, { "name": "HTML", "bytes": "136378" }, { "name": "JavaScript", "bytes": "1121709" }, { "name": "Mako", "bytes": "7571" }, { "name": "Python", "bytes": "1986658" }, { "name": "Roff", "bytes": "17" }, { "name": "Shell", "bytes": "2205" } ], "symlink_target": "" }
""" test_tempest_lib ---------------------------------- Tests for `tempest_lib` module. """ from tempest_lib.tests import base class TestTempest_lib(base.TestCase): def test_something(self): pass
{ "content_hash": "dd760d49459b1152cdd4172ee8c0cb8e", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 37, "avg_line_length": 15.214285714285714, "alnum_prop": 0.5727699530516432, "repo_name": "JioCloud/tempest-lib", "id": "76d74a6af4eb960de755ae460a29b8bfb185ea63", "size": "784", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "tempest_lib/tests/test_tempest_lib.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "403721" } ], "symlink_target": "" }
Test.assert_equals(repeat_it('*', 3), '***') Test.assert_equals(repeat_it('Hello', 5), 'HelloHelloHelloHelloHello')
{ "content_hash": "95b97d7bf5274c61e9b4c9a8d72622ec", "timestamp": "", "source": "github", "line_count": 2, "max_line_length": 70, "avg_line_length": 58, "alnum_prop": 0.6896551724137931, "repo_name": "RevansChen/online-judge", "id": "7f547ebe763c88824cc62ad6d4973b5cade2aa75", "size": "134", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Codewars/8kyu/repeatit/Python/test.py", "mode": "33188", "license": "mit", "language": [ { "name": "Brainfuck", "bytes": "102" }, { "name": "C", "bytes": "6829" }, { "name": "C#", "bytes": "19758" }, { "name": "C++", "bytes": "9439" }, { "name": "Clojure", "bytes": "75" }, { "name": "CoffeeScript", "bytes": "903" }, { "name": "Crystal", "bytes": "52" }, { "name": "Dart", "bytes": "182" }, { "name": "Elixir", "bytes": "1027" }, { "name": "Erlang", "bytes": "132" }, { "name": "F#", "bytes": "40" }, { "name": "Go", "bytes": "83" }, { "name": "Haskell", "bytes": "102" }, { "name": "Java", "bytes": "11057" }, { "name": "JavaScript", "bytes": "44773" }, { "name": "Kotlin", "bytes": "82" }, { "name": "Lua", "bytes": "93" }, { "name": "PHP", "bytes": "2875" }, { "name": "Python", "bytes": "563400" }, { "name": "R", "bytes": "265" }, { "name": "Ruby", "bytes": "7171" }, { "name": "Rust", "bytes": "74" }, { "name": "Scala", "bytes": "84" }, { "name": "Shell", "bytes": "438" }, { "name": "Swift", "bytes": "6597" }, { "name": "TSQL", "bytes": "3531" }, { "name": "TypeScript", "bytes": "5744" } ], "symlink_target": "" }
from SimpleCV.base import * from SimpleCV.ImageClass import Image from SimpleCV.Features.FeatureExtractorBase import * class HueHistogramFeatureExtractor(FeatureExtractorBase): """ Create a Hue Histogram feature extractor. This feature extractor takes in an image, gets the hue channel, bins the number of pixels with a particular Hue, and returns the results. mNBins - the number of Hue bins. """ mNBins = 16 def __init__(self, mNBins=16): #we define the black (positive) and white (negative) regions of an image #to get our haar wavelet self.mNBins = mNBins def extract(self, img): """ This feature extractor takes in a color image and returns a normalized color histogram of the pixel counts of each hue. """ img = img.toHLS() h = img.getNumpy()[:, :, 0] npa = npa.reshape(1,npa.shape[0]*npa.shape[1]) hist = np.histogram(npa,self.mNBins,normed=True,range=(0,255)) return hist[0].tolist() def getFieldNames(self): """ This method gives the names of each field in the feature vector in the order in which they are returned. For example, 'xpos' or 'width' """ retVal = [] for i in range(self.mNBins): name = "Hue"+str(i) retVal.append(name) return retVal def getNumFields(self): """ This method returns the total number of fields in the feature vector. """ return self.mNBins
{ "content_hash": "0f684e2ba61bcb260ae918dc6290b48d", "timestamp": "", "source": "github", "line_count": 46, "max_line_length": 84, "avg_line_length": 33.369565217391305, "alnum_prop": 0.6247557003257329, "repo_name": "jayrambhia/SimpleCV2", "id": "efc92044e2975ffa707e5db29ab22ec067191b28", "size": "1535", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "SimpleCV/Features/HueHistogramFeatureExtractor.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "46344" }, { "name": "JavaScript", "bytes": "41038" }, { "name": "Perl", "bytes": "5044" }, { "name": "Python", "bytes": "1698883" }, { "name": "Shell", "bytes": "18995" } ], "symlink_target": "" }
from django.views import generic from traceability.models.item import Item from traceability.forms.item import ItemUpdate class ItemDetail(generic.DetailView): template_name = 'traceability/item/item_detail.html' model = Item class ItemList(generic.ListView): template_name = 'traceability/item/item_list.html' model = Item paginate_by = 25 class ItemUpdate(generic.UpdateView): template_name = 'traceability/item/item_form.html' model = Item form_class = ItemUpdate
{ "content_hash": "a5daae6cb09d6e3002040a92d795bc7a", "timestamp": "", "source": "github", "line_count": 20, "max_line_length": 56, "avg_line_length": 25.25, "alnum_prop": 0.7465346534653465, "repo_name": "vandorjw/django-traceability", "id": "8cfec0abf4481f79848b42b0f89c3872b6c22408", "size": "505", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "traceability/views/item.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "39" }, { "name": "JavaScript", "bytes": "31896" }, { "name": "Python", "bytes": "48845" } ], "symlink_target": "" }
from __future__ import division import numpy as np import pytest import pandas.util.testing as tm from pandas import compat from pandas._libs.interval import IntervalTree def skipif_32bit(param): """ Skip parameters in a parametrize on 32bit systems. Specifically used here to skip leaf_size parameters related to GH 23440. """ marks = pytest.mark.skipif(compat.is_platform_32bit(), reason='GH 23440: int type mismatch on 32bit') return pytest.param(param, marks=marks) @pytest.fixture( scope='class', params=['int32', 'int64', 'float32', 'float64', 'uint64']) def dtype(request): return request.param @pytest.fixture(params=[skipif_32bit(1), skipif_32bit(2), 10]) def leaf_size(request): """ Fixture to specify IntervalTree leaf_size parameter; to be used with the tree fixture. """ return request.param @pytest.fixture(params=[ np.arange(5, dtype='int64'), np.arange(5, dtype='int32'), np.arange(5, dtype='uint64'), np.arange(5, dtype='float64'), np.arange(5, dtype='float32'), np.array([0, 1, 2, 3, 4, np.nan], dtype='float64'), np.array([0, 1, 2, 3, 4, np.nan], dtype='float32')]) def tree(request, leaf_size): left = request.param return IntervalTree(left, left + 2, leaf_size=leaf_size) class TestIntervalTree(object): def test_get_loc(self, tree): tm.assert_numpy_array_equal(tree.get_loc(1), np.array([0], dtype='int64')) tm.assert_numpy_array_equal(np.sort(tree.get_loc(2)), np.array([0, 1], dtype='int64')) with pytest.raises(KeyError): tree.get_loc(-1) def test_get_indexer(self, tree): tm.assert_numpy_array_equal( tree.get_indexer(np.array([1.0, 5.5, 6.5])), np.array([0, 4, -1], dtype='int64')) with pytest.raises(KeyError): tree.get_indexer(np.array([3.0])) def test_get_indexer_non_unique(self, tree): indexer, missing = tree.get_indexer_non_unique( np.array([1.0, 2.0, 6.5])) tm.assert_numpy_array_equal(indexer[:1], np.array([0], dtype='int64')) tm.assert_numpy_array_equal(np.sort(indexer[1:3]), np.array([0, 1], dtype='int64')) tm.assert_numpy_array_equal(np.sort(indexer[3:]), 
np.array([-1], dtype='int64')) tm.assert_numpy_array_equal(missing, np.array([2], dtype='int64')) def test_duplicates(self, dtype): left = np.array([0, 0, 0], dtype=dtype) tree = IntervalTree(left, left + 1) tm.assert_numpy_array_equal(np.sort(tree.get_loc(0.5)), np.array([0, 1, 2], dtype='int64')) with pytest.raises(KeyError): tree.get_indexer(np.array([0.5])) indexer, missing = tree.get_indexer_non_unique(np.array([0.5])) tm.assert_numpy_array_equal(np.sort(indexer), np.array([0, 1, 2], dtype='int64')) tm.assert_numpy_array_equal(missing, np.array([], dtype='int64')) def test_get_loc_closed(self, closed): tree = IntervalTree([0], [1], closed=closed) for p, errors in [(0, tree.open_left), (1, tree.open_right)]: if errors: with pytest.raises(KeyError): tree.get_loc(p) else: tm.assert_numpy_array_equal(tree.get_loc(p), np.array([0], dtype='int64')) @pytest.mark.parametrize('leaf_size', [ skipif_32bit(1), skipif_32bit(10), skipif_32bit(100), 10000]) def test_get_indexer_closed(self, closed, leaf_size): x = np.arange(1000, dtype='float64') found = x.astype('intp') not_found = (-1 * np.ones(1000)).astype('intp') tree = IntervalTree(x, x + 0.5, closed=closed, leaf_size=leaf_size) tm.assert_numpy_array_equal(found, tree.get_indexer(x + 0.25)) expected = found if tree.closed_left else not_found tm.assert_numpy_array_equal(expected, tree.get_indexer(x + 0.0)) expected = found if tree.closed_right else not_found tm.assert_numpy_array_equal(expected, tree.get_indexer(x + 0.5))
{ "content_hash": "77c7c50969340a7b881aea9dea7e1647", "timestamp": "", "source": "github", "line_count": 116, "max_line_length": 77, "avg_line_length": 37.60344827586207, "alnum_prop": 0.5701513067400276, "repo_name": "harisbal/pandas", "id": "c880133f4fc6b7db0a90874326bbcacdeb4e65ab", "size": "4362", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pandas/tests/indexes/interval/test_interval_tree.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "4907" }, { "name": "C", "bytes": "404689" }, { "name": "C++", "bytes": "17194" }, { "name": "HTML", "bytes": "551714" }, { "name": "Makefile", "bytes": "574" }, { "name": "Python", "bytes": "14298777" }, { "name": "Shell", "bytes": "28914" }, { "name": "Smarty", "bytes": "2069" } ], "symlink_target": "" }
from xml.dom import minidom, Node from urlparse import urlparse, urlunparse from xml.parsers.expat import ExpatError from htmlentitydefs import name2codepoint import re # select and apply an xml:base for this entry class relativize: def __init__(self, parent): self.score = {} self.links = [] self.collect_and_tally(parent) self.base = self.select_optimal_base() if self.base: if not parent.hasAttribute('xml:base'): self.rebase(parent) parent.setAttribute('xml:base', self.base) # collect and tally cite, href and src attributes def collect_and_tally(self,parent): uri = None if parent.hasAttribute('cite'): uri=parent.getAttribute('cite') if parent.hasAttribute('href'): uri=parent.getAttribute('href') if parent.hasAttribute('src'): uri=parent.getAttribute('src') if uri: parts=urlparse(uri) if parts[0].lower() == 'http': parts = (parts[1]+parts[2]).split('/') base = None for i in range(1,len(parts)): base = tuple(parts[0:i]) self.score[base] = self.score.get(base,0) + len(base) if base and base not in self.links: self.links.append(base) for node in parent.childNodes: if node.nodeType == Node.ELEMENT_NODE: self.collect_and_tally(node) # select the xml:base with the highest score def select_optimal_base(self): if not self.score: return None for link in self.links: self.score[link] = 0 winner = max(self.score.values()) if not winner: return None for key in self.score.keys(): if self.score[key] == winner: if winner == len(key): return None return urlunparse(('http', key[0], '/'.join(key[1:]), '', '', '')) + '/' # rewrite cite, href and src attributes using this base def rebase(self,parent): uri = None if parent.hasAttribute('cite'): uri=parent.getAttribute('cite') if parent.hasAttribute('href'): uri=parent.getAttribute('href') if parent.hasAttribute('src'): uri=parent.getAttribute('src') if uri and uri.startswith(self.base): uri = uri[len(self.base):] or '.' 
if parent.hasAttribute('href'): uri=parent.setAttribute('href', uri) if parent.hasAttribute('src'): uri=parent.setAttribute('src', uri) for node in parent.childNodes: if node.nodeType == Node.ELEMENT_NODE: self.rebase(node) # convert type="html" to type="plain" or type="xhtml" as appropriate def retype(parent): for node in parent.childNodes: if node.nodeType == Node.ELEMENT_NODE: if node.hasAttribute('type') and node.getAttribute('type') == 'html': if len(node.childNodes)==0: node.removeAttribute('type') elif len(node.childNodes)==1: # replace html entity defs with utf-8 chunks=re.split('&(\w+);', node.childNodes[0].nodeValue) for i in range(1,len(chunks),2): if chunks[i] in ['amp', 'lt', 'gt', 'apos', 'quot']: chunks[i] ='&' + chunks[i] +';' elif chunks[i] in name2codepoint: chunks[i]=unichr(name2codepoint[chunks[i]]) else: chunks[i]='&' + chunks[i] + ';' text = u"".join(chunks) try: # see if the resulting text is a well-formed XML fragment div = '<div xmlns="http://www.w3.org/1999/xhtml">%s</div>' data = minidom.parseString((div % text.encode('utf-8'))) if text.find('<') < 0: # plain text node.removeAttribute('type') text = data.documentElement.childNodes[0].nodeValue node.childNodes[0].replaceWholeText(text) elif len(text) > 80: # xhtml node.setAttribute('type', 'xhtml') node.removeChild(node.childNodes[0]) node.appendChild(data.documentElement) except ExpatError: # leave as html pass else: # recurse retype(node) if parent.nodeName == 'entry': relativize(parent) if __name__ == '__main__': # run styler on each file mention on the command line import sys for feed in sys.argv[1:]: doc = minidom.parse(feed) doc.normalize() retype(doc.documentElement) open(feed,'w').write(doc.toxml('utf-8'))
{ "content_hash": "9db488204ed36d10cb6660c32a78f63e", "timestamp": "", "source": "github", "line_count": 124, "max_line_length": 80, "avg_line_length": 34.79838709677419, "alnum_prop": 0.6025492468134415, "repo_name": "DebugUself/planet", "id": "9220702c24ee433d60dd104d5c494e7518a930af", "size": "4315", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "code/planet/atomstyler.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "339507" } ], "symlink_target": "" }
""" Apply JSON-Patches (RFC 6902) """ from __future__ import unicode_literals import collections import copy import functools import inspect import itertools import json import sys try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence from jsonpointer import JsonPointer, JsonPointerException # Will be parsed by setup.py to determine package metadata __author__ = 'Stefan Kögl <stefan@skoegl.net>' __version__ = '1.13' __website__ = 'https://github.com/stefankoegl/python-json-patch' __license__ = 'Modified BSD License' # pylint: disable=E0611,W0404 if sys.version_info >= (3, 0): basestring = (bytes, str) # pylint: disable=C0103,W0622 class JsonPatchException(Exception): """Base Json Patch exception""" class InvalidJsonPatch(JsonPatchException): """ Raised if an invalid JSON Patch is created """ class JsonPatchConflict(JsonPatchException): """Raised if patch could not be applied due to conflict situation such as: - attempt to add object key then it already exists; - attempt to operate with nonexistence object key; - attempt to insert value to array at position beyond of it size; - etc. """ class JsonPatchTestFailed(JsonPatchException, AssertionError): """ A Test operation failed """ def multidict(ordered_pairs): """Convert duplicate keys values to lists.""" # read all values into lists mdict = collections.defaultdict(list) for key, value in ordered_pairs: mdict[key].append(value) return dict( # unpack lists that have only 1 item (key, values[0] if len(values) == 1 else values) for key, values in mdict.items() ) def get_loadjson(): """ adds the object_pairs_hook parameter to json.load when possible The "object_pairs_hook" parameter is used to handle duplicate keys when loading a JSON object. This parameter does not exist in Python 2.6. 
This methods returns an unmodified json.load for Python 2.6 and a partial function with object_pairs_hook set to multidict for Python versions that support the parameter. """ if sys.version_info >= (3, 3): args = inspect.signature(json.load).parameters else: args = inspect.getargspec(json.load).args if 'object_pairs_hook' not in args: return json.load return functools.partial(json.load, object_pairs_hook=multidict) json.load = get_loadjson() def apply_patch(doc, patch, in_place=False, ignore_conflicts=False, verify=False): """Apply list of patches to specified json document. :param doc: Document object. :type doc: dict :param patch: JSON patch as list of dicts or raw JSON-encoded string. :type patch: list or str :param in_place: While :const:`True` patch will modify target document. By default patch will be applied to document copy. :type in_place: bool :param ignore_conflicts: Ignore JsonConflicts errors :type ignore_conflicts: bool :param verify: works with `ignore_conflicts` = True, if errors and `verify` is True (recommanded), make sure the resulting objects is the same as the original one. `ignore_conflicts` and `verify` are used to run patches multiple times and get rif of errors when operations can't be performed multiple times because the object has already been patched This will force `in_place` to False in order the comparison to occur. :type verify: bool :return: Patched document object. 
:rtype: dict >>> doc = {'foo': 'bar'} >>> patch = [{'op': 'add', 'path': '/baz', 'value': 'qux'}] >>> other = apply_patch(doc, patch) >>> doc is not other True >>> other == {'foo': 'bar', 'baz': 'qux'} True >>> patch = [{'op': 'add', 'path': '/baz', 'value': 'qux'}] >>> apply_patch(doc, patch, in_place=True) == {'foo': 'bar', 'baz': 'qux'} True >>> doc == other True """ if isinstance(patch, basestring): patch = JsonPatch.from_string(patch) else: patch = JsonPatch(patch) return patch.apply(doc, in_place, ignore_conflicts) def reapply_patch(doc, patch): """Apply or (safely) re-apply patch to doc""" return apply_patch(doc,patch,ignore_conflicts=True, verify=True) def make_patch(src, dst): """Generates patch by comparing of two document objects. Actually is a proxy to :meth:`JsonPatch.from_diff` method. :param src: Data source document object. :type src: dict :param dst: Data source document object. :type dst: dict >>> src = {'foo': 'bar', 'numbers': [1, 3, 4, 8]} >>> dst = {'baz': 'qux', 'numbers': [1, 4, 7]} >>> patch = make_patch(src, dst) >>> new = patch.apply(src) >>> new == dst True """ return JsonPatch.from_diff(src, dst) class JsonPatch(object): """A JSON Patch is a list of Patch Operations. >>> patch = JsonPatch([ ... {'op': 'add', 'path': '/foo', 'value': 'bar'}, ... {'op': 'add', 'path': '/baz', 'value': [1, 2, 3]}, ... {'op': 'remove', 'path': '/baz/1'}, ... {'op': 'test', 'path': '/baz', 'value': [1, 3]}, ... {'op': 'replace', 'path': '/baz/0', 'value': 42}, ... {'op': 'remove', 'path': '/baz/1'}, ... 
]) >>> doc = {} >>> result = patch.apply(doc) >>> expected = {'foo': 'bar', 'baz': [42]} >>> result == expected True JsonPatch object is iterable, so you could easily access to each patch statement in loop: >>> lpatch = list(patch) >>> expected = {'op': 'add', 'path': '/foo', 'value': 'bar'} >>> lpatch[0] == expected True >>> lpatch == patch.patch True Also JsonPatch could be converted directly to :class:`bool` if it contains any operation statements: >>> bool(patch) True >>> bool(JsonPatch([])) False This behavior is very handy with :func:`make_patch` to write more readable code: >>> old = {'foo': 'bar', 'numbers': [1, 3, 4, 8]} >>> new = {'baz': 'qux', 'numbers': [1, 4, 7]} >>> patch = make_patch(old, new) >>> if patch: ... # document have changed, do something useful ... patch.apply(old) #doctest: +ELLIPSIS {...} """ def __init__(self, patch): self.patch = patch self.operations = { 'remove': RemoveOperation, 'add': AddOperation, 'replace': ReplaceOperation, 'move': MoveOperation, 'test': TestOperation, 'copy': CopyOperation, } def __str__(self): """str(self) -> self.to_string()""" return self.to_string() def __bool__(self): return bool(self.patch) __nonzero__ = __bool__ def __iter__(self): return iter(self.patch) def __hash__(self): return hash(tuple(self._ops)) def __eq__(self, other): if not isinstance(other, JsonPatch): return False return self._ops == other._ops def __ne__(self, other): return not(self == other) @classmethod def from_string(cls, patch_str): """Creates JsonPatch instance from string source. :param patch_str: JSON patch as raw string. :type patch_str: str :return: :class:`JsonPatch` instance. """ patch = json.loads(patch_str) return cls(patch) @classmethod def from_diff(cls, src, dst): """Creates JsonPatch instance based on comparing of two document objects. Json patch would be created for `src` argument against `dst` one. :param src: Data source document object. :type src: dict :param dst: Data source document object. 
:type dst: dict :return: :class:`JsonPatch` instance. >>> src = {'foo': 'bar', 'numbers': [1, 3, 4, 8]} >>> dst = {'baz': 'qux', 'numbers': [1, 4, 7]} >>> patch = JsonPatch.from_diff(src, dst) >>> new = patch.apply(src) >>> new == dst True """ def compare_values(path, value, other): if value == other: return if isinstance(value, MutableMapping) and \ isinstance(other, MutableMapping): for operation in compare_dicts(path, value, other): yield operation elif isinstance(value, MutableSequence) and \ isinstance(other, MutableSequence): for operation in compare_lists(path, value, other): yield operation else: ptr = JsonPointer.from_parts(path) yield {'op': 'replace', 'path': ptr.path, 'value': other} def compare_dicts(path, src, dst): for key in src: if key not in dst: ptr = JsonPointer.from_parts(path + [key]) yield {'op': 'remove', 'path': ptr.path} continue current = path + [key] for operation in compare_values(current, src[key], dst[key]): yield operation for key in dst: if key not in src: ptr = JsonPointer.from_parts(path + [key]) yield {'op': 'add', 'path': ptr.path, 'value': dst[key]} def compare_lists(path, src, dst): return _compare_lists(path, src, dst) return cls(list(compare_values([], src, dst))) def to_string(self): """Returns patch set as JSON string.""" return json.dumps(self.patch) @property def _ops(self): return tuple(map(self._get_operation, self.patch)) def apply(self, orig_obj, in_place=False, ignore_conflicts=False, verify=False): """Applies the patch to given object. :param obj: Document object. :type obj: dict :param in_place: Tweaks way how patch would be applied - directly to specified `obj` or to his copy. :type in_place: bool :type ignore_conflicts: Ignore JsonConflicts errors :type verify: works with `ignore_conflicts` = True, if errors and `verify` is True (recommanded), make sure the resulting objects is the same as the original one. 
`ignore_conflicts` and `verify` are used to run patches multiple times and get rif of errors when operations can't be performed multiple times because the object has already been patched :return: Modified `obj`. """ if verify: in_place = False if not in_place: obj = copy.deepcopy(orig_obj) else: obj = orig_obj got_conflicts = False for operation in self._ops: try: obj = operation.apply(obj) except JsonPatchConflict as e: if ignore_conflicts: got_conflicts = True else: raise # it you're gonna ignore conflicts you'll have to make # sure the resulting document is the same as the passed-one # (patch run mutiple times) if got_conflicts and verify: assert obj == orig_obj, "Resulting object is different from original but got conflict errors, this is not good..." return obj def _get_operation(self, operation): if 'op' not in operation: raise InvalidJsonPatch("Operation does not contain 'op' member") op = operation['op'] if not isinstance(op, basestring): raise InvalidJsonPatch("Operation must be a string") if op not in self.operations: raise InvalidJsonPatch("Unknown operation {0!r}".format(op)) cls = self.operations[op] return cls(operation) class PatchOperation(object): """A single operation inside a JSON Patch.""" def __init__(self, operation): self.location = operation['path'] self.pointer = JsonPointer(self.location) self.operation = operation def apply(self, obj): """Abstract method that applies patch operation to specified object.""" raise NotImplementedError('should implement patch operation.') def __hash__(self): return hash(frozenset(self.operation.items())) def __eq__(self, other): if not isinstance(other, PatchOperation): return False return self.operation == other.operation def __ne__(self, other): return not(self == other) class RemoveOperation(PatchOperation): """Removes an object property or an array element.""" def apply(self, obj): subobj, part = self.pointer.to_last(obj) try: del subobj[part] except (KeyError, IndexError) as ex: msg = "can't remove 
non-existent object '{0}'".format(part) raise JsonPatchConflict(msg) return obj class AddOperation(PatchOperation): """Adds an object property or an array element.""" def apply(self, obj): try: value = self.operation["value"] except KeyError as ex: raise InvalidJsonPatch( "The operation does not contain a 'value' member") subobj, part = self.pointer.to_last(obj) if isinstance(subobj, MutableSequence): if part == '-': subobj.append(value) # pylint: disable=E1103 elif part > len(subobj) or part < 0: raise JsonPatchConflict("can't insert outside of list") else: subobj.insert(part, value) # pylint: disable=E1103 elif isinstance(subobj, MutableMapping): if part is None: obj = value # we're replacing the root else: subobj[part] = value else: raise TypeError("invalid document type {0}".format(type(subobj))) return obj class ReplaceOperation(PatchOperation): """Replaces an object property or an array element by new value.""" def apply(self, obj): try: value = self.operation["value"] except KeyError as ex: raise InvalidJsonPatch( "The operation does not contain a 'value' member") subobj, part = self.pointer.to_last(obj) if part is None: return value if isinstance(subobj, MutableSequence): if part > len(subobj) or part < 0: raise JsonPatchConflict("can't replace outside of list") elif isinstance(subobj, MutableMapping): if not part in subobj: msg = "can't replace non-existent object '{0}'".format(part) raise JsonPatchConflict(msg) else: raise TypeError("invalid document type {0}".format(type(subobj))) subobj[part] = value return obj class MoveOperation(PatchOperation): """Moves an object property or an array element to new location.""" def apply(self, obj): try: from_ptr = JsonPointer(self.operation['from']) except KeyError as ex: raise InvalidJsonPatch( "The operation does not contain a 'from' member") subobj, part = from_ptr.to_last(obj) try: value = subobj[part] except (KeyError, IndexError) as ex: raise JsonPatchConflict(str(ex)) # If source and target are equal, this is 
a no-op if self.pointer == from_ptr: return obj if isinstance(subobj, MutableMapping) and \ self.pointer.contains(from_ptr): raise JsonPatchConflict('Cannot move values into its own children') obj = RemoveOperation({ 'op': 'remove', 'path': self.operation['from'] }).apply(obj) obj = AddOperation({ 'op': 'add', 'path': self.location, 'value': value }).apply(obj) return obj class TestOperation(PatchOperation): """Test value by specified location.""" def apply(self, obj): try: subobj, part = self.pointer.to_last(obj) if part is None: val = subobj else: val = self.pointer.walk(subobj, part) except JsonPointerException as ex: raise JsonPatchTestFailed(str(ex)) try: value = self.operation['value'] except KeyError as ex: raise InvalidJsonPatch( "The operation does not contain a 'value' member") if val != value: msg = '{0} ({1}) is not equal to tested value {2} ({3})' raise JsonPatchTestFailed(msg.format(val, type(val), value, type(value))) return obj class CopyOperation(PatchOperation): """ Copies an object property or an array element to a new location """ def apply(self, obj): try: from_ptr = JsonPointer(self.operation['from']) except KeyError as ex: raise InvalidJsonPatch( "The operation does not contain a 'from' member") subobj, part = from_ptr.to_last(obj) try: value = copy.deepcopy(subobj[part]) except (KeyError, IndexError) as ex: raise JsonPatchConflict(str(ex)) obj = AddOperation({ 'op': 'add', 'path': self.location, 'value': value }).apply(obj) return obj def _compare_lists(path, src, dst): """Compares two lists objects and return JSON patch about.""" return _optimize(_compare(path, src, dst, *_split_by_common_seq(src, dst))) def _longest_common_subseq(src, dst): """Returns pair of ranges of longest common subsequence for the `src` and `dst` lists. >>> src = [1, 2, 3, 4] >>> dst = [0, 1, 2, 3, 5] >>> # The longest common subsequence for these lists is [1, 2, 3] ... # which is located at (0, 3) index range for src list and (1, 4) for ... # dst one. 
Tuple of these ranges we should get back. ... assert ((0, 3), (1, 4)) == _longest_common_subseq(src, dst) """ lsrc, ldst = len(src), len(dst) drange = list(range(ldst)) matrix = [[0] * ldst for _ in range(lsrc)] z = 0 # length of the longest subsequence range_src, range_dst = None, None for i, j in itertools.product(range(lsrc), drange): if src[i] == dst[j]: if i == 0 or j == 0: matrix[i][j] = 1 else: matrix[i][j] = matrix[i-1][j-1] + 1 if matrix[i][j] > z: z = matrix[i][j] if matrix[i][j] == z: range_src = (i-z+1, i+1) range_dst = (j-z+1, j+1) else: matrix[i][j] = 0 return range_src, range_dst def _split_by_common_seq(src, dst, bx=(0, -1), by=(0, -1)): """Recursively splits the `dst` list onto two parts: left and right. The left part contains differences on left from common subsequence, same as the right part by for other side. To easily understand the process let's take two lists: [0, 1, 2, 3] as `src` and [1, 2, 4, 5] for `dst`. If we've tried to generate the binary tree where nodes are common subsequence for both lists, leaves on the left side are subsequence for `src` list and leaves on the right one for `dst`, our tree would looks like:: [1, 2] / \ [0] [] / \ [3] [4, 5] This function generate the similar structure as flat tree, but without nodes with common subsequences - since we're don't need them - only with left and right leaves:: [] / \ [0] [] / \ [3] [4, 5] The `bx` is the absolute range for currently processed subsequence of `src` list. The `by` means the same, but for the `dst` list. 
""" # Prevent useless comparisons in future bx = bx if bx[0] != bx[1] else None by = by if by[0] != by[1] else None if not src: return [None, by] elif not dst: return [bx, None] # note that these ranges are relative for processed sublists x, y = _longest_common_subseq(src, dst) if x is None or y is None: # no more any common subsequence return [bx, by] return [_split_by_common_seq(src[:x[0]], dst[:y[0]], (bx[0], bx[0] + x[0]), (by[0], by[0] + y[0])), _split_by_common_seq(src[x[1]:], dst[y[1]:], (bx[0] + x[1], bx[0] + len(src)), (by[0] + y[1], by[0] + len(dst)))] def _compare(path, src, dst, left, right): """Same as :func:`_compare_with_shift` but strips emitted `shift` value.""" for op, _ in _compare_with_shift(path, src, dst, left, right, 0): yield op def _compare_with_shift(path, src, dst, left, right, shift): """Recursively compares differences from `left` and `right` sides from common subsequences. The `shift` parameter is used to store index shift which caused by ``add`` and ``remove`` operations. Yields JSON patch operations and list index shift. 
""" if isinstance(left, MutableSequence): for item, shift in _compare_with_shift(path, src, dst, *left, shift=shift): yield item, shift elif left is not None: for item, shift in _compare_left(path, src, left, shift): yield item, shift if isinstance(right, MutableSequence): for item, shift in _compare_with_shift(path, src, dst, *right, shift=shift): yield item, shift elif right is not None: for item, shift in _compare_right(path, dst, right, shift): yield item, shift def _compare_left(path, src, left, shift): """Yields JSON patch ``remove`` operations for elements that are only exists in the `src` list.""" start, end = left if end == -1: end = len(src) # we need to `remove` elements from list tail to not deal with index shift for idx in reversed(range(start + shift, end + shift)): ptr = JsonPointer.from_parts(path + [str(idx)]) yield ( {'op': 'remove', # yes, there should be any value field, but we'll use it # to apply `move` optimization a bit later and will remove # it in _optimize function. 'value': src[idx - shift], 'path': ptr.path, }, shift - 1 ) shift -= 1 def _compare_right(path, dst, right, shift): """Yields JSON patch ``add`` operations for elements that are only exists in the `dst` list""" start, end = right if end == -1: end = len(dst) for idx in range(start, end): ptr = JsonPointer.from_parts(path + [str(idx)]) yield ( {'op': 'add', 'path': ptr.path, 'value': dst[idx]}, shift + 1 ) shift += 1 def _optimize(operations): """Optimizes operations which was produced by lists comparison. Actually it does two kinds of optimizations: 1. Seeks pair of ``remove`` and ``add`` operations against the same path and replaces them with ``replace`` operation. 2. Seeks pair of ``remove`` and ``add`` operations for the same value and replaces them with ``move`` operation. """ result = [] ops_by_path = {} ops_by_value = {} add_remove = set(['add', 'remove']) for item in operations: # could we apply "move" optimization for dict values? 
hashable_value = not isinstance(item['value'], (MutableMapping, MutableSequence)) if item['path'] in ops_by_path: _optimize_using_replace(ops_by_path[item['path']], item) continue if hashable_value and item['value'] in ops_by_value: prev_item = ops_by_value[item['value']] # ensure that we processing pair of add-remove ops if set([item['op'], prev_item['op']]) == add_remove: _optimize_using_move(prev_item, item) ops_by_value.pop(item['value']) continue result.append(item) ops_by_path[item['path']] = item if hashable_value: ops_by_value[item['value']] = item # cleanup ops_by_path.clear() ops_by_value.clear() for item in result: if item['op'] == 'remove': item.pop('value') # strip our hack yield item def _optimize_using_replace(prev, cur): """Optimises by replacing ``add``/``remove`` with ``replace`` on same path For nested strucures, tries to recurse replacement, see #36 """ prev['op'] = 'replace' if cur['op'] == 'add': # make recursive patch patch = make_patch(prev['value'], cur['value']) if len(patch.patch) == 1 and patch.patch[0]['op'] != 'remove': prev['path'] = prev['path'] + patch.patch[0]['path'] prev['value'] = patch.patch[0]['value'] else: prev['value'] = cur['value'] def _optimize_using_move(prev_item, item): """Optimises JSON patch by using ``move`` operation instead of ``remove` and ``add`` against the different paths but for the same value.""" prev_item['op'] = 'move' move_from, move_to = [ (item['path'], prev_item['path']), (prev_item['path'], item['path']), ][item['op'] == 'add'] if item['op'] == 'add': # first was remove then add prev_item['from'] = move_from prev_item['path'] = move_to else: # first was add then remove head, move_from = move_from.rsplit('/', 1) # since add operation was first it incremented # overall index shift value. we have to fix this move_from = int(move_from) - 1 prev_item['from'] = head + '/%d' % move_from prev_item['path'] = move_to
{ "content_hash": "bd23e4e129c053e26a38839b44ad0415", "timestamp": "", "source": "github", "line_count": 803, "max_line_length": 126, "avg_line_length": 32.79327521793275, "alnum_prop": 0.5672730034557399, "repo_name": "biothings/biothings.api", "id": "d9214fad6fd94dba7cd68333251c8242c077c7c0", "size": "27940", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "biothings/utils/jsonpatch.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "3893" }, { "name": "Python", "bytes": "1748879" }, { "name": "Smarty", "bytes": "1764" } ], "symlink_target": "" }
# ``mock`` became ``unittest.mock`` in Python 3.3; fall back to the
# standalone backport on older interpreters.
try:
    from unittest import mock
except ImportError:
    import mock

from unittest import TestCase

from iepy.preprocess.pipeline import PreProcessPipeline, PreProcessSteps


class TestPreProcessPipeline(TestCase):
    """Unit tests for PreProcessPipeline, using mocked step runners and
    mocked documents so no real preprocessing backend is required."""

    def test_walk_document_applies_all_step_runners_to_the_given_doc(self):
        # Each runner appends its position to doc.call_order, so we can assert
        # both that every runner was invoked and that pipeline order was kept.
        step1_runner = mock.MagicMock()
        step1_runner.side_effect = lambda x: x.call_order.append(1)
        step2_runner = mock.MagicMock()
        step2_runner.side_effect = lambda x: x.call_order.append(2)
        doc = mock.MagicMock()
        doc.call_order = []
        p = PreProcessPipeline([step1_runner, step2_runner], [])
        p.walk_document(doc)
        step1_runner.assert_called_once_with(doc)
        step2_runner.assert_called_once_with(doc)
        self.assertEqual(doc.call_order, [1, 2])

    def test_walk_document_applies_all_step_runners_again_if_they_were_already_run(self):
        # walk_document does no "already processed" bookkeeping: calling it
        # twice must run every step runner twice.
        step_runner1 = mock.MagicMock()
        p = PreProcessPipeline([step_runner1], [])
        doc = object()
        p.walk_document(doc)
        p.walk_document(doc)
        self.assertEqual(step_runner1.call_count, 2)

    def test_walk_document_itself_does_not_save_the_document(self):
        # Persisting the document is the caller's responsibility, not the
        # pipeline's.
        step_runner1 = mock.MagicMock()
        p = PreProcessPipeline([step_runner1], [])
        doc = mock.MagicMock()
        p.walk_document(doc)
        self.assertEqual(doc.save.call_count, 0)

    def test_process_step_in_batch_applies_runner_to_all_documents(self):
        # We take care that doesn't have attr "step": without a ``step``
        # attribute the runner is applied to every document, unfiltered.
        _runner = lambda x: x
        runner = mock.Mock(wraps=_runner)
        docs = [object() for i in range(5)]
        p = PreProcessPipeline([runner], docs)
        p.process_step_in_batch(runner)
        self.assertEqual(runner.call_count, len(docs))
        self.assertEqual(runner.call_args_list, [mock.call(d) for d in docs])

    def test_process_step_in_batch_does_nothing_with_previous_steps_runner(self):
        # Only the runner explicitly passed in executes; earlier pipeline
        # steps are not re-run as a side effect.
        runner1 = mock.Mock(wraps=lambda x: x)
        runner2 = mock.Mock(wraps=lambda x: x)
        docs = [object() for i in range(5)]
        p = PreProcessPipeline([runner1, runner2], docs)
        p.process_step_in_batch(runner2)
        self.assertFalse(runner1.called)

    def test_process_step_in_batch_filter_docs_to_apply_if_has_attr_step(self):
        # A runner exposing a ``step`` attribute makes the pipeline ask the
        # documents manager for only the docs still lacking that step.
        step_runner = mock.MagicMock(step=PreProcessSteps.tokenization,
                                     override=False, increment=False)
        all_docs = [object() for i in range(5)]
        docs_manager = mock.MagicMock()
        docs_manager.__iter__.return_value = all_docs
        docs_manager.get_documents_lacking_preprocess.side_effect = lambda x: all_docs[:2]
        # Ok, docs manager has 5 docs, but get_documents_lacking_preprocess will return
        # only 2 of them
        p = PreProcessPipeline([step_runner], docs_manager)
        p.process_step_in_batch(step_runner)
        docs_filter = docs_manager.get_documents_lacking_preprocess
        docs_filter.assert_called_once_with(step_runner.step)
        self.assertNotEqual(step_runner.call_count, 5)
        self.assertEqual(step_runner.call_count, 2)
        self.assertEqual(step_runner.call_args_list,
                         [mock.call(d) for d in all_docs[:2]])

    def test_process_step_in_batch_does_not_call_docs_save(self):
        # Batch processing, like walk_document, never persists documents.
        runner = mock.Mock(wraps=lambda x: x)
        docs = [mock.Mock() for i in range(5)]
        p = PreProcessPipeline([runner], docs)
        p.process_step_in_batch(runner)
        for d in docs:
            self.assertFalse(d.save.called)

    def test_process_everythin_calls_successively_process_step_in_batch(self):
        # (method name typo "everythin" kept: renaming would change which
        # tests the runner discovers.) process_everything must delegate to
        # process_step_in_batch once per configured runner, in order.
        runner1 = mock.Mock(wraps=lambda x: x)
        runner2 = mock.Mock(wraps=lambda x: x)
        docs = [object() for i in range(5)]
        p = PreProcessPipeline([runner1, runner2], docs)
        with mock.patch.object(p, 'process_step_in_batch') as mock_batch:
            p.call_order = []
            mock_batch.side_effect = lambda r: p.call_order.append(r)
            p.process_everything()
        self.assertEqual(mock_batch.call_count, 2)
        self.assertEqual(mock_batch.call_args_list,
                         [mock.call(runner1), mock.call(runner2)])
        self.assertEqual(p.call_order, [runner1, runner2])
{ "content_hash": "ee2ee31f32e7493a1a58b81884426ea2", "timestamp": "", "source": "github", "line_count": 96, "max_line_length": 90, "avg_line_length": 44.40625, "alnum_prop": 0.6403940886699507, "repo_name": "mrshu/iepy", "id": "e6484ebea03f679c5362272abcfb4757e2e55f21", "size": "4263", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "tests/test_preprocess_pipeline.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "25531" }, { "name": "HTML", "bytes": "26374" }, { "name": "JavaScript", "bytes": "26234" }, { "name": "Python", "bytes": "400269" } ], "symlink_target": "" }
from django.apps import AppConfig


class CreditsConfig(AppConfig):
    """Django application configuration for the ``credits`` app."""

    # Dotted path Django uses to locate and register the application.
    name = 'credits'
{ "content_hash": "5ecf005b2f1fcde4be0f0914581508f3", "timestamp": "", "source": "github", "line_count": 5, "max_line_length": 33, "avg_line_length": 17.8, "alnum_prop": 0.7528089887640449, "repo_name": "picsadotcom/maguire", "id": "d5f4181ac16fad9ca726e570c6fbff0b3f41c8bb", "size": "89", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "backend/credits/apps.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Dockerfile", "bytes": "300" }, { "name": "Python", "bytes": "80717" } ], "symlink_target": "" }
from oslo_utils import strutils
import webob

from nova.api.openstack import api_version_request
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import flavors as schema
from nova.api.openstack.compute.views import flavors as flavors_view
from nova.api.openstack import wsgi
from nova.api import validation
from nova.compute import flavors
from nova import exception
from nova.i18n import _
from nova import objects
from nova.policies import flavor_extra_specs as fes_policies
from nova import utils


class FlavorsController(wsgi.Controller):
    """Flavor controller for the OpenStack API."""

    _view_builder_class = flavors_view.ViewBuilder

    @validation.query_schema(schema.index_query_275, '2.75')
    @validation.query_schema(schema.index_query, '2.0', '2.74')
    @wsgi.expected_errors(400)
    def index(self, req):
        """Return all flavors in brief."""
        limited_flavors = self._get_flavors(req)
        return self._view_builder.index(req, limited_flavors)

    @validation.query_schema(schema.index_query_275, '2.75')
    @validation.query_schema(schema.index_query, '2.0', '2.74')
    @wsgi.expected_errors(400)
    def detail(self, req):
        """Return all flavors in detail."""
        context = req.environ['nova.context']
        limited_flavors = self._get_flavors(req)
        include_extra_specs = False
        if api_version_request.is_supported(
                req, flavors_view.FLAVOR_EXTRA_SPECS_MICROVERSION):
            # Embedding extra_specs is both microversion- and policy-gated;
            # fatal=False means a policy denial just omits them instead of
            # failing the whole request.
            include_extra_specs = context.can(
                fes_policies.POLICY_ROOT % 'index', fatal=False)
        return self._view_builder.detail(
            req, limited_flavors, include_extra_specs=include_extra_specs)

    @wsgi.expected_errors(404)
    def show(self, req, id):
        """Return data about the given flavor id."""
        context = req.environ['nova.context']
        try:
            flavor = flavors.get_flavor_by_flavor_id(id, ctxt=context)
        except exception.FlavorNotFound as e:
            # Map the internal lookup failure to an HTTP 404 response.
            raise webob.exc.HTTPNotFound(explanation=e.format_message())

        # Same microversion + policy gating as detail() for extra_specs.
        include_extra_specs = False
        if api_version_request.is_supported(
                req, flavors_view.FLAVOR_EXTRA_SPECS_MICROVERSION):
            include_extra_specs = context.can(
                fes_policies.POLICY_ROOT % 'index', fatal=False)
        # Flavor descriptions only exist from their own microversion onward.
        include_description = api_version_request.is_supported(
            req, flavors_view.FLAVOR_DESCRIPTION_MICROVERSION)
        return self._view_builder.show(
            req, flavor, include_description=include_description,
            include_extra_specs=include_extra_specs)

    def _parse_is_public(self, is_public):
        """Parse is_public into something usable.

        Returns True (default: only public flavors), None (no filtering at
        all, when the caller passed an explicit "none" string) or the parsed
        boolean; raises HTTPBadRequest on anything unparsable.
        """
        if is_public is None:
            # preserve default value of showing only public flavors
            return True
        elif utils.is_none_string(is_public):
            return None
        else:
            try:
                return strutils.bool_from_string(is_public, strict=True)
            except ValueError:
                msg = _('Invalid is_public filter [%s]') % is_public
                raise webob.exc.HTTPBadRequest(explanation=msg)

    def _get_flavors(self, req):
        """Helper function that returns a list of flavor dicts.

        Builds DB filters from the request's query parameters (is_public,
        minRam, minDisk, sorting, pagination) and raises HTTPBadRequest for
        malformed values or an unknown pagination marker.
        """
        filters = {}
        sort_key = req.params.get('sort_key') or 'flavorid'
        sort_dir = req.params.get('sort_dir') or 'asc'
        limit, marker = common.get_limit_and_marker(req)
        context = req.environ['nova.context']
        if context.is_admin:
            # Only admin has query access to all flavor types
            filters['is_public'] = self._parse_is_public(
                req.params.get('is_public', None))
        else:
            # Non-admins are always restricted to public, enabled flavors.
            filters['is_public'] = True
            filters['disabled'] = False

        if 'minRam' in req.params:
            try:
                filters['min_memory_mb'] = int(req.params['minRam'])
            except ValueError:
                msg = _('Invalid minRam filter [%s]') % req.params['minRam']
                raise webob.exc.HTTPBadRequest(explanation=msg)

        if 'minDisk' in req.params:
            try:
                filters['min_root_gb'] = int(req.params['minDisk'])
            except ValueError:
                msg = (_('Invalid minDisk filter [%s]') %
                       req.params['minDisk'])
                raise webob.exc.HTTPBadRequest(explanation=msg)

        try:
            limited_flavors = objects.FlavorList.get_all(context,
                                                         filters=filters,
                                                         sort_key=sort_key,
                                                         sort_dir=sort_dir,
                                                         limit=limit,
                                                         marker=marker)
        except exception.MarkerNotFound:
            msg = _('marker [%s] not found') % marker
            raise webob.exc.HTTPBadRequest(explanation=msg)

        return limited_flavors
{ "content_hash": "a9f74e5ed495793df30a9b8d72c3340f", "timestamp": "", "source": "github", "line_count": 120, "max_line_length": 76, "avg_line_length": 40.166666666666664, "alnum_prop": 0.6199170124481328, "repo_name": "mahak/nova", "id": "3986e428b346ded74b22c48b33ef1bb80d10a450", "size": "5456", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "nova/api/openstack/compute/flavors.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C++", "bytes": "3545" }, { "name": "Mako", "bytes": "1952" }, { "name": "Python", "bytes": "23261880" }, { "name": "Shell", "bytes": "28113" }, { "name": "Smarty", "bytes": "507244" } ], "symlink_target": "" }
import os

from ingenico.connect.sdk.factory import Factory


class BlockMandateExample(object):
    """Example showing how to block an existing SEPA direct debit mandate."""

    def example(self):
        """Block a mandate identified by its unique mandate reference."""
        with self.__get_client() as client:
            response = client.merchant("merchantId").mandates().block("42268d8067df43e18a50a2ebf4bdb729")

    def __get_client(self):
        """Build an API client from the example configuration file.

        Credentials come from the environment when set, with placeholder
        defaults otherwise.
        """
        key_id = os.getenv("connect.api.apiKeyId", "someKey")
        secret_key = os.getenv("connect.api.secretApiKey", "someSecret")
        config_path = os.path.abspath(
            os.path.join(os.path.dirname(__file__),
                         '../../example_configuration.ini'))
        return Factory.create_client_from_file(
            configuration_file_name=config_path,
            api_key_id=key_id,
            secret_api_key=secret_key)
{ "content_hash": "0151991151c440eb666b5c50c9f76ba6", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 105, "avg_line_length": 46.05555555555556, "alnum_prop": 0.6079613992762364, "repo_name": "Ingenico-ePayments/connect-sdk-python3", "id": "e756b71c2f4bf9b3f966f1b2440993481a5a7336", "size": "956", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "examples/merchant/mandates/block_mandate_example.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "36" }, { "name": "Python", "bytes": "1735057" } ], "symlink_target": "" }
import trappy

import utils_tests


class TestSchedFunctions(utils_tests.SetupDirectory):
    """Tests for bart.sched.functions helpers, driven by synthetic ftrace
    trace files written into the test's working directory."""

    def __init__(self, *args, **kwargs):
        # SetupDirectory takes a list of fixture files; none are needed here
        # because each test writes its own trace files.
        super(TestSchedFunctions, self).__init__([], *args, **kwargs)

    def test_get_pids_for_processes_no_sched_switch(self):
        """get_pids_for_processes() raises an exception if the trace doesn't
        have a sched_switch event"""
        from bart.sched.functions import get_pids_for_process

        trace_file = "trace.txt"
        raw_trace_file = "trace.raw.txt"
        # Empty trace files: no sched_switch events at all.
        with open(trace_file, "w") as fout:
            fout.write("")

        with open(raw_trace_file, "w") as fout:
            fout.write("")

        trace = trappy.FTrace(trace_file)
        with self.assertRaises(ValueError):
            get_pids_for_process(trace, "foo")

    def test_get_pids_for_process_funny_process_names(self):
        """get_pids_for_process() works when a process name is a substring of another"""
        from bart.sched.functions import get_pids_for_process

        trace_file = "trace.txt"
        raw_trace_file = "trace.raw.txt"
        # "wmig" (pid 3268) and "wmig1" (pid 3269): the first name is a
        # prefix of the second, so a naive substring match would wrongly
        # report both pids.
        in_data = """          <idle>-0     [001] 10826.894644: sched_switch:          prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=0 next_comm=rt-app next_pid=3268 next_prio=120
            wmig-3268  [001] 10826.894778: sched_switch:          prev_comm=wmig prev_pid=3268 prev_prio=120 prev_state=1 next_comm=rt-app next_pid=3269 next_prio=120
           wmig1-3269  [001] 10826.905152: sched_switch:          prev_comm=wmig1 prev_pid=3269 prev_prio=120 prev_state=1 next_comm=wmig next_pid=3268 next_prio=120
            wmig-3268  [001] 10826.915384: sched_switch:          prev_comm=wmig prev_pid=3268 prev_prio=120 prev_state=1 next_comm=swapper/1 next_pid=0 next_prio=120
          <idle>-0     [005] 10826.995169: sched_switch:          prev_comm=swapper/5 prev_pid=0 prev_prio=120 prev_state=0 next_comm=wmig1 next_pid=3269 next_prio=120
           wmig1-3269  [005] 10827.007064: sched_switch:          prev_comm=wmig1 prev_pid=3269 prev_prio=120 prev_state=0 next_comm=wmig next_pid=3268 next_prio=120
            wmig-3268  [005] 10827.019061: sched_switch:          prev_comm=wmig prev_pid=3268 prev_prio=120 prev_state=0 next_comm=wmig1 next_pid=3269 next_prio=120
           wmig1-3269  [005] 10827.031061: sched_switch:          prev_comm=wmig1 prev_pid=3269 prev_prio=120 prev_state=0 next_comm=wmig next_pid=3268 next_prio=120
            wmig-3268  [005] 10827.050645: sched_switch:          prev_comm=wmig prev_pid=3268 prev_prio=120 prev_state=1 next_comm=swapper/5 next_pid=0 next_prio=120
"""
        # We create an empty trace.txt to please trappy ...
        with open(trace_file, "w") as fout:
            fout.write("")

        # ... but we only put the sched_switch events in the raw trace
        # file because that's where trappy is going to look for
        with open(raw_trace_file, "w") as fout:
            fout.write(in_data)

        trace = trappy.FTrace(trace_file)
        # Only the exact-name match "wmig" (3268) must be returned, never
        # "wmig1" (3269).
        self.assertEquals(get_pids_for_process(trace, "wmig"), [3268])
{ "content_hash": "ad83d8d8a6711eb546568e30c93de8ee", "timestamp": "", "source": "github", "line_count": 54, "max_line_length": 189, "avg_line_length": 56.72222222222222, "alnum_prop": 0.6405484818805093, "repo_name": "ARM-software/bart", "id": "1a8d4ac16d3620d6411939e6ac3a2c1ba3f5998e", "size": "3649", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tests/test_sched_functions.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "137238" } ], "symlink_target": "" }
import os

from django.core.files.storage import FileSystemStorage
from django.conf import settings


class OverwriteStorage(FileSystemStorage):
    """File-system storage that overwrites, rather than renames, uploads
    whose target name is already taken."""

    def get_available_name(self, name, max_length=None):
        """Return ``name`` unchanged, deleting any file already stored there.

        The stock backend would derive a new unique name; this variant
        removes the stale file from MEDIA_ROOT so the incoming file keeps
        the requested name.
        """
        if self.exists(name):
            stale_path = os.path.join(settings.MEDIA_ROOT, name)
            os.remove(stale_path)
        return name
{ "content_hash": "8d6eaf2f62820ecaf8f78aad1dacc61a", "timestamp": "", "source": "github", "line_count": 20, "max_line_length": 67, "avg_line_length": 27.35, "alnum_prop": 0.6361974405850092, "repo_name": "Djacket/djacket", "id": "2272dbcafafd02c9eb1dd32af4a34fe465a431e9", "size": "547", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "core/backend/djacket/storage.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "29609" }, { "name": "HTML", "bytes": "35321" }, { "name": "JavaScript", "bytes": "12900" }, { "name": "Python", "bytes": "88290" }, { "name": "Shell", "bytes": "11320" } ], "symlink_target": "" }
""" Author : Guillaume "iXce" Seguin Email : guillaume@segu.in Copyright (C) 2008 Guillaume Seguin Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of Enso nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY Humanized, Inc. ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Humanized, Inc. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import sys, os, glob from stat import * from distutils.core import setup from distutils.command.install import install as _install from distutils.command.install_data import install_data as _install_data if sys.platform.startswith("win") or sys.platform == "darwin": # TODO: This script should work on OS X and Windows (see issue # 19). print "Sorry, this script currently doesn't work for Windows " print "or OS X. Please see README for instructions on how to " print "get Enso up and running on these platforms." 
sys.exit(-1) INSTALLED_FILES = "installed_files" class install (_install): def run (self): _install.run (self) outputs = self.get_outputs () length = 0 if self.root: length += len (self.root) if self.prefix: length += len (self.prefix) if length: for counter in xrange (len (outputs)): outputs[counter] = outputs[counter][length:] data = "\n".join (outputs) try: file = open (INSTALLED_FILES, "w") except: self.warn ("Could not write installed files list %s" % \ INSTALLED_FILES) return file.write (data) file.close () class install_data (_install_data): def run (self): def chmod_data_file (file): try: os.chmod (file, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH) except: self.warn ("Could not chmod data file %s" % file) _install_data.run (self) map (chmod_data_file, self.get_outputs ()) class uninstall (_install): def run (self): try: file = open (INSTALLED_FILES, "r") except: self.warn ("Could not read installed files list %s" % \ INSTALLED_FILES) return files = file.readlines () file.close () prepend = "" if self.root: prepend += self.root if self.prefix: prepend += self.prefix if len (prepend): for counter in xrange (len (files)): files[counter] = prepend + files[counter].rstrip () for file in files: print "Uninstalling %s" % file try: os.unlink (file) except: self.warn ("Could not remove file %s" % file) ops = ("install", "build", "sdist", "uninstall", "clean") if len (sys.argv) < 2 or sys.argv[1] not in ops: print "Please specify operation : %s" % " | ".join (ops) raise SystemExit prefix = None if len (sys.argv) > 2: i = 0 for o in sys.argv: if o.startswith ("--prefix"): if o == "--prefix": if len (sys.argv) >= i: prefix = sys.argv[i + 1] sys.argv.remove (prefix) elif o.startswith ("--prefix=") and len (o[9:]): prefix = o[9:] sys.argv.remove (o) break i += 1 if not prefix and "PREFIX" in os.environ: prefix = os.environ["PREFIX"] if not prefix or not len (prefix): prefix = sys.prefix if sys.argv[1] in ("install", "uninstall") and len (prefix): sys.argv += ["--prefix", 
prefix] version_file = open ("VERSION", "r") version = version_file.read ().strip () if "=" in version: version = version.split ("=")[1] data_files = [] podir = os.path.join (os.path.abspath (os.path.curdir), "po") if os.path.isdir (podir): buildcmd = "msgfmt -o build/locale/%s/enso.mo po/%s.po" mopath = "build/locale/%s/enso.mo" destpath = "share/locale/%s/LC_MESSAGES" for name in os.listdir (podir): if name[-2:] == "po": name = name[:-3] if sys.argv[1] == "build" \ or (sys.argv[1] == "install" and \ not os.path.exists (mopath % name)): if not os.path.isdir ("build/locale/" + name): os.makedirs ("build/locale/" + name) os.system (buildcmd % (name, name)) data_files.append ((destpath % name, [mopath % name])) setup ( name = "Enso", version = version, description = "Enso", author = "Humanized Inc.", author_email = "enso-developers@googlegroups.com", url = "http://humanized.com/", license = "GPL", data_files = data_files, packages = [ "enso", "enso/commands", "enso/contrib", "enso/contrib/scriptotron", "enso/graphics", "enso/input", "enso/messages", "enso/platform", "enso/platform/linux", "enso/platform/osx", "enso/platform/win32", "enso/quasimode", "enso/utils", ], scripts = ["scripts/run_enso.py"], cmdclass = {"uninstall" : uninstall, "install" : install, "install_data" : install_data} )
{ "content_hash": "1e3eb9d8b8c09b1966a0b23248154fef", "timestamp": "", "source": "github", "line_count": 188, "max_line_length": 77, "avg_line_length": 36.51063829787234, "alnum_prop": 0.5547785547785548, "repo_name": "pf/enso", "id": "c0cabf010a4e89a01a2487f9602768c61f4c11f4", "size": "6887", "binary": false, "copies": "10", "ref": "refs/heads/master", "path": "setup.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "4566815" }, { "name": "C++", "bytes": "240574" }, { "name": "Objective-C", "bytes": "46845" }, { "name": "Python", "bytes": "551820" } ], "symlink_target": "" }
import mock from rest_framework import serializers from waffle.testutils import override_switch from olympia.amo.tests import ( BaseTestCase, addon_factory, collection_factory, TestCase, user_factory) from olympia.bandwagon.models import CollectionAddon from olympia.bandwagon.serializers import ( CollectionAddonSerializer, CollectionAkismetSpamValidator, CollectionSerializer, CollectionWithAddonsSerializer) from olympia.lib.akismet.models import AkismetReport class TestCollectionAkismetSpamValidator(TestCase): def setUp(self): self.validator = CollectionAkismetSpamValidator( ('name', 'description')) serializer = mock.Mock() serializer.instance = collection_factory( name='name', description='Big Cheese') request = mock.Mock() request.user = user_factory() request.META = {} serializer.context = {'request': request} self.validator.set_context(serializer) self.data = { 'name': {'en-US': 'Collection', 'fr': u'Collection'}, 'description': {'en-US': 'Big Cheese', 'fr': u'une gránd fromagé'}, 'random_data': {'en-US': 'to ignore'}, 'slug': 'cheese'} @override_switch('akismet-spam-check', active=False) @mock.patch('olympia.lib.akismet.models.AkismetReport.comment_check') def test_waffle_off(self, comment_check_mock): self.validator(self.data) # No Akismet checks assert AkismetReport.objects.count() == 0 comment_check_mock.assert_not_called() @override_switch('akismet-spam-check', active=True) @mock.patch('olympia.lib.akismet.models.AkismetReport.comment_check') def test_ham(self, comment_check_mock): comment_check_mock.return_value = AkismetReport.HAM self.validator(self.data) # Akismet check is there assert AkismetReport.objects.count() == 2 name_report = AkismetReport.objects.first() # name will only be there once because it's duplicated. 
assert name_report.comment_type == 'collection-name' assert name_report.comment == self.data['name']['en-US'] summary_report = AkismetReport.objects.last() # en-US description won't be there because it's an existing description assert summary_report.comment_type == 'collection-description' assert summary_report.comment == self.data['description']['fr'] assert comment_check_mock.call_count == 2 @override_switch('akismet-spam-check', active=True) @override_switch('akismet-collection-action', active=False) @mock.patch('olympia.lib.akismet.models.AkismetReport.comment_check') def test_spam_logging_only(self, comment_check_mock): comment_check_mock.return_value = AkismetReport.MAYBE_SPAM self.validator(self.data) # Akismet check is there assert AkismetReport.objects.count() == 2 name_report = AkismetReport.objects.first() # name will only be there once because it's duplicated. assert name_report.comment_type == 'collection-name' assert name_report.comment == self.data['name']['en-US'] summary_report = AkismetReport.objects.last() # en-US description won't be there because it's an existing description assert summary_report.comment_type == 'collection-description' assert summary_report.comment == self.data['description']['fr'] # After the first comment_check was spam, additional ones are skipped. assert comment_check_mock.call_count == 1 @override_switch('akismet-spam-check', active=True) @override_switch('akismet-collection-action', active=True) @mock.patch('olympia.lib.akismet.models.AkismetReport.comment_check') def test_spam_action_taken(self, comment_check_mock): comment_check_mock.return_value = AkismetReport.MAYBE_SPAM with self.assertRaises(serializers.ValidationError): self.validator(self.data) # Akismet check is there assert AkismetReport.objects.count() == 2 name_report = AkismetReport.objects.first() # name will only be there once because it's duplicated. 
assert name_report.comment_type == 'collection-name' assert name_report.comment == self.data['name']['en-US'] summary_report = AkismetReport.objects.last() # en-US description won't be there because it's an existing description assert summary_report.comment_type == 'collection-description' assert summary_report.comment == self.data['description']['fr'] # After the first comment_check was spam, additional ones are skipped. assert comment_check_mock.call_count == 1 class TestCollectionSerializer(BaseTestCase): serializer = CollectionSerializer def setUp(self): super(TestCollectionSerializer, self).setUp() self.user = user_factory() self.collection = collection_factory() self.collection.update(author=self.user) def serialize(self): return self.serializer(self.collection).data def test_basic(self): data = self.serialize() assert data['id'] == self.collection.id assert data['uuid'] == self.collection.uuid.hex assert data['name'] == {'en-US': self.collection.name} assert data['description'] == {'en-US': self.collection.description} assert data['url'] == self.collection.get_abs_url() assert data['addon_count'] == self.collection.addon_count assert data['modified'] == ( self.collection.modified.replace(microsecond=0).isoformat() + 'Z') assert data['author']['id'] == self.user.id assert data['slug'] == self.collection.slug assert data['public'] == self.collection.listed assert data['default_locale'] == self.collection.default_locale class TestCollectionAddonSerializer(BaseTestCase): def setUp(self): self.collection = collection_factory() self.addon = addon_factory() self.collection.add_addon(self.addon) self.item = CollectionAddon.objects.get(addon=self.addon, collection=self.collection) self.item.comments = u'Dis is nice' self.item.save() def serialize(self): return CollectionAddonSerializer(self.item).data def test_basic(self): data = self.serialize() assert data['addon']['id'] == self.collection.addons.all()[0].id assert data['notes'] == {'en-US': self.item.comments} 
class TestCollectionWithAddonsSerializer(TestCollectionSerializer): serializer = CollectionWithAddonsSerializer def setUp(self): super(TestCollectionWithAddonsSerializer, self).setUp() self.addon = addon_factory() self.collection.add_addon(self.addon) def serialize(self): mock_viewset = mock.MagicMock() collection_addons = CollectionAddon.objects.filter( addon=self.addon, collection=self.collection) mock_viewset.get_addons_queryset.return_value = collection_addons return self.serializer( self.collection, context={'view': mock_viewset}).data def test_basic(self): super(TestCollectionWithAddonsSerializer, self).test_basic() collection_addon = CollectionAddon.objects.get( addon=self.addon, collection=self.collection) data = self.serialize() assert data['addons'] == [ CollectionAddonSerializer(collection_addon).data ] assert data['addons'][0]['addon']['id'] == self.addon.id
{ "content_hash": "c19cd43061fd36fd34c95c111abe8b1f", "timestamp": "", "source": "github", "line_count": 179, "max_line_length": 79, "avg_line_length": 42.597765363128495, "alnum_prop": 0.667016393442623, "repo_name": "wagnerand/olympia", "id": "7eb339f94bb3bd1b8827b018651884afa3b4a630", "size": "7651", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "src/olympia/bandwagon/tests/test_serializers.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "ApacheConf", "bytes": "249" }, { "name": "CSS", "bytes": "663668" }, { "name": "HTML", "bytes": "1600904" }, { "name": "JavaScript", "bytes": "1314155" }, { "name": "Makefile", "bytes": "4235" }, { "name": "PLSQL", "bytes": "74" }, { "name": "Python", "bytes": "3996776" }, { "name": "Shell", "bytes": "9101" }, { "name": "Smarty", "bytes": "1930" } ], "symlink_target": "" }
import unittest import trovesync from utils import MockObject __metaclass__ = type # make sure we use new-style classes class TestSyncer(unittest.TestCase): def test_is_testable(self): mockSettings = MockObject() mockSettings.albumsPath = "/some/absolute/path/" trovesync.Syncer(mockSettings) self.assertTrue(True)
{ "content_hash": "283d57c1484662e6e2afbf3044493b32", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 57, "avg_line_length": 22.666666666666668, "alnum_prop": 0.7411764705882353, "repo_name": "kjarnet/trovesync", "id": "bb69aca5060e6c9ad227c41df3ec3233547b75ba", "size": "340", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_syncer.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "33003" } ], "symlink_target": "" }
import os import re from setuptools import setup project_dir = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(project_dir, 'dontasq', '__init__.py')) as f: version = re.search(r"__version__ = '(.+)'", f.read()).groups()[0] with open(os.path.join(project_dir, 'README.rst')) as f: long_description = f.read() setup( name="dontasq", version=version, packages=['dontasq'], install_requires=['asq>=1.0'], author="Alexander Borzunov", author_email="borzunov.alexander@gmail.com", description='Extend built-in Python collections ' 'with LINQ-for-objects style methods', long_description=long_description, url="https://github.com/borzunov/dontasq", classifiers=[ 'Development Status :: 4 - Beta', 'Programming Language :: Python', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: Implementation :: CPython', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Topic :: Software Development :: Libraries :: Python Modules', ], license="MIT", keywords=['LINQ'], test_suite='tests', )
{ "content_hash": "0d5fa7da829a0219ffda436614766fd4", "timestamp": "", "source": "github", "line_count": 48, "max_line_length": 71, "avg_line_length": 30.916666666666668, "alnum_prop": 0.6111859838274932, "repo_name": "borzunov/dontasq", "id": "aeec34142a419476147d5153db9cd474c24a6d6a", "size": "1531", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "11688" } ], "symlink_target": "" }
""" Check ElementTree import with xmlschema. """ import argparse import sys parser = argparse.ArgumentParser(add_help=True) parser.add_argument( '--before', action="store_true", default=False, help="Import ElementTree before xmlschema. If not provided the ElementTree library " "is loaded after xmlschema." ) args = parser.parse_args() if args.before: print("Importing ElementTree before xmlschema ...") import xml.etree.ElementTree as ElementTree import xmlschema.etree else: print("Importing ElementTree after xmlschema ...") import xmlschema.etree import xml.etree.ElementTree as ElementTree # Check if all modules are loaded in the system table assert 'xml.etree.ElementTree' in sys.modules, "ElementTree not loaded!" assert 'xmlschema' in sys.modules, 'xmlschema not loaded' assert 'xmlschema.etree' in sys.modules, 'xmlschema.etree not loaded' if sys.version_info >= (3,): assert '_elementtree' in sys.modules, "cElementTree is not loaded!" # Check imported ElementTree assert ElementTree._Element_Py is not ElementTree.Element, "ElementTree is pure Python!" assert xmlschema.etree.ElementTree is ElementTree, "xmlschema has a different ElementTree module!" # Check ElementTree and pure Python ElementTree imported in xmlschema PyElementTree = xmlschema.etree.PyElementTree assert xmlschema.etree.ElementTree.Element is not xmlschema.etree.ElementTree._Element_Py, \ "xmlschema's ElementTree is pure Python!" assert PyElementTree.Element is PyElementTree._Element_Py, "PyElementTree is not pure Python!" assert xmlschema.etree.ElementTree is not PyElementTree, "xmlschema ElementTree is PyElementTree!" print("\nTest OK: ElementTree import is working as expected!")
{ "content_hash": "af2c8f9f688e9ee6a9eef14541f6511c", "timestamp": "", "source": "github", "line_count": 43, "max_line_length": 102, "avg_line_length": 41.02325581395349, "alnum_prop": 0.7568027210884354, "repo_name": "brunato/xmlschema", "id": "bf8a9786ed6bab03a41ee430df5268af3ec7c65f", "size": "2142", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "xmlschema/tests/check_etree_import.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "814028" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import migrations, models import taggit.managers class Migration(migrations.Migration): dependencies = [ ('taggit', '0002_auto_20150616_2121'), ('memorylane', '0009_auto_20151207_1712'), ] operations = [ migrations.AddField( model_name='memory', name='tags', field=taggit.managers.TaggableManager(to='taggit.Tag', through='taggit.TaggedItem', help_text='A comma-separated list of tags.', verbose_name='Tags'), ), ]
{ "content_hash": "0183164185a9e4ba157175c4f0a106f9", "timestamp": "", "source": "github", "line_count": 20, "max_line_length": 162, "avg_line_length": 27.95, "alnum_prop": 0.631484794275492, "repo_name": "MemoryLane196/WebApp", "id": "495fc11c8a34b7d60048a718442ceded3741584f", "size": "583", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "memorylane/memorylane/migrations/0010_memory_tags.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "59416" }, { "name": "HTML", "bytes": "49926" }, { "name": "JavaScript", "bytes": "3471" }, { "name": "Makefile", "bytes": "3140" }, { "name": "Python", "bytes": "207393" } ], "symlink_target": "" }
from __future__ import unicode_literals import collections import datetime import decimal import math import os import re import types from importlib import import_module from django.apps import apps from django.db import migrations, models from django.db.migrations.loader import MigrationLoader from django.db.migrations.operations.base import Operation from django.utils import datetime_safe, six from django.utils._os import upath from django.utils.encoding import force_text from django.utils.functional import Promise from django.utils.inspect import get_func_args from django.utils.module_loading import module_dir from django.utils.timezone import utc from django.utils.version import get_docs_version COMPILED_REGEX_TYPE = type(re.compile('')) class SettingsReference(str): """ Special subclass of string which actually references a current settings value. It's treated as the value in memory, but serializes out to a settings.NAME attribute reference. """ def __new__(self, value, setting_name): return str.__new__(self, value) def __init__(self, value, setting_name): self.setting_name = setting_name class OperationWriter(object): def __init__(self, operation, indentation=2): self.operation = operation self.buff = [] self.indentation = indentation def serialize(self): def _write(_arg_name, _arg_value): if (_arg_name in self.operation.serialization_expand_args and isinstance(_arg_value, (list, tuple, dict))): if isinstance(_arg_value, dict): self.feed('%s={' % _arg_name) self.indent() for key, value in _arg_value.items(): key_string, key_imports = MigrationWriter.serialize(key) arg_string, arg_imports = MigrationWriter.serialize(value) args = arg_string.splitlines() if len(args) > 1: self.feed('%s: %s' % (key_string, args[0])) for arg in args[1:-1]: self.feed(arg) self.feed('%s,' % args[-1]) else: self.feed('%s: %s,' % (key_string, arg_string)) imports.update(key_imports) imports.update(arg_imports) self.unindent() self.feed('},') else: self.feed('%s=[' % _arg_name) 
self.indent() for item in _arg_value: arg_string, arg_imports = MigrationWriter.serialize(item) args = arg_string.splitlines() if len(args) > 1: for arg in args[:-1]: self.feed(arg) self.feed('%s,' % args[-1]) else: self.feed('%s,' % arg_string) imports.update(arg_imports) self.unindent() self.feed('],') else: arg_string, arg_imports = MigrationWriter.serialize(_arg_value) args = arg_string.splitlines() if len(args) > 1: self.feed('%s=%s' % (_arg_name, args[0])) for arg in args[1:-1]: self.feed(arg) self.feed('%s,' % args[-1]) else: self.feed('%s=%s,' % (_arg_name, arg_string)) imports.update(arg_imports) imports = set() name, args, kwargs = self.operation.deconstruct() operation_args = get_func_args(self.operation.__init__) # See if this operation is in django.db.migrations. If it is, # We can just use the fact we already have that imported, # otherwise, we need to add an import for the operation class. if getattr(migrations, name, None) == self.operation.__class__: self.feed('migrations.%s(' % name) else: imports.add('import %s' % (self.operation.__class__.__module__)) self.feed('%s.%s(' % (self.operation.__class__.__module__, name)) self.indent() for i, arg in enumerate(args): arg_value = arg arg_name = operation_args[i] _write(arg_name, arg_value) i = len(args) # Only iterate over remaining arguments for arg_name in operation_args[i:]: if arg_name in kwargs: # Don't sort to maintain signature order arg_value = kwargs[arg_name] _write(arg_name, arg_value) self.unindent() self.feed('),') return self.render(), imports def indent(self): self.indentation += 1 def unindent(self): self.indentation -= 1 def feed(self, line): self.buff.append(' ' * (self.indentation * 4) + line) def render(self): return '\n'.join(self.buff) class MigrationWriter(object): """ Takes a Migration instance and is able to produce the contents of the migration file from it. 
""" def __init__(self, migration): self.migration = migration self.needs_manual_porting = False def as_string(self): """ Returns a string of the file contents. """ items = { "replaces_str": "", "initial_str": "", } imports = set() # Deconstruct operations operations = [] for operation in self.migration.operations: operation_string, operation_imports = OperationWriter(operation).serialize() imports.update(operation_imports) operations.append(operation_string) items["operations"] = "\n".join(operations) + "\n" if operations else "" # Format dependencies and write out swappable dependencies right dependencies = [] for dependency in self.migration.dependencies: if dependency[0] == "__setting__": dependencies.append(" migrations.swappable_dependency(settings.%s)," % dependency[1]) imports.add("from django.conf import settings") else: # No need to output bytestrings for dependencies dependency = tuple(force_text(s) for s in dependency) dependencies.append(" %s," % self.serialize(dependency)[0]) items["dependencies"] = "\n".join(dependencies) + "\n" if dependencies else "" # Format imports nicely, swapping imports of functions from migration files # for comments migration_imports = set() for line in list(imports): if re.match("^import (.*)\.\d+[^\s]*$", line): migration_imports.add(line.split("import")[1].strip()) imports.remove(line) self.needs_manual_porting = True # django.db.migrations is always used, but models import may not be. # If models import exists, merge it with migrations import. if "from django.db import models" in imports: imports.discard("from django.db import models") imports.add("from django.db import migrations, models") else: imports.add("from django.db import migrations") # Sort imports by the package / module to be imported (the part after # "from" in "from ... import ..." or after "import" in "import ..."). 
sorted_imports = sorted(imports, key=lambda i: i.split()[1]) items["imports"] = "\n".join(sorted_imports) + "\n" if imports else "" if migration_imports: items["imports"] += ( "\n\n# Functions from the following migrations need manual " "copying.\n# Move them and any dependencies into this file, " "then update the\n# RunPython operations to refer to the local " "versions:\n# %s" ) % "\n# ".join(sorted(migration_imports)) # If there's a replaces, make a string for it if self.migration.replaces: items['replaces_str'] = "\n replaces = %s\n" % self.serialize(self.migration.replaces)[0] if self.migration.initial: items['initial_str'] = "\n initial = True\n" return (MIGRATION_TEMPLATE % items).encode("utf8") @staticmethod def serialize_datetime(value): """ Returns a serialized version of a datetime object that is valid, executable python code. It converts timezone-aware values to utc with an 'executable' utc representation of tzinfo. """ if value.tzinfo is not None and value.tzinfo != utc: value = value.astimezone(utc) value_repr = repr(value).replace("<UTC>", "utc") if isinstance(value, datetime_safe.datetime): value_repr = "datetime.%s" % value_repr return value_repr @property def basedir(self): migrations_package_name = MigrationLoader.migrations_module(self.migration.app_label) # See if we can import the migrations module directly try: migrations_module = import_module(migrations_package_name) except ImportError: pass else: try: return upath(module_dir(migrations_module)) except ValueError: pass # Alright, see if it's a direct submodule of the app app_config = apps.get_app_config(self.migration.app_label) maybe_app_name, _, migrations_package_basename = migrations_package_name.rpartition(".") if app_config.name == maybe_app_name: return os.path.join(app_config.path, migrations_package_basename) # In case of using MIGRATION_MODULES setting and the custom package # doesn't exist, create one, starting from an existing package existing_dirs, missing_dirs = 
migrations_package_name.split("."), [] while existing_dirs: missing_dirs.insert(0, existing_dirs.pop(-1)) try: base_module = import_module(".".join(existing_dirs)) except ImportError: continue else: try: base_dir = upath(module_dir(base_module)) except ValueError: continue else: break else: raise ValueError( "Could not locate an appropriate location to create " "migrations package %s. Make sure the toplevel " "package exists and can be imported." % migrations_package_name) final_dir = os.path.join(base_dir, *missing_dirs) if not os.path.isdir(final_dir): os.makedirs(final_dir) for missing_dir in missing_dirs: base_dir = os.path.join(base_dir, missing_dir) with open(os.path.join(base_dir, "__init__.py"), "w"): pass return final_dir @property def filename(self): return "%s.py" % self.migration.name @property def path(self): return os.path.join(self.basedir, self.filename) @classmethod def serialize_deconstructed(cls, path, args, kwargs): name, imports = cls._serialize_path(path) strings = [] for arg in args: arg_string, arg_imports = cls.serialize(arg) strings.append(arg_string) imports.update(arg_imports) for kw, arg in sorted(kwargs.items()): arg_string, arg_imports = cls.serialize(arg) imports.update(arg_imports) strings.append("%s=%s" % (kw, arg_string)) return "%s(%s)" % (name, ", ".join(strings)), imports @classmethod def _serialize_path(cls, path): module, name = path.rsplit(".", 1) if module == "django.db.models": imports = {"from django.db import models"} name = "models.%s" % name else: imports = {"import %s" % module} name = path return name, imports @classmethod def serialize(cls, value): """ Serializes the value to a string that's parsable by Python, along with any needed imports to make that string work. More advanced than repr() as it can encode things like datetime.datetime.now. """ # FIXME: Ideally Promise would be reconstructible, but for now we # use force_text on them and defer to the normal string serialization # process. 
if isinstance(value, Promise): value = force_text(value) # Sequences if isinstance(value, (frozenset, list, set, tuple)): imports = set() strings = [] for item in value: item_string, item_imports = cls.serialize(item) imports.update(item_imports) strings.append(item_string) if isinstance(value, set): # Don't use the literal "{%s}" as it doesn't support empty set format = "set([%s])" elif isinstance(value, frozenset): format = "frozenset([%s])" elif isinstance(value, tuple): # When len(value)==0, the empty tuple should be serialized as # "()", not "(,)" because (,) is invalid Python syntax. format = "(%s)" if len(value) != 1 else "(%s,)" else: format = "[%s]" return format % (", ".join(strings)), imports # Dictionaries elif isinstance(value, dict): imports = set() strings = [] for k, v in sorted(value.items()): k_string, k_imports = cls.serialize(k) v_string, v_imports = cls.serialize(v) imports.update(k_imports) imports.update(v_imports) strings.append((k_string, v_string)) return "{%s}" % (", ".join("%s: %s" % (k, v) for k, v in strings)), imports # Datetimes elif isinstance(value, datetime.datetime): value_repr = cls.serialize_datetime(value) imports = ["import datetime"] if value.tzinfo is not None: imports.append("from django.utils.timezone import utc") return value_repr, set(imports) # Dates elif isinstance(value, datetime.date): value_repr = repr(value) if isinstance(value, datetime_safe.date): value_repr = "datetime.%s" % value_repr return value_repr, {"import datetime"} # Times elif isinstance(value, datetime.time): value_repr = repr(value) if isinstance(value, datetime_safe.time): value_repr = "datetime.%s" % value_repr return value_repr, {"import datetime"} # Timedeltas elif isinstance(value, datetime.timedelta): return repr(value), {"import datetime"} # Settings references elif isinstance(value, SettingsReference): return "settings.%s" % value.setting_name, {"from django.conf import settings"} # Simple types elif isinstance(value, float): if 
math.isnan(value) or math.isinf(value): return 'float("{}")'.format(value), set() return repr(value), set() elif isinstance(value, six.integer_types + (bool, type(None))): return repr(value), set() elif isinstance(value, six.binary_type): value_repr = repr(value) if six.PY2: # Prepend the `b` prefix since we're importing unicode_literals value_repr = 'b' + value_repr return value_repr, set() elif isinstance(value, six.text_type): value_repr = repr(value) if six.PY2: # Strip the `u` prefix since we're importing unicode_literals value_repr = value_repr[1:] return value_repr, set() # Decimal elif isinstance(value, decimal.Decimal): return repr(value), {"from decimal import Decimal"} # Django fields elif isinstance(value, models.Field): attr_name, path, args, kwargs = value.deconstruct() return cls.serialize_deconstructed(path, args, kwargs) # Classes elif isinstance(value, type): special_cases = [ (models.Model, "models.Model", []), ] for case, string, imports in special_cases: if case is value: return string, set(imports) if hasattr(value, "__module__"): module = value.__module__ if module == six.moves.builtins.__name__: return value.__name__, set() else: return "%s.%s" % (module, value.__name__), {"import %s" % module} elif isinstance(value, models.manager.BaseManager): as_manager, manager_path, qs_path, args, kwargs = value.deconstruct() if as_manager: name, imports = cls._serialize_path(qs_path) return "%s.as_manager()" % name, imports else: return cls.serialize_deconstructed(manager_path, args, kwargs) elif isinstance(value, Operation): string, imports = OperationWriter(value, indentation=0).serialize() # Nested operation, trailing comma is handled in upper OperationWriter._write() return string.rstrip(','), imports # Anything that knows how to deconstruct itself. elif hasattr(value, 'deconstruct'): return cls.serialize_deconstructed(*value.deconstruct()) # Functions elif isinstance(value, (types.FunctionType, types.BuiltinFunctionType)): # @classmethod? 
if getattr(value, "__self__", None) and isinstance(value.__self__, type): klass = value.__self__ module = klass.__module__ return "%s.%s.%s" % (module, klass.__name__, value.__name__), {"import %s" % module} # Further error checking if value.__name__ == '<lambda>': raise ValueError("Cannot serialize function: lambda") if value.__module__ is None: raise ValueError("Cannot serialize function %r: No module" % value) # Python 3 is a lot easier, and only uses this branch if it's not local. if getattr(value, "__qualname__", None) and getattr(value, "__module__", None): if "<" not in value.__qualname__: # Qualname can include <locals> return "%s.%s" % (value.__module__, value.__qualname__), {"import %s" % value.__module__} # Python 2/fallback version module_name = value.__module__ # Make sure it's actually there and not an unbound method module = import_module(module_name) if not hasattr(module, value.__name__): raise ValueError( "Could not find function %s in %s.\n" "Please note that due to Python 2 limitations, you cannot " "serialize unbound method functions (e.g. a method " "declared and used in the same class body). Please move " "the function into the main module body to use migrations.\n" "For more information, see " "https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values" % (value.__name__, module_name, get_docs_version())) # Needed on Python 2 only if module_name == '__builtin__': return value.__name__, set() return "%s.%s" % (module_name, value.__name__), {"import %s" % module_name} # Other iterables elif isinstance(value, collections.Iterable): imports = set() strings = [] for item in value: item_string, item_imports = cls.serialize(item) imports.update(item_imports) strings.append(item_string) # When len(strings)==0, the empty iterable should be serialized as # "()", not "(,)" because (,) is invalid Python syntax. 
format = "(%s)" if len(strings) != 1 else "(%s,)" return format % (", ".join(strings)), imports # Compiled regex elif isinstance(value, COMPILED_REGEX_TYPE): imports = {"import re"} regex_pattern, pattern_imports = cls.serialize(value.pattern) regex_flags, flag_imports = cls.serialize(value.flags) imports.update(pattern_imports) imports.update(flag_imports) args = [regex_pattern] if value.flags: args.append(regex_flags) return "re.compile(%s)" % ', '.join(args), imports # Uh oh. else: raise ValueError( "Cannot serialize: %r\nThere are some values Django cannot serialize into " "migration files.\nFor more, see https://docs.djangoproject.com/en/%s/" "topics/migrations/#migration-serializing" % (value, get_docs_version()) ) MIGRATION_TEMPLATE = """\ # -*- coding: utf-8 -*- from __future__ import unicode_literals %(imports)s class Migration(migrations.Migration): %(replaces_str)s%(initial_str)s dependencies = [ %(dependencies)s\ ] operations = [ %(operations)s\ ] """
{ "content_hash": "1645b57acf5684ead006c4287d196e4f", "timestamp": "", "source": "github", "line_count": 526, "max_line_length": 109, "avg_line_length": 41.57984790874525, "alnum_prop": 0.5505921082712267, "repo_name": "gohin/django", "id": "b45ec0cedaa4e183ed40de678abe7c8974e91e37", "size": "21871", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "django/db/migrations/writer.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "48399" }, { "name": "HTML", "bytes": "172916" }, { "name": "JavaScript", "bytes": "247734" }, { "name": "Makefile", "bytes": "125" }, { "name": "Python", "bytes": "11198596" }, { "name": "Shell", "bytes": "809" }, { "name": "Smarty", "bytes": "130" } ], "symlink_target": "" }
""" A module to allow each channel to have "news". News items may have expiration dates. """ import supybot import supybot.world as world # Use this for the version of this plugin. You may wish to put a CVS keyword # in here if you're keeping the plugin in CVS or some similar system. __version__ = "%%VERSION%%" __author__ = supybot.authors.strike # This is a dictionary mapping supybot.Author instances to lists of # contributions. __contributors__ = {} # This is a url where the most recent plugin package can be downloaded. __url__ = '' # 'http://supybot.com/Members/yourname/News/download' from . import config from . import plugin from imp import reload reload(plugin) # In case we're being reloaded. # Add more reloads here if you add third-party modules and want them to be # reloaded when this plugin is reloaded. Don't forget to import them as well! if world.testing: from . import test Class = plugin.Class configure = config.configure # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
{ "content_hash": "d6f512b23c847e2a53e86afd71986b1f", "timestamp": "", "source": "github", "line_count": 36, "max_line_length": 78, "avg_line_length": 28.47222222222222, "alnum_prop": 0.7375609756097561, "repo_name": "Ban3/Limnoria", "id": "45eb3221d0f6d1fca831131f5dc34b2d00a48714", "size": "2612", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "plugins/News/__init__.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "864" }, { "name": "Python", "bytes": "2513657" }, { "name": "Shell", "bytes": "217" } ], "symlink_target": "" }
from twisted.application.service import ServiceMaker TwistedTelnet = ServiceMaker( "Twisted Telnet Shell Server", "twisted.tap.telnet", "A simple, telnet-based remote debugging service.", "telnet")
{ "content_hash": "b961335850ae9abbf859a532c32c30c7", "timestamp": "", "source": "github", "line_count": 8, "max_line_length": 55, "avg_line_length": 28, "alnum_prop": 0.7053571428571429, "repo_name": "hlzz/dotfiles", "id": "42e23437adf518ce80ad743a542f0f7d630b0016", "size": "298", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "graphics/VTK-7.0.0/ThirdParty/Twisted/twisted/plugins/twisted_telnet.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "AppleScript", "bytes": "1240" }, { "name": "Arc", "bytes": "38" }, { "name": "Assembly", "bytes": "449468" }, { "name": "Batchfile", "bytes": "16152" }, { "name": "C", "bytes": "102303195" }, { "name": "C++", "bytes": "155056606" }, { "name": "CMake", "bytes": "7200627" }, { "name": "CSS", "bytes": "179330" }, { "name": "Cuda", "bytes": "30026" }, { "name": "D", "bytes": "2152" }, { "name": "Emacs Lisp", "bytes": "14892" }, { "name": "FORTRAN", "bytes": "5276" }, { "name": "Forth", "bytes": "3637" }, { "name": "GAP", "bytes": "14495" }, { "name": "GLSL", "bytes": "438205" }, { "name": "Gnuplot", "bytes": "327" }, { "name": "Groff", "bytes": "518260" }, { "name": "HLSL", "bytes": "965" }, { "name": "HTML", "bytes": "2003175" }, { "name": "Haskell", "bytes": "10370" }, { "name": "IDL", "bytes": "2466" }, { "name": "Java", "bytes": "219109" }, { "name": "JavaScript", "bytes": "1618007" }, { "name": "Lex", "bytes": "119058" }, { "name": "Lua", "bytes": "23167" }, { "name": "M", "bytes": "1080" }, { "name": "M4", "bytes": "292475" }, { "name": "Makefile", "bytes": "7112810" }, { "name": "Matlab", "bytes": "1582" }, { "name": "NSIS", "bytes": "34176" }, { "name": "Objective-C", "bytes": "65312" }, { "name": "Objective-C++", "bytes": "269995" }, { "name": "PAWN", "bytes": "4107117" }, { "name": "PHP", "bytes": "2690" }, { "name": "Pascal", "bytes": "5054" }, { "name": "Perl", "bytes": "485508" }, { "name": "Pike", "bytes": "1338" }, { "name": "Prolog", "bytes": "5284" }, { "name": 
"Python", "bytes": "16799659" }, { "name": "QMake", "bytes": "89858" }, { "name": "Rebol", "bytes": "291" }, { "name": "Ruby", "bytes": "21590" }, { "name": "Scilab", "bytes": "120244" }, { "name": "Shell", "bytes": "2266191" }, { "name": "Slash", "bytes": "1536" }, { "name": "Smarty", "bytes": "1368" }, { "name": "Swift", "bytes": "331" }, { "name": "Tcl", "bytes": "1911873" }, { "name": "TeX", "bytes": "11981" }, { "name": "Verilog", "bytes": "3893" }, { "name": "VimL", "bytes": "595114" }, { "name": "XSLT", "bytes": "62675" }, { "name": "Yacc", "bytes": "307000" }, { "name": "eC", "bytes": "366863" } ], "symlink_target": "" }
from sklearn.datasets import load_breast_cancer from autosklearn.classification import AutoSklearnClassifier from autosklearn.constants import MULTICLASS_CLASSIFICATION, REGRESSION from autosklearn.metalearning.mismbo import suggest_via_metalearning from autosklearn.pipeline.util import get_dataset from autosklearn.smbo import _calculate_metafeatures, _calculate_metafeatures_encoded from autosklearn.util.pipeline import get_configuration_space import unittest class MetafeatureValueDummy(object): def __init__(self, name, value): self.name = name self.value = value class Test(unittest.TestCase): _multiprocess_can_split_ = True def setUp(self): self.X_train, self.Y_train, self.X_test, self.Y_test = get_dataset("iris") eliminate_class_two = self.Y_train != 2 self.X_train = self.X_train[eliminate_class_two] self.Y_train = self.Y_train[eliminate_class_two] @unittest.skip("TODO refactor!") def test_metalearning(self): dataset_name_classification = "digits" initial_challengers_classification = { "ACC_METRIC": '--initial-challengers " ' "-balancing:strategy 'weighting' " "-classifier:__choice__ 'proj_logit'", "AUC_METRIC": '--initial-challengers " ' "-balancing:strategy 'weighting' " "-classifier:__choice__ 'liblinear_svc'", "BAC_METRIC": '--initial-challengers " ' "-balancing:strategy 'weighting' " "-classifier:__choice__ 'proj_logit'", "F1_METRIC": '--initial-challengers " ' "-balancing:strategy 'weighting' " "-classifier:__choice__ 'proj_logit'", "PAC_METRIC": '--initial-challengers " ' "-balancing:strategy 'none' " "-classifier:__choice__ 'random_forest'", } dataset_name_regression = "diabetes" initial_challengers_regression = { "A_METRIC": '--initial-challengers " ' "-imputation:strategy 'mean' " "-one_hot_encoding:minimum_fraction '0.01' " "-one_hot_encoding:use_minimum_fraction 'True' " "-preprocessor:__choice__ 'no_preprocessing' " "-regressor:__choice__ 'random_forest'", "R2_METRIC": '--initial-challengers " ' "-imputation:strategy 'mean' " 
"-one_hot_encoding:minimum_fraction '0.01' " "-one_hot_encoding:use_minimum_fraction 'True' " "-preprocessor:__choice__ 'no_preprocessing' " "-regressor:__choice__ 'random_forest'", } for dataset_name, task, initial_challengers in [ (dataset_name_regression, REGRESSION, initial_challengers_regression), ( dataset_name_classification, MULTICLASS_CLASSIFICATION, initial_challengers_classification, ), ]: for metric in initial_challengers: configuration_space = get_configuration_space( {"metric": metric, "task": task, "is_sparse": False}, include={"feature_preprocessor": ["no_preprocessing"]}, ) X_train, Y_train, X_test, Y_test = get_dataset(dataset_name) categorical = {i: False for i in range(X_train.shape[1])} meta_features_label = _calculate_metafeatures( X_train, Y_train, categorical, dataset_name, task ) meta_features_encoded_label = _calculate_metafeatures_encoded( X_train, Y_train, categorical, dataset_name, task ) initial_configuration_strings_for_smac = suggest_via_metalearning( meta_features_label, meta_features_encoded_label, configuration_space, dataset_name, metric, task, False, 1, None, ) print(metric) print(initial_configuration_strings_for_smac[0]) self.assertTrue( initial_configuration_strings_for_smac[0].startswith( initial_challengers[metric] ) ) def test_metadata_directory(self): # Test that metadata directory is set correctly (if user specifies, # Auto-sklearn should check that the directory exists. If not, it # should use the default directory. 
dask_client = unittest.mock.Mock() automl1 = AutoSklearnClassifier( time_left_for_this_task=30, per_run_time_limit=5, metadata_directory="pyMetaLearn/metadata_dir", # user metadata_dir dask_client=dask_client, ) self.assertEqual(automl1.metadata_directory, "pyMetaLearn/metadata_dir") automl2 = AutoSklearnClassifier( # default metadata_dir time_left_for_this_task=30, per_run_time_limit=5, dask_client=dask_client, ) self.assertIsNone(automl2.metadata_directory) nonexistent_dir = "nonexistent_dir" automl3 = AutoSklearnClassifier( time_left_for_this_task=30, per_run_time_limit=5, metadata_directory=nonexistent_dir, # user specified metadata_dir dask_client=dask_client, ensemble_class=None, ) X, y = load_breast_cancer(return_X_y=True) self.assertRaisesRegex( ValueError, "The specified metadata directory " "'%s' does not exist!" % nonexistent_dir, automl3.fit, X=X, y=y, )
{ "content_hash": "bece2f6b63d3eae82613096381418fcf", "timestamp": "", "source": "github", "line_count": 147, "max_line_length": 85, "avg_line_length": 39.523809523809526, "alnum_prop": 0.5685025817555938, "repo_name": "automl/auto-sklearn", "id": "9740313ff8f40f37478dcf20a5ce822bd171206c", "size": "5836", "binary": false, "copies": "1", "ref": "refs/heads/development", "path": "test/test_metalearning/test_metalearning.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Dockerfile", "bytes": "950" }, { "name": "Makefile", "bytes": "3513" }, { "name": "Python", "bytes": "2008151" }, { "name": "Shell", "bytes": "4744" } ], "symlink_target": "" }
import json from urllib import request import requests #for rest api repository_url = 'http://10.3.100.22:8080' restpath = '/rest' xmlpath = '/xmlui' def get_communities(): communities = request.urlopen(repository_url + restpath + '/communities') communities_json = communities.read().decode('utf-8') communities_load = json.loads(communities_json) communities_processed = [] for dictionary in communities_load: if dictionary['name'] and dictionary['name'] != '': communities_processed.append(dictionary) #print(communities_processed) with open("test.json", 'w') as jsonfile: text = json.dumps(communities_processed) jsonfile.write(text) return communities_processed def get_by_year(cp): for dictionary in cp: try: year = int(dictionary['name']) id = dictionary['id'] print(year) #ccj = curr_collections.read().decode('utf-8') except: year = 0 if year != 0: path = repository_url + dictionary['link'] + '/collections' print(path) curr_collections = request.urlopen(path) curr_json = json.loads(curr_collections.read().decode('utf-8')) print(curr_json[0]['handle']) path += str(curr_json[0]['id']) temp = requests.get(path) print(temp) if __name__ == '__main__': get_by_year(get_communities())
{ "content_hash": "056c9bfef0a4d9c0110dc0a068477f5e", "timestamp": "", "source": "github", "line_count": 47, "max_line_length": 77, "avg_line_length": 30.95744680851064, "alnum_prop": 0.5958762886597938, "repo_name": "enlighter/ndl-question-papers-search-hub", "id": "75a2ad697a75e54542ab7150e789dd5b8b705c1e", "size": "1455", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "qp_search_project/searcher/services.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "24487" } ], "symlink_target": "" }
""" Test PostgreSQL full text search. These tests use dialogue from the 1975 film Monty Python and the Holy Grail. All text copyright Python (Monty) Pictures. Thanks to sacred-texts.com for the transcript. """ from django.contrib.postgres.search import ( SearchQuery, SearchRank, SearchVector, ) from django.db import connection from django.db.models import F from django.test import SimpleTestCase, modify_settings, skipUnlessDBFeature from . import PostgreSQLTestCase from .models import Character, Line, Scene class GrailTestData: @classmethod def setUpTestData(cls): cls.robin = Scene.objects.create(scene='Scene 10', setting='The dark forest of Ewing') cls.minstrel = Character.objects.create(name='Minstrel') verses = [ ( 'Bravely bold Sir Robin, rode forth from Camelot. ' 'He was not afraid to die, o Brave Sir Robin. ' 'He was not at all afraid to be killed in nasty ways. ' 'Brave, brave, brave, brave Sir Robin!' ), ( 'He was not in the least bit scared to be mashed into a pulp, ' 'Or to have his eyes gouged out, and his elbows broken. ' 'To have his kneecaps split, and his body burned away, ' 'And his limbs all hacked and mangled, brave Sir Robin!' 
), ( 'His head smashed in and his heart cut out, ' 'And his liver removed and his bowels unplugged, ' 'And his nostrils ripped and his bottom burned off,' 'And his --' ), ] cls.verses = [Line.objects.create( scene=cls.robin, character=cls.minstrel, dialogue=verse, ) for verse in verses] cls.verse0, cls.verse1, cls.verse2 = cls.verses cls.witch_scene = Scene.objects.create(scene='Scene 5', setting="Sir Bedemir's Castle") bedemir = Character.objects.create(name='Bedemir') crowd = Character.objects.create(name='Crowd') witch = Character.objects.create(name='Witch') duck = Character.objects.create(name='Duck') cls.bedemir0 = Line.objects.create( scene=cls.witch_scene, character=bedemir, dialogue='We shall use my larger scales!', dialogue_config='english', ) cls.bedemir1 = Line.objects.create( scene=cls.witch_scene, character=bedemir, dialogue='Right, remove the supports!', dialogue_config='english', ) cls.duck = Line.objects.create(scene=cls.witch_scene, character=duck, dialogue=None) cls.crowd = Line.objects.create(scene=cls.witch_scene, character=crowd, dialogue='A witch! A witch!') cls.witch = Line.objects.create(scene=cls.witch_scene, character=witch, dialogue="It's a fair cop.") trojan_rabbit = Scene.objects.create(scene='Scene 8', setting="The castle of Our Master Ruiz' de lu la Ramper") guards = Character.objects.create(name='French Guards') cls.french = Line.objects.create( scene=trojan_rabbit, character=guards, dialogue='Oh. Un beau cadeau. 
Oui oui.', dialogue_config='french', ) @modify_settings(INSTALLED_APPS={'append': 'django.contrib.postgres'}) class SimpleSearchTest(GrailTestData, PostgreSQLTestCase): def test_simple(self): searched = Line.objects.filter(dialogue__search='elbows') self.assertSequenceEqual(searched, [self.verse1]) def test_non_exact_match(self): searched = Line.objects.filter(dialogue__search='hearts') self.assertSequenceEqual(searched, [self.verse2]) def test_search_two_terms(self): searched = Line.objects.filter(dialogue__search='heart bowel') self.assertSequenceEqual(searched, [self.verse2]) def test_search_two_terms_with_partial_match(self): searched = Line.objects.filter(dialogue__search='Robin killed') self.assertSequenceEqual(searched, [self.verse0]) @modify_settings(INSTALLED_APPS={'append': 'django.contrib.postgres'}) class SearchVectorFieldTest(GrailTestData, PostgreSQLTestCase): def test_existing_vector(self): Line.objects.update(dialogue_search_vector=SearchVector('dialogue')) searched = Line.objects.filter(dialogue_search_vector=SearchQuery('Robin killed')) self.assertSequenceEqual(searched, [self.verse0]) def test_existing_vector_config_explicit(self): Line.objects.update(dialogue_search_vector=SearchVector('dialogue')) searched = Line.objects.filter(dialogue_search_vector=SearchQuery('cadeaux', config='french')) self.assertSequenceEqual(searched, [self.french]) def test_single_coalesce_expression(self): searched = Line.objects.annotate(search=SearchVector('dialogue')).filter(search='cadeaux') self.assertNotIn('COALESCE(COALESCE', str(searched.query)) class MultipleFieldsTest(GrailTestData, PostgreSQLTestCase): def test_simple_on_dialogue(self): searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue'), ).filter(search='elbows') self.assertSequenceEqual(searched, [self.verse1]) def test_simple_on_scene(self): searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue'), ).filter(search='Forest') 
self.assertCountEqual(searched, self.verses) def test_non_exact_match(self): searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue'), ).filter(search='heart') self.assertSequenceEqual(searched, [self.verse2]) def test_search_two_terms(self): searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue'), ).filter(search='heart forest') self.assertSequenceEqual(searched, [self.verse2]) def test_terms_adjacent(self): searched = Line.objects.annotate( search=SearchVector('character__name', 'dialogue'), ).filter(search='minstrel') self.assertCountEqual(searched, self.verses) searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue'), ).filter(search='minstrelbravely') self.assertSequenceEqual(searched, []) def test_search_with_null(self): searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue'), ).filter(search='bedemir') self.assertCountEqual(searched, [self.bedemir0, self.bedemir1, self.crowd, self.witch, self.duck]) def test_search_with_non_text(self): searched = Line.objects.annotate( search=SearchVector('id'), ).filter(search=str(self.crowd.id)) self.assertSequenceEqual(searched, [self.crowd]) @skipUnlessDBFeature('has_phraseto_tsquery') def test_phrase_search(self): line_qs = Line.objects.annotate(search=SearchVector('dialogue')) searched = line_qs.filter(search=SearchQuery('burned body his away', search_type='phrase')) self.assertSequenceEqual(searched, []) searched = line_qs.filter(search=SearchQuery('his body burned away', search_type='phrase')) self.assertSequenceEqual(searched, [self.verse1]) @skipUnlessDBFeature('has_phraseto_tsquery') def test_phrase_search_with_config(self): line_qs = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue', config='french'), ) searched = line_qs.filter( search=SearchQuery('cadeau beau un', search_type='phrase', config='french'), ) self.assertSequenceEqual(searched, []) searched = line_qs.filter( 
search=SearchQuery('un beau cadeau', search_type='phrase', config='french'), ) self.assertSequenceEqual(searched, [self.french]) def test_raw_search(self): line_qs = Line.objects.annotate(search=SearchVector('dialogue')) searched = line_qs.filter(search=SearchQuery('Robin', search_type='raw')) self.assertCountEqual(searched, [self.verse0, self.verse1]) searched = line_qs.filter(search=SearchQuery("Robin & !'Camelot'", search_type='raw')) self.assertSequenceEqual(searched, [self.verse1]) def test_raw_search_with_config(self): line_qs = Line.objects.annotate(search=SearchVector('dialogue', config='french')) searched = line_qs.filter( search=SearchQuery("'cadeaux' & 'beaux'", search_type='raw', config='french'), ) self.assertSequenceEqual(searched, [self.french]) def test_bad_search_type(self): with self.assertRaisesMessage(ValueError, "Unknown search_type argument 'foo'."): SearchQuery('kneecaps', search_type='foo') def test_config_query_explicit(self): searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue', config='french'), ).filter(search=SearchQuery('cadeaux', config='french')) self.assertSequenceEqual(searched, [self.french]) def test_config_query_implicit(self): searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue', config='french'), ).filter(search='cadeaux') self.assertSequenceEqual(searched, [self.french]) def test_config_from_field_explicit(self): searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue', config=F('dialogue_config')), ).filter(search=SearchQuery('cadeaux', config=F('dialogue_config'))) self.assertSequenceEqual(searched, [self.french]) def test_config_from_field_implicit(self): searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue', config=F('dialogue_config')), ).filter(search='cadeaux') self.assertSequenceEqual(searched, [self.french]) @modify_settings(INSTALLED_APPS={'append': 'django.contrib.postgres'}) class 
TestCombinations(GrailTestData, PostgreSQLTestCase): def test_vector_add(self): searched = Line.objects.annotate( search=SearchVector('scene__setting') + SearchVector('character__name'), ).filter(search='bedemir') self.assertCountEqual(searched, [self.bedemir0, self.bedemir1, self.crowd, self.witch, self.duck]) def test_vector_add_multi(self): searched = Line.objects.annotate( search=( SearchVector('scene__setting') + SearchVector('character__name') + SearchVector('dialogue') ), ).filter(search='bedemir') self.assertCountEqual(searched, [self.bedemir0, self.bedemir1, self.crowd, self.witch, self.duck]) def test_query_and(self): searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue'), ).filter(search=SearchQuery('bedemir') & SearchQuery('scales')) self.assertSequenceEqual(searched, [self.bedemir0]) def test_query_multiple_and(self): searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue'), ).filter(search=SearchQuery('bedemir') & SearchQuery('scales') & SearchQuery('nostrils')) self.assertSequenceEqual(searched, []) searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue'), ).filter(search=SearchQuery('shall') & SearchQuery('use') & SearchQuery('larger')) self.assertSequenceEqual(searched, [self.bedemir0]) def test_query_or(self): searched = Line.objects.filter(dialogue__search=SearchQuery('kneecaps') | SearchQuery('nostrils')) self.assertCountEqual(searched, [self.verse1, self.verse2]) def test_query_multiple_or(self): searched = Line.objects.filter( dialogue__search=SearchQuery('kneecaps') | SearchQuery('nostrils') | SearchQuery('Sir Robin') ) self.assertCountEqual(searched, [self.verse1, self.verse2, self.verse0]) def test_query_invert(self): searched = Line.objects.filter(character=self.minstrel, dialogue__search=~SearchQuery('kneecaps')) self.assertCountEqual(searched, [self.verse0, self.verse2]) def test_combine_different_configs(self): searched = Line.objects.filter( 
dialogue__search=( SearchQuery('cadeau', config='french') | SearchQuery('nostrils', config='english') ) ) self.assertCountEqual(searched, [self.french, self.verse2]) @skipUnlessDBFeature('has_phraseto_tsquery') def test_combine_raw_phrase(self): searched = Line.objects.filter( dialogue__search=( SearchQuery('burn:*', search_type='raw', config='simple') | SearchQuery('rode forth from Camelot', search_type='phrase') ) ) self.assertCountEqual(searched, [self.verse0, self.verse1, self.verse2]) def test_query_combined_mismatch(self): msg = "SearchQuery can only be combined with other SearchQuerys, got" with self.assertRaisesMessage(TypeError, msg): Line.objects.filter(dialogue__search=None | SearchQuery('kneecaps')) with self.assertRaisesMessage(TypeError, msg): Line.objects.filter(dialogue__search=None & SearchQuery('kneecaps')) @modify_settings(INSTALLED_APPS={'append': 'django.contrib.postgres'}) class TestRankingAndWeights(GrailTestData, PostgreSQLTestCase): def test_ranking(self): searched = Line.objects.filter(character=self.minstrel).annotate( rank=SearchRank(SearchVector('dialogue'), SearchQuery('brave sir robin')), ).order_by('rank') self.assertSequenceEqual(searched, [self.verse2, self.verse1, self.verse0]) def test_rank_passing_untyped_args(self): searched = Line.objects.filter(character=self.minstrel).annotate( rank=SearchRank('dialogue', 'brave sir robin'), ).order_by('rank') self.assertSequenceEqual(searched, [self.verse2, self.verse1, self.verse0]) def test_weights_in_vector(self): vector = SearchVector('dialogue', weight='A') + SearchVector('character__name', weight='D') searched = Line.objects.filter(scene=self.witch_scene).annotate( rank=SearchRank(vector, SearchQuery('witch')), ).order_by('-rank')[:2] self.assertSequenceEqual(searched, [self.crowd, self.witch]) vector = SearchVector('dialogue', weight='D') + SearchVector('character__name', weight='A') searched = Line.objects.filter(scene=self.witch_scene).annotate( rank=SearchRank(vector, 
SearchQuery('witch')), ).order_by('-rank')[:2] self.assertSequenceEqual(searched, [self.witch, self.crowd]) def test_ranked_custom_weights(self): vector = SearchVector('dialogue', weight='D') + SearchVector('character__name', weight='A') searched = Line.objects.filter(scene=self.witch_scene).annotate( rank=SearchRank(vector, SearchQuery('witch'), weights=[1, 0, 0, 0.5]), ).order_by('-rank')[:2] self.assertSequenceEqual(searched, [self.crowd, self.witch]) def test_ranking_chaining(self): searched = Line.objects.filter(character=self.minstrel).annotate( rank=SearchRank(SearchVector('dialogue'), SearchQuery('brave sir robin')), ).filter(rank__gt=0.3) self.assertSequenceEqual(searched, [self.verse0]) class SearchVectorIndexTests(PostgreSQLTestCase): def test_search_vector_index(self): """SearchVector generates IMMUTABLE SQL in order to be indexable.""" # This test should be moved to test_indexes and use a functional # index instead once support lands (see #26167). query = Line.objects.all().query resolved = SearchVector('id', 'dialogue', config='english').resolve_expression(query) compiler = query.get_compiler(connection.alias) sql, params = resolved.as_sql(compiler, connection) # Indexed function must be IMMUTABLE. 
with connection.cursor() as cursor: cursor.execute( 'CREATE INDEX search_vector_index ON %s USING GIN (%s)' % (Line._meta.db_table, sql), params, ) class SearchQueryTests(SimpleTestCase): def test_str(self): tests = ( (~SearchQuery('a'), '~SearchQuery(a)'), ( (SearchQuery('a') | SearchQuery('b')) & (SearchQuery('c') | SearchQuery('d')), '((SearchQuery(a) || SearchQuery(b)) && (SearchQuery(c) || SearchQuery(d)))', ), ( SearchQuery('a') & (SearchQuery('b') | SearchQuery('c')), '(SearchQuery(a) && (SearchQuery(b) || SearchQuery(c)))', ), ( (SearchQuery('a') | SearchQuery('b')) & SearchQuery('c'), '((SearchQuery(a) || SearchQuery(b)) && SearchQuery(c))' ), ( SearchQuery('a') & (SearchQuery('b') & (SearchQuery('c') | SearchQuery('d'))), '(SearchQuery(a) && (SearchQuery(b) && (SearchQuery(c) || SearchQuery(d))))', ), ) for query, expected_str in tests: with self.subTest(query=query): self.assertEqual(str(query), expected_str)
{ "content_hash": "138b68a3ec8c366299ef313c93bd4ed2", "timestamp": "", "source": "github", "line_count": 394, "max_line_length": 119, "avg_line_length": 44.588832487309645, "alnum_prop": 0.6386612021857924, "repo_name": "georgemarshall/django", "id": "f5111ce8d3e3e8fa5eff17eea9fdad98eca2c9a8", "size": "17568", "binary": false, "copies": "11", "ref": "refs/heads/master", "path": "tests/postgres_tests/test_search.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "53023" }, { "name": "HTML", "bytes": "172977" }, { "name": "JavaScript", "bytes": "448123" }, { "name": "Makefile", "bytes": "125" }, { "name": "Python", "bytes": "12112373" }, { "name": "Shell", "bytes": "809" }, { "name": "Smarty", "bytes": "130" } ], "symlink_target": "" }
""" Aerostat Updater. """ import os import shutil import subprocess import sys from aerostat import logging class Updater(object): """Update the /etc/hosts file on the localhost.""" def __init__(self): """Initialize object.""" self.hosts_data = ['127.0.0.1 localhost'] def append_hosts_line(self, ip, hostname): """Format string appropriate for /etc/hosts file. Args: ip: str, ip address which mapping is made against. hostname: str, hostname to map against ip. Modifies: self.hosts_data, appends more hostname -> ip mappings. One mapping per list item, which translates into one line in the file. """ logging.debug('Parsing mapping for host %s and ip %s' % ( hostname, ip)) self.hosts_data.append('%s %s' % (ip, hostname)) def format_aliases(self, ip, aliases): """Format all of the alias lines as if they were also hosts. Args: ip: str, ip to do mapping against. aliases: list of str, hostnames to map ip to in additon to algorithmic hostname. Modifies: self.hosts_data, appends more hostname -> ip mappings. One mapping per list item, which translates into one line in the file. """ for alias in aliases: if ip: self.append_hosts_line(ip, alias) def delete_aero_sect(self, hosts_content): """Remove aerostat section and return remaining lines. Args: hosts_content: list of str, /etc/hosts values as read from file. Returns: list of str, only those lines that do not belong to Aerostat. """ preceding = [] for line in hosts_content: if line.strip() == '# AEROSTAT': logging.info('Scanned to Aerostat Section. Removing.') break else: preceding.append(line.strip()) return preceding def write_hosts_file(self): """Write out the new /etc/hosts file.""" try: hosts_file_read = open('/etc/hosts.legacy', 'r') hosts_content = hosts_file_read.readlines() # Keep non-Aerostat Data for new file write. hosts_file_read.close() except IOError: hosts_content = [] preceding = self.delete_aero_sect(hosts_content) # Create new Aeorstat tag headers. 
aerostat_section = ['# AEROSTAT'] aerostat_section.extend(self.hosts_data) self.hosts_data = aerostat_section self.hosts_data.append('# /AEROSTAT') hosts_file_write = open('/etc/hosts.tmp', 'w') # Remember to pre-pend old information. if preceding: preceding.extend(self.hosts_data) self.hosts_data = preceding # Actually write to the file. hosts_string = '\n'.join(self.hosts_data) + '\n' hosts_file_write.write(hosts_string) hosts_file_write.close() os.rename('/etc/hosts.tmp', '/etc/hosts') def do_update(self, db, dry_run=None, legacy_updater=None): """Update /etc/hosts. Args: db: mongdb db reference. dry_run: bool, whether or not to actually update /etc/hosts. legacy_updater: binary to run in order to update /etc/hosts (helpful for transitions). Returns: bool, True if changes are made to the system. """ if legacy_updater: # Call legacy host updater, allow it to write to /etc/hosts. retcode = subprocess.call([legacy_updater]) if retcode < 0: logging.error('Call to %s failed!' % legacy_updater) sys.exit(1) self.hosts_data = ['127.0.0.1 localhost'] # Reset data, otherwise we append aerostat_data = db.servers.find() # extract hostname, ip and aliases for item in aerostat_data: if item['ip']: self.append_hosts_line(item['ip'], item['hostname']) if item['aliases']: self.format_aliases(item['ip'], item['aliases']) if dry_run: dry_run_output = '\n'.join(self.hosts_data) + '\n' logging.debug( ('DRY RUN: Your /etc/hosts file would look' 'like this: \n%s' % dry_run_output)) return False # Only make any changes if there are actual data available to write. if self.hosts_data: logging.info('Copying /etc/hosts to /etc/hosts.bak') shutil.copyfile('/etc/hosts', '/etc/hosts.bak') logging.info('Writing new /etc/hosts file.') self.write_hosts_file() else: logging.error('No data returned from aerostat. Write aborted.') return True
{ "content_hash": "52be8e8e25515acbcc6c956363f1bb83", "timestamp": "", "source": "github", "line_count": 145, "max_line_length": 84, "avg_line_length": 33.765517241379314, "alnum_prop": 0.5655637254901961, "repo_name": "urbanairship/aerostat", "id": "65c1b52463c4f95d8b10ccdf958f09a94529e445", "size": "4919", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "aerostat/updater.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "63467" } ], "symlink_target": "" }
''' Created on Aug 19, 2011 @author: Piotr ''' import unittest from generator.data.GeneratorDescription import GeneratorDescription from generator.BlenderGenerator import BlenderGenerator class BlenderGeneratorTests(unittest.TestCase): def test_simple_printing(self): gDesc = GeneratorDescription(inputFileName="token", inputFormat=".x3d", outputFormat=".jpg") fileName = "new_file_name" inputFolder = "in_folder_provided" outputFolder = "out_folder_provided" bGen = BlenderGenerator(gDesc, inputFolder, outputFolder) print(bGen.prepareRender(fileName)) if __name__ == "__main__": unittest.main()
{ "content_hash": "d5084db2f1cf22c683750780692859ce", "timestamp": "", "source": "github", "line_count": 25, "max_line_length": 100, "avg_line_length": 26.44, "alnum_prop": 0.7125567322239031, "repo_name": "Dzess/ALFIRT", "id": "dadcf0683aa389ff339aee33d1ecdb0f09a05937", "size": "661", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "alfirt.runner/src/generator/tests/BlenderGeneratorTests.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "349818" } ], "symlink_target": "" }
""" Objects that receive generated C/C++ code lines, reindents them, and writes them to a file, memory, or another code sink object. """ import sys PY3 = (sys.version_info[0] >= 3) if PY3: string_types = str, else: string_types = basestring, DEBUG = 0 if DEBUG: import traceback import sys class CodeSink(object): """Abstract base class for code sinks""" def __init__(self): r'''Constructor >>> sink = MemoryCodeSink() >>> sink.writeln("foo();") >>> sink.writeln("if (true) {") >>> sink.indent() >>> sink.writeln("bar();") >>> sink.unindent() >>> sink.writeln("zbr();") >>> print sink.flush().rstrip() foo(); if (true) { bar(); zbr(); >>> sink = MemoryCodeSink() >>> sink.writeln("foo();") >>> sink.writeln() >>> sink.writeln("bar();") >>> print len(sink.flush().split("\n")) 4 ''' self.indent_level = 0 # current indent level self.indent_stack = [] # previous indent levels if DEBUG: self._last_unindent_stack = None # for debugging def _format_code(self, code): """Utility method for subclasses to use for formatting code (splits lines and indents them)""" assert isinstance(code, string_types) l = [] for line in code.split('\n'): l.append(' '*self.indent_level + line) return l def writeln(self, line=''): """Write one or more lines of code""" raise NotImplementedError def indent(self, level=4): '''Add a certain ammount of indentation to all lines written from now on and until unindent() is called''' self.indent_stack.append(self.indent_level) self.indent_level += level def unindent(self): '''Revert indentation level to the value before last indent() call''' if DEBUG: try: self.indent_level = self.indent_stack.pop() except IndexError: if self._last_unindent_stack is not None: for line in traceback.format_list(self._last_unindent_stack): sys.stderr.write(line) raise self._last_unindent_stack = traceback.extract_stack() else: self.indent_level = self.indent_stack.pop() class FileCodeSink(CodeSink): """A code sink that writes to a file-like object""" def __init__(self, file_): """ 
:param file_: a file like object """ CodeSink.__init__(self) self.file = file_ def __repr__(self): return "<pybindgen.typehandlers.codesink.FileCodeSink %r>" % (self.file.name,) def writeln(self, line=''): """Write one or more lines of code""" self.file.write('\n'.join(self._format_code(line))) self.file.write('\n') class MemoryCodeSink(CodeSink): """A code sink that keeps the code in memory, and can later flush the code to another code sink""" def __init__(self): "Constructor" CodeSink.__init__(self) self.lines = [] def writeln(self, line=''): """Write one or more lines of code""" self.lines.extend(self._format_code(line)) def flush_to(self, sink): """Flushes code to another code sink :param sink: another CodeSink instance """ assert isinstance(sink, CodeSink) for line in self.lines: sink.writeln(line.rstrip()) self.lines = [] def flush(self): "Flushes the code and returns the formatted output as a return value string" l = [] for line in self.lines: l.extend(self._format_code(line)) self.lines = [] return "\n".join(l) + '\n' class NullCodeSink(CodeSink): """A code sink that discards all content. Useful to 'test' if code generation would work without actually generating anything.""" def __init__(self): "Constructor" CodeSink.__init__(self) def writeln(self, line=''): """Write one or more lines of code""" pass def flush_to(self, sink): """Flushes code to another code sink :param sink: another CodeSink instance """ raise TypeError("Cannot flush a NullCodeSink; it has no content!") def flush(self): "Flushes the code and returns the formatted output as a return value string" raise TypeError("Cannot flush a NullCodeSink; it has no content!")
{ "content_hash": "f5c3ac795ff15013b7ee31b72d83a2e3", "timestamp": "", "source": "github", "line_count": 150, "max_line_length": 86, "avg_line_length": 30.5, "alnum_prop": 0.5669945355191257, "repo_name": "softDi/clusim", "id": "30929c7dea9508a7c58ee5f7ce2d7f4f01f4de12", "size": "4575", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "ns3/pybindgen-0.17.0.post57+nga6376f2/pybindgen/typehandlers/codesink.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "3021" }, { "name": "C", "bytes": "365226" }, { "name": "C++", "bytes": "24340132" }, { "name": "CSS", "bytes": "3775" }, { "name": "Click", "bytes": "19348" }, { "name": "Gnuplot", "bytes": "9919" }, { "name": "HTML", "bytes": "7942" }, { "name": "JavaScript", "bytes": "7698" }, { "name": "Makefile", "bytes": "92131" }, { "name": "Matlab", "bytes": "39069" }, { "name": "Perl", "bytes": "302716" }, { "name": "Perl 6", "bytes": "151" }, { "name": "Python", "bytes": "44191047" }, { "name": "QMake", "bytes": "6602" }, { "name": "Shell", "bytes": "146434" } ], "symlink_target": "" }
from Reviewable.models import Reviewable
from django import template

register = template.Library()


@register.inclusion_tag('Reviewable/__review_controls.html', takes_context=True)
def show_review_controls(context, review_object):
    """Render the review controls for ``review_object``.

    Raises ``TypeError`` when the given object does not inherit from
    ``Reviewable`` and therefore cannot carry reviews.
    """
    if isinstance(review_object, Reviewable):
        tag_context = {
            'review_count': review_object.review_count,
            'average_rating': review_object.average_rating,
            'content_type': review_object.content_type.model,
            'review_object': review_object,
        }
        return tag_context
    raise TypeError('The object %s is not reviewable' % review_object)
{ "content_hash": "b02feebebc268e79024788ac9f32bc04", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 80, "avg_line_length": 34.31578947368421, "alnum_prop": 0.6779141104294478, "repo_name": "jacobwindsor/django-reviewable", "id": "39e5af2c228d26065e82fd0ae06e11d7aa4f98a5", "size": "652", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Reviewable/templatetags/reviewable.py", "mode": "33261", "license": "mit", "language": [ { "name": "HTML", "bytes": "3540" }, { "name": "Python", "bytes": "36242" } ], "symlink_target": "" }
import pygame, os
from pygame.locals import *

# ----------------------------------------------------
def loadImage(name, colorkey=None):
    """Load an image from the ``data`` directory.

    :param name: file name, relative to the ``data`` directory
    :param colorkey: transparent color for the surface; ``-1`` means
        "use the color of the top-left pixel", ``None`` disables it
    :return: ``(surface, rect)`` for the converted image
    """
    fullname = os.path.join("data", name)
    try:
        image = pygame.image.load(fullname)
    except pygame.error as message:
        print("Cannot load image: {}".format(name))
        raise Exception(SystemExit, message)
    image = image.convert()
    if colorkey is not None:
        # BUG FIX: compare with ``==`` instead of ``is`` -- identity of
        # small-int literals is a CPython implementation detail and must
        # not be relied on for value comparison.
        if colorkey == -1:
            colorkey = image.get_at((0,0))
        image.set_colorkey(colorkey, RLEACCEL)
    return image, image.get_rect()

# ----------------------------------------------------
def loadSoundFile( name ):
    """Load a sound from the ``data`` directory.

    Returns a silent stand-in object when the mixer is unavailable so
    callers can always call ``.play()`` without checking.
    """
    class NoneSound:
        def play( self ):
            pass
    if not pygame.mixer or not pygame.mixer.get_init():
        return NoneSound()
    fullname = os.path.join( "data", name )
    try:
        sound = pygame.mixer.Sound( fullname )
    except pygame.error as message:
        print("Cannot load sound: {}".format(fullname))
        raise Exception(SystemExit, message)
    return sound

# ----------------------------------------------------
def collide_edges( a, c ):
    """Report which edges of rect ``a`` touch rect ``c``.

    :param a: pygame.Rect being probed
    :param c: pygame.Rect to test against
    :return: ``(top, right, bottom, left)`` tuple of booleans
    """
    Rect = pygame.Rect
    # One-pixel-thick strips along each edge of `a`; the corner pixels are
    # excluded so a corner contact cannot register on two edges at once.
    left = Rect(a.left, a.top+1, 1, a.height-2)
    right = Rect(a.right, a.top+1, 1, a.height-2)
    top = Rect(a.left + 1, a.top, a.width-2, 1)
    bottom = Rect(a.left + 1, a.bottom, a.width-2, 1)
    # bool() normalizes colliderect()'s result (an int in old pygame).
    l = bool(left.colliderect(c))
    r = bool(right.colliderect(c))
    t = bool(top.colliderect(c))
    b = bool(bottom.colliderect(c))
    return (t,r,b,l)
{ "content_hash": "3cfb09922b9faca48157503bc860b710", "timestamp": "", "source": "github", "line_count": 67, "max_line_length": 55, "avg_line_length": 24.791044776119403, "alnum_prop": 0.5213726670680313, "repo_name": "lsoriano808/Functional", "id": "326dc9233cb7d7d32da737959a06bc02baacd7e0", "size": "1661", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "a7/gameUtils.py", "mode": "33261", "license": "mit", "language": [ { "name": "Common Lisp", "bytes": "22182" }, { "name": "HTML", "bytes": "3789" }, { "name": "Prolog", "bytes": "4959" }, { "name": "Python", "bytes": "28906" } ], "symlink_target": "" }
"""nap_rest URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.8/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Add an import: from blog import urls as blog_urls 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls)) """ from django.conf.urls import include, url from django.contrib import admin from rest_framework.authtoken.views import obtain_auth_token urlpatterns = [ url(r'^app/', include('rest_api.urls')), url(r'^fss/', include('filebrowser_rest.urls')), url(r'^grappelli/', include('grappelli.urls')), url(r'^admin/', include(admin.site.urls)), url(r'^auth$', obtain_auth_token), ]
{ "content_hash": "9559fccade1b542e4ee4db0b03517a68", "timestamp": "", "source": "github", "line_count": 26, "max_line_length": 77, "avg_line_length": 38.76923076923077, "alnum_prop": 0.689484126984127, "repo_name": "icsnju/nap-core", "id": "16938370cd298b20a7bc716e71d0bfbcea790db8", "size": "1008", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "nap_rest/nap_rest/urls.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Nginx", "bytes": "1544" }, { "name": "Python", "bytes": "151557" }, { "name": "Shell", "bytes": "1086" } ], "symlink_target": "" }
import abc
import asyncio
from types import TracebackType
from typing import (
    Any,
    AsyncContextManager,
    Awaitable,
    Callable,
    Coroutine,
    Generic,
    Optional,
    Set,
    Tuple,
    Type,
    TypeVar,
    Union,
)

from aiormq.tools import awaitable

from aio_pika.log import get_logger
from aio_pika.tools import create_task

log = get_logger(__name__)


class PoolInstance(abc.ABC):
    """Interface every pooled object must implement: an awaitable close()."""

    @abc.abstractmethod
    def close(self) -> Awaitable[None]:
        raise NotImplementedError


T = TypeVar("T")
# A constructor may be a plain callable, a coroutine function, or anything
# returning an awaitable; ``awaitable()`` below normalizes all of them.
ConstructorType = Callable[
    ...,
    Union[
        Awaitable[PoolInstance],
        PoolInstance,
        Coroutine[Any, Any, PoolInstance],
    ],
]


class PoolInvalidStateError(RuntimeError):
    """Raised when a pool operation is attempted after close()."""

    pass


class Pool(Generic[T]):
    """Async object pool: lazily creates up to ``max_size`` instances via
    ``constructor(*args)`` and recycles released ones through a queue."""

    __slots__ = (
        "loop",
        "__max_size",
        "__items",
        "__constructor",
        "__created",
        "__lock",
        "__constructor_args",
        "__item_set",
        "__closed",
    )

    def __init__(
        self,
        constructor: ConstructorType,
        *args: Any,
        max_size: Optional[int] = None,
        loop: Optional[asyncio.AbstractEventLoop] = None,
    ):
        self.loop = loop or asyncio.get_event_loop()
        self.__closed = False
        # awaitable() wraps sync constructors so they can always be awaited.
        self.__constructor: Callable[..., Awaitable[Any]] = awaitable(
            constructor,
        )
        self.__constructor_args: Tuple[Any, ...] = args or ()
        # Total number of instances ever created (never decremented).
        self.__created: int = 0
        # All created instances, kept so close() can reach every one of
        # them even while some are checked out.
        self.__item_set: Set[PoolInstance] = set()
        # Queue of currently-released (idle) instances.
        self.__items: asyncio.Queue = asyncio.Queue()
        self.__lock: asyncio.Lock = asyncio.Lock()
        self.__max_size: Optional[int] = max_size

    @property
    def is_closed(self) -> bool:
        return self.__closed

    def acquire(self) -> "PoolItemContextManager[T]":
        """Return an async context manager yielding a pooled instance."""
        if self.__closed:
            raise PoolInvalidStateError("acquire operation on closed pool")
        return PoolItemContextManager[T](self)

    @property
    def _has_released(self) -> bool:
        # True when at least one idle instance is waiting in the queue.
        return self.__items.qsize() > 0

    @property
    def _is_overflow(self) -> bool:
        # "Overflow" means we must reuse an existing instance rather than
        # create a new one: either the size cap is reached or an idle
        # instance is already available.
        if self.__max_size:
            return self.__created >= self.__max_size or self._has_released
        return self._has_released

    async def _create_item(self) -> T:
        if self.__closed:
            raise PoolInvalidStateError("create item operation on closed pool")
        # The lock serializes creation so concurrent acquirers cannot
        # overshoot max_size; the overflow state is re-checked under it.
        async with self.__lock:
            if self._is_overflow:
                return await self.__items.get()
            log.debug("Creating a new instance of %r", self.__constructor)
            item = await self.__constructor(*self.__constructor_args)
            self.__created += 1
            self.__item_set.add(item)
            return item

    async def _get(self) -> T:
        if self.__closed:
            raise PoolInvalidStateError("get operation on closed pool")
        if self._is_overflow:
            # Reuse an idle instance; may wait until one is released.
            return await self.__items.get()
        return await self._create_item()

    def put(self, item: T) -> None:
        """Return ``item`` to the idle queue so another acquirer can use it."""
        if self.__closed:
            raise PoolInvalidStateError("put operation on closed pool")
        self.__items.put_nowait(item)

    async def close(self) -> None:
        """Mark the pool closed and close every created instance."""
        async with self.__lock:
            self.__closed = True
            tasks = []
            for item in self.__item_set:
                tasks.append(create_task(item.close))
            if tasks:
                # return_exceptions=True: one failing close() must not
                # prevent the others from running.
                await asyncio.gather(*tasks, return_exceptions=True)

    async def __aenter__(self) -> "Pool":
        return self

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        if self.__closed:
            return
        await asyncio.ensure_future(self.close())


class PoolItemContextManager(Generic[T], AsyncContextManager):
    """``async with pool.acquire() as item`` helper: checks an item out on
    enter and returns it to the pool on exit."""

    __slots__ = "pool", "item"

    def __init__(self, pool: Pool):
        self.pool = pool
        # Annotation only -- the attribute is first assigned in __aenter__.
        self.item: T

    async def __aenter__(self) -> T:
        # noinspection PyProtectedMember
        self.item = await self.pool._get()
        return self.item

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        # The item is returned even when the body raised, so the pool
        # never leaks checked-out instances.
        if self.item is not None:
            self.pool.put(self.item)
{ "content_hash": "49d1d9dc34bd0481cd81de00c69ef667", "timestamp": "", "source": "github", "line_count": 165, "max_line_length": 80, "avg_line_length": 26.375757575757575, "alnum_prop": 0.5737591911764706, "repo_name": "mosquito/aio-pika", "id": "93fc4080097c853166cc09c4348102dc74004a99", "size": "4352", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "aio_pika/pool.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Makefile", "bytes": "491" }, { "name": "Python", "bytes": "257510" } ], "symlink_target": "" }
from django.core.management.base import BaseCommand

from huntserver.utils import update_time_items


class Command(BaseCommand):
    """Management command running all time-related updates for huntserver.

    NOTE: Django's management-command loader only discovers a class
    literally named ``Command`` inside a ``management/commands`` module,
    so the class was renamed from ``RunUpdates`` (which ``manage.py
    runupdates`` could not find) to ``Command``.
    """

    help = 'Runs all time related updates for the huntserver app'

    def handle(self, *args, **options):
        """Entry point invoked by ``manage.py runupdates``."""
        update_time_items()


# Backward-compatible alias for any code that imported the old class name.
RunUpdates = Command
{ "content_hash": "7c072ec69718990ac6a9654cfa30d5da", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 65, "avg_line_length": 29.666666666666668, "alnum_prop": 0.7415730337078652, "repo_name": "dlareau/puzzlehunt_server", "id": "8a8f3bc0b8648f6599b0f3f876b158891ab5ef13", "size": "267", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "huntserver/management/commands/runupdates.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "6291" }, { "name": "Dockerfile", "bytes": "391" }, { "name": "HTML", "bytes": "89184" }, { "name": "JavaScript", "bytes": "33484" }, { "name": "Python", "bytes": "292406" }, { "name": "Shell", "bytes": "920" } ], "symlink_target": "" }
import mock

from rally.plugins.openstack.context.sahara import sahara_edp
from tests.unit import test

# Module path prefix used to build mock.patch targets below.
CTX = "rally.plugins.openstack.context.sahara"


class SaharaEDPTestCase(test.TestCase):
    """Tests for the SaharaEDP benchmark context (setup/cleanup)."""

    def setUp(self):
        super(SaharaEDPTestCase, self).setUp()
        # Fixture sizes: 2 tenants x 2 users each.
        self.tenants_num = 2
        self.users_per_tenant = 2
        self.users = self.tenants_num * self.users_per_tenant
        self.task = mock.MagicMock()

        self.tenants = {}
        self.users_key = []

        # Build one tenant record (with a fake sahara image id "42") and
        # one user record per tenant/user combination.
        for i in range(self.tenants_num):
            self.tenants[str(i)] = {"id": str(i), "name": str(i),
                                    "sahara_image": "42"}
            for j in range(self.users_per_tenant):
                self.users_key.append({"id": "%s_%s" % (str(i), str(j)),
                                       "tenant_id": str(i),
                                       "endpoint": "endpoint"})

        # NOTE(review): ``user_key`` swaps i/j roles compared to
        # ``users_key`` above and appears unused in this test -- presumably
        # legacy fixture data; confirm before removing.
        self.user_key = [{"id": i, "tenant_id": j, "endpoint": "endpoint"}
                         for j in range(self.tenants_num)
                         for i in range(self.users_per_tenant)]

    @property
    def context_without_edp_keys(self):
        """A benchmark context dict carrying only the sahara_edp config."""
        context = test.get_test_context()
        context.update({
            "config": {
                "users": {
                    "tenants": self.tenants_num,
                    "users_per_tenant": self.users_per_tenant,
                },
                "sahara_edp": {
                    "input_type": "hdfs",
                    "output_type": "hdfs",
                    "input_url": "hdfs://test_host/",
                    "output_url_prefix": "hdfs://test_host/out_",
                    "libs": [
                        {
                            "name": "test.jar",
                            "download_url": "http://example.com/test.jar"
                        }
                    ]
                },
            },
            "admin": {"endpoint": mock.MagicMock()},
            "users": self.users_key,
            "tenants": self.tenants
        })
        return context

    @mock.patch("%s.sahara_edp.resource_manager.cleanup" % CTX)
    @mock.patch("%s.sahara_edp.requests" % CTX)
    @mock.patch("%s.sahara_edp.osclients" % CTX)
    def test_setup_and_cleanup(self, mock_osclients, mock_requests,
                               mock_cleanup):
        # Stub the sahara client so created resources report fixed id 42.
        mock_sahara = mock_osclients.Clients(mock.MagicMock()).sahara()
        mock_sahara.data_sources.create.return_value = mock.MagicMock(id=42)
        mock_sahara.job_binary_internals.create.return_value = (
            mock.MagicMock(id=42))
        mock_requests.get().content = "test_binary"

        ctx = self.context_without_edp_keys
        sahara_ctx = sahara_edp.SaharaEDP(ctx)

        # Expected per-tenant calls: one data source, one lib download, one
        # job binary internal, one job binary.
        input_ds_crete_calls = []
        download_calls = []
        job_binary_internals_calls = []
        job_binaries_calls = []

        for i in range(self.tenants_num):
            input_ds_crete_calls.append(mock.call(
                name="input_ds", description="", data_source_type="hdfs",
                url="hdfs://test_host/"))
            download_calls.append(mock.call("http://example.com/test.jar"))
            job_binary_internals_calls.append(mock.call(
                name="test.jar", data="test_binary"))
            job_binaries_calls.append(mock.call(
                name="test.jar", url="internal-db://42", description="",
                extra={}))

        sahara_ctx.setup()

        # setup() must create the resources exactly as configured above.
        mock_sahara.data_sources.create.assert_has_calls(input_ds_crete_calls)
        mock_requests.get.assert_has_calls(download_calls)
        mock_sahara.job_binary_internals.create.assert_has_calls(
            job_binary_internals_calls)
        mock_sahara.job_binaries.create.assert_has_calls(job_binaries_calls)

        sahara_ctx.cleanup()

        # cleanup() must delegate to the generic resource cleaner with the
        # full list of EDP-related resource names.
        mock_cleanup.assert_called_once_with(
            names=["sahara.job_executions", "sahara.jobs",
                   "sahara.job_binary_internals", "sahara.job_binaries",
                   "sahara.data_sources"],
            users=ctx["users"])
{ "content_hash": "4c9f22bd7707a8bea4d45d47b6698b71", "timestamp": "", "source": "github", "line_count": 111, "max_line_length": 78, "avg_line_length": 36.96396396396396, "alnum_prop": 0.5093833780160858, "repo_name": "aplanas/rally", "id": "ffedac9243927db0844e2003149e1322d9fc8214", "size": "4669", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "tests/unit/plugins/openstack/context/sahara/test_sahara_edp.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Mako", "bytes": "48167" }, { "name": "Python", "bytes": "2620059" }, { "name": "Shell", "bytes": "43889" } ], "symlink_target": "" }
# ``MODULE`` is injected into this file's namespace by the uic plugin
# loader before execution.
pluginType = MODULE


def moduleInformation():
    # The uic code generator expects a (module, widget_list) pair: for a
    # dotted module "A[.B].C" it emits "from A[.B] import C", otherwise
    # "import A".  Every entry in widget_list must be unique.
    module_name = "PyQt4.Qsci"
    widget_names = ("QsciScintilla", )
    return module_name, widget_names
{ "content_hash": "3ff368223dffe4cc3b5d01e902d41b2d", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 79, "avg_line_length": 43.44444444444444, "alnum_prop": 0.6956521739130435, "repo_name": "Universal-Model-Converter/UMC3.0a", "id": "3f57df2813330ffc5f6a1540c3b064da6b6ec191", "size": "1843", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "data/Python/x86/Lib/site-packages/PyQt4/uic/widget-plugins/qscintilla.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "226" }, { "name": "C", "bytes": "1082640" }, { "name": "C#", "bytes": "8440" }, { "name": "C++", "bytes": "3621086" }, { "name": "CSS", "bytes": "6226" }, { "name": "F#", "bytes": "2310" }, { "name": "FORTRAN", "bytes": "7795" }, { "name": "Forth", "bytes": "506" }, { "name": "GLSL", "bytes": "1040" }, { "name": "Groff", "bytes": "5943" }, { "name": "HTML", "bytes": "1196266" }, { "name": "Java", "bytes": "5793" }, { "name": "Makefile", "bytes": "1109" }, { "name": "Mask", "bytes": "969" }, { "name": "Matlab", "bytes": "4346" }, { "name": "Python", "bytes": "33351557" }, { "name": "R", "bytes": "1370" }, { "name": "Shell", "bytes": "6931" }, { "name": "Tcl", "bytes": "2084458" }, { "name": "Visual Basic", "bytes": "481" } ], "symlink_target": "" }
from request_engine import RequestEngine
from Constants import *
import time, subprocess, signal
from sys import argv, exit
from argparse import ArgumentParser


class EtcdResolver:
    """Keeps a hosts file in sync with container names registered in etcd.

    NOTE: this module uses Python 2 syntax (``except Exception, err``,
    ``print`` statement, ``iteritems``) and must run under Python 2.
    """

    def __init__(self, hostname, host_address, etcd_address, etcd_port,
                 etcd_directory, hosts_file, ttl):
        """
        Initialize the service for naming the containers (hosts) in the cluster.
        """
        self.etcd_address = etcd_address
        self.host_address = host_address
        self.request_engine = RequestEngine(etcd_address, etcd_port,
                                            etcd_directory, hostname)
        self.hostname = hostname
        self.hosts = {}
        # Snapshot the original hosts file so it can be restored on exit.
        f = open(hosts_file,'r')
        self.default_hosts = f.read()
        f.close()
        self.hosts_file = hosts_file
        self.ttl = ttl
        self.last_update = 0
        # Restore the hosts file gracefully when the process is terminated.
        signal.signal(signal.SIGTERM, self.exception_handler)

    def run(self):
        """
        Run to resolve names indefinitely
        """
        try:
            while True:
                # Refresh before the etcd entry's TTL expires (at 75% of it).
                if (time.time() - self.last_update) > (0.75* self.ttl):
                    self.update_etcd_server()
                    self.update_local_names()
                    self.last_update = time.time()
                time.sleep(0.75*self.ttl)
        except Exception, err:
            print err
            raise
        finally:
            # write back the default configuration into the file.
            self.exception_handler()

    def update_local_names(self):
        """
        Fetch name:address from etcd_address
        """
        self.hosts = self.request_engine.get_hosts_from_dir('/')
        # print self.hosts
        # Rewrite the hosts file: original content first, then a separator,
        # then one "<ip> <host>" line per registered container.
        to_write = '%s\n\n#**********************************\n\n' % self.default_hosts
        for host,ip in self.hosts.iteritems():
            to_write = to_write + ip + ' ' + host + '\n'
        f = open(self.hosts_file,'w')
        f.write(to_write)
        f.close()

    def update_etcd_server(self):
        """
        Update the entry for the hostname to machine_address resolution.
        host_address corresponds to the address of address on which the
        container is hosted.
        """
        # The TTL makes the entry expire automatically if we stop renewing.
        return self.request_engine.set(self.hostname, self.host_address,self.ttl)

    def exception_handler(self,signal=signal.SIGTERM, frame=None):
        """
        To handle the exceptions. If the program is closed it should remove the
        entries gracefully from hosts file, set by itself.
        """
        # NOTE(review): the ``signal`` parameter shadows the module of the
        # same name; harmless here since the module is not used in the body.
        f = open(self.hosts_file,'w')
        f.write(self.default_hosts)
        f.close()
        exit(0)


if __name__ == '__main__':
    # Parse the input arguments for setting the variables.
    parser = ArgumentParser('main.py')
    parser.add_argument('etcd_address', action='store',
                        help='IP address of the etcd server')
    parser.add_argument('host_address', action='store',
                        help='IP address of the host machine on which the container is going to run.'+
                        ' This is different from container local IP.')
    parser.add_argument('--etcd_port', '-p', type=int, default=ETCD_PORT, action='store',
                        help='Port on which etcd server is listening. Default: 4001')
    parser.add_argument('--etcd_directory', '-d', default=ETCD_KEYS_DIRECTORY, action='store',
                        help='Directory in etcd to store information. Default: etcd_spark')
    parser.add_argument('--hosts_file', '-f', default=HOSTS_FILE, action='store',
                        help='Which file to add names of servers registered on etcd service')
    parser.add_argument('--ttl', '-t', type=int, default=TTL, action='store',
                        help='Time-to-live for the entries inside etcd. In case of failure,'+
                        'the entry will expire after ttl. Default: ttl=60 (seconds)')
    args = parser.parse_args(argv[1:])

    # The container's hostname comes from the ``hostname`` binary.
    request = 'hostname'
    proc = subprocess.Popen([request], stdout=subprocess.PIPE)
    (out, err) = proc.communicate()
    hostname = out.strip()

    resolver = EtcdResolver(hostname, args.host_address, args.etcd_address,
                            args.etcd_port, args.etcd_directory,
                            args.hosts_file, args.ttl)
    resolver.run()
{ "content_hash": "b0fd88f886bf18cd748c44e85d00827b", "timestamp": "", "source": "github", "line_count": 103, "max_line_length": 102, "avg_line_length": 34.601941747572816, "alnum_prop": 0.6933221099887766, "repo_name": "LuqmanSahaf/etcd-spark", "id": "8d14adf5ff3604fd7f66e1cf1c4122f4976896d8", "size": "3564", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "spark-1.1.0/spark-base/files/etcd-service/main.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "17948" }, { "name": "Shell", "bytes": "33555" } ], "symlink_target": "" }
""" WSGI config for django_data_product project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_data_product.settings") application = get_wsgi_application()
{ "content_hash": "5892e967c078fb99a577d41e766176e6", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 79, "avg_line_length": 25.9375, "alnum_prop": 0.7734939759036145, "repo_name": "DistrictDataLabs/django-data-product", "id": "0b8d19a7be75df10b0f1c588e239925a726ab5d3", "size": "415", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "django_data_product/wsgi.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "4110" }, { "name": "Python", "bytes": "13363" } ], "symlink_target": "" }
from catalan import catalan


def _print_catalan_table():
    # Print the first ten Catalan numbers, one per line.
    for index in range(10):
        line = "catalan({}) == {}".format(index, catalan(index))
        print(line)


if __name__ == "__main__":
    _print_catalan_table()
{ "content_hash": "1b681a88d6a44b42d2f1273a220d4f56", "timestamp": "", "source": "github", "line_count": 6, "max_line_length": 50, "avg_line_length": 21.333333333333332, "alnum_prop": 0.578125, "repo_name": "Cnidarias/al-go-rithms", "id": "840c4d24d2439884fd125eb6c7dc99fa4888e9f6", "size": "148", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "math/catalan/python/test.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "131026" }, { "name": "C#", "bytes": "9816" }, { "name": "C++", "bytes": "349031" }, { "name": "Clojure", "bytes": "2606" }, { "name": "Common Lisp", "bytes": "2731" }, { "name": "Crystal", "bytes": "2280" }, { "name": "Erlang", "bytes": "1403" }, { "name": "Go", "bytes": "44823" }, { "name": "Haskell", "bytes": "5274" }, { "name": "Java", "bytes": "183498" }, { "name": "JavaScript", "bytes": "48867" }, { "name": "Julia", "bytes": "2721" }, { "name": "Lua", "bytes": "508" }, { "name": "Matlab", "bytes": "615" }, { "name": "Objective-C", "bytes": "7466" }, { "name": "PHP", "bytes": "18640" }, { "name": "Perl 6", "bytes": "8008" }, { "name": "Prolog", "bytes": "3299" }, { "name": "Python", "bytes": "162035" }, { "name": "QMake", "bytes": "199" }, { "name": "Ruby", "bytes": "13716" }, { "name": "Rust", "bytes": "7732" }, { "name": "Scala", "bytes": "3879" }, { "name": "Shell", "bytes": "1194" }, { "name": "Swift", "bytes": "8272" } ], "symlink_target": "" }
from rest_framework.decorators import detail_route
from rest_framework.response import Response

from common import permissions
from .models import System, Station, Commodity, StationCommodity
from .serializers import CommoditySerializer, StationSerializer, \
    SystemSerializer, MinimizedSystemSerializer, StationCommoditySerializer

import django_filters

from common.views import WrappedModelViewSet, wrap_response

# Create your views here.


class SystemViewSet(WrappedModelViewSet):
    """CRUD viewset for ``System`` plus extra detail routes."""

    permission_classes = (permissions.IsAdminOrReadOnly,)
    queryset = System.objects.all()
    serializer_class = SystemSerializer
    search_fields = ('name',)
    template_name = 'frontend/system/instance.html'
    list_template_name = 'frontend/system/list.html'

    @detail_route()
    def stations(self, request, *args, **kwargs):
        """
        A route to display only the stations this System contains.
        :param request:
        :param pk:
        :return:
        """
        system = self.get_object()
        stations = Station.objects.filter(system=system)
        serializer = StationSerializer(stations, context={'request': request},
                                       many=True)
        return wrap_response(
            Response({'results': serializer.data},
                     template_name='frontend/system/list_station.html'))

    @detail_route()
    def min(self, request, *args, **kwargs):
        """
        A route to display the minimized System view.
        :param request:
        :param pk:
        :return:
        """
        serializer = MinimizedSystemSerializer(self.get_object(),
                                               context={'request': request})
        data = serializer.data
        # Flag consumed by the template to render the compact layout.
        data['min'] = True
        return wrap_response(Response(data))


class StationViewSet(WrappedModelViewSet):
    """CRUD viewset for ``Station`` with a distance filter."""

    class StationFilter(django_filters.FilterSet):
        # "lt" lookup: ?distance_to_star=N matches stations closer than N.
        distance_to_star = django_filters.NumberFilter(lookup_type='lt')

        class Meta:
            model = Station
            fields = ('distance_to_star',)

    permission_classes = (permissions.IsAdminOrReadOnly,)
    queryset = Station.objects.all()
    serializer_class = StationSerializer
    filter_class = StationFilter
    search_fields = ('name', )
    template_name = 'frontend/station/instance.html'
    list_template_name = 'frontend/station/list.html'


class CommodityViewSet(WrappedModelViewSet):
    """CRUD viewset for ``Commodity`` with price/name filters."""

    class CommodityFilter(django_filters.FilterSet):
        average_price = django_filters.NumberFilter(lookup_type='lt')
        # Case-insensitive substring match on the commodity name.
        name = django_filters.CharFilter(lookup_type='icontains')

        class Meta:
            model = Commodity
            fields = ('average_price', 'name')

    permission_classes = (permissions.IsAdminOrReadOnly,)
    queryset = Commodity.objects.all()
    serializer_class = CommoditySerializer
    filter_class = CommodityFilter
    search_fields = ('name',)
    template_name = 'frontend/commodity/instance.html'
    list_template_name = 'frontend/commodity/list.html'


class StationCommodityViewSet(WrappedModelViewSet):
    """CRUD viewset for ``StationCommodity`` (per-station market entries)."""

    class StationCommodityFilter(django_filters.FilterSet):
        class Meta:
            model = StationCommodity
            # Range lookups (lt/gt) for prices and volumes; exact matches
            # for the related objects and supply/demand levels.
            fields = {
                'station': ['exact'],
                'commodity': ['exact'],
                'supply_level': ['exact'],
                'demand_level': ['exact'],
                'buy_price': ['lt', 'gt'],
                'sell_price': ['lt', 'gt'],
                'supply': ['lt', 'gt'],
                'demand': ['lt', 'gt'],
            }

    permission_classes = (permissions.IsAdminOrReadOnly,)
    queryset = StationCommodity.objects.all()
    serializer_class = StationCommoditySerializer
    template_name = 'frontend/station_commodity/instance.html'
    list_template_name = 'frontend/station_commodity/list.html'
    filter_class = StationCommodityFilter
    search_fields = ('commodity__name', 'station__name',
                     'commodity__category_name')
{ "content_hash": "957448bff14bf279c501510cdb8f0c69", "timestamp": "", "source": "github", "line_count": 110, "max_line_length": 119, "avg_line_length": 35.07272727272727, "alnum_prop": 0.65966822187662, "repo_name": "Jingyu-Yao/elitetraderoutes", "id": "ed387cc17ae8d37a94820dd1e96580bda679eab8", "size": "3858", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "elitedata/views.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "6188" }, { "name": "HTML", "bytes": "49314" }, { "name": "JavaScript", "bytes": "18318" }, { "name": "Python", "bytes": "58838" }, { "name": "Shell", "bytes": "1241" } ], "symlink_target": "" }
import unittest

from katas.beta.ninety_degrees_rotation import rotate


class RotateTestCase(unittest.TestCase):
    """Checks that rotate() turns a 2x2 grid 90 degrees clockwise."""

    def test_equals(self):
        grid = [[1, 2], [3, 4]]
        expected = [[3, 1], [4, 2]]
        self.assertEqual(rotate(grid), expected)

    def test_equals_2(self):
        grid = [[3, 1], [4, 2]]
        expected = [[4, 3], [2, 1]]
        self.assertEqual(rotate(grid), expected)
{ "content_hash": "a91da611ee7986eb78214ad775f5b868", "timestamp": "", "source": "github", "line_count": 11, "max_line_length": 68, "avg_line_length": 28.09090909090909, "alnum_prop": 0.6084142394822006, "repo_name": "the-zebulan/CodeWars", "id": "df3fa452e6f3d1db23fdd99c8004579863cb416e", "size": "309", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/beta_tests/test_ninety_degrees_rotation.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "1203000" } ], "symlink_target": "" }
import json import os import tempfile from django.conf import settings from django.core.files.storage import default_storage as storage from django.core.urlresolvers import reverse from django.utils.encoding import smart_unicode import mock from jinja2.utils import escape from nose import SkipTest from nose.tools import eq_, ok_ from PIL import Image from pyquery import PyQuery as pq from tower import strip_whitespace import amo import amo.tests import mkt from amo.tests import assert_required, formset, initial from amo.tests.test_helpers import get_image_path from lib.video.tests import files as video_files from mkt.access.models import Group, GroupUser from mkt.comm.models import CommunicationNote from mkt.constants import regions from mkt.developers.models import ActivityLog from mkt.reviewers.models import RereviewQueue from mkt.site.fixtures import fixture from mkt.site.helpers import absolutify from mkt.translations.models import Translation from mkt.users.models import UserProfile from mkt.versions.models import Version from mkt.webapps.models import Addon, AddonDeviceType, AddonUser from mkt.webapps.models import AddonExcludedRegion as AER response_mock = mock.Mock() response_mock.read.return_value = ''' { "name": "Something Ballin!", "description": "Goin' hard in the paint.", "launch_path": "/ballin/4.eva", "developer": { "name": "Pro Balliner", "url": "http://www.ballin4eva.xxx" }, "icons": { "128": "/ballin/icon.png" }, "installs_allowed_from": [ "https://marketplace.firefox.com" ] } ''' response_mock.headers = {'Content-Type': 'application/x-web-app-manifest+json'} def get_section_url(addon, section, edit=False): args = [addon.app_slug, section] if edit: args.append('edit') return reverse('mkt.developers.apps.section', args=args) class TestEdit(amo.tests.TestCase): fixtures = fixture('group_admin', 'user_999', 'user_admin', 'user_admin_group', 'webapp_337141') def setUp(self): self.webapp = self.get_webapp() self.url = self.webapp.get_dev_url() 
self.user = UserProfile.objects.get(username='31337') assert self.client.login(username=self.user.email, password='password') def get_webapp(self): return Addon.objects.no_cache().get(id=337141) def get_url(self, section, edit=False): return get_section_url(self.webapp, section, edit) def get_dict(self, **kw): fs = formset(self.cat_initial, initial_count=1) result = {'name': 'new name', 'slug': 'test_slug', 'description': 'new description'} result.update(**kw) result.update(fs) return result def compare(self, data): """Compare an app against a `dict` of expected values.""" mapping = { 'regions': 'get_region_ids' } webapp = self.get_webapp() for k, v in data.iteritems(): k = mapping.get(k, k) val = getattr(webapp, k, '') if callable(val): val = val() if val is None: val = '' eq_(unicode(val), unicode(v)) def compare_features(self, data, version=None): """ Compare an app's set of required features against a `dict` of expected values. """ if not version: version = self.get_webapp().current_version features = version.features for k, v in data.iteritems(): val = getattr(features, k) if callable(val): val = val() eq_(unicode(val), unicode(v)) def check_form_url(self, section): # Check form destinations and "Edit" button. r = self.client.get(self.url) eq_(r.status_code, 200) doc = pq(r.content) eq_(doc('form').attr('action'), self.edit_url) eq_(doc('h2 .button').attr('data-editurl'), self.edit_url) # Check "Cancel" button. r = self.client.get(self.edit_url) eq_(pq(r.content)('form .addon-edit-cancel').attr('href'), self.url) class TestEditListingWebapp(TestEdit): fixtures = fixture('webapp_337141') def test_redirect(self): r = self.client.get(self.url.replace('edit', '')) self.assert3xx(r, self.url) def test_nav_links(self): r = self.client.get(self.url) doc = pq(r.content)('.edit-addon-nav') eq_(doc.length, 2) eq_(doc('.view-stats').length, 0) def test_edit_with_no_current_version(self): # Disable file for latest version, and then update app.current_version. 
app = self.get_webapp() app.versions.latest().all_files[0].update(status=amo.STATUS_DISABLED) app.update_version() # Now try to display edit page. r = self.client.get(self.url) eq_(r.status_code, 200) def test_edit_global_xss_name(self): self.webapp.name = u'My app é <script>alert(5)</script>' self.webapp.save() content = smart_unicode(self.client.get(self.url).content) ok_(not unicode(self.webapp.name) in content) ok_(unicode(escape(self.webapp.name)) in content) @mock.patch.object(settings, 'TASK_USER_ID', 999) class TestEditBasic(TestEdit): fixtures = TestEdit.fixtures def setUp(self): super(TestEditBasic, self).setUp() self.cat = 'games' self.dtype = amo.DEVICE_TYPES.keys()[0] self.webapp.update(categories=['games']) AddonDeviceType.objects.create(addon=self.webapp, device_type=self.dtype) self.url = self.get_url('basic') self.edit_url = self.get_url('basic', edit=True) def get_webapp(self): return Addon.objects.get(id=337141) def get_dict(self, **kw): result = {'device_types': self.dtype, 'slug': 'NeW_SluG', 'description': 'New description with <em>html</em>!', 'manifest_url': self.webapp.manifest_url, 'categories': [self.cat]} result.update(**kw) return result def test_form_url(self): self.check_form_url('basic') def test_appslug_visible(self): r = self.client.get(self.url) eq_(r.status_code, 200) eq_(pq(r.content)('#slug_edit').remove('a, em').text(), absolutify(u'/\u2026/%s' % self.webapp.app_slug)) def test_edit_slug_success(self): data = self.get_dict() r = self.client.post(self.edit_url, data) self.assertNoFormErrors(r) eq_(r.status_code, 200) webapp = self.get_webapp() eq_(webapp.app_slug, data['slug'].lower()) # Make sure only the app_slug changed. 
eq_(webapp.slug, self.webapp.slug) def test_edit_slug_max_length(self): r = self.client.post(self.edit_url, self.get_dict(slug='x' * 31)) self.assertFormError(r, 'form', 'slug', 'Ensure this value has at most 30 characters (it has 31).') def test_edit_slug_dupe(self): Addon.objects.create(type=amo.ADDON_WEBAPP, app_slug='dupe') r = self.client.post(self.edit_url, self.get_dict(slug='dupe')) self.assertFormError(r, 'form', 'slug', 'This slug is already in use. Please choose another.') webapp = self.get_webapp() # Nothing changed. eq_(webapp.slug, self.webapp.slug) eq_(webapp.app_slug, self.webapp.app_slug) def test_edit_xss_description(self): self.webapp.description = ("This\n<b>IS</b>" "<script>alert('awesome')</script>") self.webapp.save() r = self.client.get(self.url) eq_(pq(r.content)('#addon-description span[lang]').html(), "This<br/><b>IS</b>&lt;script&gt;alert('awesome')" '&lt;/script&gt;') def test_edit_xss_name(self): self.webapp.name = u'My app é <script>alert(5)</script>' self.webapp.save() content = smart_unicode(self.client.get(self.url).content) ok_(not unicode(self.webapp.name) in content) ok_(unicode(escape(self.webapp.name)) in content) def test_view_edit_manifest_url_empty(self): # Empty manifest should throw an error. r = self.client.post(self.edit_url, self.get_dict(manifest_url='')) form = r.context['form'] assert 'manifest_url' in form.errors assert 'This field is required' in form.errors['manifest_url'][0] @mock.patch('mkt.developers.forms.update_manifests') def test_view_edit_manifest_url(self, fetch): assert not self.webapp.in_rereview_queue(), ( 'App should not be in re-review queue') # Should be able to see manifest URL listed. r = self.client.get(self.url) eq_(pq(r.content)('#manifest-url a').attr('href'), self.webapp.manifest_url) # Devs/admins can edit the manifest URL and should see a text field. 
r = self.client.get(self.edit_url) row = pq(r.content)('#manifest-url') eq_(row.find('input[name=manifest_url]').length, 1) eq_(row.find('input[name=manifest_url][readonly]').length, 0) # POST with the new manifest URL. url = 'https://ballin.com/ballin4eva.webapp' r = self.client.post(self.edit_url, self.get_dict(manifest_url=url)) self.assertNoFormErrors(r) self.webapp = self.get_webapp() eq_(self.webapp.manifest_url, url) eq_(self.webapp.app_domain, 'https://ballin.com') eq_(self.webapp.current_version.version, '1.0') eq_(self.webapp.versions.count(), 1) assert self.webapp.in_rereview_queue(), ( 'App should be in re-review queue') # Ensure that we're refreshing the manifest. fetch.delay.assert_called_once_with([self.webapp.pk]) @mock.patch('mkt.developers.forms.update_manifests') def test_view_manifest_changed_dupe_app_domain(self, fetch): self.create_switch('webapps-unique-by-domain') amo.tests.app_factory(name='Super Duper', app_domain='https://ballin.com') self.login('admin') # POST with new manifest URL. url = 'https://ballin.com/ballin4eva.webapp' r = self.client.post(self.edit_url, self.get_dict(manifest_url=url)) form = r.context['form'] assert 'manifest_url' in form.errors assert 'one app per domain' in form.errors['manifest_url'][0] eq_(self.get_webapp().manifest_url, self.webapp.manifest_url, 'Manifest URL should not have been changed!') assert not fetch.delay.called, ( 'Manifest should not have been refreshed!') @mock.patch('mkt.developers.forms.update_manifests') def test_view_manifest_changed_same_domain_diff_path(self, fetch): self.create_switch('webapps-unique-by-domain') self.login('admin') # POST with new manifest URL for same domain but w/ different path. 
data = self.get_dict(manifest_url=self.webapp.manifest_url + 'xxx') r = self.client.post(self.edit_url, data) self.assertNoFormErrors(r) eq_(self.get_webapp().manifest_url, self.webapp.manifest_url + 'xxx', 'Manifest URL should have changed!') assert not self.webapp.in_rereview_queue(), ( 'App should be in re-review queue because an admin changed it') # Ensure that we're refreshing the manifest. fetch.delay.assert_called_once_with([self.webapp.pk]) def test_view_manifest_url_changed(self): new_url = 'http://omg.org/yes' self.webapp.manifest_url = new_url self.webapp.save() # If we change the `manifest_url` manually, the URL here should change. r = self.client.get(self.url) eq_(pq(r.content)('#manifest-url a').attr('href'), new_url) def test_categories_listed(self): r = self.client.get(self.url) eq_(pq(r.content)('#addon-categories-edit').text(), unicode('Games')) r = self.client.post(self.url) eq_(pq(r.content)('#addon-categories-edit').text(), unicode('Games')) def test_edit_categories_add(self): new = 'books' cats = [self.cat, new] self.client.post(self.edit_url, self.get_dict(categories=cats)) eq_(sorted(self.get_webapp().categories), sorted(cats)) def test_edit_categories_addandremove(self): new = 'books' cats = [new] self.client.post(self.edit_url, self.get_dict(categories=cats)) eq_(sorted(self.get_webapp().categories), sorted(cats)) @mock.patch('mkt.webapps.models.Webapp.save') def test_edit_categories_required(self, save): r = self.client.post(self.edit_url, self.get_dict(categories=[])) assert_required(r.context['cat_form'].errors['categories'][0]) assert not save.called def test_edit_categories_xss(self): new = '<script>alert("xss");</script>' cats = [self.cat, new] r = self.client.post(self.edit_url, self.get_dict(categories=cats)) assert '<script>alert' not in r.content assert '&lt;script&gt;alert' in r.content def test_edit_categories_nonexistent(self): r = self.client.post(self.edit_url, self.get_dict(categories=[100])) 
eq_(r.context['cat_form'].errors['categories'], ['Select a valid choice. 100 is not one of the available ' 'choices.']) def test_edit_categories_max(self): cats = [self.cat, 'books', 'social'] r = self.client.post(self.edit_url, self.get_dict(categories=cats)) eq_(r.context['cat_form'].errors['categories'], ['You can have only 2 categories.']) def test_edit_check_description(self): # Make sure bug 629779 doesn't return. r = self.client.post(self.edit_url, self.get_dict()) eq_(r.status_code, 200) eq_(self.get_webapp().description, self.get_dict()['description']) def test_edit_slug_valid(self): old_edit = self.edit_url data = self.get_dict(slug='valid') r = self.client.post(self.edit_url, data) doc = pq(r.content) assert doc('form').attr('action') != old_edit def test_edit_as_developer(self): self.client.login(username='regular@mozilla.com', password='password') data = self.get_dict() r = self.client.post(self.edit_url, data) # Make sure we get errors when they are just regular users. eq_(r.status_code, 403) AddonUser.objects.create(addon=self.webapp, user_id=999, role=amo.AUTHOR_ROLE_DEV) r = self.client.post(self.edit_url, data) eq_(r.status_code, 200) webapp = self.get_webapp() eq_(unicode(webapp.app_slug), data['slug'].lower()) eq_(unicode(webapp.description), data['description']) def test_l10n(self): self.webapp.update(default_locale='en-US') url = self.webapp.get_dev_url('edit') r = self.client.get(url) eq_(pq(r.content)('#l10n-menu').attr('data-default'), 'en-us', 'l10n menu not visible for %s' % url) def test_l10n_not_us(self): self.webapp.update(default_locale='fr') url = self.webapp.get_dev_url('edit') r = self.client.get(url) eq_(pq(r.content)('#l10n-menu').attr('data-default'), 'fr', 'l10n menu not visible for %s' % url) def test_edit_l10n(self): data = { 'slug': self.webapp.app_slug, 'manifest_url': self.webapp.manifest_url, 'categories': [self.cat], 'description_en-us': u'Nêw english description', 'description_fr': u'Nëw french description', 
'releasenotes_en-us': u'Nëw english release notes', 'releasenotes_fr': u'Nêw french release notes' } res = self.client.post(self.edit_url, data) eq_(res.status_code, 200) self.webapp = self.get_webapp() version = self.webapp.current_version.reload() desc_id = self.webapp.description_id notes_id = version.releasenotes_id eq_(self.webapp.description, data['description_en-us']) eq_(version.releasenotes, data['releasenotes_en-us']) eq_(unicode(Translation.objects.get(id=desc_id, locale='fr')), data['description_fr']) eq_(unicode(Translation.objects.get(id=desc_id, locale='en-us')), data['description_en-us']) eq_(unicode(Translation.objects.get(id=notes_id, locale='fr')), data['releasenotes_fr']) eq_(unicode(Translation.objects.get(id=notes_id, locale='en-us')), data['releasenotes_en-us']) @mock.patch('mkt.developers.views._update_manifest') def test_refresh(self, fetch): self.client.login(username='steamcube@mozilla.com', password='password') url = reverse('mkt.developers.apps.refresh_manifest', args=[self.webapp.app_slug]) r = self.client.post(url) eq_(r.status_code, 204) fetch.assert_called_once_with(self.webapp.pk, True, {}) @mock.patch('mkt.developers.views._update_manifest') def test_refresh_dev_only(self, fetch): self.client.login(username='regular@mozilla.com', password='password') url = reverse('mkt.developers.apps.refresh_manifest', args=[self.webapp.app_slug]) r = self.client.post(url) eq_(r.status_code, 403) eq_(fetch.called, 0) def test_view_developer_name(self): r = self.client.get(self.url) developer_name = self.webapp.current_version.developer_name content = smart_unicode(r.content) eq_(pq(content)('#developer-name td').html().strip(), developer_name) def test_view_developer_name_xss(self): version = self.webapp.current_version version._developer_name = '<script>alert("xss-devname")</script>' version.save() r = self.client.get(self.url) assert '<script>alert' not in r.content assert '&lt;script&gt;alert' in r.content def test_edit_packaged(self): 
self.get_webapp().update(is_packaged=True) data = self.get_dict() data.pop('manifest_url') r = self.client.post(self.edit_url, data) eq_(r.status_code, 200) eq_(r.context['editable'], False) eq_(self.get_webapp().description, self.get_dict()['description']) def test_edit_basic_not_public(self): # Disable file for latest version, and then update app.current_version. app = self.get_webapp() app.versions.latest().all_files[0].update(status=amo.STATUS_DISABLED) app.update_version() # Now try to display edit page. r = self.client.get(self.url) eq_(r.status_code, 200) def test_view_release_notes(self): version = self.webapp.current_version version.releasenotes = u'Chëese !' version.save() res = self.client.get(self.url) eq_(res.status_code, 200) content = smart_unicode(res.content) eq_(pq(content)('#releasenotes td span[lang]').html().strip(), version.releasenotes) self.webapp.update(is_packaged=True) res = self.client.get(self.url) eq_(res.status_code, 200) content = smart_unicode(res.content) eq_(pq(content)('#releasenotes').length, 0) def test_edit_release_notes(self): self.webapp.previews.create() self.webapp.support_email = 'test@example.com' self.webapp.save() data = self.get_dict(releasenotes=u'I can hâz release notes') res = self.client.post(self.edit_url, data) releasenotes = self.webapp.reload().latest_version.releasenotes eq_(res.status_code, 200) eq_(releasenotes, data['releasenotes']) # Make sure publish_type wasn't reset by accident. eq_(self.webapp.reload().publish_type, amo.PUBLISH_IMMEDIATE) def test_edit_release_notes_pending(self): # Like test_edit_release_notes, but with a pending app. file_ = self.webapp.current_version.all_files[0] file_.update(status=amo.STATUS_PENDING) self.webapp.update(status=amo.STATUS_PENDING) self.test_edit_release_notes() eq_(self.webapp.reload().status, amo.STATUS_PENDING) def test_edit_release_notes_packaged(self): # You are not supposed to edit release notes from the basic edit # page if you app is packaged. 
Instead this is done from the version # edit page. self.webapp.update(is_packaged=True) data = self.get_dict(releasenotes=u'I can not hâz release notes') res = self.client.post(self.edit_url, data) releasenotes = self.webapp.current_version.reload().releasenotes eq_(res.status_code, 200) eq_(releasenotes, None) def test_view_releasenotes_xss(self): version = self.webapp.current_version version.releasenotes = '<script>alert("xss-devname")</script>' version.save() r = self.client.get(self.url) assert '<script>alert' not in r.content assert '&lt;script&gt;alert' in r.content class TestEditCountryLanguage(TestEdit): def get_webapp(self): return Addon.objects.get(id=337141) def test_data_visible(self): clean_countries = [] self.get_webapp().current_version.update(supported_locales='de,es') res = self.client.get(self.url) eq_(res.status_code, 200) countries = (pq(pq(res.content)('#edit-app-language tr').eq(0)) .find('td').remove('small').text()) langs = (pq(pq(res.content)('#edit-app-language tr').eq(1)).find('td') .remove('small').text()) for c in countries.split(', '): clean_countries.append(strip_whitespace(c)) # eq_(langs, u'English (US) (default), Deutsch, Espa\xf1ol') # XXX The above line is correct. But if Jenkins is wrong, I # don't wanna be right. 
eq_(langs, u'English (US) (default), Deutsch, Espa\xc3\xb1ol') self.assertSetEqual( sorted(clean_countries), sorted([r.name.decode() for r in regions.ALL_REGIONS])) class TestEditMedia(TestEdit): fixtures = fixture('webapp_337141') def setUp(self): super(TestEditMedia, self).setUp() self.url = self.get_url('media') self.edit_url = self.get_url('media', True) self.icon_upload = self.webapp.get_dev_url('upload_icon') self.preview_upload = self.webapp.get_dev_url('upload_preview') patches = { 'ADDON_ICONS_PATH': tempfile.mkdtemp(), 'PREVIEW_THUMBNAIL_PATH': tempfile.mkstemp()[1] + '%s/%d.png', } for k, v in patches.iteritems(): patcher = mock.patch.object(settings, k, v) patcher.start() self.addCleanup(patcher.stop) def formset_new_form(self, *args, **kw): ctx = self.client.get(self.edit_url).context blank = initial(ctx['preview_form'].forms[-1]) blank.update(**kw) return blank def formset_media(self, prev_blank=None, *args, **kw): prev_blank = prev_blank or {} kw.setdefault('initial_count', 0) kw.setdefault('prefix', 'files') # Preview formset. fs = formset(*list(args) + [self.formset_new_form(**prev_blank)], **kw) return dict((k, '' if v is None else v) for k, v in fs.items()) def new_preview_hash(self): # At least one screenshot is required. 
src_image = open(get_image_path('preview.jpg'), 'rb') r = self.client.post(self.preview_upload, dict(upload_image=src_image)) return {'upload_hash': json.loads(r.content)['upload_hash']} def test_form_url(self): self.check_form_url('media') def test_edit_defaulticon(self): data = dict(icon_type='') data_formset = self.formset_media(prev_blank=self.new_preview_hash(), **data) r = self.client.post(self.edit_url, data_formset) self.assertNoFormErrors(r) webapp = self.get_webapp() assert webapp.get_icon_url(128).endswith('default-128.png') assert webapp.get_icon_url(64).endswith('default-64.png') for k in data: eq_(unicode(getattr(webapp, k)), data[k]) def test_edit_preuploadedicon(self): data = dict(icon_type='icon/appearance') data_formset = self.formset_media(prev_blank=self.new_preview_hash(), **data) r = self.client.post(self.edit_url, data_formset) self.assertNoFormErrors(r) webapp = self.get_webapp() assert webapp.get_icon_url(64).endswith('appearance-64.png') assert webapp.get_icon_url(128).endswith('appearance-128.png') for k in data: eq_(unicode(getattr(webapp, k)), data[k]) def test_edit_uploadedicon(self): img = get_image_path('mozilla-sq.png') src_image = open(img, 'rb') response = self.client.post(self.icon_upload, dict(upload_image=src_image)) response_json = json.loads(response.content) webapp = self.get_webapp() # Now, save the form so it gets moved properly. data = dict(icon_type='image/png', icon_upload_hash=response_json['upload_hash']) data_formset = self.formset_media(prev_blank=self.new_preview_hash(), **data) r = self.client.post(self.edit_url, data_formset) self.assertNoFormErrors(r) webapp = self.get_webapp() # Unfortunate hardcoding of URL. url = webapp.get_icon_url(64) assert ('addon_icons/%s/%s' % (webapp.id / 1000, webapp.id)) in url, ( 'Unexpected path: %r' % url) eq_(data['icon_type'], 'image/png') # Check that it was actually uploaded. 
dirname = os.path.join(settings.ADDON_ICONS_PATH, '%s' % (webapp.id / 1000)) dest = os.path.join(dirname, '%s-32.png' % webapp.id) eq_(storage.exists(dest), True) eq_(Image.open(storage.open(dest)).size, (32, 32)) def test_edit_icon_log(self): self.test_edit_uploadedicon() log = ActivityLog.objects.all() eq_(log.count(), 1) eq_(log[0].action, amo.LOG.CHANGE_ICON.id) def test_edit_uploadedicon_noresize(self): img = '%s/img/mkt/logos/128.png' % settings.MEDIA_ROOT src_image = open(img, 'rb') data = dict(upload_image=src_image) response = self.client.post(self.icon_upload, data) response_json = json.loads(response.content) webapp = self.get_webapp() # Now, save the form so it gets moved properly. data = dict(icon_type='image/png', icon_upload_hash=response_json['upload_hash']) data_formset = self.formset_media(prev_blank=self.new_preview_hash(), **data) r = self.client.post(self.edit_url, data_formset) self.assertNoFormErrors(r) webapp = self.get_webapp() # Unfortunate hardcoding of URL. addon_url = webapp.get_icon_url(64).split('?')[0] end = 'addon_icons/%s/%s-64.png' % (webapp.id / 1000, webapp.id) assert addon_url.endswith(end), 'Unexpected path: %r' % addon_url eq_(data['icon_type'], 'image/png') # Check that it was actually uploaded. 
dirname = os.path.join(settings.ADDON_ICONS_PATH, '%s' % (webapp.id / 1000)) dest = os.path.join(dirname, '%s-64.png' % webapp.id) assert storage.exists(dest), dest eq_(Image.open(storage.open(dest)).size, (64, 64)) def test_media_types(self): res = self.client.get(self.get_url('media', edit=True)) doc = pq(res.content) eq_(doc('#id_icon_upload').attr('data-allowed-types'), 'image/jpeg|image/png') eq_(doc('.screenshot_upload').attr('data-allowed-types'), 'image/jpeg|image/png|video/webm') def check_image_type(self, url, msg): img = '%s/js/devreg/devhub.js' % settings.MEDIA_ROOT self.check_image_type_path(img, url, msg) def check_image_type_path(self, img, url, msg): src_image = open(img, 'rb') res = self.client.post(url, {'upload_image': src_image}) response_json = json.loads(res.content) assert any(e == msg for e in response_json['errors']), ( response_json['errors']) # The check_image_type method uploads js, so let's try sending that # to ffmpeg to see what it thinks. @mock.patch.object(amo, 'VIDEO_TYPES', ['application/javascript']) def test_edit_video_wrong_type(self): raise SkipTest self.check_image_type(self.preview_upload, 'Videos must be in WebM.') def test_edit_icon_wrong_type(self): self.check_image_type(self.icon_upload, 'Icons must be either PNG or JPG.') def test_edit_screenshot_wrong_type(self): self.check_image_type(self.preview_upload, 'Images must be either PNG or JPG.') def setup_image_status(self): self.icon_dest = os.path.join(self.webapp.get_icon_dir(), '%s-64.png' % self.webapp.id) os.makedirs(os.path.dirname(self.icon_dest)) open(self.icon_dest, 'w') self.preview = self.webapp.previews.create() self.preview.save() os.makedirs(os.path.dirname(self.preview.thumbnail_path)) open(self.preview.thumbnail_path, 'w') self.url = self.webapp.get_dev_url('ajax.image.status') def test_icon_square(self): img = get_image_path('mozilla.png') self.check_image_type_path(img, self.icon_upload, 'Icons must be square.') def test_icon_status_no_choice(self): 
self.webapp.update(icon_type='') url = self.webapp.get_dev_url('ajax.image.status') result = json.loads(self.client.get(url).content) assert result['icons'] def test_icon_status_works(self): self.setup_image_status() result = json.loads(self.client.get(self.url).content) assert result['icons'] def test_icon_status_fails(self): self.setup_image_status() os.remove(self.icon_dest) result = json.loads(self.client.get(self.url).content) assert not result['icons'] def test_preview_status_works(self): self.setup_image_status() result = json.loads(self.client.get(self.url).content) assert result['previews'] # No previews means that all the images are done. self.webapp.previews.all().delete() result = json.loads(self.client.get(self.url).content) assert result['previews'] def test_preview_status_fails(self): self.setup_image_status() os.remove(self.preview.thumbnail_path) result = json.loads(self.client.get(self.url).content) assert not result['previews'] def test_image_status_default(self): self.setup_image_status() os.remove(self.icon_dest) self.webapp.update(icon_type='icon/photos') result = json.loads(self.client.get(self.url).content) assert result['icons'] def test_icon_size_req(self): filehandle = open(get_image_path('sunbird-small.png'), 'rb') res = self.client.post(self.icon_upload, {'upload_image': filehandle}) response_json = json.loads(res.content) assert any(e == 'Icons must be at least 128px by 128px.' 
for e in response_json['errors']) def check_image_animated(self, url, msg): filehandle = open(get_image_path('animated.png'), 'rb') res = self.client.post(url, {'upload_image': filehandle}) response_json = json.loads(res.content) assert any(e == msg for e in response_json['errors']) def test_icon_animated(self): self.check_image_animated(self.icon_upload, 'Icons cannot be animated.') def test_screenshot_animated(self): self.check_image_animated(self.preview_upload, 'Images cannot be animated.') @mock.patch('lib.video.ffmpeg.Video') @mock.patch('mkt.developers.utils.video_library') def add(self, handle, Video, video_library, num=1): data_formset = self.formset_media(upload_image=handle) r = self.client.post(self.preview_upload, data_formset) self.assertNoFormErrors(r) upload_hash = json.loads(r.content)['upload_hash'] # Create and post with the formset. fields = [] for i in xrange(num): fields.append(self.formset_new_form(upload_hash=upload_hash, position=i)) data_formset = self.formset_media(*fields) r = self.client.post(self.edit_url, data_formset) self.assertNoFormErrors(r) def preview_add(self, num=1): self.add(open(get_image_path('preview.jpg'), 'rb'), num=num) @mock.patch('mimetypes.guess_type', lambda *a: ('video/webm', 'webm')) def preview_video_add(self, num=1): self.add(open(video_files['good'], 'rb'), num=num) @mock.patch('lib.video.ffmpeg.Video') @mock.patch('mkt.developers.utils.video_library') def add_json(self, handle, Video, video_library): data_formset = self.formset_media(upload_image=handle) result = self.client.post(self.preview_upload, data_formset) return json.loads(result.content) @mock.patch('mimetypes.guess_type', lambda *a: ('video/webm', 'webm')) def test_edit_preview_video_add_hash(self): res = self.add_json(open(video_files['good'], 'rb')) assert not res['errors'], res['errors'] assert res['upload_hash'].endswith('.video-webm'), res['upload_hash'] def test_edit_preview_add_hash(self): res = 
self.add_json(open(get_image_path('preview.jpg'), 'rb')) assert res['upload_hash'].endswith('.image-jpeg'), res['upload_hash'] def test_edit_preview_add_hash_size(self): res = self.add_json(open(get_image_path('mozilla.png'), 'rb')) assert any(e.startswith('App previews ') for e in res['errors']), ( 'Small screenshot not flagged for size.') @mock.patch.object(settings, 'MAX_VIDEO_UPLOAD_SIZE', 1) @mock.patch('mimetypes.guess_type', lambda *a: ('video/webm', 'webm')) def test_edit_preview_video_size(self): res = self.add_json(open(video_files['good'], 'rb')) assert any(e.startswith('Please use files smaller than') for e in res['errors']), (res['errors']) @mock.patch('lib.video.tasks.resize_video') @mock.patch('mimetypes.guess_type', lambda *a: ('video/webm', 'webm')) def test_edit_preview_video_add(self, resize_video): eq_(self.get_webapp().previews.count(), 0) self.preview_video_add() eq_(self.get_webapp().previews.count(), 1) def test_edit_preview_add(self): eq_(self.get_webapp().previews.count(), 0) self.preview_add() eq_(self.get_webapp().previews.count(), 1) def test_edit_preview_edit(self): self.preview_add() preview = self.get_webapp().previews.all()[0] edited = {'upload_hash': 'xxx', 'id': preview.id, 'position': preview.position, 'file_upload': None} data_formset = self.formset_media(edited, initial_count=1) self.client.post(self.edit_url, data_formset) eq_(self.get_webapp().previews.count(), 1) def test_edit_preview_reorder(self): self.preview_add(3) previews = list(self.get_webapp().previews.all()) base = dict(upload_hash='xxx', file_upload=None) # Three preview forms were generated; mix them up here. 
a = dict(position=1, id=previews[2].id) b = dict(position=2, id=previews[0].id) c = dict(position=3, id=previews[1].id) a.update(base) b.update(base) c.update(base) # Add them in backwards ("third", "second", "first") data_formset = self.formset_media({}, *(c, b, a), initial_count=3) eq_(data_formset['files-0-id'], previews[1].id) eq_(data_formset['files-1-id'], previews[0].id) eq_(data_formset['files-2-id'], previews[2].id) self.client.post(self.edit_url, data_formset) # They should come out "first", "second", "third". eq_(self.get_webapp().previews.all()[0].id, previews[2].id) eq_(self.get_webapp().previews.all()[1].id, previews[0].id) eq_(self.get_webapp().previews.all()[2].id, previews[1].id) def test_edit_preview_delete(self): self.preview_add() self.preview_add() orig_previews = self.get_webapp().previews.all() # Delete second preview. Keep the first. edited = {'DELETE': 'checked', 'upload_hash': 'xxx', 'id': orig_previews[1].id, 'position': 0, 'file_upload': None} ctx = self.client.get(self.edit_url).context first = initial(ctx['preview_form'].forms[0]) first['upload_hash'] = 'xxx' data_formset = self.formset_media(edited, *(first,), initial_count=2) r = self.client.post(self.edit_url, data_formset) self.assertNoFormErrors(r) # First one should still be there. 
eq_(list(self.get_webapp().previews.all()), [orig_previews[0]]) def test_edit_preview_add_another(self): self.preview_add() self.preview_add() eq_(self.get_webapp().previews.count(), 2) def test_edit_preview_add_two(self): self.preview_add(2) eq_(self.get_webapp().previews.count(), 2) def test_screenshot_video_required(self): r = self.client.post(self.edit_url, self.formset_media()) eq_(r.context['preview_form'].non_form_errors(), ['You must upload at least one screenshot or video.']) def test_screenshot_with_icon(self): self.preview_add() preview = self.get_webapp().previews.all()[0] edited = {'upload_hash': '', 'id': preview.id} data_formset = self.formset_media(edited, initial_count=1) data_formset.update(icon_type='image/png', icon_upload_hash='') r = self.client.post(self.edit_url, data_formset) self.assertNoFormErrors(r) class TestEditDetails(TestEdit): fixtures = fixture('webapp_337141') def setUp(self): super(TestEditDetails, self).setUp() self.url = self.get_url('details') self.edit_url = self.get_url('details', edit=True) def get_dict(self, **kw): data = dict(default_locale='en-US', homepage='http://twitter.com/fligtarsmom', privacy_policy="fligtar's mom does <em>not</em> share " "your data with third parties.") data.update(kw) return data def test_form_url(self): self.check_form_url('details') def test_edit(self): data = self.get_dict() r = self.client.post(self.edit_url, data) self.assertNoFormErrors(r) self.compare(data) def test_privacy_policy_xss(self): self.webapp.privacy_policy = ("We\n<b>own</b>your" "<script>alert('soul')</script>") self.webapp.save() r = self.client.get(self.url) eq_(pq(r.content)('#addon-privacy-policy span[lang]').html(), "We<br/><b>own</b>your&lt;script&gt;" "alert('soul')&lt;/script&gt;") def test_edit_exclude_optional_fields(self): data = self.get_dict() data.update(default_locale='en-US', homepage='', privacy_policy='we sell your data to everyone') r = self.client.post(self.edit_url, data) self.assertNoFormErrors(r) 
        self.compare(data)

    def test_edit_default_locale_required_trans(self):
        # name and description are required in the new locale.
        data = self.get_dict()
        data.update(description='bullocks',
                    homepage='http://omg.org/yes',
                    privacy_policy='your data is delicious')
        fields = ['name', 'description']
        error = ('Before changing your default locale you must have a name '
                 'and description in that locale. You are missing %s.')

        # Builds the expected error message listing the repr() of each
        # missing field name.
        missing = lambda f: error % ', '.join(map(repr, f))
        data.update(default_locale='pt-BR')
        r = self.client.post(self.edit_url, data)
        self.assertFormError(r, 'form', None, missing(fields))

        # Now we have a name.
        self.webapp.name = {'pt-BR': 'pt-BR name'}
        self.webapp.save()
        fields.remove('name')
        # Only 'description' should still be reported missing.
        r = self.client.post(self.edit_url, data)
        self.assertFormError(r, 'form', None, missing(fields))

    def test_edit_default_locale_frontend_error(self):
        # The locale error must also surface in the rendered page body.
        data = self.get_dict()
        data.update(description='xx', homepage='http://google.com',
                    default_locale='pt-BR', privacy_policy='pp')
        rp = self.client.post(self.edit_url, data)
        self.assertContains(rp,
                            'Before changing your default locale you must')

    def test_edit_locale(self):
        # The read-only view shows the default locale's display name.
        self.webapp.update(default_locale='en-US')
        r = self.client.get(self.url)
        eq_(pq(r.content)('.addon_edit_locale').eq(0).text(),
            'English (US)')

    def test_homepage_url_optional(self):
        r = self.client.post(self.edit_url, self.get_dict(homepage=''))
        self.assertNoFormErrors(r)

    def test_homepage_url_invalid(self):
        r = self.client.post(self.edit_url,
                             self.get_dict(homepage='xxx'))
        self.assertFormError(r, 'form', 'homepage', 'Enter a valid URL.')

    def test_games_already_excluded_in_brazil(self):
        # An app already region-excluded in Brazil can switch to the
        # 'games' category without duplicating the exclusion record.
        AER.objects.create(addon=self.webapp, region=mkt.regions.BR.id)
        games = 'games'

        r = self.client.post(
            self.edit_url, self.get_dict(categories=[games]))
        self.assertNoFormErrors(r)
        eq_(list(AER.objects.filter(addon=self.webapp)
                            .values_list('region', flat=True)),
            [mkt.regions.BR.id])


class TestEditSupport(TestEdit):
    # Tests for the "Support" section (support email/URL).
    fixtures = fixture('webapp_337141')

    def setUp(self):
        super(TestEditSupport, self).setUp()
        self.url = self.get_url('support')
        self.edit_url = self.get_url('support', edit=True)

    def test_form_url(self):
        self.check_form_url('support')

    def test_edit_support(self):
        # Both support fields filled in: saves and round-trips cleanly.
        data = dict(support_email='sjobs@apple.com',
                    support_url='http://apple.com/')
        r = self.client.post(self.edit_url, data)
        self.assertNoFormErrors(r)
        self.compare(data)

    def test_edit_support_free_required(self):
        # Support email is required even for free apps.
        r = self.client.post(self.edit_url, dict(support_url=''))
        self.assertFormError(r, 'form', 'support_email',
                             'This field is required.')

    def test_edit_support_premium_required(self):
        # ...and for premium apps as well.
        self.get_webapp().update(premium_type=amo.ADDON_PREMIUM)
        r = self.client.post(self.edit_url, dict(support_url=''))
        self.assertFormError(r, 'form', 'support_email',
                             'This field is required.')

    def test_edit_support_premium(self):
        # Premium app: email alone (no URL) is a valid submission.
        self.get_webapp().update(premium_type=amo.ADDON_PREMIUM)
        data = dict(support_email='sjobs@apple.com',
                    support_url='')
        r = self.client.post(self.edit_url, data)
        self.assertNoFormErrors(r)
        eq_(self.get_webapp().support_email, data['support_email'])

    def test_edit_support_url_optional(self):
        data = dict(support_email='sjobs@apple.com', support_url='')
        r = self.client.post(self.edit_url, data)
        self.assertNoFormErrors(r)
        self.compare(data)


class TestEditTechnical(TestEdit):
    # Tests for the "Technical" section (flags, stats, app features).
    fixtures = fixture('webapp_337141')

    def setUp(self):
        super(TestEditTechnical, self).setUp()
        self.url = self.get_url('technical')
        self.edit_url = self.get_url('technical', edit=True)

    def test_form_url(self):
        self.check_form_url('technical')

    def test_toggles(self):
        # The 'flash' checkbox maps to the app's uses_flash flag.
        # Turn everything on.
        r = self.client.post(self.edit_url, formset(**{'flash': 'on'}))
        self.assertNoFormErrors(r)
        self.compare({'uses_flash': True})

        # And off.
        # An empty value for the checkbox clears the flag.
        r = self.client.post(self.edit_url, formset(**{'flash': ''}))
        self.compare({'uses_flash': False})

    def test_public_stats(self):
        # Enabling public stats persists the flag and logs an
        # EDIT_PROPERTIES activity entry.
        o = ActivityLog.objects
        eq_(o.count(), 0)

        eq_(self.webapp.public_stats, False)
        assert not self.webapp.public_stats, (
            'Unexpectedly found public stats for app. Says Basta.')

        r = self.client.post(self.edit_url, formset(public_stats=True))
        self.assertNoFormErrors(r)

        self.compare({'public_stats': True})
        eq_(o.filter(action=amo.LOG.EDIT_PROPERTIES.id).count(), 1)

    def test_features_hosted(self):
        # Toggling an app feature on an approved app queues it for
        # re-review.
        data_on = {'has_contacts': True}
        data_off = {'has_contacts': False}
        assert not RereviewQueue.objects.filter(addon=self.webapp).exists()

        # Turn contacts on.
        r = self.client.post(self.edit_url, formset(**data_on))
        self.assertNoFormErrors(r)
        self.compare_features(data_on)

        # And turn it back off.
        r = self.client.post(self.edit_url, formset(**data_off))
        self.assertNoFormErrors(r)
        self.compare_features(data_off)

        # Changing features must trigger re-review.
        assert RereviewQueue.objects.filter(addon=self.webapp).exists()

    def test_features_hosted_app_disabled(self):
        # Reject the app.
        app = self.get_webapp()
        app.update(status=amo.STATUS_REJECTED)
        app.versions.latest().all_files[0].update(status=amo.STATUS_DISABLED)
        app.update_version()

        assert not RereviewQueue.objects.filter(addon=self.webapp).exists()

        data_on = {'has_contacts': True}
        data_off = {'has_contacts': False}

        # Display edit technical page
        r = self.client.get(self.edit_url)
        eq_(r.status_code, 200)

        # Turn contacts on.
        r = self.client.post(self.edit_url, formset(**data_on))
        app = self.get_webapp()
        self.assertNoFormErrors(r)
        # The feature lands on the latest (disabled) version.
        self.compare_features(data_on, version=app.latest_version)

        # Display edit technical page again, is the feature on ?
        r = self.client.get(self.edit_url)
        eq_(r.status_code, 200)
        ok_(pq(r.content)('#id_has_contacts:checked'))

        # And turn it back off.
        r = self.client.post(self.edit_url, formset(**data_off))
        app = self.get_webapp()
        self.assertNoFormErrors(r)
        self.compare_features(data_off, version=app.latest_version)

        # Changing features on a rejected app must NOT trigger re-review.
        assert not RereviewQueue.objects.filter(addon=self.webapp).exists()


class TestAdmin(TestEdit):
    # Base class for admin-section tests: logs in as an admin and exposes
    # helpers to switch to other permission levels.
    fixtures = TestEdit.fixtures

    def setUp(self):
        super(TestAdmin, self).setUp()
        self.url = self.get_url('admin')
        self.edit_url = self.get_url('admin', edit=True)
        self.webapp = self.get_webapp()
        assert self.client.login(username='admin@mozilla.com',
                                 password='password')

    def log_in_user(self):
        # Re-login as the plain (non-admin) app developer.
        assert self.client.login(username=self.user.email,
                                 password='password')

    def log_in_with(self, rules):
        # Re-login as a regular user granted an ad-hoc group with the
        # given ACL rules string.
        user = UserProfile.objects.get(email='regular@mozilla.com')
        group = Group.objects.create(name='Whatever', rules=rules)
        GroupUser.objects.create(group=group, user=user)
        assert self.client.login(username=user.email,
                                 password='password')


class TestAdminSettings(TestAdmin):
    # Access-control and form tests for the admin settings section.
    fixtures = TestEdit.fixtures

    def test_form_url(self):
        self.check_form_url('admin')

    def test_overview_visible_as_admin(self):
        # The read-only overview renders for admins but without the edit
        # form in context.
        r = self.client.get(self.url)
        eq_(r.status_code, 200)
        eq_(pq(r.content)('form').length, 1)
        assert not r.context.get('form'), (
            'Admin Settings form should not be in context')

    def test_overview_forbidden_for_nonadmin(self):
        self.log_in_user()
        eq_(self.client.head(self.url).status_code, 403)

    def test_edit_get_as_admin(self):
        r = self.client.get(self.edit_url)
        eq_(r.status_code, 200)
        eq_(pq(r.content)('form').length, 1)
        assert r.context.get('form'), 'Admin Settings form expected in context'

    def test_edit_post_as_admin(self):
        # There are errors, but I don't care. I just want to see if I can POST.
        eq_(self.client.post(self.edit_url).status_code, 200)

    def test_edit_no_get_as_nonadmin(self):
        self.log_in_user()
        eq_(self.client.get(self.edit_url).status_code, 403)

    def test_edit_no_post_as_nonadmin(self):
        self.log_in_user()
        eq_(self.client.post(self.edit_url).status_code, 403)

    def post_contact(self, **kw):
        # Helper: POST the admin form with a mozilla_contact value
        # (overridable via kwargs) and return the response.
        data = {'position': '1',
                'upload_hash': 'abcdef',
                'mozilla_contact': 'a@mozilla.com'}
        data.update(kw)
        return self.client.post(self.edit_url, data)

    def test_mozilla_contact(self):
        self.post_contact()
        webapp = self.get_webapp()
        eq_(webapp.mozilla_contact, 'a@mozilla.com')

    def test_mozilla_contact_cleared(self):
        # An empty submission clears the stored contact.
        self.post_contact(mozilla_contact='')
        webapp = self.get_webapp()
        eq_(webapp.mozilla_contact, '')

    def test_mozilla_contact_invalid(self):
        # An XSS-ish, non-email value is rejected and nothing is stored.
        r = self.post_contact(
            mozilla_contact='<script>alert("xss")</script>@mozilla.com')
        webapp = self.get_webapp()
        self.assertFormError(r, 'form', 'mozilla_contact',
                             'Enter a valid email address.')
        eq_(webapp.mozilla_contact, '')

    def test_vip_app_toggle(self):
        # Turn on.
        data = {
            'position': 1,  # Required, useless in this test.
            'vip_app': 'on'
        }
        r = self.client.post(self.edit_url, data)
        self.assertNoFormErrors(r)
        self.compare({'vip_app': True})

        # And off.
        data.update({'vip_app': ''})
        r = self.client.post(self.edit_url, data)
        self.compare({'vip_app': False})

    def test_priority_review_toggle(self):
        # Turn on.
        data = {
            'position': 1,  # Required, useless in this test.
            'priority_review': 'on'
        }
        r = self.client.post(self.edit_url, data)
        self.assertNoFormErrors(r)
        self.compare({'priority_review': True})

        # And off.  Omitting the checkbox entirely unchecks it.
        data = {'position': 1}
        r = self.client.post(self.edit_url, data)
        self.compare({'priority_review': False})

    def test_staff(self):
        # Staff and Support Staff should have Apps:Configure.
        self.log_in_with('Apps:Configure')

        # Test GET.
        r = self.client.get(self.edit_url)
        eq_(r.status_code, 200)
        eq_(pq(r.content)('form').length, 1)
        assert r.context.get('form'), 'Admin Settings form expected in context'

        # Test POST. Ignore errors.
        # Apps:Configure may POST (200 even with form errors).
        eq_(self.client.post(self.edit_url).status_code, 200)

    def test_developer(self):
        # Developers have read-only on admin section.
        self.log_in_with('Apps:ViewConfiguration')

        # Test GET.
        r = self.client.get(self.edit_url)
        eq_(r.status_code, 200)
        eq_(pq(r.content)('form').length, 1)
        assert r.context.get('form'), 'Admin Settings form expected in context'

        # Test POST. Ignore errors.
        eq_(self.client.post(self.edit_url).status_code, 403)

    def test_banner_region_view(self):
        # Read-only view renders the banner message and the region names
        # (alphabetical display order) for a view-only permission holder.
        self.log_in_with('Apps:ViewConfiguration')
        geodata = self.get_webapp().geodata
        geodata.banner_message = u'Exclusive message ! Only for AR/BR !'
        geodata.banner_regions = [mkt.regions.BR.id, mkt.regions.AR.id]
        geodata.save()
        res = self.client.get(self.url)
        eq_(pq(res.content)('#id_banner_message').text(),
            unicode(geodata.banner_message))
        eq_(pq(res.content)('#id_banner_regions').text(),
            u'Argentina, Brazil')

    def test_banner_region_edit(self):
        # Edit view renders one checkbox per known region, with exactly the
        # stored banner regions pre-checked (AER exclusions do not affect
        # the checkbox list).
        self.log_in_with('Apps:ViewConfiguration')
        geodata = self.webapp.geodata
        geodata.banner_message = u'Exclusive message ! Only for AR/BR !'
        geodata.banner_regions = [mkt.regions.BR.id, mkt.regions.AR.id]
        geodata.save()
        AER.objects.create(addon=self.webapp, region=mkt.regions.US.id)
        res = self.client.get(self.edit_url)
        eq_(res.status_code, 200)
        doc = pq(res.content)
        inputs = doc.find('input[type=checkbox][name=banner_regions]')
        eq_(inputs.length, len(mkt.regions.REGIONS_CHOICES_ID))

        checked = doc.find('#id_banner_regions input[type=checkbox]:checked')
        eq_(checked.length, 2)
        eq_(checked[0].name, 'banner_regions')
        eq_(checked[0].value, unicode(mkt.regions.AR.id))
        eq_(pq(checked[0]).parents('li').attr('data-region'),
            unicode(mkt.regions.AR.id))
        eq_(checked[1].name, 'banner_regions')
        eq_(checked[1].value, unicode(mkt.regions.BR.id))
        eq_(pq(checked[1]).parents('li').attr('data-region'),
            unicode(mkt.regions.BR.id))

    def test_banner_region_edit_post(self):
        data = {
            'position': 1,  # Required, useless in this test.
            'banner_regions': [unicode(mkt.regions.BR.id),
                               unicode(mkt.regions.SPAIN.id)],
            'banner_message_en-us': u'Oh Hai.',
        }
        res = self.client.post(self.edit_url, data)
        eq_(res.status_code, 200)
        # Both the localized message and the region list persist.
        geodata = self.webapp.geodata.reload()
        eq_(geodata.banner_message, data['banner_message_en-us'])
        eq_(geodata.banner_regions, [mkt.regions.BR.id, mkt.regions.SPAIN.id])


class TestPromoUpload(TestAdmin):
    # Tests for the admin-only "promo" image upload, stored as a preview
    # at position -1 and excluded from the normal preview list.
    fixtures = TestEdit.fixtures

    def post(self, **kw):
        # Helper: POST the admin form with a fake upload hash.
        data = {'position': '1', 'upload_hash': 'abcdef'}
        data.update(kw)
        self.client.post(self.edit_url, data)

    def test_add(self):
        self.post()

        webapp = self.get_webapp()

        # The promo exists as a preview record but is hidden from the
        # regular get_previews() listing.
        eq_(webapp.previews.count(), 1)
        eq_(list(webapp.get_previews()), [])

        promo = webapp.get_promo()
        eq_(promo.position, -1)

    def test_delete(self):
        self.post()
        assert self.get_webapp().get_promo()

        self.post(DELETE=True)
        assert not self.get_webapp().get_promo()


class TestEditVersion(TestEdit):
    # Tests for the per-version edit page of a packaged app (release
    # notes, approval notes, app features).
    fixtures = fixture('group_admin', 'user_999', 'user_admin',
                       'user_admin_group', 'webapp_337141')

    def setUp(self):
        self.webapp = self.get_webapp()
        self.webapp.update(is_packaged=True)
        self.version_pk = self.webapp.latest_version.pk
        self.url = reverse('mkt.developers.apps.versions.edit', kwargs={
            'version_id': self.version_pk,
            'app_slug': self.webapp.app_slug
        })
        self.user = UserProfile.objects.get(username='31337')
        self.login(self.user)

    def test_post(self, **kwargs):
        # Doubles as a helper for the other tests: POSTs valid version-edit
        # data (overridable via kwargs), asserts the redirect and the saved
        # notes, and returns the refreshed Version.
        data = {'releasenotes_init': '',
                'releasenotes_en-us': 'Hot new version',
                'approvalnotes': 'The release notes are true.',
                'has_audio': False,
                'has_apps': False}
        data.update(kwargs)
        req = self.client.post(self.url, data)
        eq_(req.status_code, 302)
        version = Version.objects.no_cache().get(pk=self.version_pk)
        eq_(version.releasenotes, data['releasenotes_en-us'])
        eq_(version.approvalnotes, data['approvalnotes'])
        return version

    def test_comm_thread(self):
        # With the comm dashboard enabled, non-empty approval notes create
        # a communication note; empty ones do not.
        self.create_switch('comm-dashboard')

        # With empty note.
        self.test_post(approvalnotes='')
        eq_(CommunicationNote.objects.count(), 0)

        self.test_post(approvalnotes='abc')
        notes = CommunicationNote.objects.all()
        eq_(notes.count(), 1)
        eq_(notes[0].body, 'abc')

    def test_existing_features_initial_form_data(self):
        # The features form is pre-populated from the current version's
        # stored feature flags.
        features = self.webapp.current_version.features
        features.update(has_audio=True, has_apps=True)
        r = self.client.get(self.url)
        eq_(r.context['appfeatures_form'].initial,
            dict(id=features.id, **features.to_dict()))

    def test_new_features(self):
        # Any feature change (on or off) must queue the app for re-review.
        assert not RereviewQueue.objects.filter(addon=self.webapp).exists()

        # Turn a feature on.
        version = self.test_post(has_audio=True)
        ok_(version.features.has_audio)
        ok_(not version.features.has_apps)

        # Then turn the feature off.
        version = self.test_post(has_audio=False)
        ok_(not version.features.has_audio)
        ok_(not version.features.has_apps)

        # Changing features must trigger re-review.
        assert RereviewQueue.objects.filter(addon=self.webapp).exists()

    def test_correct_version_features(self):
        # Edits made via self.url (bound to the original version_pk) must
        # target that version even after a newer latest_version appears.
        # NOTE(review): relies on the return value of .update() here being
        # usable as _latest_version — verify against amo's ModelBase.update.
        new_version = self.webapp.latest_version.update(id=self.version_pk + 1)
        self.webapp.update(_latest_version=new_version)
        self.test_new_features()

    def test_publish_checkbox_presence(self):
        # The "publish immediately" checkbox only appears while the
        # version's files are pending review.
        res = self.client.get(self.url)
        ok_(not pq(res.content)('#id_publish_immediately'))

        self.webapp.latest_version.files.update(status=amo.STATUS_PENDING)
        res = self.client.get(self.url)
        ok_(pq(res.content)('#id_publish_immediately'))
{ "content_hash": "be467c2fc29a533e7661f70e921e96fd", "timestamp": "", "source": "github", "line_count": 1532, "max_line_length": 79, "avg_line_length": 37.965404699738905, "alnum_prop": 0.5998830871860117, "repo_name": "andymckay/zamboni", "id": "78cf41205935f399c653e3da9378207a004d519a", "size": "58196", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mkt/developers/tests/test_views_edit.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "357533" }, { "name": "JavaScript", "bytes": "524153" }, { "name": "Python", "bytes": "3863676" }, { "name": "Shell", "bytes": "14980" } ], "symlink_target": "" }