Search is not available for this dataset
text
stringlengths
75
104k
async def send_maps(self, map_list):
    """Post a batch of maps (dicts) to the channel endpoint.

    Each map is flattened into ``req{n}_{key}`` form fields, as the
    server expects, and the response of the POST is returned.
    """
    query = {
        'VER': 8,             # channel protocol version
        'RID': 81188,         # request identifier
        'ctype': 'hangouts',  # client type
    }
    # Session identifiers are only sent once they are known.
    if self._gsessionid_param is not None:
        query['gsessionid'] = self._gsessionid_param
    if self._sid_param is not None:
        query['SID'] = self._sid_param
    payload = {'count': len(map_list), 'ofs': 0}
    payload.update({
        'req{}_{}'.format(map_num, map_key): map_val
        for map_num, mapping in enumerate(map_list)
        for map_key, map_val in mapping.items()
    })
    return await self._session.fetch(
        'post', CHANNEL_URL, params=query, data=payload
    )
async def _fetch_channel_sid(self):
    """Creates a new channel for receiving push data.

    Sending an empty forward channel request will create a new channel on
    the server. There's a separate API to get the gsessionid alone that
    Hangouts for Chrome uses, but if we don't send a gsessionid with this
    request, it will return a gsessionid as well as the SID.

    Raises hangups.NetworkError if the channel can not be created.
    """
    logger.info('Requesting new gsessionid and SID...')
    # Set SID and gsessionid to None so they aren't sent in by send_maps.
    self._sid_param = None
    self._gsessionid_param = None
    # An empty forward-channel request makes the server allocate a fresh
    # session; the response body carries both identifiers.
    res = await self.send_maps([])
    self._sid_param, self._gsessionid_param = _parse_sid_response(res.body)
    logger.info('New SID: {}'.format(self._sid_param))
    logger.info('New gsessionid: {}'.format(self._gsessionid_param))
async def _longpoll_request(self):
    """Open a long-polling request and receive arrays.

    This method uses keep-alive to make re-opening the request faster, but
    the remote server will set the "Connection: close" header once an
    hour.

    Raises hangups.NetworkError or ChannelSessionError.
    """
    params = {
        'VER': 8,  # channel protocol version
        'gsessionid': self._gsessionid_param,
        'RID': 'rpc',  # request identifier
        't': 1,  # trial
        'SID': self._sid_param,  # session ID
        'CI': 0,  # 0 if streaming/chunked requests should be used
        'ctype': 'hangouts',  # client type
        'TYPE': 'xmlhttp',  # type of request
    }
    logger.info('Opening new long-polling request')
    try:
        async with self._session.fetch_raw('GET', CHANNEL_URL,
                                           params=params) as res:
            if res.status != 200:
                # A 400 with this reason means the server-side session
                # died; the caller must fetch a new SID before retrying.
                if res.status == 400 and res.reason == 'Unknown SID':
                    raise ChannelSessionError('SID became invalid')
                raise exceptions.NetworkError(
                    'Request return unexpected status: {}: {}'.format(
                        res.status, res.reason))
            while True:
                # Bound each read: the server normally sends periodic
                # pushes, so prolonged silence means a dead connection.
                async with async_timeout.timeout(PUSH_TIMEOUT):
                    chunk = await res.content.read(MAX_READ_BYTES)
                # Empty read means the response body is exhausted.
                if not chunk:
                    break
                await self._on_push_data(chunk)
    except asyncio.TimeoutError:
        raise exceptions.NetworkError('Request timed out')
    except aiohttp.ServerDisconnectedError as err:
        raise exceptions.NetworkError(
            'Server disconnected error: %s' % err)
    except aiohttp.ClientPayloadError:
        # Truncated payload: the channel session is near its end of life.
        raise ChannelSessionError('SID is about to expire')
    except aiohttp.ClientError as err:
        raise exceptions.NetworkError('Request connection error: %s' % err)
async def _on_push_data(self, data_bytes):
    """Parse push data and trigger events.

    :param data_bytes: raw bytes read from the long-poll response body.
    """
    logger.debug('Received chunk:\n{}'.format(data_bytes))
    for chunk in self._chunk_parser.get_chunks(data_bytes):
        # Consider the channel connected once the first chunk is received.
        # on_connect fires only once per instance; later reconnections of
        # the underlying request fire on_reconnect instead.
        if not self._is_connected:
            if self._on_connect_called:
                self._is_connected = True
                await self.on_reconnect.fire()
            else:
                self._on_connect_called = True
                self._is_connected = True
                await self.on_connect.fire()

        # chunk contains a container array
        container_array = json.loads(chunk)
        # container array is an array of inner arrays
        for inner_array in container_array:
            # inner_array always contains 2 elements, the array_id and the
            # data_array.
            array_id, data_array = inner_array
            logger.debug('Chunk contains data array with id %r:\n%r',
                         array_id, data_array)
            await self.on_receive_array.fire(data_array)
def user_id(self):
    """Who created the event (:class:`~hangups.user.UserID`)."""
    sender = self._event.sender_id
    return user.UserID(chat_id=sender.chat_id, gaia_id=sender.gaia_id)
def from_str(text):
    """Construct :class:`ChatMessageSegment` list parsed from a string.

    Args:
        text (str): Text to parse. May contain line breaks, URLs and
            formatting markup (simplified Markdown and HTML) to be
            converted into equivalent segments.

    Returns:
        List of :class:`ChatMessageSegment` objects.
    """
    segments = []
    for parsed in chat_message_parser.parse(text):
        segments.append(ChatMessageSegment(parsed.text, **parsed.params))
    return segments
def deserialize(segment):
    """Construct :class:`ChatMessageSegment` from ``Segment`` message.

    Args:
        segment: ``Segment`` message to parse.

    Returns:
        :class:`ChatMessageSegment` object.
    """
    fmt = segment.formatting
    target = segment.link_data.link_target
    return ChatMessageSegment(
        segment.text,
        segment_type=segment.type,
        is_bold=fmt.bold,
        is_italic=fmt.italic,
        is_strikethrough=fmt.strikethrough,
        is_underline=fmt.underline,
        # An empty link target means "no link".
        link_target=target if target != '' else None,
    )
def serialize(self):
    """Serialize this segment to a ``Segment`` message.

    Returns:
        ``Segment`` message.
    """
    formatting = hangouts_pb2.Formatting(
        bold=self.is_bold,
        italic=self.is_italic,
        strikethrough=self.is_strikethrough,
        underline=self.is_underline,
    )
    message = hangouts_pb2.Segment(
        type=self.type_,
        text=self.text,
        formatting=formatting,
    )
    # link_data is only populated when a link target exists.
    if self.link_target is not None:
        message.link_data.link_target = self.link_target
    return message
def text(self):
    """Text of the message without formatting (:class:`str`)."""
    lines = ['']
    # TEXT and LINK segments both contribute their raw text to the
    # current line; LINE_BREAK starts a new line.
    inline_types = (hangouts_pb2.SEGMENT_TYPE_TEXT,
                    hangouts_pb2.SEGMENT_TYPE_LINK)
    for segment in self.segments:
        if segment.type_ in inline_types:
            lines[-1] += segment.text
        elif segment.type_ == hangouts_pb2.SEGMENT_TYPE_LINE_BREAK:
            lines.append('')
        else:
            logger.warning('Ignoring unknown chat message segment type: {}'
                           .format(segment.type_))
    # Attachment URLs are appended as additional lines.
    lines.extend(self.attachments)
    return '\n'.join(lines)
def segments(self):
    """List of :class:`ChatMessageSegment` in message (:class:`list`)."""
    return [
        ChatMessageSegment.deserialize(seg)
        for seg in self._event.chat_message.message_content.segment
    ]
def attachments(self):
    """List of attachments in the message (:class:`list`).

    Unknown embed item types are logged but skipped; only ``plus_photo``
    embeds currently yield a URL.
    """
    raw_attachments = self._event.chat_message.message_content.attachment
    if raw_attachments is None:
        raw_attachments = []
    # Hoisted out of the loops: previously this list was rebuilt on
    # every inner iteration for no benefit.
    known_types = (
        hangouts_pb2.ITEM_TYPE_PLUS_PHOTO,
        hangouts_pb2.ITEM_TYPE_PLACE_V2,
        hangouts_pb2.ITEM_TYPE_PLACE,
        hangouts_pb2.ITEM_TYPE_THING,
    )
    attachments = []
    for attachment in raw_attachments:
        for embed_item_type in attachment.embed_item.type:
            if embed_item_type not in known_types:
                logger.warning('Received chat message attachment with '
                               'unknown embed type: %r', embed_item_type)
        # Only photo embeds expose a usable thumbnail URL.
        if attachment.embed_item.HasField('plus_photo'):
            attachments.append(
                attachment.embed_item.plus_photo.thumbnail.image_url
            )
    return attachments
def participant_ids(self):
    """:class:`~hangups.user.UserID` of users involved (:class:`list`)."""
    ids = []
    for id_ in self._event.membership_change.participant_ids:
        ids.append(user.UserID(chat_id=id_.chat_id, gaia_id=id_.gaia_id))
    return ids
def _decode_field(message, field, value):
    """Decode optional or required field."""
    # Message-typed fields recurse; everything else is assigned directly.
    if field.type == FieldDescriptor.TYPE_MESSAGE:
        decode(getattr(message, field.name), value)
        return
    try:
        if field.type == FieldDescriptor.TYPE_BYTES:
            value = base64.b64decode(value)
        setattr(message, field.name, value)
    except (ValueError, TypeError) as e:
        # ValueError: invalid enum value, negative unsigned int value, or
        # invalid base64
        # TypeError: mismatched type
        logger.warning('Message %r ignoring field %s: %s',
                       message.__class__.__name__, field.name, e)
def _decode_repeated_field(message, field, value_list):
    """Decode repeated field.

    If any item of a scalar repeated field fails to decode, the whole
    field is cleared so no partially decoded values remain.
    """
    if field.type == FieldDescriptor.TYPE_MESSAGE:
        for value in value_list:
            decode(getattr(message, field.name).add(), value)
    else:
        try:
            for value in value_list:
                if field.type == FieldDescriptor.TYPE_BYTES:
                    value = base64.b64decode(value)
                getattr(message, field.name).append(value)
        except (ValueError, TypeError) as e:
            # ValueError: invalid enum value, negative unsigned int value, or
            # invalid base64
            # TypeError: mismatched type
            logger.warning('Message %r ignoring repeated field %s: %s',
                           message.__class__.__name__, field.name, e)
            # Ignore any values already decoded by clearing list
            message.ClearField(field.name)
def decode(message, pblite, ignore_first_item=False):
    """Decode pblite to Protocol Buffer message.

    This method is permissive of decoding errors and will log them as
    warnings and continue decoding where possible.

    The first element of the outer pblite list must often be ignored using
    the ignore_first_item parameter because it contains an abbreviation of
    the name of the protobuf message (eg. cscmrp for
    ClientSendChatMessageResponseP) that's not part of the protobuf.

    Args:
        message: protocol buffer message instance to decode into.
        pblite: list representing a pblite-serialized message.
        ignore_first_item: If True, ignore the item at index 0 in the pblite
            list, making the item at index 1 correspond to field 1 in the
            message.
    """
    if not isinstance(pblite, list):
        logger.warning('Ignoring invalid message: expected list, got %r',
                       type(pblite))
        return
    if ignore_first_item:
        pblite = pblite[1:]
    # If the last item of the list is a dict, use it as additional field/value
    # mappings. This seems to be an optimization added for dealing with really
    # high field numbers.
    if pblite and isinstance(pblite[-1], dict):
        extra_fields = {int(field_number): value for field_number, value
                        in pblite[-1].items()}
        pblite = pblite[:-1]
    else:
        extra_fields = {}
    # Positional items map to field numbers 1..len(pblite); the trailing
    # dict (if any) supplies arbitrary higher field numbers.
    fields_values = itertools.chain(enumerate(pblite, start=1),
                                    extra_fields.items())
    for field_number, value in fields_values:
        if value is None:
            continue
        try:
            field = message.DESCRIPTOR.fields_by_number[field_number]
        except KeyError:
            # If the tag number is unknown and the value is non-trivial, log a
            # message to aid reverse-engineering the missing field in the
            # message.
            if value not in [[], '', 0]:
                logger.debug('Message %r contains unknown field %s with value '
                             '%r', message.__class__.__name__, field_number,
                             value)
            continue
        if field.label == FieldDescriptor.LABEL_REPEATED:
            _decode_repeated_field(message, field, value)
        else:
            _decode_field(message, field, value)
def send_private_msg(self, *, user_id, message, auto_escape=False):
    """Send a private message.

    :param int user_id: recipient's QQ number
    :param str | list[ dict[ str, unknown ] ] message: content to send
    :param bool auto_escape: send the content as plain text (do not parse
        CQ codes); has no effect when `message` is a `list`
    :return: {"message_id": int message ID}
    :rtype: dict[string, int]
    """
    # Delegate to the dynamically-resolved underlying API method.
    return super().__getattr__('send_private_msg') \
        (user_id=user_id, message=message, auto_escape=auto_escape)
def send_private_msg_async(self, *, user_id, message, auto_escape=False):
    """Send a private message (asynchronous version).

    :param int user_id: recipient's QQ number
    :param str | list[ dict[ str, unknown ] ] message: content to send
    :param bool auto_escape: send the content as plain text (do not parse
        CQ codes); has no effect when `message` is a `list`
    :return: None
    :rtype: None
    """
    return super().__getattr__('send_private_msg_async') \
        (user_id=user_id, message=message, auto_escape=auto_escape)
def send_group_msg(self, *, group_id, message, auto_escape=False):
    """Send a group message.

    :param int group_id: group number
    :param str | list[ dict[ str, unknown ] ] message: content to send
    :param bool auto_escape: send the content as plain text (do not parse
        CQ codes); has no effect when `message` is a `list`
    :return: {"message_id": int message ID}
    :rtype: dict[string, int]
    """
    return super().__getattr__('send_group_msg') \
        (group_id=group_id, message=message, auto_escape=auto_escape)
def send_group_msg_async(self, *, group_id, message, auto_escape=False):
    """Send a group message (asynchronous version).

    :param int group_id: group number
    :param str | list[ dict[ str, unknown ] ] message: content to send
    :param bool auto_escape: send the content as plain text (do not parse
        CQ codes); has no effect when `message` is a `list`
    :return: None
    :rtype: None
    """
    return super().__getattr__('send_group_msg_async') \
        (group_id=group_id, message=message, auto_escape=auto_escape)
def send_discuss_msg(self, *, discuss_id, message, auto_escape=False):
    """Send a discussion-group message.

    :param int discuss_id: discussion group ID (not normally visible; must
        be obtained from reported discussion-group message data)
    :param str | list[ dict[ str, unknown ] ] message: content to send
    :param bool auto_escape: send the content as plain text (do not parse
        CQ codes); has no effect when `message` is a `list`
    :return: {"message_id": int message ID}
    :rtype: dict[string, int]
    """
    return super().__getattr__('send_discuss_msg') \
        (discuss_id=discuss_id, message=message, auto_escape=auto_escape)
def send_discuss_msg_async(self, *, discuss_id, message, auto_escape=False):
    """Send a discussion-group message (asynchronous version).

    :param int discuss_id: discussion group ID (not normally visible; must
        be obtained from reported discussion-group message data)
    :param str | list[ dict[ str, unknown ] ] message: content to send
    :param bool auto_escape: send the content as plain text (do not parse
        CQ codes); has no effect when `message` is a `list`
    :return: None
    :rtype: None
    """
    return super().__getattr__('send_discuss_msg_async') \
        (discuss_id=discuss_id, message=message, auto_escape=auto_escape)
def send_msg_async(self, *, message_type, user_id=None, group_id=None,
                   discuss_id=None, message, auto_escape=False):
    """Send a message (asynchronous version).

    :param str message_type: message type, one of `private`, `group`,
        `discuss` for private, group and discussion-group messages
    :param int user_id: recipient's QQ number (required when
        `message_type` is `private`)
    :param int group_id: group number (required when `message_type` is
        `group`)
    :param int discuss_id: discussion group ID, obtained from reported
        message data (required when `message_type` is `discuss`)
    :param str | list[ dict[ str, unknown ] ] message: content to send
    :param bool auto_escape: send the content as plain text (do not parse
        CQ codes); has no effect when `message` is a `list`
    :return: None
    :rtype: None
    """
    return super().__getattr__('send_msg_async') \
        (message_type=message_type, user_id=user_id, group_id=group_id,
         discuss_id=discuss_id, message=message, auto_escape=auto_escape)
def send_like(self, *, user_id, times=1):
    """Send friend "likes".

    :param int user_id: target QQ number
    :param int times: number of likes, at most 10 per friend per day
    :return: None
    :rtype: None
    """
    return super().__getattr__('send_like') \
        (user_id=user_id, times=times)
def set_group_kick(self, *, group_id, user_id, reject_add_request=False):
    """Kick a member from a group.

    :param int group_id: group number
    :param int user_id: QQ number of the member to kick
    :param bool reject_add_request: also reject future join requests from
        this user
    :return: None
    :rtype: None
    """
    return super().__getattr__('set_group_kick') \
        (group_id=group_id, user_id=user_id,
         reject_add_request=reject_add_request)
def set_group_ban(self, *, group_id, user_id, duration=30 * 60):
    """Mute a single group member.

    :param int group_id: group number
    :param int user_id: QQ number of the member to mute
    :param int duration: mute duration in seconds; 0 lifts the mute
    :return: None
    :rtype: None
    """
    return super().__getattr__('set_group_ban') \
        (group_id=group_id, user_id=user_id, duration=duration)
def set_group_anonymous_ban(self, *, group_id, flag, duration=30 * 60):
    """Mute an anonymous group member.

    :param int group_id: group number
    :param str flag: flag of the anonymous user to mute (obtained from
        reported group message data)
    :param int duration: mute duration in seconds; **an anonymous mute
        cannot be lifted**
    :return: None
    :rtype: None
    """
    return super().__getattr__('set_group_anonymous_ban') \
        (group_id=group_id, flag=flag, duration=duration)
def set_group_whole_ban(self, *, group_id, enable=True):
    """Mute or unmute the whole group.

    :param int group_id: group number
    :param bool enable: True to mute everyone, False to unmute
    :return: None
    :rtype: None
    """
    return super().__getattr__('set_group_whole_ban') \
        (group_id=group_id, enable=enable)
def set_group_admin(self, *, group_id, user_id, enable=True):
    """Grant or revoke group administrator rights.

    :param int group_id: group number
    :param user_id: QQ number of the member to (un)promote
    :param enable: True to grant admin, False to revoke
    :return: None
    :rtype: None
    """
    return super().__getattr__('set_group_admin') \
        (group_id=group_id, user_id=user_id, enable=enable)
def set_group_anonymous(self, *, group_id, enable=True):
    """Enable or disable anonymous chat in a group.

    :param int group_id: group number
    :param bool enable: whether anonymous chat is allowed
    :return: None
    :rtype: None
    """
    return super().__getattr__('set_group_anonymous') \
        (group_id=group_id, enable=enable)
def set_group_card(self, *, group_id, user_id, card=None):
    """Set a member's group card (in-group nickname).

    :param int group_id: group number
    :param int user_id: QQ number of the member to update
    :param str | None card: card content; omit or pass an empty string to
        delete the card
    :return: None
    :rtype: None
    """
    return super().__getattr__('set_group_card') \
        (group_id=group_id, user_id=user_id, card=card)
def set_group_leave(self, *, group_id, is_dismiss=False):
    """Leave a group.

    :param int group_id: group number
    :param bool is_dismiss: whether to dismiss (disband) the group; if the
        logged-in account is the owner, the group is only disbanded when
        this is true
    :return: None
    :rtype: None
    """
    return super().__getattr__('set_group_leave') \
        (group_id=group_id, is_dismiss=is_dismiss)
def set_group_special_title(self, *, group_id, user_id, special_title,
                            duration=-1):
    """Set a member's special group title.

    :param int group_id: group number
    :param int user_id: QQ number of the member to update
    :param str special_title: special title; omit or pass an empty string
        to delete it. Only roughly the first 6 letters/CJK characters are
        kept (fewer with emoji, which count by actual character width);
        anything longer is truncated.
    :param int duration: title validity in seconds; -1 means permanent.
        This parameter appears to have no effect — possibly only certain
        durations work; untested.
    :return: None
    :rtype: None
    """
    return super().__getattr__('set_group_special_title') \
        (group_id=group_id, user_id=user_id, special_title=special_title,
         duration=duration)
def set_friend_add_request(self, *, flag, approve=True, remark=None):
    """Handle a friend request.

    :param str flag: flag of the friend request (obtained from reported
        data)
    :param bool approve: whether to accept the request
    :param str remark: remark name for the new friend (only effective when
        accepting)
    :return: None
    :rtype: None
    """
    return super().__getattr__('set_friend_add_request') \
        (flag=flag, approve=approve, remark=remark)
def set_group_add_request(self, *, flag, type, approve=True, reason=None):
    """Handle a group join request or a group invitation.

    :param str flag: flag of the request (obtained from reported data)
    :param str type: `add` or `invite`, the request type (must match the
        `sub_type` field of the reported message)
    :param bool approve: whether to accept the request/invitation
    :param str reason: rejection reason (only effective when rejecting)
    :return: None
    :rtype: None
    """
    # NOTE: the `type` parameter name shadows the builtin but is part of
    # the public API and cannot be renamed.
    return super().__getattr__('set_group_add_request') \
        (flag=flag, type=type, approve=approve, reason=reason)
def get_stranger_info(self, *, user_id, no_cache=False):
    """Get information about a stranger.

    :param int user_id: QQ number (must not be the logged-in account)
    :param bool no_cache: bypass the cache (cached data may be stale but
        responds faster)
    :return: {
            "user_id": (QQ number: int),
            "nickname": (nickname: str),
            "sex": (sex: str in ['male', 'female', 'unknown']),
            "age": (age: int)
        }
    :rtype: dict[ str, int | str ]
    """
    return super().__getattr__('get_stranger_info') \
        (user_id=user_id, no_cache=no_cache)
def get_group_member_info(self, *, group_id, user_id, no_cache=False):
    """Get information about a group member.

    :param int group_id: group number
    :param int user_id: QQ number (must not be the logged-in account)
    :param bool no_cache: bypass the cache (cached data may be stale but
        responds faster)
    :return: {
            "group_id": (group number: int),
            "user_id": (QQ number: int),
            "nickname": (nickname: str),
            "card": (group card/remark: str),
            "sex": (sex: str in ['male', 'female', 'unknown']),
            "age": (age: int),
            "area": (area: str),
            "join_time": (join timestamp: int),
            "last_sent_time": (last-message timestamp: int),
            "level": (member level: str),
            "role": (role: str in ['owner', 'admin', 'member']),
            "unfriendly": (has a bad record: bool),
            "title": (special title: str),
            "title_expire_time": (special title expiry timestamp: int),
            "card_changeable": (card may be modified: bool)
        }
    :rtype: dict[ str, int | str | bool ]
    """
    return super().__getattr__('get_group_member_info') \
        (group_id=group_id, user_id=user_id, no_cache=no_cache)
def get_record(self, *, file, out_format):
    """Get a voice recording.

    This does not actually fetch the audio; it converts the recording to
    the requested format and returns the resulting file name (placed under
    the `data/record` directory).

    :param str file: received voice file name, e.g.
        `0B38145AA44505000B38145AA4450500.silk`
    :param str out_format: target format, currently one of `mp3`, `amr`,
        `wma`, `m4a`, `spx`, `ogg`, `wav`, `flac`
    :return: { "file": (converted voice file name: str)}
    :rtype: dict[ str, str ]
    """
    return super().__getattr__('get_record') \
        (file=file, out_format=out_format)
def send(self, context, message, **kwargs):
    """Convenience reply: infer the reply target from the incoming event.

    :param dict context: the context dict received with the event
    :param message: the message content to send
    :return: None
    :rtype: None
    """
    payload = dict(context)
    payload['message'] = message
    payload.update(kwargs)
    # Infer the message type from whichever target id the context carries,
    # unless the caller already specified one.
    if 'message_type' not in payload:
        for id_key, msg_type in (('group_id', 'group'),
                                 ('discuss_id', 'discuss'),
                                 ('user_id', 'private')):
            if id_key in payload:
                payload['message_type'] = msg_type
                break
    return super().__getattr__('send_msg')(**payload)
def toposort_flatten(data, sort=True):
    """Returns a single list of dependencies. For any set returned by
    toposort(), those items are sorted and appended to the result (just to
    make the results deterministic).

    :param data: dependency mapping accepted by toposort().
    :param sort: when True, sort the items of each dependency level.
    :return: flat list of items in dependency order.
    """
    result = []
    for level in toposort(data):
        try:
            # Fixed: the old `(sorted if sort else list)(d)` dispatch with
            # an unused `as e` binding is now an explicit conditional.
            result.extend(sorted(level) if sort else list(level))
        except TypeError:
            # Items are not mutually orderable (e.g. mixed types); fall
            # back to an arbitrary but valid order for this level.
            result.extend(list(level))
    return result
def _timezone_format(value):
    """
    Generates a timezone aware datetime if the 'USE_TZ' setting is enabled

    :param value: The datetime value
    :return: A locale aware datetime
    """
    # When USE_TZ is off (or unset) the naive datetime passes through.
    if not getattr(settings, 'USE_TZ', False):
        return value
    return timezone.make_aware(value, timezone.get_current_timezone())
def guess_format(self, name):
    """
    Returns a faker method based on the field's name

    :param name: field name to inspect (matched case-insensitively)
    :return: a one-argument lambda producing a fake value, or implicitly
        None when no naming rule matches.
    """
    name = name.lower()
    faker = self.faker
    # NOTE(review): `name` was lowercased above, so the uppercase
    # alternatives in these patterns ([_A-Z] and (_a|A)) are dead;
    # effectively only 'is_*' prefixes and '*_at' suffixes match.
    if re.findall(r'^is[_A-Z]', name):
        return lambda x: faker.boolean()
    elif re.findall(r'(_a|A)t$', name):
        # Timestamp-like names (created_at, updated_at, ...).
        return lambda x: _timezone_format(faker.date_time())
    # Exact-name heuristics below; first match wins.
    if name in ('first_name', 'firstname', 'first'):
        return lambda x: faker.first_name()
    if name in ('last_name', 'lastname', 'last'):
        return lambda x: faker.last_name()
    if name in ('username', 'login', 'nickname'):
        return lambda x: faker.user_name()
    if name in ('email', 'email_address'):
        return lambda x: faker.email()
    if name in ('phone_number', 'phonenumber', 'phone'):
        return lambda x: faker.phone_number()
    if name == 'address':
        return lambda x: faker.address()
    if name == 'city':
        return lambda x: faker.city()
    if name == 'streetaddress':
        return lambda x: faker.street_address()
    if name in ('postcode', 'zipcode'):
        return lambda x: faker.postcode()
    if name == 'state':
        return lambda x: faker.state()
    if name == 'country':
        return lambda x: faker.country()
    if name == 'title':
        return lambda x: faker.sentence()
    if name in ('body', 'summary', 'description'):
        return lambda x: faker.text()
def guess_format(self, field):
    """
    Returns the correct faker function based on the field type

    :param field: Django model field instance
    :return: a one-argument lambda producing a fake value.
    :raises AttributeError: when the field type is not recognized.
    """
    faker = self.faker
    provider = self.provider
    # Order matters: subclasses must be tested before their base classes
    # (e.g. PositiveSmallIntegerField before IntegerField).
    if isinstance(field, DurationField):
        return lambda x: provider.duration()
    if isinstance(field, UUIDField):
        return lambda x: provider.uuid()
    if isinstance(field, BooleanField):
        return lambda x: faker.boolean()
    if isinstance(field, NullBooleanField):
        return lambda x: faker.null_boolean()
    if isinstance(field, PositiveSmallIntegerField):
        return lambda x: provider.rand_small_int(pos=True)
    if isinstance(field, SmallIntegerField):
        return lambda x: provider.rand_small_int()
    if isinstance(field, BigIntegerField):
        return lambda x: provider.rand_big_int()
    if isinstance(field, PositiveIntegerField):
        return lambda x: provider.rand_small_int(pos=True)
    if isinstance(field, IntegerField):
        return lambda x: provider.rand_small_int()
    if isinstance(field, FloatField):
        return lambda x: provider.rand_float()
    if isinstance(field, DecimalField):
        # NOTE(review): ignores the field's max_digits/decimal_places;
        # always yields a float in [0, 1) — confirm this is intended.
        return lambda x: random.random()
    if isinstance(field, URLField):
        return lambda x: faker.uri()
    if isinstance(field, SlugField):
        return lambda x: faker.uri_page()
    if isinstance(field, IPAddressField) or isinstance(field, GenericIPAddressField):
        # Protocol is chosen once at guess time, not per generated value.
        protocol = random.choice(['ipv4', 'ipv6'])
        return lambda x: getattr(faker, protocol)()
    if isinstance(field, EmailField):
        return lambda x: faker.email()
    if isinstance(field, CommaSeparatedIntegerField):
        return lambda x: provider.comma_sep_ints()
    if isinstance(field, BinaryField):
        return lambda x: provider.binary()
    if isinstance(field, ImageField):
        return lambda x: provider.file_name()
    if isinstance(field, FilePathField):
        return lambda x: provider.file_name()
    if isinstance(field, FileField):
        return lambda x: provider.file_name()
    if isinstance(field, CharField):
        if field.choices:
            return lambda x: random.choice(field.choices)[0]
        # NOTE(review): assumes max_length is set; a CharField with
        # max_length=None would raise here — confirm against callers.
        return lambda x: faker.text(field.max_length) if field.max_length >= 5 else faker.word()
    if isinstance(field, TextField):
        return lambda x: faker.text()
    if isinstance(field, DateTimeField):
        # format with timezone if it is active
        return lambda x: _timezone_format(faker.date_time())
    if isinstance(field, DateField):
        return lambda x: faker.date()
    if isinstance(field, TimeField):
        return lambda x: faker.time()
    raise AttributeError(field)
def guess_field_formatters(self, faker):
    """
    Gets the formatter methods for each field using the guessers
    or related object fields

    :param faker: Faker factory object
    :return: dict mapping field name -> formatter (callable or constant).
    """
    formatters = {}
    name_guesser = NameGuesser(faker)
    field_type_guesser = FieldTypeGuesser(faker)
    for field in self.model._meta.fields:
        field_name = field.name
        # NOTE(review): truthiness check means falsy defaults (0, '',
        # False) are treated as "no default" and get faked instead.
        if field.get_default():
            formatters[field_name] = field.get_default()
            continue
        # Relations are filled from already-inserted related rows.
        if isinstance(field, (ForeignKey, ManyToManyField, OneToOneField)):
            formatters[field_name] = self.build_relation(field, field.related_model)
            continue
        # Auto PKs are assigned by the database.
        if isinstance(field, AutoField):
            continue
        # Name-based guessing is skipped for choice fields so the choice
        # list wins; the type guesser handles choices itself.
        if not field.choices:
            formatter = name_guesser.guess_format(field_name)
            if formatter:
                formatters[field_name] = formatter
                continue
        formatter = field_type_guesser.guess_format(field)
        if formatter:
            formatters[field_name] = formatter
            continue
    return formatters
def execute(self, using, inserted_entities):
    """
    Execute the stages entities to insert

    :param using: Django database alias to insert into.
    :param inserted_entities: dict of model -> list of already-inserted
        PKs, used by relation formatters to pick related rows.
    :return: primary key of the newly created object.
    """
    def format_field(format, inserted_entities):
        # A formatter may be a callable (lazy fake value) or a constant.
        if callable(format):
            return format(inserted_entities)
        return format

    def turn_off_auto_add(model):
        # Disable auto_now/auto_now_add so faked timestamps are not
        # overwritten by Django at save time.
        for field in model._meta.fields:
            if getattr(field, 'auto_now', False):
                field.auto_now = False
            if getattr(field, 'auto_now_add', False):
                field.auto_now_add = False

    manager = self.model.objects.db_manager(using=using)
    turn_off_auto_add(manager.model)
    faker_data = {
        field: format_field(field_format, inserted_entities)
        for field, field_format in self.field_formatters.items()
    }

    # max length restriction check
    for data_field in faker_data:
        field = self.model._meta.get_field(data_field)
        if field.max_length and isinstance(faker_data[data_field], str):
            faker_data[data_field] = faker_data[data_field][:field.max_length]

    obj = manager.create(**faker_data)
    return obj.pk
def add_entity(self, model, number, customFieldFormatters=None):
    """
    Add an order for the generation of $number records for $entity.

    :param model: mixed A Django Model classname,
        or a faker.orm.django.EntitySeeder instance
    :type model: Model
    :param number: int The number of entities to seed
    :type number: integer
    :param customFieldFormatters: optional dict with field as key and
        callable as value
    :type customFieldFormatters: dict or None
    """
    # Wrap a bare model class in a ModelSeeder and derive its formatters.
    if not isinstance(model, ModelSeeder):
        model = ModelSeeder(model)
        model.field_formatters = model.guess_field_formatters(self.faker)
    # Caller-supplied formatters override the guessed ones.
    if customFieldFormatters:
        model.field_formatters.update(customFieldFormatters)
    klass = model.model
    self.entities[klass] = model
    self.quantities[klass] = number
    # Preserve insertion order so dependencies can be seeded first.
    self.orders.append(klass)
def execute(self, using=None):
    """
    Populate the database using all the Entity classes previously added.

    :param using A Django database connection name
    :rtype: A list of the inserted PKs
    """
    # Fall back to the first available connection when none is given.
    if not using:
        using = self.get_connection()
    inserted_entities = {}
    for klass in self.orders:
        inserted_entities.setdefault(klass, [])
        seeder = self.entities[klass]
        for _ in range(self.quantities[klass]):
            pk = seeder.execute(using, inserted_entities)
            inserted_entities[klass].append(pk)
    return inserted_entities
def get_connection(self):
    """
    use the first connection available
    :rtype: Connection
    """
    # No registered entities means there is nothing to take a
    # connection from.
    if not self.entities:
        message = 'No classed found. Did you add entities to the Seeder?'
        raise SeederException(message)
    first_model = next(iter(self.entities))
    return first_model.objects._db
def _read(self, mux, gain, data_rate, mode):
    """Perform an ADC read with the provided mux, gain, data_rate, and mode
    values.  Returns the signed integer result of the read.
    """
    config = ADS1x15_CONFIG_OS_SINGLE  # Go out of power-down mode for conversion.
    # Specify mux value.
    config |= (mux & 0x07) << ADS1x15_CONFIG_MUX_OFFSET
    # Validate the passed in gain and then set it in the config.
    if gain not in ADS1x15_CONFIG_GAIN:
        raise ValueError('Gain must be one of: 2/3, 1, 2, 4, 8, 16')
    config |= ADS1x15_CONFIG_GAIN[gain]
    # Set the mode (continuous or single shot).
    config |= mode
    # Get the default data rate if none is specified (default differs between
    # ADS1015 and ADS1115).
    if data_rate is None:
        data_rate = self._data_rate_default()
    # Set the data rate (this is controlled by the subclass as it differs
    # between ADS1015 and ADS1115).
    config |= self._data_rate_config(data_rate)
    config |= ADS1x15_CONFIG_COMP_QUE_DISABLE  # Disble comparator mode.
    # Send the config value to start the ADC conversion.
    # Explicitly break the 16-bit value down to a big endian pair of bytes.
    self._device.writeList(ADS1x15_POINTER_CONFIG,
                           [(config >> 8) & 0xFF, config & 0xFF])
    # Wait for the ADC sample to finish based on the sample rate plus a
    # small offset to be sure (0.1 millisecond).
    time.sleep(1.0/data_rate+0.0001)
    # Retrieve the result.
    result = self._device.readList(ADS1x15_POINTER_CONVERSION, 2)
    # result[0] is the MSB on the wire; the subclass combines the pair
    # (passed low-byte first) into a signed value.
    return self._conversion_value(result[1], result[0])
def _read_comparator(self, mux, gain, data_rate, mode, high_threshold,
                     low_threshold, active_low, traditional, latching,
                     num_readings):
    """Perform an ADC read with the provided mux, gain, data_rate, and mode
    values and with the comparator enabled as specified.  Returns the signed
    integer result of the read.
    """
    assert num_readings == 1 or num_readings == 2 or num_readings == 4, 'Num readings must be 1, 2, or 4!'
    # Set high and low threshold register values.
    self._device.writeList(ADS1x15_POINTER_HIGH_THRESHOLD,
                           [(high_threshold >> 8) & 0xFF, high_threshold & 0xFF])
    self._device.writeList(ADS1x15_POINTER_LOW_THRESHOLD,
                           [(low_threshold >> 8) & 0xFF, low_threshold & 0xFF])
    # Now build up the appropriate config register value.
    config = ADS1x15_CONFIG_OS_SINGLE  # Go out of power-down mode for conversion.
    # Specify mux value.
    config |= (mux & 0x07) << ADS1x15_CONFIG_MUX_OFFSET
    # Validate the passed in gain and then set it in the config.
    if gain not in ADS1x15_CONFIG_GAIN:
        raise ValueError('Gain must be one of: 2/3, 1, 2, 4, 8, 16')
    config |= ADS1x15_CONFIG_GAIN[gain]
    # Set the mode (continuous or single shot).
    config |= mode
    # Get the default data rate if none is specified (default differs between
    # ADS1015 and ADS1115).
    if data_rate is None:
        data_rate = self._data_rate_default()
    # Set the data rate (this is controlled by the subclass as it differs
    # between ADS1015 and ADS1115).
    config |= self._data_rate_config(data_rate)
    # Enable window mode if required.
    if not traditional:
        config |= ADS1x15_CONFIG_COMP_WINDOW
    # Enable active high mode if required.
    if not active_low:
        config |= ADS1x15_CONFIG_COMP_ACTIVE_HIGH
    # Enable latching mode if required.
    if latching:
        config |= ADS1x15_CONFIG_COMP_LATCHING
    # Set number of comparator hits before alerting.
    config |= ADS1x15_CONFIG_COMP_QUE[num_readings]
    # Send the config value to start the ADC conversion.
    # Explicitly break the 16-bit value down to a big endian pair of bytes.
    self._device.writeList(ADS1x15_POINTER_CONFIG,
                           [(config >> 8) & 0xFF, config & 0xFF])
    # Wait for the ADC sample to finish based on the sample rate plus a
    # small offset to be sure (0.1 millisecond).
    time.sleep(1.0/data_rate+0.0001)
    # Retrieve the result.
    result = self._device.readList(ADS1x15_POINTER_CONVERSION, 2)
    # result[0] is the MSB on the wire; the subclass combines the pair
    # (passed low-byte first) into a signed value.
    return self._conversion_value(result[1], result[0])
def read_adc(self, channel, gain=1, data_rate=None): """Read a single ADC channel and return the ADC value as a signed integer result. Channel must be a value within 0-3. """ assert 0 <= channel <= 3, 'Channel must be a value within 0-3!' # Perform a single shot read and set the mux value to the channel plus # the highest bit (bit 3) set. return self._read(channel + 0x04, gain, data_rate, ADS1x15_CONFIG_MODE_SINGLE)
def read_adc_difference(self, differential, gain=1, data_rate=None): """Read the difference between two ADC channels and return the ADC value as a signed integer result. Differential must be one of: - 0 = Channel 0 minus channel 1 - 1 = Channel 0 minus channel 3 - 2 = Channel 1 minus channel 3 - 3 = Channel 2 minus channel 3 """ assert 0 <= differential <= 3, 'Differential must be a value within 0-3!' # Perform a single shot read using the provided differential value # as the mux value (which will enable differential mode). return self._read(differential, gain, data_rate, ADS1x15_CONFIG_MODE_SINGLE)
def start_adc(self, channel, gain=1, data_rate=None): """Start continuous ADC conversions on the specified channel (0-3). Will return an initial conversion result, then call the get_last_result() function to read the most recent conversion result. Call stop_adc() to stop conversions. """ assert 0 <= channel <= 3, 'Channel must be a value within 0-3!' # Start continuous reads and set the mux value to the channel plus # the highest bit (bit 3) set. return self._read(channel + 0x04, gain, data_rate, ADS1x15_CONFIG_MODE_CONTINUOUS)
def start_adc_difference(self, differential, gain=1, data_rate=None): """Start continuous ADC conversions between two ADC channels. Differential must be one of: - 0 = Channel 0 minus channel 1 - 1 = Channel 0 minus channel 3 - 2 = Channel 1 minus channel 3 - 3 = Channel 2 minus channel 3 Will return an initial conversion result, then call the get_last_result() function continuously to read the most recent conversion result. Call stop_adc() to stop conversions. """ assert 0 <= differential <= 3, 'Differential must be a value within 0-3!' # Perform a single shot read using the provided differential value # as the mux value (which will enable differential mode). return self._read(differential, gain, data_rate, ADS1x15_CONFIG_MODE_CONTINUOUS)
def start_adc_comparator(self, channel, high_threshold, low_threshold, gain=1, data_rate=None, active_low=True, traditional=True, latching=False, num_readings=1): """Start continuous ADC conversions on the specified channel (0-3) with the comparator enabled. When enabled the comparator to will check if the ADC value is within the high_threshold & low_threshold value (both should be signed 16-bit integers) and trigger the ALERT pin. The behavior can be controlled by the following parameters: - active_low: Boolean that indicates if ALERT is pulled low or high when active/triggered. Default is true, active low. - traditional: Boolean that indicates if the comparator is in traditional mode where it fires when the value is within the threshold, or in window mode where it fires when the value is _outside_ the threshold range. Default is true, traditional mode. - latching: Boolean that indicates if the alert should be held until get_last_result() is called to read the value and clear the alert. Default is false, non-latching. - num_readings: The number of readings that match the comparator before triggering the alert. Can be 1, 2, or 4. Default is 1. Will return an initial conversion result, then call the get_last_result() function continuously to read the most recent conversion result. Call stop_adc() to stop conversions. """ assert 0 <= channel <= 3, 'Channel must be a value within 0-3!' # Start continuous reads with comparator and set the mux value to the # channel plus the highest bit (bit 3) set. return self._read_comparator(channel + 0x04, gain, data_rate, ADS1x15_CONFIG_MODE_CONTINUOUS, high_threshold, low_threshold, active_low, traditional, latching, num_readings)
def start_adc_difference_comparator(self, differential, high_threshold, low_threshold, gain=1, data_rate=None, active_low=True, traditional=True, latching=False, num_readings=1): """Start continuous ADC conversions between two channels with the comparator enabled. See start_adc_difference for valid differential parameter values and their meaning. When enabled the comparator to will check if the ADC value is within the high_threshold & low_threshold value (both should be signed 16-bit integers) and trigger the ALERT pin. The behavior can be controlled by the following parameters: - active_low: Boolean that indicates if ALERT is pulled low or high when active/triggered. Default is true, active low. - traditional: Boolean that indicates if the comparator is in traditional mode where it fires when the value is within the threshold, or in window mode where it fires when the value is _outside_ the threshold range. Default is true, traditional mode. - latching: Boolean that indicates if the alert should be held until get_last_result() is called to read the value and clear the alert. Default is false, non-latching. - num_readings: The number of readings that match the comparator before triggering the alert. Can be 1, 2, or 4. Default is 1. Will return an initial conversion result, then call the get_last_result() function continuously to read the most recent conversion result. Call stop_adc() to stop conversions. """ assert 0 <= differential <= 3, 'Differential must be a value within 0-3!' # Start continuous reads with comparator and set the mux value to the # channel plus the highest bit (bit 3) set. return self._read_comparator(differential, gain, data_rate, ADS1x15_CONFIG_MODE_CONTINUOUS, high_threshold, low_threshold, active_low, traditional, latching, num_readings)
def get_last_result(self): """Read the last conversion result when in continuous conversion mode. Will return a signed integer value. """ # Retrieve the conversion register value, convert to a signed int, and # return it. result = self._device.readList(ADS1x15_POINTER_CONVERSION, 2) return self._conversion_value(result[1], result[0])
def remove_exited_dusty_containers(): """Removed all dusty containers with 'Exited' in their status""" client = get_docker_client() exited_containers = get_exited_dusty_containers() removed_containers = [] for container in exited_containers: log_to_client("Removing container {}".format(container['Names'][0])) try: client.remove_container(container['Id'], v=True) removed_containers.append(container) except Exception as e: log_to_client(e.message or str(e)) return removed_containers
def remove_images(): """Removes all dangling images as well as all images referenced in a dusty spec; forceful removal is not used""" client = get_docker_client() removed = _remove_dangling_images() dusty_images = get_dusty_images() all_images = client.images(all=True) for image in all_images: if set(image['RepoTags']).intersection(dusty_images): try: client.remove_image(image['Id']) except Exception as e: logging.info("Couldn't remove image {}".format(image['RepoTags'])) else: log_to_client("Removed Image {}".format(image['RepoTags'])) removed.append(image) return removed
def update_nginx_from_config(nginx_config): """Write the given config to disk as a Dusty sub-config in the Nginx includes directory. Then, either start nginx or tell it to reload its config to pick up what we've just written.""" logging.info('Updating nginx with new Dusty config') temp_dir = tempfile.mkdtemp() os.mkdir(os.path.join(temp_dir, 'html')) _write_nginx_config(constants.NGINX_BASE_CONFIG, os.path.join(temp_dir, constants.NGINX_PRIMARY_CONFIG_NAME)) _write_nginx_config(nginx_config['http'], os.path.join(temp_dir, constants.NGINX_HTTP_CONFIG_NAME)) _write_nginx_config(nginx_config['stream'], os.path.join(temp_dir, constants.NGINX_STREAM_CONFIG_NAME)) _write_nginx_config(constants.NGINX_502_PAGE_HTML, os.path.join(temp_dir, 'html', constants.NGINX_502_PAGE_NAME)) sync_local_path_to_vm(temp_dir, constants.NGINX_CONFIG_DIR_IN_VM)
def _compose_restart(services): """Well, this is annoying. Compose 1.2 shipped with the restart functionality fucking broken, so we can't set a faster timeout than 10 seconds (which is way too long) using Compose. We are therefore resigned to trying to hack this together ourselves. Lame. Relevant fix which will make it into the next release: https://github.com/docker/compose/pull/1318""" def _restart_container(client, container): log_to_client('Restarting {}'.format(get_canonical_container_name(container))) client.restart(container['Id'], timeout=1) assembled_specs = get_assembled_specs() if services == []: services = [spec.name for spec in assembled_specs.get_apps_and_services()] logging.info('Restarting service containers from list: {}'.format(services)) client = get_docker_client() for service in services: container = get_container_for_app_or_service(service, include_exited=True) if container is None: log_to_client('No container found for {}'.format(service)) continue stopped_linked_containers = _check_stopped_linked_containers(container, assembled_specs) if stopped_linked_containers: log_to_client('No running containers {0}, which are linked to by {1}. Cannot restart {1}'.format( stopped_linked_containers, service)) else: _restart_container(client, container)
def update_running_containers_from_spec(compose_config, recreate_containers=True): """Takes in a Compose spec from the Dusty Compose compiler, writes it to the Compose spec folder so Compose can pick it up, then does everything needed to make sure the Docker VM is up and running containers with the updated config.""" write_composefile(compose_config, constants.COMPOSEFILE_PATH) compose_up(constants.COMPOSEFILE_PATH, 'dusty', recreate_containers=recreate_containers)
def resolve(cls, all_known_repos, name): """We require the list of all remote repo paths to be passed in to this because otherwise we would need to import the spec assembler in this module, which would give us circular imports.""" match = None for repo in all_known_repos: if repo.remote_path == name: # user passed in a full name return repo if name == repo.short_name: if match is None: match = repo else: raise RuntimeError('Short repo name {} is ambiguous. It matches both {} and {}'.format(name, match.remote_path, repo.remote_path)) if match is None: raise RuntimeError('Short repo name {} does not match any known repos'.format(name)) return match
def ensure_local_repo(self): """Given a Dusty repo object, clone the remote into Dusty's local repos directory if it does not already exist.""" if os.path.exists(self.managed_path): logging.debug('Repo {} already exists'.format(self.remote_path)) return logging.info('Initiating clone of local repo {}'.format(self.remote_path)) repo_path_parent = parent_dir(self.managed_path) if not os.path.exists(repo_path_parent): os.makedirs(repo_path_parent) with git_error_handling(): git.Repo.clone_from(self.assemble_remote_path(), self.managed_path)
def update_local_repo(self, force=False): """Given a remote path (e.g. github.com/gamechanger/gclib), pull the latest commits from master to bring the local copy up to date.""" self.ensure_local_repo() logging.info('Updating local repo {}'.format(self.remote_path)) managed_repo = git.Repo(self.managed_path) with git_error_handling(): managed_repo.remote().pull('master') log_to_client('Updated managed copy of {}'.format(self.remote_path)) if not self.local_is_up_to_date(): if force: with git_error_handling(): managed_repo.git.reset('--hard', 'origin/master') else: log_to_client('WARNING: couldn\'t update {} because of local conflicts. ' 'A container may have modified files in the repos\'s directory. ' 'Your code generally shouldn\'t be manipulating the contents of your repo folder - ' 'please fix this and run `dusty up`'.format(self.managed_path))
def update_local_repo_async(self, task_queue, force=False): """Local repo updating suitable for asynchronous, parallel execution. We still need to run `ensure_local_repo` synchronously because it does a bunch of non-threadsafe filesystem operations.""" self.ensure_local_repo() task_queue.enqueue_task(self.update_local_repo, force=force)
def nfs_path_exists(path): """ The normal HFS file system that your mac uses does not work the same way as the NFS file system. In HFS, capitalization does not matter, but in NFS it does. This function checks if a folder exists in HFS file system using NFS semantics (case sensitive) """ split_path = path.lstrip('/').split('/') recreated_path = '/' for path_element in split_path: if path_element not in os.listdir(recreated_path): return False recreated_path = "{}{}/".format(recreated_path, path_element) return True
def update_managed_repos(force=False): """For any active, managed repos, update the Dusty-managed copy to bring it up to date with the latest master.""" log_to_client('Pulling latest updates for all active managed repos:') update_specs_repo_and_known_hosts() repos_to_update = get_all_repos(active_only=True, include_specs_repo=False) with parallel_task_queue() as queue: log_to_client('Updating managed repos') for repo in repos_to_update: if not repo.is_overridden: repo.update_local_repo_async(queue, force=force)
def prep_for_start_local_env(pull_repos): """Daemon-side command to ensure we're running the latest versions of any managed repos, including the specs repo, before we do anything else in the up flow.""" if pull_repos: update_managed_repos(force=True) assembled_spec = spec_assembler.get_assembled_specs() if not assembled_spec[constants.CONFIG_BUNDLES_KEY]: raise RuntimeError('No bundles are activated. Use `dusty bundles` to activate bundles before running `dusty up`.') virtualbox.initialize_docker_vm()
def log_in_to_required_registries(): """Client-side command which runs the user through a login flow (via the Docker command-line client so auth is persisted) for any registries of active images which require a login. This is based on the `image_requires_login` key in the individual specs.""" registries = set() specs = spec_assembler.get_assembled_specs() for spec in specs.get_apps_and_services(): if 'image' in spec and spec.get('image_requires_login'): registries.add(registry_from_image(spec['image'])) unauthed_registries = registries.difference(get_authed_registries()) for registry in unauthed_registries: log_in_to_registry(registry)
def start_local_env(recreate_containers):
    """Daemon-side `dusty up` flow: compile the assembled specs into port,
    nginx, and docker-compose configs, then hand them to the systems
    (hosts file, NFS, nginx, docker-compose) that launch the local
    environment.

    Raises RuntimeError if any asset marked required is not set.
    """
    assembled_spec = spec_assembler.get_assembled_specs()
    required_absent_assets = virtualbox.required_absent_assets(assembled_spec)
    if required_absent_assets:
        raise RuntimeError('Assets {} are specified as required but are not set. Set them with `dusty assets set`'.format(required_absent_assets))
    docker_ip = virtualbox.get_docker_vm_ip()
    # Stop will fail if we've never written a Composefile before
    if os.path.exists(constants.COMPOSEFILE_PATH):
        try:
            stop_apps_or_services(rm_containers=recreate_containers)
        except CalledProcessError as e:
            # Best-effort: a failed stop shouldn't abort the up flow.
            log_to_client("WARNING: docker-compose stop failed")
            log_to_client(str(e))
    daemon_warnings.clear_namespace('disk')
    df_info = virtualbox.get_docker_vm_disk_info(as_dict=True)
    # Free space reported with an M or K suffix means the VM is nearly full.
    if 'M' in df_info['free'] or 'K' in df_info['free']:
        warning_msg = 'VM is low on disk. Available disk: {}'.format(df_info['free'])
        daemon_warnings.warn('disk', warning_msg)
        log_to_client(warning_msg)
    log_to_client("Compiling together the assembled specs")
    # NOTE(review): active_repos appears unused below -- confirm before removing.
    active_repos = spec_assembler.get_all_repos(active_only=True, include_specs_repo=False)
    log_to_client("Compiling the port specs")
    port_spec = port_spec_compiler.get_port_spec_document(assembled_spec, docker_ip)
    log_to_client("Compiling the nginx config")
    docker_bridge_ip = virtualbox.get_docker_bridge_ip()
    nginx_config = nginx_compiler.get_nginx_configuration_spec(port_spec, docker_bridge_ip)
    log_to_client("Creating setup and script bash files")
    make_up_command_files(assembled_spec, port_spec)
    log_to_client("Compiling docker-compose config")
    compose_config = compose_compiler.get_compose_dict(assembled_spec, port_spec)
    log_to_client("Saving port forwarding to hosts file")
    hosts.update_hosts_file_from_port_spec(port_spec)
    log_to_client("Configuring NFS")
    nfs.configure_nfs()
    log_to_client("Saving updated nginx config to the VM")
    nginx.update_nginx_from_config(nginx_config)
    log_to_client("Saving Docker Compose config and starting all containers")
    compose.update_running_containers_from_spec(compose_config, recreate_containers=recreate_containers)
    log_to_client("Your local environment is now started!")
def stop_apps_or_services(app_or_service_names=None, rm_containers=False): """Stop any currently running Docker containers associated with Dusty, or associated with the provided apps_or_services. Does not remove the service's containers.""" if app_or_service_names: log_to_client("Stopping the following apps or services: {}".format(', '.join(app_or_service_names))) else: log_to_client("Stopping all running containers associated with Dusty") compose.stop_running_services(app_or_service_names) if rm_containers: compose.rm_containers(app_or_service_names)
def restart_apps_or_services(app_or_service_names=None): """Restart any containers associated with Dusty, or associated with the provided app_or_service_names.""" if app_or_service_names: log_to_client("Restarting the following apps or services: {}".format(', '.join(app_or_service_names))) else: log_to_client("Restarting all active containers associated with Dusty") if app_or_service_names: specs = spec_assembler.get_assembled_specs() specs_list = [specs['apps'][app_name] for app_name in app_or_service_names if app_name in specs['apps']] repos = set() for spec in specs_list: if spec['repo']: repos = repos.union(spec_assembler.get_same_container_repos_from_spec(spec)) nfs.update_nfs_with_repos(repos) else: nfs.update_nfs_with_repos(spec_assembler.get_all_repos(active_only=True, include_specs_repo=False)) compose.restart_running_services(app_or_service_names)
def case_insensitive_rename(src, dst): """A hack to allow us to rename paths in a case-insensitive filesystem like HFS.""" temp_dir = tempfile.mkdtemp() shutil.rmtree(temp_dir) shutil.move(src, temp_dir) shutil.move(temp_dir, dst)
def _compose_dict_for_nginx(port_specs): """Return a dictionary containing the Compose spec required to run Dusty's nginx container used for host forwarding.""" spec = {'image': constants.NGINX_IMAGE, 'volumes': ['{}:{}'.format(constants.NGINX_CONFIG_DIR_IN_VM, constants.NGINX_CONFIG_DIR_IN_CONTAINER)], 'command': 'nginx -g "daemon off;" -c /etc/nginx/conf.d/nginx.primary', 'container_name': 'dusty_{}_1'.format(constants.DUSTY_NGINX_NAME)} all_host_ports = set([nginx_spec['host_port'] for nginx_spec in port_specs['nginx']]) if all_host_ports: spec['ports'] = [] for port in all_host_ports: spec['ports'].append('{0}:{0}'.format(port)) return {constants.DUSTY_NGINX_NAME: spec}
def get_compose_dict(assembled_specs, port_specs): """ This function returns a dictionary representation of a docker-compose.yml file, based on assembled_specs from the spec_assembler, and port_specs from the port_spec compiler """ compose_dict = _compose_dict_for_nginx(port_specs) for app_name in assembled_specs['apps'].keys(): compose_dict[app_name] = _composed_app_dict(app_name, assembled_specs, port_specs) for service_spec in assembled_specs['services'].values(): compose_dict[service_spec.name] = _composed_service_dict(service_spec) return compose_dict
def _conditional_links(assembled_specs, app_name): """ Given the assembled specs and app_name, this function will return all apps and services specified in 'conditional_links' if they are specified in 'apps' or 'services' in assembled_specs. That means that some other part of the system has declared them as necessary, so they should be linked to this app """ link_to_apps = [] potential_links = assembled_specs['apps'][app_name]['conditional_links'] for potential_link in potential_links['apps']: if potential_link in assembled_specs['apps']: link_to_apps.append(potential_link) for potential_link in potential_links['services']: if potential_link in assembled_specs['services']: link_to_apps.append(potential_link) return link_to_apps
def _get_build_path(app_spec): """ Given a spec for an app, returns the value of the `build` field for docker-compose. If the path is relative, it is expanded and added to the path of the app's repo. """ if os.path.isabs(app_spec['build']): return app_spec['build'] return os.path.join(Repo(app_spec['repo']).local_path, app_spec['build'])
def _composed_app_dict(app_name, assembled_specs, port_specs): """ This function returns a dictionary of the docker-compose.yml specifications for one app """ logging.info("Compose Compiler: Compiling dict for app {}".format(app_name)) app_spec = assembled_specs['apps'][app_name] compose_dict = app_spec["compose"] _apply_env_overrides(env_overrides_for_app_or_service(app_name), compose_dict) if 'image' in app_spec and 'build' in app_spec: raise RuntimeError("image and build are both specified in the spec for {}".format(app_name)) elif 'image' in app_spec: logging.info compose_dict['image'] = app_spec['image'] elif 'build' in app_spec: compose_dict['build'] = _get_build_path(app_spec) else: raise RuntimeError("Neither image nor build was specified in the spec for {}".format(app_name)) compose_dict['entrypoint'] = [] compose_dict['command'] = _compile_docker_command(app_spec) compose_dict['container_name'] = "dusty_{}_1".format(app_name) logging.info("Compose Compiler: compiled command {}".format(compose_dict['command'])) compose_dict['links'] = _links_for_app(app_spec, assembled_specs) logging.info("Compose Compiler: links {}".format(compose_dict['links'])) compose_dict['volumes'] = compose_dict['volumes'] + _get_compose_volumes(app_name, assembled_specs) logging.info("Compose Compiler: volumes {}".format(compose_dict['volumes'])) port_list = _get_ports_list(app_name, port_specs) if port_list: compose_dict['ports'] = port_list logging.info("Compose Compiler: ports {}".format(port_list)) compose_dict['user'] = 'root' return compose_dict
def _composed_service_dict(service_spec): """This function returns a dictionary of the docker_compose specifications for one service. Currently, this is just the Dusty service spec with an additional volume mount to support Dusty's cp functionality.""" compose_dict = service_spec.plain_dict() _apply_env_overrides(env_overrides_for_app_or_service(service_spec.name), compose_dict) compose_dict.setdefault('volumes', []).append(_get_cp_volume_mount(service_spec.name)) compose_dict['container_name'] = "dusty_{}_1".format(service_spec.name) return compose_dict
def _get_ports_list(app_name, port_specs): """ Returns a list of formatted port mappings for an app """ if app_name not in port_specs['docker_compose']: return [] return ["{}:{}".format(port_spec['mapped_host_port'], port_spec['in_container_port']) for port_spec in port_specs['docker_compose'][app_name]]
def _get_compose_volumes(app_name, assembled_specs): """ This returns formatted volume specifications for a docker-compose app. We mount the app as well as any libs it needs so that local code is used in our container, instead of whatever code was in the docker image. Additionally, we create a volume for the /cp directory used by Dusty to facilitate easy file transfers using `dusty cp`.""" volumes = [] volumes.append(_get_cp_volume_mount(app_name)) volumes += get_app_volume_mounts(app_name, assembled_specs) return volumes
def validate_specs_from_path(specs_path): """ Validates Dusty specs at the given path. The following checks are performed: -That the given path exists -That there are bundles in the given path -That the fields in the specs match those allowed in our schemas -That references to apps, libs, and services point at defined specs -That there are no cycles in app and lib dependencies """ # Validation of fields with schemer is now down implicitly through get_specs_from_path # We are dealing with Dusty_Specs class in this file log_to_client("Validating specs at path {}".format(specs_path)) if not os.path.exists(specs_path): raise RuntimeError("Specs path not found: {}".format(specs_path)) specs = get_specs_from_path(specs_path) _check_bare_minimum(specs) _validate_spec_names(specs) _validate_cycle_free(specs) log_to_client("Validation Complete!")
def _env_vars_from_file(filename): """ This code is copied from Docker Compose, so that we're exactly compatible with their `env_file` option """ def split_env(env): if '=' in env: return env.split('=', 1) else: return env, None env = {} for line in open(filename, 'r'): line = line.strip() if line and not line.startswith('#'): k, v = split_env(line) env[k] = v return env
def _get_dependent(dependent_type, name, specs, root_spec_type): """ Returns everything of type <dependent_type> that <name>, of type <root_spec_type> depends on Names only are returned in a set """ spec = specs[root_spec_type].get(name) if spec is None: raise RuntimeError("{} {} was referenced but not found".format(root_spec_type, name)) dependents = spec['depends'][dependent_type] all_dependents = set(dependents) for dep in dependents: all_dependents |= _get_dependent(dependent_type, dep, specs, dependent_type) return all_dependents
def _get_referenced_apps(specs): """ Returns a set of all apps that are required to run any bundle in specs[constants.CONFIG_BUNDLES_KEY] """ activated_bundles = specs[constants.CONFIG_BUNDLES_KEY].keys() all_active_apps = set() for active_bundle in activated_bundles: bundle_spec = specs[constants.CONFIG_BUNDLES_KEY].get(active_bundle) for app_name in bundle_spec['apps']: all_active_apps.add(app_name) all_active_apps |= _get_dependent('apps', app_name, specs, 'apps') return all_active_apps
def _expand_libs_in_apps(specs): """ Expands specs.apps.depends.libs to include any indirectly required libs """ for app_name, app_spec in specs['apps'].iteritems(): if 'depends' in app_spec and 'libs' in app_spec['depends']: app_spec['depends']['libs'] = _get_dependent('libs', app_name, specs, 'apps')
def _expand_libs_in_libs(specs): """ Expands specs.libs.depends.libs to include any indirectly required libs """ for lib_name, lib_spec in specs['libs'].iteritems(): if 'depends' in lib_spec and 'libs' in lib_spec['depends']: lib_spec['depends']['libs'] = _get_dependent('libs', lib_name, specs, 'libs')
def _get_referenced_libs(specs): """ Returns all libs that are referenced in specs.apps.depends.libs """ active_libs = set() for app_spec in specs['apps'].values(): for lib in app_spec['depends']['libs']: active_libs.add(lib) return active_libs
def _get_referenced_services(specs): """ Returns all services that are referenced in specs.apps.depends.services, or in specs.bundles.services """ active_services = set() for app_spec in specs['apps'].values(): for service in app_spec['depends']['services']: active_services.add(service) for bundle_spec in specs['bundles'].values(): for service in bundle_spec['services']: active_services.add(service) return active_services
def _add_active_assets(specs): """ This function adds an assets key to the specs, which is filled in with a dictionary of all assets defined by apps and libs in the specs """ specs['assets'] = {} for spec in specs.get_apps_and_libs(): for asset in spec['assets']: if not specs['assets'].get(asset['name']): specs['assets'][asset['name']] = {} specs['assets'][asset['name']]['required_by'] = set() specs['assets'][asset['name']]['used_by'] = set() specs['assets'][asset['name']]['used_by'].add(spec.name) if asset['required']: specs['assets'][asset['name']]['required_by'].add(spec.name)
def _get_expanded_active_specs(specs): """ This function removes any unnecessary bundles, apps, libs, and services that aren't needed by the activated_bundles. It also expands inside specs.apps.depends.libs all libs that are needed indirectly by each app """ _filter_active(constants.CONFIG_BUNDLES_KEY, specs) _filter_active('apps', specs) _expand_libs_in_apps(specs) _filter_active('libs', specs) _filter_active('services', specs) _add_active_assets(specs)
def get_repo_of_app_or_library(app_or_library_name): """ This function takes an app or library name and will return the corresponding repo for that app or library""" specs = get_specs() repo_name = specs.get_app_or_lib(app_or_library_name)['repo'] if not repo_name: return None return Repo(repo_name)
def get_same_container_repos_from_spec(app_or_library_spec): """Given the spec of an app or library, returns all repos that are guaranteed to live in the same container""" repos = set() app_or_lib_repo = get_repo_of_app_or_library(app_or_library_spec.name) if app_or_lib_repo is not None: repos.add(app_or_lib_repo) for dependent_name in app_or_library_spec['depends']['libs']: repos.add(get_repo_of_app_or_library(dependent_name)) return repos
def get_same_container_repos(app_or_library_name): """Given the name of an app or library, returns all repos that are guaranteed to live in the same container""" specs = get_expanded_libs_specs() spec = specs.get_app_or_lib(app_or_library_name) return get_same_container_repos_from_spec(spec)
def _dusty_hosts_config(hosts_specs): """Return a string of all host rules required to match the given spec. This string is wrapped in the Dusty hosts header and footer so it can be easily removed later.""" rules = ''.join(['{} {}\n'.format(spec['forwarded_ip'], spec['host_address']) for spec in hosts_specs]) return config_file.create_config_section(rules)
def update_hosts_file_from_port_spec(port_spec): """Given a port spec, update the hosts file specified at constants.HOST_PATH to contain the port mappings specified in the spec. Any existing Dusty configurations are replaced.""" logging.info('Updating hosts file to match port spec') hosts_specs = port_spec['hosts_file'] current_hosts = config_file.read(constants.HOSTS_PATH) cleared_hosts = config_file.remove_current_dusty_config(current_hosts) updated_hosts = cleared_hosts + _dusty_hosts_config(hosts_specs) config_file.write(constants.HOSTS_PATH, updated_hosts)
def _move_temp_binary_to_path(tmp_binary_path): """Moves the temporary binary to the location of the binary that's currently being run. Preserves owner, group, and permissions of original binary""" # pylint: disable=E1101 binary_path = _get_binary_location() if not binary_path.endswith(constants.DUSTY_BINARY_NAME): raise RuntimeError('Refusing to overwrite binary {}'.format(binary_path)) st = os.stat(binary_path) permissions = st.st_mode owner = st.st_uid group = st.st_gid shutil.move(tmp_binary_path, binary_path) os.chown(binary_path, owner, group) os.chmod(binary_path, permissions) return binary_path
def parallel_task_queue(pool_size=multiprocessing.cpu_count()): """Context manager for setting up a TaskQueue. Upon leaving the context manager, all tasks that were enqueued will be executed in parallel subject to `pool_size` concurrency constraints.""" task_queue = TaskQueue(pool_size) yield task_queue task_queue.execute()
def _nginx_location_spec(port_spec, bridge_ip): """This will output the nginx location config string for specific port spec """ location_string_spec = "\t \t location / { \n" for location_setting in ['proxy_http_version 1.1;', 'proxy_set_header Upgrade $http_upgrade;', 'proxy_set_header Connection "upgrade";', 'proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;', 'proxy_set_header Host $http_host;', _nginx_proxy_string(port_spec, bridge_ip)]: location_string_spec += "\t \t \t {} \n".format(location_setting) location_string_spec += "\t \t } \n" return location_string_spec
def _nginx_http_spec(port_spec, bridge_ip): """This will output the nginx HTTP config string for specific port spec """ server_string_spec = "\t server {\n" server_string_spec += "\t \t {}\n".format(_nginx_max_file_size_string()) server_string_spec += "\t \t {}\n".format(_nginx_listen_string(port_spec)) server_string_spec += "\t \t {}\n".format(_nginx_server_name_string(port_spec)) server_string_spec += _nginx_location_spec(port_spec, bridge_ip) server_string_spec += _custom_502_page() server_string_spec += "\t }\n" return server_string_spec